From ecfa5f794ed397b0bb884b79a64cf569b927de82 Mon Sep 17 00:00:00 2001 From: ISMAIL KABOUBI Date: Wed, 3 Jan 2024 19:51:36 +0100 Subject: [PATCH] feat(apis): adding kubernetes apis --- .../v1alpha1/zz_generated.deepcopy.go | 3986 +++++++++++++ .../v1alpha1/zz_generated.managed.go | 908 +++ .../v1alpha1/zz_generated.managedlist.go | 143 + .../v1alpha1/zz_generated_terraformed.go | 1278 +++++ .../v1alpha1/zz_groupversion_info.go | 36 + .../v1alpha1/zz_projectdatabase_types.go | 286 + .../zz_projectdatabasedatabase_types.go | 122 + .../zz_projectdatabaseintegration_types.go | 164 + .../zz_projectdatabaseiprestriction_types.go | 143 + .../zz_projectdatabasekafkaacl_types.go | 141 + ...ectdatabasekafkaschemaregistryacl_types.go | 141 + .../zz_projectdatabasekafkatopic_types.go | 158 + .../zz_projectdatabasem3dbnamespace_types.go | 193 + .../zz_projectdatabasem3dbuser_types.go | 134 + .../zz_projectdatabasemongodbuser_types.go | 134 + ..._projectdatabaseopensearchpattern_types.go | 129 + .../zz_projectdatabaseopensearchuser_types.go | 163 + .../zz_projectdatabasepostgresqluser_types.go | 134 + .../zz_projectdatabaseredisuser_types.go | 164 + .../v1alpha1/zz_projectdatabaseuser_types.go | 135 + apis/kube/v1alpha1/zz_generated.deepcopy.go | 2526 +++++++++ apis/kube/v1alpha1/zz_generated.managed.go | 248 + .../kube/v1alpha1/zz_generated.managedlist.go | 44 + .../kube/v1alpha1/zz_generated_terraformed.go | 354 ++ apis/kube/v1alpha1/zz_groupversion_info.go | 36 + apis/kube/v1alpha1/zz_projectkube_types.go | 391 ++ .../zz_projectkubeiprestrictions_types.go | 121 + .../v1alpha1/zz_projectkubenodepool_types.go | 316 ++ .../kube/v1alpha1/zz_projectkubeoidc_types.go | 168 + apis/lb/v1alpha1/zz_generated.deepcopy.go | 4990 +++++++++++++++++ apis/lb/v1alpha1/zz_generated.managed.go | 788 +++ apis/lb/v1alpha1/zz_generated.managedlist.go | 125 + apis/lb/v1alpha1/zz_generated_terraformed.go | 1110 ++++ apis/lb/v1alpha1/zz_groupversion_info.go | 36 + 
apis/lb/v1alpha1/zz_httpfarm_types.go | 215 + apis/lb/v1alpha1/zz_httpfarmserver_types.go | 184 + apis/lb/v1alpha1/zz_httpfrontend_types.go | 181 + apis/lb/v1alpha1/zz_httproute_types.go | 206 + apis/lb/v1alpha1/zz_httprouterule_types.go | 147 + apis/lb/v1alpha1/zz_iploadbalancing_types.go | 406 ++ apis/lb/v1alpha1/zz_refresh_types.go | 103 + apis/lb/v1alpha1/zz_tcpfarm_types.go | 215 + apis/lb/v1alpha1/zz_tcpfarmserver_types.go | 182 + apis/lb/v1alpha1/zz_tcpfrontend_types.go | 160 + apis/lb/v1alpha1/zz_tcproute_types.go | 196 + apis/lb/v1alpha1/zz_tcprouterule_types.go | 147 + apis/lb/v1alpha1/zz_vracknetwork_types.go | 155 + apis/logs/v1alpha1/zz_generated.deepcopy.go | 886 +++ apis/logs/v1alpha1/zz_generated.managed.go | 128 + .../logs/v1alpha1/zz_generated.managedlist.go | 26 + .../logs/v1alpha1/zz_generated_terraformed.go | 186 + apis/logs/v1alpha1/zz_groupversion_info.go | 36 + apis/logs/v1alpha1/zz_logscluster_types.go | 142 + apis/logs/v1alpha1/zz_logsinput_types.go | 300 + apis/zz_register.go | 2 + config/external_name.go | 4 + config/kube/config.go | 23 + config/provider.go | 3 + .../projectdatabase/zz_controller.go | 67 + .../projectdatabasedatabase/zz_controller.go | 67 + .../zz_controller.go | 67 + .../zz_controller.go | 67 + .../projectdatabasekafkaacl/zz_controller.go | 67 + .../zz_controller.go | 67 + .../zz_controller.go | 67 + .../zz_controller.go | 67 + .../projectdatabasem3dbuser/zz_controller.go | 67 + .../zz_controller.go | 67 + .../zz_controller.go | 67 + .../zz_controller.go | 67 + .../zz_controller.go | 67 + .../projectdatabaseredisuser/zz_controller.go | 67 + .../projectdatabaseuser/zz_controller.go | 67 + .../kube/projectkube/zz_controller.go | 67 + .../zz_controller.go | 67 + .../kube/projectkubenodepool/zz_controller.go | 67 + .../kube/projectkubeoidc/zz_controller.go | 67 + .../controller/lb/httpfarm/zz_controller.go | 67 + .../lb/httpfarmserver/zz_controller.go | 67 + .../lb/httpfrontend/zz_controller.go | 67 + 
.../controller/lb/httproute/zz_controller.go | 67 + .../lb/httprouterule/zz_controller.go | 67 + .../lb/iploadbalancing/zz_controller.go | 67 + .../controller/lb/refresh/zz_controller.go | 67 + .../controller/lb/tcpfarm/zz_controller.go | 67 + .../lb/tcpfarmserver/zz_controller.go | 67 + .../lb/tcpfrontend/zz_controller.go | 67 + .../controller/lb/tcproute/zz_controller.go | 67 + .../lb/tcprouterule/zz_controller.go | 67 + .../lb/vracknetwork/zz_controller.go | 67 + .../logs/logscluster/zz_controller.go | 67 + .../logs/logsinput/zz_controller.go | 67 + internal/controller/zz_setup.go | 8 + ...vh.edixos.io_projectdatabasedatabases.yaml | 337 ++ ...edixos.io_projectdatabaseintegrations.yaml | 387 ++ ...ixos.io_projectdatabaseiprestrictions.yaml | 359 ++ ...vh.edixos.io_projectdatabasekafkaacls.yaml | 360 ++ ...rojectdatabasekafkaschemaregistryacls.yaml | 360 ++ ....edixos.io_projectdatabasekafkatopics.yaml | 372 ++ ...ixos.io_projectdatabasem3dbnamespaces.yaml | 422 ++ ...vh.edixos.io_projectdatabasem3dbusers.yaml | 348 ++ ...edixos.io_projectdatabasemongodbusers.yaml | 357 ++ ....io_projectdatabaseopensearchpatterns.yaml | 343 ++ ...xos.io_projectdatabaseopensearchusers.yaml | 375 ++ ...xos.io_projectdatabasepostgresqlusers.yaml | 354 ++ ...h.edixos.io_projectdatabaseredisusers.yaml | 399 ++ ...abases.ovh.edixos.io_projectdatabases.yaml | 507 ++ ...es.ovh.edixos.io_projectdatabaseusers.yaml | 350 ++ ...h.edixos.io_projectkubeiprestrictions.yaml | 343 ++ ...be.ovh.edixos.io_projectkubenodepools.yaml | 547 ++ .../kube.ovh.edixos.io_projectkubeoidcs.yaml | 396 ++ .../crds/kube.ovh.edixos.io_projectkubes.yaml | 614 ++ package/crds/lb.ovh.edixos.io_httpfarms.yaml | 414 ++ .../lb.ovh.edixos.io_httpfarmservers.yaml | 392 ++ .../crds/lb.ovh.edixos.io_httpfrontends.yaml | 403 ++ .../crds/lb.ovh.edixos.io_httprouterules.yaml | 359 ++ package/crds/lb.ovh.edixos.io_httproutes.yaml | 426 ++ .../lb.ovh.edixos.io_iploadbalancings.yaml | 608 ++ 
package/crds/lb.ovh.edixos.io_refreshes.yaml | 321 ++ package/crds/lb.ovh.edixos.io_tcpfarms.yaml | 414 ++ .../crds/lb.ovh.edixos.io_tcpfarmservers.yaml | 389 ++ .../crds/lb.ovh.edixos.io_tcpfrontends.yaml | 379 ++ .../crds/lb.ovh.edixos.io_tcprouterules.yaml | 359 ++ package/crds/lb.ovh.edixos.io_tcproutes.yaml | 408 ++ .../crds/lb.ovh.edixos.io_vracknetworks.yaml | 386 ++ .../crds/logs.ovh.edixos.io_logsclusters.yaml | 367 ++ .../crds/logs.ovh.edixos.io_logsinputs.yaml | 515 ++ 127 files changed, 40134 insertions(+) create mode 100644 apis/databases/v1alpha1/zz_generated.deepcopy.go create mode 100644 apis/databases/v1alpha1/zz_generated.managed.go create mode 100644 apis/databases/v1alpha1/zz_generated.managedlist.go create mode 100755 apis/databases/v1alpha1/zz_generated_terraformed.go create mode 100755 apis/databases/v1alpha1/zz_groupversion_info.go create mode 100755 apis/databases/v1alpha1/zz_projectdatabase_types.go create mode 100755 apis/databases/v1alpha1/zz_projectdatabasedatabase_types.go create mode 100755 apis/databases/v1alpha1/zz_projectdatabaseintegration_types.go create mode 100755 apis/databases/v1alpha1/zz_projectdatabaseiprestriction_types.go create mode 100755 apis/databases/v1alpha1/zz_projectdatabasekafkaacl_types.go create mode 100755 apis/databases/v1alpha1/zz_projectdatabasekafkaschemaregistryacl_types.go create mode 100755 apis/databases/v1alpha1/zz_projectdatabasekafkatopic_types.go create mode 100755 apis/databases/v1alpha1/zz_projectdatabasem3dbnamespace_types.go create mode 100755 apis/databases/v1alpha1/zz_projectdatabasem3dbuser_types.go create mode 100755 apis/databases/v1alpha1/zz_projectdatabasemongodbuser_types.go create mode 100755 apis/databases/v1alpha1/zz_projectdatabaseopensearchpattern_types.go create mode 100755 apis/databases/v1alpha1/zz_projectdatabaseopensearchuser_types.go create mode 100755 apis/databases/v1alpha1/zz_projectdatabasepostgresqluser_types.go create mode 100755 
apis/databases/v1alpha1/zz_projectdatabaseredisuser_types.go create mode 100755 apis/databases/v1alpha1/zz_projectdatabaseuser_types.go create mode 100644 apis/kube/v1alpha1/zz_generated.deepcopy.go create mode 100644 apis/kube/v1alpha1/zz_generated.managed.go create mode 100644 apis/kube/v1alpha1/zz_generated.managedlist.go create mode 100755 apis/kube/v1alpha1/zz_generated_terraformed.go create mode 100755 apis/kube/v1alpha1/zz_groupversion_info.go create mode 100755 apis/kube/v1alpha1/zz_projectkube_types.go create mode 100755 apis/kube/v1alpha1/zz_projectkubeiprestrictions_types.go create mode 100755 apis/kube/v1alpha1/zz_projectkubenodepool_types.go create mode 100755 apis/kube/v1alpha1/zz_projectkubeoidc_types.go create mode 100644 apis/lb/v1alpha1/zz_generated.deepcopy.go create mode 100644 apis/lb/v1alpha1/zz_generated.managed.go create mode 100644 apis/lb/v1alpha1/zz_generated.managedlist.go create mode 100755 apis/lb/v1alpha1/zz_generated_terraformed.go create mode 100755 apis/lb/v1alpha1/zz_groupversion_info.go create mode 100755 apis/lb/v1alpha1/zz_httpfarm_types.go create mode 100755 apis/lb/v1alpha1/zz_httpfarmserver_types.go create mode 100755 apis/lb/v1alpha1/zz_httpfrontend_types.go create mode 100755 apis/lb/v1alpha1/zz_httproute_types.go create mode 100755 apis/lb/v1alpha1/zz_httprouterule_types.go create mode 100755 apis/lb/v1alpha1/zz_iploadbalancing_types.go create mode 100755 apis/lb/v1alpha1/zz_refresh_types.go create mode 100755 apis/lb/v1alpha1/zz_tcpfarm_types.go create mode 100755 apis/lb/v1alpha1/zz_tcpfarmserver_types.go create mode 100755 apis/lb/v1alpha1/zz_tcpfrontend_types.go create mode 100755 apis/lb/v1alpha1/zz_tcproute_types.go create mode 100755 apis/lb/v1alpha1/zz_tcprouterule_types.go create mode 100755 apis/lb/v1alpha1/zz_vracknetwork_types.go create mode 100644 apis/logs/v1alpha1/zz_generated.deepcopy.go create mode 100644 apis/logs/v1alpha1/zz_generated.managed.go create mode 100644 
apis/logs/v1alpha1/zz_generated.managedlist.go create mode 100755 apis/logs/v1alpha1/zz_generated_terraformed.go create mode 100755 apis/logs/v1alpha1/zz_groupversion_info.go create mode 100755 apis/logs/v1alpha1/zz_logscluster_types.go create mode 100755 apis/logs/v1alpha1/zz_logsinput_types.go create mode 100644 config/kube/config.go create mode 100755 internal/controller/databases/projectdatabase/zz_controller.go create mode 100755 internal/controller/databases/projectdatabasedatabase/zz_controller.go create mode 100755 internal/controller/databases/projectdatabaseintegration/zz_controller.go create mode 100755 internal/controller/databases/projectdatabaseiprestriction/zz_controller.go create mode 100755 internal/controller/databases/projectdatabasekafkaacl/zz_controller.go create mode 100755 internal/controller/databases/projectdatabasekafkaschemaregistryacl/zz_controller.go create mode 100755 internal/controller/databases/projectdatabasekafkatopic/zz_controller.go create mode 100755 internal/controller/databases/projectdatabasem3dbnamespace/zz_controller.go create mode 100755 internal/controller/databases/projectdatabasem3dbuser/zz_controller.go create mode 100755 internal/controller/databases/projectdatabasemongodbuser/zz_controller.go create mode 100755 internal/controller/databases/projectdatabaseopensearchpattern/zz_controller.go create mode 100755 internal/controller/databases/projectdatabaseopensearchuser/zz_controller.go create mode 100755 internal/controller/databases/projectdatabasepostgresqluser/zz_controller.go create mode 100755 internal/controller/databases/projectdatabaseredisuser/zz_controller.go create mode 100755 internal/controller/databases/projectdatabaseuser/zz_controller.go create mode 100755 internal/controller/kube/projectkube/zz_controller.go create mode 100755 internal/controller/kube/projectkubeiprestrictions/zz_controller.go create mode 100755 internal/controller/kube/projectkubenodepool/zz_controller.go create mode 100755 
internal/controller/kube/projectkubeoidc/zz_controller.go create mode 100755 internal/controller/lb/httpfarm/zz_controller.go create mode 100755 internal/controller/lb/httpfarmserver/zz_controller.go create mode 100755 internal/controller/lb/httpfrontend/zz_controller.go create mode 100755 internal/controller/lb/httproute/zz_controller.go create mode 100755 internal/controller/lb/httprouterule/zz_controller.go create mode 100755 internal/controller/lb/iploadbalancing/zz_controller.go create mode 100755 internal/controller/lb/refresh/zz_controller.go create mode 100755 internal/controller/lb/tcpfarm/zz_controller.go create mode 100755 internal/controller/lb/tcpfarmserver/zz_controller.go create mode 100755 internal/controller/lb/tcpfrontend/zz_controller.go create mode 100755 internal/controller/lb/tcproute/zz_controller.go create mode 100755 internal/controller/lb/tcprouterule/zz_controller.go create mode 100755 internal/controller/lb/vracknetwork/zz_controller.go create mode 100755 internal/controller/logs/logscluster/zz_controller.go create mode 100755 internal/controller/logs/logsinput/zz_controller.go create mode 100644 package/crds/databases.ovh.edixos.io_projectdatabasedatabases.yaml create mode 100644 package/crds/databases.ovh.edixos.io_projectdatabaseintegrations.yaml create mode 100644 package/crds/databases.ovh.edixos.io_projectdatabaseiprestrictions.yaml create mode 100644 package/crds/databases.ovh.edixos.io_projectdatabasekafkaacls.yaml create mode 100644 package/crds/databases.ovh.edixos.io_projectdatabasekafkaschemaregistryacls.yaml create mode 100644 package/crds/databases.ovh.edixos.io_projectdatabasekafkatopics.yaml create mode 100644 package/crds/databases.ovh.edixos.io_projectdatabasem3dbnamespaces.yaml create mode 100644 package/crds/databases.ovh.edixos.io_projectdatabasem3dbusers.yaml create mode 100644 package/crds/databases.ovh.edixos.io_projectdatabasemongodbusers.yaml create mode 100644 
package/crds/databases.ovh.edixos.io_projectdatabaseopensearchpatterns.yaml create mode 100644 package/crds/databases.ovh.edixos.io_projectdatabaseopensearchusers.yaml create mode 100644 package/crds/databases.ovh.edixos.io_projectdatabasepostgresqlusers.yaml create mode 100644 package/crds/databases.ovh.edixos.io_projectdatabaseredisusers.yaml create mode 100644 package/crds/databases.ovh.edixos.io_projectdatabases.yaml create mode 100644 package/crds/databases.ovh.edixos.io_projectdatabaseusers.yaml create mode 100644 package/crds/kube.ovh.edixos.io_projectkubeiprestrictions.yaml create mode 100644 package/crds/kube.ovh.edixos.io_projectkubenodepools.yaml create mode 100644 package/crds/kube.ovh.edixos.io_projectkubeoidcs.yaml create mode 100644 package/crds/kube.ovh.edixos.io_projectkubes.yaml create mode 100644 package/crds/lb.ovh.edixos.io_httpfarms.yaml create mode 100644 package/crds/lb.ovh.edixos.io_httpfarmservers.yaml create mode 100644 package/crds/lb.ovh.edixos.io_httpfrontends.yaml create mode 100644 package/crds/lb.ovh.edixos.io_httprouterules.yaml create mode 100644 package/crds/lb.ovh.edixos.io_httproutes.yaml create mode 100644 package/crds/lb.ovh.edixos.io_iploadbalancings.yaml create mode 100644 package/crds/lb.ovh.edixos.io_refreshes.yaml create mode 100644 package/crds/lb.ovh.edixos.io_tcpfarms.yaml create mode 100644 package/crds/lb.ovh.edixos.io_tcpfarmservers.yaml create mode 100644 package/crds/lb.ovh.edixos.io_tcpfrontends.yaml create mode 100644 package/crds/lb.ovh.edixos.io_tcprouterules.yaml create mode 100644 package/crds/lb.ovh.edixos.io_tcproutes.yaml create mode 100644 package/crds/lb.ovh.edixos.io_vracknetworks.yaml create mode 100644 package/crds/logs.ovh.edixos.io_logsclusters.yaml create mode 100644 package/crds/logs.ovh.edixos.io_logsinputs.yaml diff --git a/apis/databases/v1alpha1/zz_generated.deepcopy.go b/apis/databases/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..59dbfce --- /dev/null +++ 
b/apis/databases/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,3986 @@ +//go:build !ignore_autogenerated + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AclsInitParameters) DeepCopyInto(out *AclsInitParameters) { + *out = *in + if in.Pattern != nil { + in, out := &in.Pattern, &out.Pattern + *out = new(string) + **out = **in + } + if in.Permission != nil { + in, out := &in.Permission, &out.Permission + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AclsInitParameters. +func (in *AclsInitParameters) DeepCopy() *AclsInitParameters { + if in == nil { + return nil + } + out := new(AclsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AclsObservation) DeepCopyInto(out *AclsObservation) { + *out = *in + if in.Pattern != nil { + in, out := &in.Pattern, &out.Pattern + *out = new(string) + **out = **in + } + if in.Permission != nil { + in, out := &in.Permission, &out.Permission + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AclsObservation. +func (in *AclsObservation) DeepCopy() *AclsObservation { + if in == nil { + return nil + } + out := new(AclsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AclsParameters) DeepCopyInto(out *AclsParameters) { + *out = *in + if in.Pattern != nil { + in, out := &in.Pattern, &out.Pattern + *out = new(string) + **out = **in + } + if in.Permission != nil { + in, out := &in.Permission, &out.Permission + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AclsParameters. +func (in *AclsParameters) DeepCopy() *AclsParameters { + if in == nil { + return nil + } + out := new(AclsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointsInitParameters) DeepCopyInto(out *EndpointsInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointsInitParameters. +func (in *EndpointsInitParameters) DeepCopy() *EndpointsInitParameters { + if in == nil { + return nil + } + out := new(EndpointsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EndpointsObservation) DeepCopyInto(out *EndpointsObservation) { + *out = *in + if in.Component != nil { + in, out := &in.Component, &out.Component + *out = new(string) + **out = **in + } + if in.Domain != nil { + in, out := &in.Domain, &out.Domain + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.SSL != nil { + in, out := &in.SSL, &out.SSL + *out = new(bool) + **out = **in + } + if in.SSLMode != nil { + in, out := &in.SSLMode, &out.SSLMode + *out = new(string) + **out = **in + } + if in.Scheme != nil { + in, out := &in.Scheme, &out.Scheme + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointsObservation. +func (in *EndpointsObservation) DeepCopy() *EndpointsObservation { + if in == nil { + return nil + } + out := new(EndpointsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointsParameters) DeepCopyInto(out *EndpointsParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointsParameters. +func (in *EndpointsParameters) DeepCopy() *EndpointsParameters { + if in == nil { + return nil + } + out := new(EndpointsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NodesInitParameters) DeepCopyInto(out *NodesInitParameters) { + *out = *in + if in.NetworkID != nil { + in, out := &in.NetworkID, &out.NetworkID + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodesInitParameters. +func (in *NodesInitParameters) DeepCopy() *NodesInitParameters { + if in == nil { + return nil + } + out := new(NodesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodesObservation) DeepCopyInto(out *NodesObservation) { + *out = *in + if in.NetworkID != nil { + in, out := &in.NetworkID, &out.NetworkID + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodesObservation. +func (in *NodesObservation) DeepCopy() *NodesObservation { + if in == nil { + return nil + } + out := new(NodesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NodesParameters) DeepCopyInto(out *NodesParameters) { + *out = *in + if in.NetworkID != nil { + in, out := &in.NetworkID, &out.NetworkID + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodesParameters. +func (in *NodesParameters) DeepCopy() *NodesParameters { + if in == nil { + return nil + } + out := new(NodesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabase) DeepCopyInto(out *ProjectDatabase) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabase. +func (in *ProjectDatabase) DeepCopy() *ProjectDatabase { + if in == nil { + return nil + } + out := new(ProjectDatabase) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProjectDatabase) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProjectDatabaseDatabase) DeepCopyInto(out *ProjectDatabaseDatabase) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseDatabase. +func (in *ProjectDatabaseDatabase) DeepCopy() *ProjectDatabaseDatabase { + if in == nil { + return nil + } + out := new(ProjectDatabaseDatabase) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProjectDatabaseDatabase) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabaseDatabaseInitParameters) DeepCopyInto(out *ProjectDatabaseDatabaseInitParameters) { + *out = *in + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.Engine != nil { + in, out := &in.Engine, &out.Engine + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseDatabaseInitParameters. +func (in *ProjectDatabaseDatabaseInitParameters) DeepCopy() *ProjectDatabaseDatabaseInitParameters { + if in == nil { + return nil + } + out := new(ProjectDatabaseDatabaseInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProjectDatabaseDatabaseList) DeepCopyInto(out *ProjectDatabaseDatabaseList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ProjectDatabaseDatabase, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseDatabaseList. +func (in *ProjectDatabaseDatabaseList) DeepCopy() *ProjectDatabaseDatabaseList { + if in == nil { + return nil + } + out := new(ProjectDatabaseDatabaseList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProjectDatabaseDatabaseList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabaseDatabaseObservation) DeepCopyInto(out *ProjectDatabaseDatabaseObservation) { + *out = *in + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.Default != nil { + in, out := &in.Default, &out.Default + *out = new(bool) + **out = **in + } + if in.Engine != nil { + in, out := &in.Engine, &out.Engine + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseDatabaseObservation. 
+func (in *ProjectDatabaseDatabaseObservation) DeepCopy() *ProjectDatabaseDatabaseObservation { + if in == nil { + return nil + } + out := new(ProjectDatabaseDatabaseObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabaseDatabaseParameters) DeepCopyInto(out *ProjectDatabaseDatabaseParameters) { + *out = *in + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.Engine != nil { + in, out := &in.Engine, &out.Engine + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseDatabaseParameters. +func (in *ProjectDatabaseDatabaseParameters) DeepCopy() *ProjectDatabaseDatabaseParameters { + if in == nil { + return nil + } + out := new(ProjectDatabaseDatabaseParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabaseDatabaseSpec) DeepCopyInto(out *ProjectDatabaseDatabaseSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseDatabaseSpec. +func (in *ProjectDatabaseDatabaseSpec) DeepCopy() *ProjectDatabaseDatabaseSpec { + if in == nil { + return nil + } + out := new(ProjectDatabaseDatabaseSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProjectDatabaseDatabaseStatus) DeepCopyInto(out *ProjectDatabaseDatabaseStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseDatabaseStatus. +func (in *ProjectDatabaseDatabaseStatus) DeepCopy() *ProjectDatabaseDatabaseStatus { + if in == nil { + return nil + } + out := new(ProjectDatabaseDatabaseStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabaseIPRestriction) DeepCopyInto(out *ProjectDatabaseIPRestriction) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseIPRestriction. +func (in *ProjectDatabaseIPRestriction) DeepCopy() *ProjectDatabaseIPRestriction { + if in == nil { + return nil + } + out := new(ProjectDatabaseIPRestriction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProjectDatabaseIPRestriction) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProjectDatabaseIPRestrictionInitParameters) DeepCopyInto(out *ProjectDatabaseIPRestrictionInitParameters) { + *out = *in + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Engine != nil { + in, out := &in.Engine, &out.Engine + *out = new(string) + **out = **in + } + if in.IP != nil { + in, out := &in.IP, &out.IP + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseIPRestrictionInitParameters. +func (in *ProjectDatabaseIPRestrictionInitParameters) DeepCopy() *ProjectDatabaseIPRestrictionInitParameters { + if in == nil { + return nil + } + out := new(ProjectDatabaseIPRestrictionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabaseIPRestrictionList) DeepCopyInto(out *ProjectDatabaseIPRestrictionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ProjectDatabaseIPRestriction, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseIPRestrictionList. +func (in *ProjectDatabaseIPRestrictionList) DeepCopy() *ProjectDatabaseIPRestrictionList { + if in == nil { + return nil + } + out := new(ProjectDatabaseIPRestrictionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ProjectDatabaseIPRestrictionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabaseIPRestrictionObservation) DeepCopyInto(out *ProjectDatabaseIPRestrictionObservation) { + *out = *in + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Engine != nil { + in, out := &in.Engine, &out.Engine + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IP != nil { + in, out := &in.IP, &out.IP + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseIPRestrictionObservation. +func (in *ProjectDatabaseIPRestrictionObservation) DeepCopy() *ProjectDatabaseIPRestrictionObservation { + if in == nil { + return nil + } + out := new(ProjectDatabaseIPRestrictionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProjectDatabaseIPRestrictionParameters) DeepCopyInto(out *ProjectDatabaseIPRestrictionParameters) { + *out = *in + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Engine != nil { + in, out := &in.Engine, &out.Engine + *out = new(string) + **out = **in + } + if in.IP != nil { + in, out := &in.IP, &out.IP + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseIPRestrictionParameters. +func (in *ProjectDatabaseIPRestrictionParameters) DeepCopy() *ProjectDatabaseIPRestrictionParameters { + if in == nil { + return nil + } + out := new(ProjectDatabaseIPRestrictionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabaseIPRestrictionSpec) DeepCopyInto(out *ProjectDatabaseIPRestrictionSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseIPRestrictionSpec. +func (in *ProjectDatabaseIPRestrictionSpec) DeepCopy() *ProjectDatabaseIPRestrictionSpec { + if in == nil { + return nil + } + out := new(ProjectDatabaseIPRestrictionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProjectDatabaseIPRestrictionStatus) DeepCopyInto(out *ProjectDatabaseIPRestrictionStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseIPRestrictionStatus. +func (in *ProjectDatabaseIPRestrictionStatus) DeepCopy() *ProjectDatabaseIPRestrictionStatus { + if in == nil { + return nil + } + out := new(ProjectDatabaseIPRestrictionStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabaseInitParameters) DeepCopyInto(out *ProjectDatabaseInitParameters) { + *out = *in + if in.AdvancedConfiguration != nil { + in, out := &in.AdvancedConfiguration, &out.AdvancedConfiguration + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DiskSize != nil { + in, out := &in.DiskSize, &out.DiskSize + *out = new(float64) + **out = **in + } + if in.Engine != nil { + in, out := &in.Engine, &out.Engine + *out = new(string) + **out = **in + } + if in.Flavor != nil { + in, out := &in.Flavor, &out.Flavor + *out = new(string) + **out = **in + } + if in.KafkaRestAPI != nil { + in, out := &in.KafkaRestAPI, &out.KafkaRestAPI + *out = new(bool) + **out = **in + } + if in.Nodes != nil { + in, out := &in.Nodes, &out.Nodes + *out = make([]NodesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OpensearchAclsEnabled != nil { + in, out := &in.OpensearchAclsEnabled, &out.OpensearchAclsEnabled + *out = new(bool) 
+ **out = **in + } + if in.Plan != nil { + in, out := &in.Plan, &out.Plan + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseInitParameters. +func (in *ProjectDatabaseInitParameters) DeepCopy() *ProjectDatabaseInitParameters { + if in == nil { + return nil + } + out := new(ProjectDatabaseInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabaseIntegration) DeepCopyInto(out *ProjectDatabaseIntegration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseIntegration. +func (in *ProjectDatabaseIntegration) DeepCopy() *ProjectDatabaseIntegration { + if in == nil { + return nil + } + out := new(ProjectDatabaseIntegration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProjectDatabaseIntegration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProjectDatabaseIntegrationInitParameters) DeepCopyInto(out *ProjectDatabaseIntegrationInitParameters) { + *out = *in + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.DestinationServiceID != nil { + in, out := &in.DestinationServiceID, &out.DestinationServiceID + *out = new(string) + **out = **in + } + if in.Engine != nil { + in, out := &in.Engine, &out.Engine + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.SourceServiceID != nil { + in, out := &in.SourceServiceID, &out.SourceServiceID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseIntegrationInitParameters. +func (in *ProjectDatabaseIntegrationInitParameters) DeepCopy() *ProjectDatabaseIntegrationInitParameters { + if in == nil { + return nil + } + out := new(ProjectDatabaseIntegrationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProjectDatabaseIntegrationList) DeepCopyInto(out *ProjectDatabaseIntegrationList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items == nil {
+		return
+	}
+	// Replace the aliased slice from the shallow copy with a deep-copied one.
+	items := make([]ProjectDatabaseIntegration, len(in.Items))
+	for i := range in.Items {
+		in.Items[i].DeepCopyInto(&items[i])
+	}
+	out.Items = items
+}
+
+// DeepCopy returns a freshly allocated deep copy of the receiver, or nil
+// when the receiver itself is nil.
+func (in *ProjectDatabaseIntegrationList) DeepCopy() *ProjectDatabaseIntegrationList {
+	if in == nil {
+		return nil
+	}
+	clone := &ProjectDatabaseIntegrationList{}
+	in.DeepCopyInto(clone)
+	return clone
+}
+
+// DeepCopyObject returns a deep copy of the receiver as a runtime.Object.
+// A literal nil is returned for a nil receiver so callers never observe a
+// typed-nil pointer boxed in a non-nil interface.
+func (in *ProjectDatabaseIntegrationList) DeepCopyObject() runtime.Object {
+	c := in.DeepCopy()
+	if c == nil {
+		return nil
+	}
+	return c
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProjectDatabaseIntegrationObservation) DeepCopyInto(out *ProjectDatabaseIntegrationObservation) { + *out = *in + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.DestinationServiceID != nil { + in, out := &in.DestinationServiceID, &out.DestinationServiceID + *out = new(string) + **out = **in + } + if in.Engine != nil { + in, out := &in.Engine, &out.Engine + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.SourceServiceID != nil { + in, out := &in.SourceServiceID, &out.SourceServiceID + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseIntegrationObservation. +func (in *ProjectDatabaseIntegrationObservation) DeepCopy() *ProjectDatabaseIntegrationObservation { + if in == nil { + return nil + } + out := new(ProjectDatabaseIntegrationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProjectDatabaseIntegrationParameters) DeepCopyInto(out *ProjectDatabaseIntegrationParameters) { + *out = *in + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.DestinationServiceID != nil { + in, out := &in.DestinationServiceID, &out.DestinationServiceID + *out = new(string) + **out = **in + } + if in.Engine != nil { + in, out := &in.Engine, &out.Engine + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.SourceServiceID != nil { + in, out := &in.SourceServiceID, &out.SourceServiceID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseIntegrationParameters. +func (in *ProjectDatabaseIntegrationParameters) DeepCopy() *ProjectDatabaseIntegrationParameters { + if in == nil { + return nil + } + out := new(ProjectDatabaseIntegrationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProjectDatabaseIntegrationSpec) DeepCopyInto(out *ProjectDatabaseIntegrationSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseIntegrationSpec. +func (in *ProjectDatabaseIntegrationSpec) DeepCopy() *ProjectDatabaseIntegrationSpec { + if in == nil { + return nil + } + out := new(ProjectDatabaseIntegrationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabaseIntegrationStatus) DeepCopyInto(out *ProjectDatabaseIntegrationStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseIntegrationStatus. +func (in *ProjectDatabaseIntegrationStatus) DeepCopy() *ProjectDatabaseIntegrationStatus { + if in == nil { + return nil + } + out := new(ProjectDatabaseIntegrationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabaseKafkaACL) DeepCopyInto(out *ProjectDatabaseKafkaACL) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseKafkaACL. 
+func (in *ProjectDatabaseKafkaACL) DeepCopy() *ProjectDatabaseKafkaACL { + if in == nil { + return nil + } + out := new(ProjectDatabaseKafkaACL) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProjectDatabaseKafkaACL) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabaseKafkaACLInitParameters) DeepCopyInto(out *ProjectDatabaseKafkaACLInitParameters) { + *out = *in + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.Permission != nil { + in, out := &in.Permission, &out.Permission + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.Topic != nil { + in, out := &in.Topic, &out.Topic + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseKafkaACLInitParameters. +func (in *ProjectDatabaseKafkaACLInitParameters) DeepCopy() *ProjectDatabaseKafkaACLInitParameters { + if in == nil { + return nil + } + out := new(ProjectDatabaseKafkaACLInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProjectDatabaseKafkaACLList) DeepCopyInto(out *ProjectDatabaseKafkaACLList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ProjectDatabaseKafkaACL, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseKafkaACLList. +func (in *ProjectDatabaseKafkaACLList) DeepCopy() *ProjectDatabaseKafkaACLList { + if in == nil { + return nil + } + out := new(ProjectDatabaseKafkaACLList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProjectDatabaseKafkaACLList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabaseKafkaACLObservation) DeepCopyInto(out *ProjectDatabaseKafkaACLObservation) { + *out = *in + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Permission != nil { + in, out := &in.Permission, &out.Permission + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.Topic != nil { + in, out := &in.Topic, &out.Topic + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseKafkaACLObservation. 
+func (in *ProjectDatabaseKafkaACLObservation) DeepCopy() *ProjectDatabaseKafkaACLObservation { + if in == nil { + return nil + } + out := new(ProjectDatabaseKafkaACLObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabaseKafkaACLParameters) DeepCopyInto(out *ProjectDatabaseKafkaACLParameters) { + *out = *in + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.Permission != nil { + in, out := &in.Permission, &out.Permission + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.Topic != nil { + in, out := &in.Topic, &out.Topic + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseKafkaACLParameters. +func (in *ProjectDatabaseKafkaACLParameters) DeepCopy() *ProjectDatabaseKafkaACLParameters { + if in == nil { + return nil + } + out := new(ProjectDatabaseKafkaACLParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabaseKafkaACLSpec) DeepCopyInto(out *ProjectDatabaseKafkaACLSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseKafkaACLSpec. 
+func (in *ProjectDatabaseKafkaACLSpec) DeepCopy() *ProjectDatabaseKafkaACLSpec {
+	if in == nil {
+		return nil
+	}
+	clone := &ProjectDatabaseKafkaACLSpec{}
+	in.DeepCopyInto(clone)
+	return clone
+}
+
+// DeepCopyInto deep-copies the receiver into out; out must be non-nil.
+func (in *ProjectDatabaseKafkaACLStatus) DeepCopyInto(out *ProjectDatabaseKafkaACLStatus) {
+	*out = *in
+	in.AtProvider.DeepCopyInto(&out.AtProvider)
+	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
+}
+
+// DeepCopy returns a freshly allocated deep copy of the receiver, or nil
+// when the receiver itself is nil.
+func (in *ProjectDatabaseKafkaACLStatus) DeepCopy() *ProjectDatabaseKafkaACLStatus {
+	if in == nil {
+		return nil
+	}
+	clone := &ProjectDatabaseKafkaACLStatus{}
+	in.DeepCopyInto(clone)
+	return clone
+}
+
+// DeepCopyInto deep-copies the receiver into out; out must be non-nil.
+func (in *ProjectDatabaseKafkaSchemaregistryacl) DeepCopyInto(out *ProjectDatabaseKafkaSchemaregistryacl) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy returns a freshly allocated deep copy of the receiver, or nil
+// when the receiver itself is nil.
+func (in *ProjectDatabaseKafkaSchemaregistryacl) DeepCopy() *ProjectDatabaseKafkaSchemaregistryacl {
+	if in == nil {
+		return nil
+	}
+	clone := &ProjectDatabaseKafkaSchemaregistryacl{}
+	in.DeepCopyInto(clone)
+	return clone
+}
+
+// DeepCopyObject returns a deep copy of the receiver as a runtime.Object.
+// A literal nil is returned for a nil receiver so callers never observe a
+// typed-nil pointer boxed in a non-nil interface.
+func (in *ProjectDatabaseKafkaSchemaregistryacl) DeepCopyObject() runtime.Object {
+	c := in.DeepCopy()
+	if c == nil {
+		return nil
+	}
+	return c
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProjectDatabaseKafkaSchemaregistryaclInitParameters) DeepCopyInto(out *ProjectDatabaseKafkaSchemaregistryaclInitParameters) { + *out = *in + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.Permission != nil { + in, out := &in.Permission, &out.Permission + *out = new(string) + **out = **in + } + if in.Resource != nil { + in, out := &in.Resource, &out.Resource + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseKafkaSchemaregistryaclInitParameters. +func (in *ProjectDatabaseKafkaSchemaregistryaclInitParameters) DeepCopy() *ProjectDatabaseKafkaSchemaregistryaclInitParameters { + if in == nil { + return nil + } + out := new(ProjectDatabaseKafkaSchemaregistryaclInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabaseKafkaSchemaregistryaclList) DeepCopyInto(out *ProjectDatabaseKafkaSchemaregistryaclList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ProjectDatabaseKafkaSchemaregistryacl, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseKafkaSchemaregistryaclList. 
+func (in *ProjectDatabaseKafkaSchemaregistryaclList) DeepCopy() *ProjectDatabaseKafkaSchemaregistryaclList { + if in == nil { + return nil + } + out := new(ProjectDatabaseKafkaSchemaregistryaclList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProjectDatabaseKafkaSchemaregistryaclList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabaseKafkaSchemaregistryaclObservation) DeepCopyInto(out *ProjectDatabaseKafkaSchemaregistryaclObservation) { + *out = *in + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Permission != nil { + in, out := &in.Permission, &out.Permission + *out = new(string) + **out = **in + } + if in.Resource != nil { + in, out := &in.Resource, &out.Resource + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseKafkaSchemaregistryaclObservation. +func (in *ProjectDatabaseKafkaSchemaregistryaclObservation) DeepCopy() *ProjectDatabaseKafkaSchemaregistryaclObservation { + if in == nil { + return nil + } + out := new(ProjectDatabaseKafkaSchemaregistryaclObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProjectDatabaseKafkaSchemaregistryaclParameters) DeepCopyInto(out *ProjectDatabaseKafkaSchemaregistryaclParameters) { + *out = *in + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.Permission != nil { + in, out := &in.Permission, &out.Permission + *out = new(string) + **out = **in + } + if in.Resource != nil { + in, out := &in.Resource, &out.Resource + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseKafkaSchemaregistryaclParameters. +func (in *ProjectDatabaseKafkaSchemaregistryaclParameters) DeepCopy() *ProjectDatabaseKafkaSchemaregistryaclParameters { + if in == nil { + return nil + } + out := new(ProjectDatabaseKafkaSchemaregistryaclParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabaseKafkaSchemaregistryaclSpec) DeepCopyInto(out *ProjectDatabaseKafkaSchemaregistryaclSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseKafkaSchemaregistryaclSpec. +func (in *ProjectDatabaseKafkaSchemaregistryaclSpec) DeepCopy() *ProjectDatabaseKafkaSchemaregistryaclSpec { + if in == nil { + return nil + } + out := new(ProjectDatabaseKafkaSchemaregistryaclSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProjectDatabaseKafkaSchemaregistryaclStatus) DeepCopyInto(out *ProjectDatabaseKafkaSchemaregistryaclStatus) {
+	*out = *in
+	in.AtProvider.DeepCopyInto(&out.AtProvider)
+	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
+}
+
+// DeepCopy returns a freshly allocated deep copy of the receiver, or nil
+// when the receiver itself is nil.
+func (in *ProjectDatabaseKafkaSchemaregistryaclStatus) DeepCopy() *ProjectDatabaseKafkaSchemaregistryaclStatus {
+	if in == nil {
+		return nil
+	}
+	clone := &ProjectDatabaseKafkaSchemaregistryaclStatus{}
+	in.DeepCopyInto(clone)
+	return clone
+}
+
+// DeepCopyInto deep-copies the receiver into out; out must be non-nil.
+func (in *ProjectDatabaseKafkaTopic) DeepCopyInto(out *ProjectDatabaseKafkaTopic) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy returns a freshly allocated deep copy of the receiver, or nil
+// when the receiver itself is nil.
+func (in *ProjectDatabaseKafkaTopic) DeepCopy() *ProjectDatabaseKafkaTopic {
+	if in == nil {
+		return nil
+	}
+	clone := &ProjectDatabaseKafkaTopic{}
+	in.DeepCopyInto(clone)
+	return clone
+}
+
+// DeepCopyObject returns a deep copy of the receiver as a runtime.Object.
+// A literal nil is returned for a nil receiver so callers never observe a
+// typed-nil pointer boxed in a non-nil interface.
+func (in *ProjectDatabaseKafkaTopic) DeepCopyObject() runtime.Object {
+	c := in.DeepCopy()
+	if c == nil {
+		return nil
+	}
+	return c
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProjectDatabaseKafkaTopicInitParameters) DeepCopyInto(out *ProjectDatabaseKafkaTopicInitParameters) { + *out = *in + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.MinInsyncReplicas != nil { + in, out := &in.MinInsyncReplicas, &out.MinInsyncReplicas + *out = new(float64) + **out = **in + } + if in.Partitions != nil { + in, out := &in.Partitions, &out.Partitions + *out = new(float64) + **out = **in + } + if in.Replication != nil { + in, out := &in.Replication, &out.Replication + *out = new(float64) + **out = **in + } + if in.RetentionBytes != nil { + in, out := &in.RetentionBytes, &out.RetentionBytes + *out = new(float64) + **out = **in + } + if in.RetentionHours != nil { + in, out := &in.RetentionHours, &out.RetentionHours + *out = new(float64) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseKafkaTopicInitParameters. +func (in *ProjectDatabaseKafkaTopicInitParameters) DeepCopy() *ProjectDatabaseKafkaTopicInitParameters { + if in == nil { + return nil + } + out := new(ProjectDatabaseKafkaTopicInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabaseKafkaTopicList) DeepCopyInto(out *ProjectDatabaseKafkaTopicList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ProjectDatabaseKafkaTopic, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseKafkaTopicList. 
+func (in *ProjectDatabaseKafkaTopicList) DeepCopy() *ProjectDatabaseKafkaTopicList { + if in == nil { + return nil + } + out := new(ProjectDatabaseKafkaTopicList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProjectDatabaseKafkaTopicList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabaseKafkaTopicObservation) DeepCopyInto(out *ProjectDatabaseKafkaTopicObservation) { + *out = *in + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.MinInsyncReplicas != nil { + in, out := &in.MinInsyncReplicas, &out.MinInsyncReplicas + *out = new(float64) + **out = **in + } + if in.Partitions != nil { + in, out := &in.Partitions, &out.Partitions + *out = new(float64) + **out = **in + } + if in.Replication != nil { + in, out := &in.Replication, &out.Replication + *out = new(float64) + **out = **in + } + if in.RetentionBytes != nil { + in, out := &in.RetentionBytes, &out.RetentionBytes + *out = new(float64) + **out = **in + } + if in.RetentionHours != nil { + in, out := &in.RetentionHours, &out.RetentionHours + *out = new(float64) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseKafkaTopicObservation. 
+func (in *ProjectDatabaseKafkaTopicObservation) DeepCopy() *ProjectDatabaseKafkaTopicObservation { + if in == nil { + return nil + } + out := new(ProjectDatabaseKafkaTopicObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabaseKafkaTopicParameters) DeepCopyInto(out *ProjectDatabaseKafkaTopicParameters) { + *out = *in + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.MinInsyncReplicas != nil { + in, out := &in.MinInsyncReplicas, &out.MinInsyncReplicas + *out = new(float64) + **out = **in + } + if in.Partitions != nil { + in, out := &in.Partitions, &out.Partitions + *out = new(float64) + **out = **in + } + if in.Replication != nil { + in, out := &in.Replication, &out.Replication + *out = new(float64) + **out = **in + } + if in.RetentionBytes != nil { + in, out := &in.RetentionBytes, &out.RetentionBytes + *out = new(float64) + **out = **in + } + if in.RetentionHours != nil { + in, out := &in.RetentionHours, &out.RetentionHours + *out = new(float64) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseKafkaTopicParameters. +func (in *ProjectDatabaseKafkaTopicParameters) DeepCopy() *ProjectDatabaseKafkaTopicParameters { + if in == nil { + return nil + } + out := new(ProjectDatabaseKafkaTopicParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProjectDatabaseKafkaTopicSpec) DeepCopyInto(out *ProjectDatabaseKafkaTopicSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseKafkaTopicSpec. +func (in *ProjectDatabaseKafkaTopicSpec) DeepCopy() *ProjectDatabaseKafkaTopicSpec { + if in == nil { + return nil + } + out := new(ProjectDatabaseKafkaTopicSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabaseKafkaTopicStatus) DeepCopyInto(out *ProjectDatabaseKafkaTopicStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseKafkaTopicStatus. +func (in *ProjectDatabaseKafkaTopicStatus) DeepCopy() *ProjectDatabaseKafkaTopicStatus { + if in == nil { + return nil + } + out := new(ProjectDatabaseKafkaTopicStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabaseList) DeepCopyInto(out *ProjectDatabaseList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ProjectDatabase, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseList. 
+func (in *ProjectDatabaseList) DeepCopy() *ProjectDatabaseList { + if in == nil { + return nil + } + out := new(ProjectDatabaseList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProjectDatabaseList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabaseM3DbNamespace) DeepCopyInto(out *ProjectDatabaseM3DbNamespace) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseM3DbNamespace. +func (in *ProjectDatabaseM3DbNamespace) DeepCopy() *ProjectDatabaseM3DbNamespace { + if in == nil { + return nil + } + out := new(ProjectDatabaseM3DbNamespace) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProjectDatabaseM3DbNamespace) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProjectDatabaseM3DbNamespaceInitParameters) DeepCopyInto(out *ProjectDatabaseM3DbNamespaceInitParameters) { + *out = *in + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.Resolution != nil { + in, out := &in.Resolution, &out.Resolution + *out = new(string) + **out = **in + } + if in.RetentionBlockDataExpirationDuration != nil { + in, out := &in.RetentionBlockDataExpirationDuration, &out.RetentionBlockDataExpirationDuration + *out = new(string) + **out = **in + } + if in.RetentionBlockSizeDuration != nil { + in, out := &in.RetentionBlockSizeDuration, &out.RetentionBlockSizeDuration + *out = new(string) + **out = **in + } + if in.RetentionBufferFutureDuration != nil { + in, out := &in.RetentionBufferFutureDuration, &out.RetentionBufferFutureDuration + *out = new(string) + **out = **in + } + if in.RetentionBufferPastDuration != nil { + in, out := &in.RetentionBufferPastDuration, &out.RetentionBufferPastDuration + *out = new(string) + **out = **in + } + if in.RetentionPeriodDuration != nil { + in, out := &in.RetentionPeriodDuration, &out.RetentionPeriodDuration + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.SnapshotEnabled != nil { + in, out := &in.SnapshotEnabled, &out.SnapshotEnabled + *out = new(bool) + **out = **in + } + if in.WritesToCommitLogEnabled != nil { + in, out := &in.WritesToCommitLogEnabled, &out.WritesToCommitLogEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseM3DbNamespaceInitParameters. 
+func (in *ProjectDatabaseM3DbNamespaceInitParameters) DeepCopy() *ProjectDatabaseM3DbNamespaceInitParameters { + if in == nil { + return nil + } + out := new(ProjectDatabaseM3DbNamespaceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabaseM3DbNamespaceList) DeepCopyInto(out *ProjectDatabaseM3DbNamespaceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ProjectDatabaseM3DbNamespace, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseM3DbNamespaceList. +func (in *ProjectDatabaseM3DbNamespaceList) DeepCopy() *ProjectDatabaseM3DbNamespaceList { + if in == nil { + return nil + } + out := new(ProjectDatabaseM3DbNamespaceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProjectDatabaseM3DbNamespaceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProjectDatabaseM3DbNamespaceObservation) DeepCopyInto(out *ProjectDatabaseM3DbNamespaceObservation) { + *out = *in + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Resolution != nil { + in, out := &in.Resolution, &out.Resolution + *out = new(string) + **out = **in + } + if in.RetentionBlockDataExpirationDuration != nil { + in, out := &in.RetentionBlockDataExpirationDuration, &out.RetentionBlockDataExpirationDuration + *out = new(string) + **out = **in + } + if in.RetentionBlockSizeDuration != nil { + in, out := &in.RetentionBlockSizeDuration, &out.RetentionBlockSizeDuration + *out = new(string) + **out = **in + } + if in.RetentionBufferFutureDuration != nil { + in, out := &in.RetentionBufferFutureDuration, &out.RetentionBufferFutureDuration + *out = new(string) + **out = **in + } + if in.RetentionBufferPastDuration != nil { + in, out := &in.RetentionBufferPastDuration, &out.RetentionBufferPastDuration + *out = new(string) + **out = **in + } + if in.RetentionPeriodDuration != nil { + in, out := &in.RetentionPeriodDuration, &out.RetentionPeriodDuration + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.SnapshotEnabled != nil { + in, out := &in.SnapshotEnabled, &out.SnapshotEnabled + *out = new(bool) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.WritesToCommitLogEnabled != nil { + in, out := &in.WritesToCommitLogEnabled, &out.WritesToCommitLogEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseM3DbNamespaceObservation. 
+func (in *ProjectDatabaseM3DbNamespaceObservation) DeepCopy() *ProjectDatabaseM3DbNamespaceObservation { + if in == nil { + return nil + } + out := new(ProjectDatabaseM3DbNamespaceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabaseM3DbNamespaceParameters) DeepCopyInto(out *ProjectDatabaseM3DbNamespaceParameters) { + *out = *in + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.Resolution != nil { + in, out := &in.Resolution, &out.Resolution + *out = new(string) + **out = **in + } + if in.RetentionBlockDataExpirationDuration != nil { + in, out := &in.RetentionBlockDataExpirationDuration, &out.RetentionBlockDataExpirationDuration + *out = new(string) + **out = **in + } + if in.RetentionBlockSizeDuration != nil { + in, out := &in.RetentionBlockSizeDuration, &out.RetentionBlockSizeDuration + *out = new(string) + **out = **in + } + if in.RetentionBufferFutureDuration != nil { + in, out := &in.RetentionBufferFutureDuration, &out.RetentionBufferFutureDuration + *out = new(string) + **out = **in + } + if in.RetentionBufferPastDuration != nil { + in, out := &in.RetentionBufferPastDuration, &out.RetentionBufferPastDuration + *out = new(string) + **out = **in + } + if in.RetentionPeriodDuration != nil { + in, out := &in.RetentionPeriodDuration, &out.RetentionPeriodDuration + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.SnapshotEnabled != nil { + in, out := &in.SnapshotEnabled, &out.SnapshotEnabled + *out = new(bool) + **out = **in + } + if in.WritesToCommitLogEnabled != nil { + in, out := &in.WritesToCommitLogEnabled, &out.WritesToCommitLogEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the 
receiver, creating a new ProjectDatabaseM3DbNamespaceParameters. +func (in *ProjectDatabaseM3DbNamespaceParameters) DeepCopy() *ProjectDatabaseM3DbNamespaceParameters { + if in == nil { + return nil + } + out := new(ProjectDatabaseM3DbNamespaceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabaseM3DbNamespaceSpec) DeepCopyInto(out *ProjectDatabaseM3DbNamespaceSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseM3DbNamespaceSpec. +func (in *ProjectDatabaseM3DbNamespaceSpec) DeepCopy() *ProjectDatabaseM3DbNamespaceSpec { + if in == nil { + return nil + } + out := new(ProjectDatabaseM3DbNamespaceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabaseM3DbNamespaceStatus) DeepCopyInto(out *ProjectDatabaseM3DbNamespaceStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseM3DbNamespaceStatus. +func (in *ProjectDatabaseM3DbNamespaceStatus) DeepCopy() *ProjectDatabaseM3DbNamespaceStatus { + if in == nil { + return nil + } + out := new(ProjectDatabaseM3DbNamespaceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProjectDatabaseM3DbUser) DeepCopyInto(out *ProjectDatabaseM3DbUser) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseM3DbUser. +func (in *ProjectDatabaseM3DbUser) DeepCopy() *ProjectDatabaseM3DbUser { + if in == nil { + return nil + } + out := new(ProjectDatabaseM3DbUser) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProjectDatabaseM3DbUser) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabaseM3DbUserInitParameters) DeepCopyInto(out *ProjectDatabaseM3DbUserInitParameters) { + *out = *in + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.Group != nil { + in, out := &in.Group, &out.Group + *out = new(string) + **out = **in + } + if in.PasswordReset != nil { + in, out := &in.PasswordReset, &out.PasswordReset + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseM3DbUserInitParameters. +func (in *ProjectDatabaseM3DbUserInitParameters) DeepCopy() *ProjectDatabaseM3DbUserInitParameters { + if in == nil { + return nil + } + out := new(ProjectDatabaseM3DbUserInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProjectDatabaseM3DbUserList) DeepCopyInto(out *ProjectDatabaseM3DbUserList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ProjectDatabaseM3DbUser, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseM3DbUserList. +func (in *ProjectDatabaseM3DbUserList) DeepCopy() *ProjectDatabaseM3DbUserList { + if in == nil { + return nil + } + out := new(ProjectDatabaseM3DbUserList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProjectDatabaseM3DbUserList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProjectDatabaseM3DbUserObservation) DeepCopyInto(out *ProjectDatabaseM3DbUserObservation) { + *out = *in + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.Group != nil { + in, out := &in.Group, &out.Group + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.PasswordReset != nil { + in, out := &in.PasswordReset, &out.PasswordReset + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseM3DbUserObservation. +func (in *ProjectDatabaseM3DbUserObservation) DeepCopy() *ProjectDatabaseM3DbUserObservation { + if in == nil { + return nil + } + out := new(ProjectDatabaseM3DbUserObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProjectDatabaseM3DbUserParameters) DeepCopyInto(out *ProjectDatabaseM3DbUserParameters) { + *out = *in + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.Group != nil { + in, out := &in.Group, &out.Group + *out = new(string) + **out = **in + } + if in.PasswordReset != nil { + in, out := &in.PasswordReset, &out.PasswordReset + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseM3DbUserParameters. +func (in *ProjectDatabaseM3DbUserParameters) DeepCopy() *ProjectDatabaseM3DbUserParameters { + if in == nil { + return nil + } + out := new(ProjectDatabaseM3DbUserParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabaseM3DbUserSpec) DeepCopyInto(out *ProjectDatabaseM3DbUserSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseM3DbUserSpec. +func (in *ProjectDatabaseM3DbUserSpec) DeepCopy() *ProjectDatabaseM3DbUserSpec { + if in == nil { + return nil + } + out := new(ProjectDatabaseM3DbUserSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProjectDatabaseM3DbUserStatus) DeepCopyInto(out *ProjectDatabaseM3DbUserStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseM3DbUserStatus. +func (in *ProjectDatabaseM3DbUserStatus) DeepCopy() *ProjectDatabaseM3DbUserStatus { + if in == nil { + return nil + } + out := new(ProjectDatabaseM3DbUserStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabaseMongodbUser) DeepCopyInto(out *ProjectDatabaseMongodbUser) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseMongodbUser. +func (in *ProjectDatabaseMongodbUser) DeepCopy() *ProjectDatabaseMongodbUser { + if in == nil { + return nil + } + out := new(ProjectDatabaseMongodbUser) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProjectDatabaseMongodbUser) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProjectDatabaseMongodbUserInitParameters) DeepCopyInto(out *ProjectDatabaseMongodbUserInitParameters) { + *out = *in + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.PasswordReset != nil { + in, out := &in.PasswordReset, &out.PasswordReset + *out = new(string) + **out = **in + } + if in.Roles != nil { + in, out := &in.Roles, &out.Roles + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseMongodbUserInitParameters. +func (in *ProjectDatabaseMongodbUserInitParameters) DeepCopy() *ProjectDatabaseMongodbUserInitParameters { + if in == nil { + return nil + } + out := new(ProjectDatabaseMongodbUserInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabaseMongodbUserList) DeepCopyInto(out *ProjectDatabaseMongodbUserList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ProjectDatabaseMongodbUser, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseMongodbUserList. +func (in *ProjectDatabaseMongodbUserList) DeepCopy() *ProjectDatabaseMongodbUserList { + if in == nil { + return nil + } + out := new(ProjectDatabaseMongodbUserList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ProjectDatabaseMongodbUserList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabaseMongodbUserObservation) DeepCopyInto(out *ProjectDatabaseMongodbUserObservation) { + *out = *in + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.PasswordReset != nil { + in, out := &in.PasswordReset, &out.PasswordReset + *out = new(string) + **out = **in + } + if in.Roles != nil { + in, out := &in.Roles, &out.Roles + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseMongodbUserObservation. +func (in *ProjectDatabaseMongodbUserObservation) DeepCopy() *ProjectDatabaseMongodbUserObservation { + if in == nil { + return nil + } + out := new(ProjectDatabaseMongodbUserObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProjectDatabaseMongodbUserParameters) DeepCopyInto(out *ProjectDatabaseMongodbUserParameters) { + *out = *in + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.PasswordReset != nil { + in, out := &in.PasswordReset, &out.PasswordReset + *out = new(string) + **out = **in + } + if in.Roles != nil { + in, out := &in.Roles, &out.Roles + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseMongodbUserParameters. +func (in *ProjectDatabaseMongodbUserParameters) DeepCopy() *ProjectDatabaseMongodbUserParameters { + if in == nil { + return nil + } + out := new(ProjectDatabaseMongodbUserParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabaseMongodbUserSpec) DeepCopyInto(out *ProjectDatabaseMongodbUserSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseMongodbUserSpec. +func (in *ProjectDatabaseMongodbUserSpec) DeepCopy() *ProjectDatabaseMongodbUserSpec { + if in == nil { + return nil + } + out := new(ProjectDatabaseMongodbUserSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProjectDatabaseMongodbUserStatus) DeepCopyInto(out *ProjectDatabaseMongodbUserStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseMongodbUserStatus. +func (in *ProjectDatabaseMongodbUserStatus) DeepCopy() *ProjectDatabaseMongodbUserStatus { + if in == nil { + return nil + } + out := new(ProjectDatabaseMongodbUserStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabaseObservation) DeepCopyInto(out *ProjectDatabaseObservation) { + *out = *in + if in.AdvancedConfiguration != nil { + in, out := &in.AdvancedConfiguration, &out.AdvancedConfiguration + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.BackupTime != nil { + in, out := &in.BackupTime, &out.BackupTime + *out = new(string) + **out = **in + } + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DiskSize != nil { + in, out := &in.DiskSize, &out.DiskSize + *out = new(float64) + **out = **in + } + if in.DiskType != nil { + in, out := &in.DiskType, &out.DiskType + *out = new(string) + **out = **in + } + if in.Endpoints != nil { + in, out := &in.Endpoints, &out.Endpoints + *out = make([]EndpointsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Engine != nil { + in, out := &in.Engine, &out.Engine + *out = new(string) + **out = **in + } + if in.Flavor != 
nil { + in, out := &in.Flavor, &out.Flavor + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.KafkaRestAPI != nil { + in, out := &in.KafkaRestAPI, &out.KafkaRestAPI + *out = new(bool) + **out = **in + } + if in.MaintenanceTime != nil { + in, out := &in.MaintenanceTime, &out.MaintenanceTime + *out = new(string) + **out = **in + } + if in.NetworkType != nil { + in, out := &in.NetworkType, &out.NetworkType + *out = new(string) + **out = **in + } + if in.Nodes != nil { + in, out := &in.Nodes, &out.Nodes + *out = make([]NodesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OpensearchAclsEnabled != nil { + in, out := &in.OpensearchAclsEnabled, &out.OpensearchAclsEnabled + *out = new(bool) + **out = **in + } + if in.Plan != nil { + in, out := &in.Plan, &out.Plan + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseObservation. +func (in *ProjectDatabaseObservation) DeepCopy() *ProjectDatabaseObservation { + if in == nil { + return nil + } + out := new(ProjectDatabaseObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProjectDatabaseOpensearchPattern) DeepCopyInto(out *ProjectDatabaseOpensearchPattern) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseOpensearchPattern. +func (in *ProjectDatabaseOpensearchPattern) DeepCopy() *ProjectDatabaseOpensearchPattern { + if in == nil { + return nil + } + out := new(ProjectDatabaseOpensearchPattern) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProjectDatabaseOpensearchPattern) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabaseOpensearchPatternInitParameters) DeepCopyInto(out *ProjectDatabaseOpensearchPatternInitParameters) { + *out = *in + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.MaxIndexCount != nil { + in, out := &in.MaxIndexCount, &out.MaxIndexCount + *out = new(float64) + **out = **in + } + if in.Pattern != nil { + in, out := &in.Pattern, &out.Pattern + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseOpensearchPatternInitParameters. 
+func (in *ProjectDatabaseOpensearchPatternInitParameters) DeepCopy() *ProjectDatabaseOpensearchPatternInitParameters { + if in == nil { + return nil + } + out := new(ProjectDatabaseOpensearchPatternInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabaseOpensearchPatternList) DeepCopyInto(out *ProjectDatabaseOpensearchPatternList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ProjectDatabaseOpensearchPattern, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseOpensearchPatternList. +func (in *ProjectDatabaseOpensearchPatternList) DeepCopy() *ProjectDatabaseOpensearchPatternList { + if in == nil { + return nil + } + out := new(ProjectDatabaseOpensearchPatternList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProjectDatabaseOpensearchPatternList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProjectDatabaseOpensearchPatternObservation) DeepCopyInto(out *ProjectDatabaseOpensearchPatternObservation) { + *out = *in + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.MaxIndexCount != nil { + in, out := &in.MaxIndexCount, &out.MaxIndexCount + *out = new(float64) + **out = **in + } + if in.Pattern != nil { + in, out := &in.Pattern, &out.Pattern + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseOpensearchPatternObservation. +func (in *ProjectDatabaseOpensearchPatternObservation) DeepCopy() *ProjectDatabaseOpensearchPatternObservation { + if in == nil { + return nil + } + out := new(ProjectDatabaseOpensearchPatternObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabaseOpensearchPatternParameters) DeepCopyInto(out *ProjectDatabaseOpensearchPatternParameters) { + *out = *in + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.MaxIndexCount != nil { + in, out := &in.MaxIndexCount, &out.MaxIndexCount + *out = new(float64) + **out = **in + } + if in.Pattern != nil { + in, out := &in.Pattern, &out.Pattern + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseOpensearchPatternParameters. 
+func (in *ProjectDatabaseOpensearchPatternParameters) DeepCopy() *ProjectDatabaseOpensearchPatternParameters { + if in == nil { + return nil + } + out := new(ProjectDatabaseOpensearchPatternParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabaseOpensearchPatternSpec) DeepCopyInto(out *ProjectDatabaseOpensearchPatternSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseOpensearchPatternSpec. +func (in *ProjectDatabaseOpensearchPatternSpec) DeepCopy() *ProjectDatabaseOpensearchPatternSpec { + if in == nil { + return nil + } + out := new(ProjectDatabaseOpensearchPatternSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabaseOpensearchPatternStatus) DeepCopyInto(out *ProjectDatabaseOpensearchPatternStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseOpensearchPatternStatus. +func (in *ProjectDatabaseOpensearchPatternStatus) DeepCopy() *ProjectDatabaseOpensearchPatternStatus { + if in == nil { + return nil + } + out := new(ProjectDatabaseOpensearchPatternStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProjectDatabaseOpensearchUser) DeepCopyInto(out *ProjectDatabaseOpensearchUser) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseOpensearchUser. +func (in *ProjectDatabaseOpensearchUser) DeepCopy() *ProjectDatabaseOpensearchUser { + if in == nil { + return nil + } + out := new(ProjectDatabaseOpensearchUser) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProjectDatabaseOpensearchUser) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabaseOpensearchUserInitParameters) DeepCopyInto(out *ProjectDatabaseOpensearchUserInitParameters) { + *out = *in + if in.Acls != nil { + in, out := &in.Acls, &out.Acls + *out = make([]AclsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.PasswordReset != nil { + in, out := &in.PasswordReset, &out.PasswordReset + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseOpensearchUserInitParameters. 
+func (in *ProjectDatabaseOpensearchUserInitParameters) DeepCopy() *ProjectDatabaseOpensearchUserInitParameters { + if in == nil { + return nil + } + out := new(ProjectDatabaseOpensearchUserInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabaseOpensearchUserList) DeepCopyInto(out *ProjectDatabaseOpensearchUserList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ProjectDatabaseOpensearchUser, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseOpensearchUserList. +func (in *ProjectDatabaseOpensearchUserList) DeepCopy() *ProjectDatabaseOpensearchUserList { + if in == nil { + return nil + } + out := new(ProjectDatabaseOpensearchUserList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProjectDatabaseOpensearchUserList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProjectDatabaseOpensearchUserObservation) DeepCopyInto(out *ProjectDatabaseOpensearchUserObservation) { + *out = *in + if in.Acls != nil { + in, out := &in.Acls, &out.Acls + *out = make([]AclsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.PasswordReset != nil { + in, out := &in.PasswordReset, &out.PasswordReset + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseOpensearchUserObservation. +func (in *ProjectDatabaseOpensearchUserObservation) DeepCopy() *ProjectDatabaseOpensearchUserObservation { + if in == nil { + return nil + } + out := new(ProjectDatabaseOpensearchUserObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProjectDatabaseOpensearchUserParameters) DeepCopyInto(out *ProjectDatabaseOpensearchUserParameters) { + *out = *in + if in.Acls != nil { + in, out := &in.Acls, &out.Acls + *out = make([]AclsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.PasswordReset != nil { + in, out := &in.PasswordReset, &out.PasswordReset + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseOpensearchUserParameters. +func (in *ProjectDatabaseOpensearchUserParameters) DeepCopy() *ProjectDatabaseOpensearchUserParameters { + if in == nil { + return nil + } + out := new(ProjectDatabaseOpensearchUserParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabaseOpensearchUserSpec) DeepCopyInto(out *ProjectDatabaseOpensearchUserSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseOpensearchUserSpec. +func (in *ProjectDatabaseOpensearchUserSpec) DeepCopy() *ProjectDatabaseOpensearchUserSpec { + if in == nil { + return nil + } + out := new(ProjectDatabaseOpensearchUserSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProjectDatabaseOpensearchUserStatus) DeepCopyInto(out *ProjectDatabaseOpensearchUserStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseOpensearchUserStatus. +func (in *ProjectDatabaseOpensearchUserStatus) DeepCopy() *ProjectDatabaseOpensearchUserStatus { + if in == nil { + return nil + } + out := new(ProjectDatabaseOpensearchUserStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabaseParameters) DeepCopyInto(out *ProjectDatabaseParameters) { + *out = *in + if in.AdvancedConfiguration != nil { + in, out := &in.AdvancedConfiguration, &out.AdvancedConfiguration + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DiskSize != nil { + in, out := &in.DiskSize, &out.DiskSize + *out = new(float64) + **out = **in + } + if in.Engine != nil { + in, out := &in.Engine, &out.Engine + *out = new(string) + **out = **in + } + if in.Flavor != nil { + in, out := &in.Flavor, &out.Flavor + *out = new(string) + **out = **in + } + if in.KafkaRestAPI != nil { + in, out := &in.KafkaRestAPI, &out.KafkaRestAPI + *out = new(bool) + **out = **in + } + if in.Nodes != nil { + in, out := &in.Nodes, &out.Nodes + *out = make([]NodesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OpensearchAclsEnabled != nil { + in, out := &in.OpensearchAclsEnabled, &out.OpensearchAclsEnabled + *out = new(bool) + 
**out = **in + } + if in.Plan != nil { + in, out := &in.Plan, &out.Plan + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseParameters. +func (in *ProjectDatabaseParameters) DeepCopy() *ProjectDatabaseParameters { + if in == nil { + return nil + } + out := new(ProjectDatabaseParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabasePostgresqlUser) DeepCopyInto(out *ProjectDatabasePostgresqlUser) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabasePostgresqlUser. +func (in *ProjectDatabasePostgresqlUser) DeepCopy() *ProjectDatabasePostgresqlUser { + if in == nil { + return nil + } + out := new(ProjectDatabasePostgresqlUser) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProjectDatabasePostgresqlUser) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProjectDatabasePostgresqlUserInitParameters) DeepCopyInto(out *ProjectDatabasePostgresqlUserInitParameters) { + *out = *in + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.PasswordReset != nil { + in, out := &in.PasswordReset, &out.PasswordReset + *out = new(string) + **out = **in + } + if in.Roles != nil { + in, out := &in.Roles, &out.Roles + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabasePostgresqlUserInitParameters. +func (in *ProjectDatabasePostgresqlUserInitParameters) DeepCopy() *ProjectDatabasePostgresqlUserInitParameters { + if in == nil { + return nil + } + out := new(ProjectDatabasePostgresqlUserInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabasePostgresqlUserList) DeepCopyInto(out *ProjectDatabasePostgresqlUserList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ProjectDatabasePostgresqlUser, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabasePostgresqlUserList. 
+func (in *ProjectDatabasePostgresqlUserList) DeepCopy() *ProjectDatabasePostgresqlUserList { + if in == nil { + return nil + } + out := new(ProjectDatabasePostgresqlUserList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProjectDatabasePostgresqlUserList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabasePostgresqlUserObservation) DeepCopyInto(out *ProjectDatabasePostgresqlUserObservation) { + *out = *in + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.PasswordReset != nil { + in, out := &in.PasswordReset, &out.PasswordReset + *out = new(string) + **out = **in + } + if in.Roles != nil { + in, out := &in.Roles, &out.Roles + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabasePostgresqlUserObservation. 
+func (in *ProjectDatabasePostgresqlUserObservation) DeepCopy() *ProjectDatabasePostgresqlUserObservation { + if in == nil { + return nil + } + out := new(ProjectDatabasePostgresqlUserObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabasePostgresqlUserParameters) DeepCopyInto(out *ProjectDatabasePostgresqlUserParameters) { + *out = *in + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.PasswordReset != nil { + in, out := &in.PasswordReset, &out.PasswordReset + *out = new(string) + **out = **in + } + if in.Roles != nil { + in, out := &in.Roles, &out.Roles + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabasePostgresqlUserParameters. +func (in *ProjectDatabasePostgresqlUserParameters) DeepCopy() *ProjectDatabasePostgresqlUserParameters { + if in == nil { + return nil + } + out := new(ProjectDatabasePostgresqlUserParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabasePostgresqlUserSpec) DeepCopyInto(out *ProjectDatabasePostgresqlUserSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabasePostgresqlUserSpec. 
+func (in *ProjectDatabasePostgresqlUserSpec) DeepCopy() *ProjectDatabasePostgresqlUserSpec { + if in == nil { + return nil + } + out := new(ProjectDatabasePostgresqlUserSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabasePostgresqlUserStatus) DeepCopyInto(out *ProjectDatabasePostgresqlUserStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabasePostgresqlUserStatus. +func (in *ProjectDatabasePostgresqlUserStatus) DeepCopy() *ProjectDatabasePostgresqlUserStatus { + if in == nil { + return nil + } + out := new(ProjectDatabasePostgresqlUserStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabaseRedisUser) DeepCopyInto(out *ProjectDatabaseRedisUser) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseRedisUser. +func (in *ProjectDatabaseRedisUser) DeepCopy() *ProjectDatabaseRedisUser { + if in == nil { + return nil + } + out := new(ProjectDatabaseRedisUser) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProjectDatabaseRedisUser) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProjectDatabaseRedisUserInitParameters) DeepCopyInto(out *ProjectDatabaseRedisUserInitParameters) { + *out = *in + if in.Categories != nil { + in, out := &in.Categories, &out.Categories + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Channels != nil { + in, out := &in.Channels, &out.Channels + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.Commands != nil { + in, out := &in.Commands, &out.Commands + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Keys != nil { + in, out := &in.Keys, &out.Keys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PasswordReset != nil { + in, out := &in.PasswordReset, &out.PasswordReset + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseRedisUserInitParameters. +func (in *ProjectDatabaseRedisUserInitParameters) DeepCopy() *ProjectDatabaseRedisUserInitParameters { + if in == nil { + return nil + } + out := new(ProjectDatabaseRedisUserInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProjectDatabaseRedisUserList) DeepCopyInto(out *ProjectDatabaseRedisUserList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ProjectDatabaseRedisUser, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseRedisUserList. +func (in *ProjectDatabaseRedisUserList) DeepCopy() *ProjectDatabaseRedisUserList { + if in == nil { + return nil + } + out := new(ProjectDatabaseRedisUserList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProjectDatabaseRedisUserList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProjectDatabaseRedisUserObservation) DeepCopyInto(out *ProjectDatabaseRedisUserObservation) { + *out = *in + if in.Categories != nil { + in, out := &in.Categories, &out.Categories + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Channels != nil { + in, out := &in.Channels, &out.Channels + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.Commands != nil { + in, out := &in.Commands, &out.Commands + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Keys != nil { + in, out := &in.Keys, &out.Keys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PasswordReset != nil { + in, out := &in.PasswordReset, &out.PasswordReset + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseRedisUserObservation. 
+func (in *ProjectDatabaseRedisUserObservation) DeepCopy() *ProjectDatabaseRedisUserObservation { + if in == nil { + return nil + } + out := new(ProjectDatabaseRedisUserObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabaseRedisUserParameters) DeepCopyInto(out *ProjectDatabaseRedisUserParameters) { + *out = *in + if in.Categories != nil { + in, out := &in.Categories, &out.Categories + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Channels != nil { + in, out := &in.Channels, &out.Channels + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.Commands != nil { + in, out := &in.Commands, &out.Commands + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Keys != nil { + in, out := &in.Keys, &out.Keys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PasswordReset != nil { + in, out := &in.PasswordReset, &out.PasswordReset + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseRedisUserParameters. 
+func (in *ProjectDatabaseRedisUserParameters) DeepCopy() *ProjectDatabaseRedisUserParameters { + if in == nil { + return nil + } + out := new(ProjectDatabaseRedisUserParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabaseRedisUserSpec) DeepCopyInto(out *ProjectDatabaseRedisUserSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseRedisUserSpec. +func (in *ProjectDatabaseRedisUserSpec) DeepCopy() *ProjectDatabaseRedisUserSpec { + if in == nil { + return nil + } + out := new(ProjectDatabaseRedisUserSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabaseRedisUserStatus) DeepCopyInto(out *ProjectDatabaseRedisUserStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseRedisUserStatus. +func (in *ProjectDatabaseRedisUserStatus) DeepCopy() *ProjectDatabaseRedisUserStatus { + if in == nil { + return nil + } + out := new(ProjectDatabaseRedisUserStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProjectDatabaseSpec) DeepCopyInto(out *ProjectDatabaseSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseSpec. +func (in *ProjectDatabaseSpec) DeepCopy() *ProjectDatabaseSpec { + if in == nil { + return nil + } + out := new(ProjectDatabaseSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabaseStatus) DeepCopyInto(out *ProjectDatabaseStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseStatus. +func (in *ProjectDatabaseStatus) DeepCopy() *ProjectDatabaseStatus { + if in == nil { + return nil + } + out := new(ProjectDatabaseStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabaseUser) DeepCopyInto(out *ProjectDatabaseUser) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseUser. +func (in *ProjectDatabaseUser) DeepCopy() *ProjectDatabaseUser { + if in == nil { + return nil + } + out := new(ProjectDatabaseUser) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ProjectDatabaseUser) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabaseUserInitParameters) DeepCopyInto(out *ProjectDatabaseUserInitParameters) { + *out = *in + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.Engine != nil { + in, out := &in.Engine, &out.Engine + *out = new(string) + **out = **in + } + if in.PasswordReset != nil { + in, out := &in.PasswordReset, &out.PasswordReset + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseUserInitParameters. +func (in *ProjectDatabaseUserInitParameters) DeepCopy() *ProjectDatabaseUserInitParameters { + if in == nil { + return nil + } + out := new(ProjectDatabaseUserInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabaseUserList) DeepCopyInto(out *ProjectDatabaseUserList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ProjectDatabaseUser, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseUserList. 
+func (in *ProjectDatabaseUserList) DeepCopy() *ProjectDatabaseUserList { + if in == nil { + return nil + } + out := new(ProjectDatabaseUserList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProjectDatabaseUserList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabaseUserObservation) DeepCopyInto(out *ProjectDatabaseUserObservation) { + *out = *in + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.Engine != nil { + in, out := &in.Engine, &out.Engine + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.PasswordReset != nil { + in, out := &in.PasswordReset, &out.PasswordReset + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseUserObservation. +func (in *ProjectDatabaseUserObservation) DeepCopy() *ProjectDatabaseUserObservation { + if in == nil { + return nil + } + out := new(ProjectDatabaseUserObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProjectDatabaseUserParameters) DeepCopyInto(out *ProjectDatabaseUserParameters) { + *out = *in + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.Engine != nil { + in, out := &in.Engine, &out.Engine + *out = new(string) + **out = **in + } + if in.PasswordReset != nil { + in, out := &in.PasswordReset, &out.PasswordReset + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseUserParameters. +func (in *ProjectDatabaseUserParameters) DeepCopy() *ProjectDatabaseUserParameters { + if in == nil { + return nil + } + out := new(ProjectDatabaseUserParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabaseUserSpec) DeepCopyInto(out *ProjectDatabaseUserSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseUserSpec. +func (in *ProjectDatabaseUserSpec) DeepCopy() *ProjectDatabaseUserSpec { + if in == nil { + return nil + } + out := new(ProjectDatabaseUserSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectDatabaseUserStatus) DeepCopyInto(out *ProjectDatabaseUserStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectDatabaseUserStatus. 
+func (in *ProjectDatabaseUserStatus) DeepCopy() *ProjectDatabaseUserStatus { + if in == nil { + return nil + } + out := new(ProjectDatabaseUserStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/databases/v1alpha1/zz_generated.managed.go b/apis/databases/v1alpha1/zz_generated.managed.go new file mode 100644 index 0000000..38446c0 --- /dev/null +++ b/apis/databases/v1alpha1/zz_generated.managed.go @@ -0,0 +1,908 @@ +/* +Copyright 2022 Upbound Inc. +*/ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this ProjectDatabase. +func (mg *ProjectDatabase) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ProjectDatabase. +func (mg *ProjectDatabase) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ProjectDatabase. +func (mg *ProjectDatabase) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ProjectDatabase. +func (mg *ProjectDatabase) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ProjectDatabase. +func (mg *ProjectDatabase) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ProjectDatabase. +func (mg *ProjectDatabase) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ProjectDatabase. +func (mg *ProjectDatabase) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ProjectDatabase. 
+func (mg *ProjectDatabase) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ProjectDatabase. +func (mg *ProjectDatabase) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ProjectDatabase. +func (mg *ProjectDatabase) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ProjectDatabase. +func (mg *ProjectDatabase) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ProjectDatabase. +func (mg *ProjectDatabase) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this ProjectDatabaseDatabase. +func (mg *ProjectDatabaseDatabase) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ProjectDatabaseDatabase. +func (mg *ProjectDatabaseDatabase) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ProjectDatabaseDatabase. +func (mg *ProjectDatabaseDatabase) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ProjectDatabaseDatabase. +func (mg *ProjectDatabaseDatabase) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ProjectDatabaseDatabase. +func (mg *ProjectDatabaseDatabase) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ProjectDatabaseDatabase. 
+func (mg *ProjectDatabaseDatabase) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ProjectDatabaseDatabase. +func (mg *ProjectDatabaseDatabase) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ProjectDatabaseDatabase. +func (mg *ProjectDatabaseDatabase) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ProjectDatabaseDatabase. +func (mg *ProjectDatabaseDatabase) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ProjectDatabaseDatabase. +func (mg *ProjectDatabaseDatabase) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ProjectDatabaseDatabase. +func (mg *ProjectDatabaseDatabase) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ProjectDatabaseDatabase. +func (mg *ProjectDatabaseDatabase) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this ProjectDatabaseIPRestriction. +func (mg *ProjectDatabaseIPRestriction) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ProjectDatabaseIPRestriction. +func (mg *ProjectDatabaseIPRestriction) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ProjectDatabaseIPRestriction. +func (mg *ProjectDatabaseIPRestriction) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ProjectDatabaseIPRestriction. 
+func (mg *ProjectDatabaseIPRestriction) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ProjectDatabaseIPRestriction. +func (mg *ProjectDatabaseIPRestriction) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ProjectDatabaseIPRestriction. +func (mg *ProjectDatabaseIPRestriction) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ProjectDatabaseIPRestriction. +func (mg *ProjectDatabaseIPRestriction) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ProjectDatabaseIPRestriction. +func (mg *ProjectDatabaseIPRestriction) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ProjectDatabaseIPRestriction. +func (mg *ProjectDatabaseIPRestriction) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ProjectDatabaseIPRestriction. +func (mg *ProjectDatabaseIPRestriction) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ProjectDatabaseIPRestriction. +func (mg *ProjectDatabaseIPRestriction) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ProjectDatabaseIPRestriction. +func (mg *ProjectDatabaseIPRestriction) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this ProjectDatabaseIntegration. 
+func (mg *ProjectDatabaseIntegration) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ProjectDatabaseIntegration. +func (mg *ProjectDatabaseIntegration) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ProjectDatabaseIntegration. +func (mg *ProjectDatabaseIntegration) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ProjectDatabaseIntegration. +func (mg *ProjectDatabaseIntegration) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ProjectDatabaseIntegration. +func (mg *ProjectDatabaseIntegration) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ProjectDatabaseIntegration. +func (mg *ProjectDatabaseIntegration) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ProjectDatabaseIntegration. +func (mg *ProjectDatabaseIntegration) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ProjectDatabaseIntegration. +func (mg *ProjectDatabaseIntegration) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ProjectDatabaseIntegration. +func (mg *ProjectDatabaseIntegration) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ProjectDatabaseIntegration. +func (mg *ProjectDatabaseIntegration) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ProjectDatabaseIntegration. 
+func (mg *ProjectDatabaseIntegration) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ProjectDatabaseIntegration. +func (mg *ProjectDatabaseIntegration) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this ProjectDatabaseKafkaACL. +func (mg *ProjectDatabaseKafkaACL) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ProjectDatabaseKafkaACL. +func (mg *ProjectDatabaseKafkaACL) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ProjectDatabaseKafkaACL. +func (mg *ProjectDatabaseKafkaACL) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ProjectDatabaseKafkaACL. +func (mg *ProjectDatabaseKafkaACL) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ProjectDatabaseKafkaACL. +func (mg *ProjectDatabaseKafkaACL) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ProjectDatabaseKafkaACL. +func (mg *ProjectDatabaseKafkaACL) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ProjectDatabaseKafkaACL. +func (mg *ProjectDatabaseKafkaACL) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ProjectDatabaseKafkaACL. +func (mg *ProjectDatabaseKafkaACL) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ProjectDatabaseKafkaACL. 
+func (mg *ProjectDatabaseKafkaACL) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ProjectDatabaseKafkaACL. +func (mg *ProjectDatabaseKafkaACL) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ProjectDatabaseKafkaACL. +func (mg *ProjectDatabaseKafkaACL) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ProjectDatabaseKafkaACL. +func (mg *ProjectDatabaseKafkaACL) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this ProjectDatabaseKafkaSchemaregistryacl. +func (mg *ProjectDatabaseKafkaSchemaregistryacl) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ProjectDatabaseKafkaSchemaregistryacl. +func (mg *ProjectDatabaseKafkaSchemaregistryacl) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ProjectDatabaseKafkaSchemaregistryacl. +func (mg *ProjectDatabaseKafkaSchemaregistryacl) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ProjectDatabaseKafkaSchemaregistryacl. +func (mg *ProjectDatabaseKafkaSchemaregistryacl) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ProjectDatabaseKafkaSchemaregistryacl. +func (mg *ProjectDatabaseKafkaSchemaregistryacl) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ProjectDatabaseKafkaSchemaregistryacl. 
+func (mg *ProjectDatabaseKafkaSchemaregistryacl) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ProjectDatabaseKafkaSchemaregistryacl. +func (mg *ProjectDatabaseKafkaSchemaregistryacl) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ProjectDatabaseKafkaSchemaregistryacl. +func (mg *ProjectDatabaseKafkaSchemaregistryacl) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ProjectDatabaseKafkaSchemaregistryacl. +func (mg *ProjectDatabaseKafkaSchemaregistryacl) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ProjectDatabaseKafkaSchemaregistryacl. +func (mg *ProjectDatabaseKafkaSchemaregistryacl) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ProjectDatabaseKafkaSchemaregistryacl. +func (mg *ProjectDatabaseKafkaSchemaregistryacl) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ProjectDatabaseKafkaSchemaregistryacl. +func (mg *ProjectDatabaseKafkaSchemaregistryacl) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this ProjectDatabaseKafkaTopic. +func (mg *ProjectDatabaseKafkaTopic) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ProjectDatabaseKafkaTopic. +func (mg *ProjectDatabaseKafkaTopic) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ProjectDatabaseKafkaTopic. 
+func (mg *ProjectDatabaseKafkaTopic) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ProjectDatabaseKafkaTopic. +func (mg *ProjectDatabaseKafkaTopic) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ProjectDatabaseKafkaTopic. +func (mg *ProjectDatabaseKafkaTopic) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ProjectDatabaseKafkaTopic. +func (mg *ProjectDatabaseKafkaTopic) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ProjectDatabaseKafkaTopic. +func (mg *ProjectDatabaseKafkaTopic) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ProjectDatabaseKafkaTopic. +func (mg *ProjectDatabaseKafkaTopic) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ProjectDatabaseKafkaTopic. +func (mg *ProjectDatabaseKafkaTopic) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ProjectDatabaseKafkaTopic. +func (mg *ProjectDatabaseKafkaTopic) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ProjectDatabaseKafkaTopic. +func (mg *ProjectDatabaseKafkaTopic) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ProjectDatabaseKafkaTopic. 
+func (mg *ProjectDatabaseKafkaTopic) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this ProjectDatabaseM3DbNamespace. +func (mg *ProjectDatabaseM3DbNamespace) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ProjectDatabaseM3DbNamespace. +func (mg *ProjectDatabaseM3DbNamespace) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ProjectDatabaseM3DbNamespace. +func (mg *ProjectDatabaseM3DbNamespace) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ProjectDatabaseM3DbNamespace. +func (mg *ProjectDatabaseM3DbNamespace) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ProjectDatabaseM3DbNamespace. +func (mg *ProjectDatabaseM3DbNamespace) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ProjectDatabaseM3DbNamespace. +func (mg *ProjectDatabaseM3DbNamespace) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ProjectDatabaseM3DbNamespace. +func (mg *ProjectDatabaseM3DbNamespace) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ProjectDatabaseM3DbNamespace. +func (mg *ProjectDatabaseM3DbNamespace) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ProjectDatabaseM3DbNamespace. +func (mg *ProjectDatabaseM3DbNamespace) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ProjectDatabaseM3DbNamespace. 
+func (mg *ProjectDatabaseM3DbNamespace) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ProjectDatabaseM3DbNamespace. +func (mg *ProjectDatabaseM3DbNamespace) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ProjectDatabaseM3DbNamespace. +func (mg *ProjectDatabaseM3DbNamespace) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this ProjectDatabaseM3DbUser. +func (mg *ProjectDatabaseM3DbUser) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ProjectDatabaseM3DbUser. +func (mg *ProjectDatabaseM3DbUser) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ProjectDatabaseM3DbUser. +func (mg *ProjectDatabaseM3DbUser) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ProjectDatabaseM3DbUser. +func (mg *ProjectDatabaseM3DbUser) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ProjectDatabaseM3DbUser. +func (mg *ProjectDatabaseM3DbUser) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ProjectDatabaseM3DbUser. +func (mg *ProjectDatabaseM3DbUser) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ProjectDatabaseM3DbUser. +func (mg *ProjectDatabaseM3DbUser) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ProjectDatabaseM3DbUser. 
+func (mg *ProjectDatabaseM3DbUser) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ProjectDatabaseM3DbUser. +func (mg *ProjectDatabaseM3DbUser) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ProjectDatabaseM3DbUser. +func (mg *ProjectDatabaseM3DbUser) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ProjectDatabaseM3DbUser. +func (mg *ProjectDatabaseM3DbUser) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ProjectDatabaseM3DbUser. +func (mg *ProjectDatabaseM3DbUser) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this ProjectDatabaseMongodbUser. +func (mg *ProjectDatabaseMongodbUser) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ProjectDatabaseMongodbUser. +func (mg *ProjectDatabaseMongodbUser) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ProjectDatabaseMongodbUser. +func (mg *ProjectDatabaseMongodbUser) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ProjectDatabaseMongodbUser. +func (mg *ProjectDatabaseMongodbUser) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ProjectDatabaseMongodbUser. +func (mg *ProjectDatabaseMongodbUser) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ProjectDatabaseMongodbUser. 
+func (mg *ProjectDatabaseMongodbUser) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ProjectDatabaseMongodbUser. +func (mg *ProjectDatabaseMongodbUser) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ProjectDatabaseMongodbUser. +func (mg *ProjectDatabaseMongodbUser) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ProjectDatabaseMongodbUser. +func (mg *ProjectDatabaseMongodbUser) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ProjectDatabaseMongodbUser. +func (mg *ProjectDatabaseMongodbUser) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ProjectDatabaseMongodbUser. +func (mg *ProjectDatabaseMongodbUser) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ProjectDatabaseMongodbUser. +func (mg *ProjectDatabaseMongodbUser) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this ProjectDatabaseOpensearchPattern. +func (mg *ProjectDatabaseOpensearchPattern) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ProjectDatabaseOpensearchPattern. +func (mg *ProjectDatabaseOpensearchPattern) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ProjectDatabaseOpensearchPattern. +func (mg *ProjectDatabaseOpensearchPattern) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ProjectDatabaseOpensearchPattern. 
+func (mg *ProjectDatabaseOpensearchPattern) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ProjectDatabaseOpensearchPattern. +func (mg *ProjectDatabaseOpensearchPattern) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ProjectDatabaseOpensearchPattern. +func (mg *ProjectDatabaseOpensearchPattern) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ProjectDatabaseOpensearchPattern. +func (mg *ProjectDatabaseOpensearchPattern) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ProjectDatabaseOpensearchPattern. +func (mg *ProjectDatabaseOpensearchPattern) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ProjectDatabaseOpensearchPattern. +func (mg *ProjectDatabaseOpensearchPattern) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ProjectDatabaseOpensearchPattern. +func (mg *ProjectDatabaseOpensearchPattern) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ProjectDatabaseOpensearchPattern. +func (mg *ProjectDatabaseOpensearchPattern) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ProjectDatabaseOpensearchPattern. +func (mg *ProjectDatabaseOpensearchPattern) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this ProjectDatabaseOpensearchUser. 
+func (mg *ProjectDatabaseOpensearchUser) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ProjectDatabaseOpensearchUser. +func (mg *ProjectDatabaseOpensearchUser) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ProjectDatabaseOpensearchUser. +func (mg *ProjectDatabaseOpensearchUser) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ProjectDatabaseOpensearchUser. +func (mg *ProjectDatabaseOpensearchUser) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ProjectDatabaseOpensearchUser. +func (mg *ProjectDatabaseOpensearchUser) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ProjectDatabaseOpensearchUser. +func (mg *ProjectDatabaseOpensearchUser) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ProjectDatabaseOpensearchUser. +func (mg *ProjectDatabaseOpensearchUser) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ProjectDatabaseOpensearchUser. +func (mg *ProjectDatabaseOpensearchUser) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ProjectDatabaseOpensearchUser. +func (mg *ProjectDatabaseOpensearchUser) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ProjectDatabaseOpensearchUser. 
+func (mg *ProjectDatabaseOpensearchUser) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ProjectDatabaseOpensearchUser. +func (mg *ProjectDatabaseOpensearchUser) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ProjectDatabaseOpensearchUser. +func (mg *ProjectDatabaseOpensearchUser) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this ProjectDatabasePostgresqlUser. +func (mg *ProjectDatabasePostgresqlUser) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ProjectDatabasePostgresqlUser. +func (mg *ProjectDatabasePostgresqlUser) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ProjectDatabasePostgresqlUser. +func (mg *ProjectDatabasePostgresqlUser) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ProjectDatabasePostgresqlUser. +func (mg *ProjectDatabasePostgresqlUser) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ProjectDatabasePostgresqlUser. +func (mg *ProjectDatabasePostgresqlUser) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ProjectDatabasePostgresqlUser. +func (mg *ProjectDatabasePostgresqlUser) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ProjectDatabasePostgresqlUser. 
+func (mg *ProjectDatabasePostgresqlUser) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ProjectDatabasePostgresqlUser. +func (mg *ProjectDatabasePostgresqlUser) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ProjectDatabasePostgresqlUser. +func (mg *ProjectDatabasePostgresqlUser) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ProjectDatabasePostgresqlUser. +func (mg *ProjectDatabasePostgresqlUser) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ProjectDatabasePostgresqlUser. +func (mg *ProjectDatabasePostgresqlUser) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ProjectDatabasePostgresqlUser. +func (mg *ProjectDatabasePostgresqlUser) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this ProjectDatabaseRedisUser. +func (mg *ProjectDatabaseRedisUser) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ProjectDatabaseRedisUser. +func (mg *ProjectDatabaseRedisUser) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ProjectDatabaseRedisUser. +func (mg *ProjectDatabaseRedisUser) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ProjectDatabaseRedisUser. +func (mg *ProjectDatabaseRedisUser) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ProjectDatabaseRedisUser. 
+func (mg *ProjectDatabaseRedisUser) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ProjectDatabaseRedisUser. +func (mg *ProjectDatabaseRedisUser) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ProjectDatabaseRedisUser. +func (mg *ProjectDatabaseRedisUser) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ProjectDatabaseRedisUser. +func (mg *ProjectDatabaseRedisUser) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ProjectDatabaseRedisUser. +func (mg *ProjectDatabaseRedisUser) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ProjectDatabaseRedisUser. +func (mg *ProjectDatabaseRedisUser) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ProjectDatabaseRedisUser. +func (mg *ProjectDatabaseRedisUser) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ProjectDatabaseRedisUser. +func (mg *ProjectDatabaseRedisUser) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this ProjectDatabaseUser. +func (mg *ProjectDatabaseUser) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ProjectDatabaseUser. +func (mg *ProjectDatabaseUser) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ProjectDatabaseUser. 
+func (mg *ProjectDatabaseUser) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ProjectDatabaseUser. +func (mg *ProjectDatabaseUser) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ProjectDatabaseUser. +func (mg *ProjectDatabaseUser) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ProjectDatabaseUser. +func (mg *ProjectDatabaseUser) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ProjectDatabaseUser. +func (mg *ProjectDatabaseUser) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ProjectDatabaseUser. +func (mg *ProjectDatabaseUser) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ProjectDatabaseUser. +func (mg *ProjectDatabaseUser) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ProjectDatabaseUser. +func (mg *ProjectDatabaseUser) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ProjectDatabaseUser. +func (mg *ProjectDatabaseUser) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ProjectDatabaseUser. 
+func (mg *ProjectDatabaseUser) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/databases/v1alpha1/zz_generated.managedlist.go b/apis/databases/v1alpha1/zz_generated.managedlist.go new file mode 100644 index 0000000..79c7a03 --- /dev/null +++ b/apis/databases/v1alpha1/zz_generated.managedlist.go @@ -0,0 +1,143 @@ +/* +Copyright 2022 Upbound Inc. +*/ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this ProjectDatabaseDatabaseList. +func (l *ProjectDatabaseDatabaseList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ProjectDatabaseIPRestrictionList. +func (l *ProjectDatabaseIPRestrictionList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ProjectDatabaseIntegrationList. +func (l *ProjectDatabaseIntegrationList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ProjectDatabaseKafkaACLList. +func (l *ProjectDatabaseKafkaACLList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ProjectDatabaseKafkaSchemaregistryaclList. +func (l *ProjectDatabaseKafkaSchemaregistryaclList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ProjectDatabaseKafkaTopicList. 
+func (l *ProjectDatabaseKafkaTopicList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ProjectDatabaseList. +func (l *ProjectDatabaseList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ProjectDatabaseM3DbNamespaceList. +func (l *ProjectDatabaseM3DbNamespaceList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ProjectDatabaseM3DbUserList. +func (l *ProjectDatabaseM3DbUserList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ProjectDatabaseMongodbUserList. +func (l *ProjectDatabaseMongodbUserList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ProjectDatabaseOpensearchPatternList. +func (l *ProjectDatabaseOpensearchPatternList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ProjectDatabaseOpensearchUserList. +func (l *ProjectDatabaseOpensearchUserList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ProjectDatabasePostgresqlUserList. 
+func (l *ProjectDatabasePostgresqlUserList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ProjectDatabaseRedisUserList. +func (l *ProjectDatabaseRedisUserList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ProjectDatabaseUserList. +func (l *ProjectDatabaseUserList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/databases/v1alpha1/zz_generated_terraformed.go b/apis/databases/v1alpha1/zz_generated_terraformed.go new file mode 100755 index 0000000..21f8a81 --- /dev/null +++ b/apis/databases/v1alpha1/zz_generated_terraformed.go @@ -0,0 +1,1278 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ProjectDatabase +func (mg *ProjectDatabase) GetTerraformResourceType() string { + return "ovh_cloud_project_database" +} + +// GetConnectionDetailsMapping for this ProjectDatabase +func (tr *ProjectDatabase) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ProjectDatabase +func (tr *ProjectDatabase) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ProjectDatabase +func (tr *ProjectDatabase) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ProjectDatabase +func (tr *ProjectDatabase) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ProjectDatabase +func (tr *ProjectDatabase) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ProjectDatabase +func (tr *ProjectDatabase) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ProjectDatabase +func (tr *ProjectDatabase) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != 
nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// LateInitialize this ProjectDatabase using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *ProjectDatabase) LateInitialize(attrs []byte) (bool, error) { + params := &ProjectDatabaseParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ProjectDatabase) GetTerraformSchemaVersion() int { + return 0 +} + +// GetTerraformResourceType returns Terraform resource type for this ProjectDatabaseDatabase +func (mg *ProjectDatabaseDatabase) GetTerraformResourceType() string { + return "ovh_cloud_project_database_database" +} + +// GetConnectionDetailsMapping for this ProjectDatabaseDatabase +func (tr *ProjectDatabaseDatabase) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ProjectDatabaseDatabase +func (tr *ProjectDatabaseDatabase) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ProjectDatabaseDatabase +func (tr *ProjectDatabaseDatabase) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ProjectDatabaseDatabase +func (tr 
*ProjectDatabaseDatabase) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ProjectDatabaseDatabase +func (tr *ProjectDatabaseDatabase) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ProjectDatabaseDatabase +func (tr *ProjectDatabaseDatabase) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ProjectDatabaseDatabase +func (tr *ProjectDatabaseDatabase) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// LateInitialize this ProjectDatabaseDatabase using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *ProjectDatabaseDatabase) LateInitialize(attrs []byte) (bool, error) { + params := &ProjectDatabaseDatabaseParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ProjectDatabaseDatabase) GetTerraformSchemaVersion() int { + return 0 +} + +// GetTerraformResourceType returns Terraform resource type for this ProjectDatabaseIntegration +func (mg *ProjectDatabaseIntegration) GetTerraformResourceType() string { + return "ovh_cloud_project_database_integration" +} + +// GetConnectionDetailsMapping for this ProjectDatabaseIntegration +func (tr *ProjectDatabaseIntegration) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ProjectDatabaseIntegration +func (tr *ProjectDatabaseIntegration) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ProjectDatabaseIntegration +func (tr *ProjectDatabaseIntegration) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ProjectDatabaseIntegration +func (tr *ProjectDatabaseIntegration) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ProjectDatabaseIntegration +func (tr *ProjectDatabaseIntegration) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ProjectDatabaseIntegration +func (tr *ProjectDatabaseIntegration) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return 
json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ProjectDatabaseIntegration +func (tr *ProjectDatabaseIntegration) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// LateInitialize this ProjectDatabaseIntegration using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *ProjectDatabaseIntegration) LateInitialize(attrs []byte) (bool, error) { + params := &ProjectDatabaseIntegrationParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ProjectDatabaseIntegration) GetTerraformSchemaVersion() int { + return 0 +} + +// GetTerraformResourceType returns Terraform resource type for this ProjectDatabaseIPRestriction +func (mg *ProjectDatabaseIPRestriction) GetTerraformResourceType() string { + return "ovh_cloud_project_database_ip_restriction" +} + +// GetConnectionDetailsMapping for this ProjectDatabaseIPRestriction +func (tr *ProjectDatabaseIPRestriction) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ProjectDatabaseIPRestriction +func (tr *ProjectDatabaseIPRestriction) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this 
ProjectDatabaseIPRestriction +func (tr *ProjectDatabaseIPRestriction) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ProjectDatabaseIPRestriction +func (tr *ProjectDatabaseIPRestriction) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ProjectDatabaseIPRestriction +func (tr *ProjectDatabaseIPRestriction) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ProjectDatabaseIPRestriction +func (tr *ProjectDatabaseIPRestriction) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ProjectDatabaseIPRestriction +func (tr *ProjectDatabaseIPRestriction) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// LateInitialize this ProjectDatabaseIPRestriction using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *ProjectDatabaseIPRestriction) LateInitialize(attrs []byte) (bool, error) { + params := &ProjectDatabaseIPRestrictionParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ProjectDatabaseIPRestriction) GetTerraformSchemaVersion() int { + return 0 +} + +// GetTerraformResourceType returns Terraform resource type for this ProjectDatabaseKafkaACL +func (mg *ProjectDatabaseKafkaACL) GetTerraformResourceType() string { + return "ovh_cloud_project_database_kafka_acl" +} + +// GetConnectionDetailsMapping for this ProjectDatabaseKafkaACL +func (tr *ProjectDatabaseKafkaACL) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ProjectDatabaseKafkaACL +func (tr *ProjectDatabaseKafkaACL) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ProjectDatabaseKafkaACL +func (tr *ProjectDatabaseKafkaACL) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ProjectDatabaseKafkaACL +func (tr *ProjectDatabaseKafkaACL) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ProjectDatabaseKafkaACL +func (tr *ProjectDatabaseKafkaACL) 
GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ProjectDatabaseKafkaACL +func (tr *ProjectDatabaseKafkaACL) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ProjectDatabaseKafkaACL +func (tr *ProjectDatabaseKafkaACL) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// LateInitialize this ProjectDatabaseKafkaACL using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *ProjectDatabaseKafkaACL) LateInitialize(attrs []byte) (bool, error) { + params := &ProjectDatabaseKafkaACLParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ProjectDatabaseKafkaACL) GetTerraformSchemaVersion() int { + return 0 +} + +// GetTerraformResourceType returns Terraform resource type for this ProjectDatabaseKafkaSchemaregistryacl +func (mg *ProjectDatabaseKafkaSchemaregistryacl) GetTerraformResourceType() string { + return "ovh_cloud_project_database_kafka_schemaregistryacl" +} + +// GetConnectionDetailsMapping for this ProjectDatabaseKafkaSchemaregistryacl +func (tr *ProjectDatabaseKafkaSchemaregistryacl) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ProjectDatabaseKafkaSchemaregistryacl +func (tr *ProjectDatabaseKafkaSchemaregistryacl) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ProjectDatabaseKafkaSchemaregistryacl +func (tr *ProjectDatabaseKafkaSchemaregistryacl) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ProjectDatabaseKafkaSchemaregistryacl +func (tr *ProjectDatabaseKafkaSchemaregistryacl) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ProjectDatabaseKafkaSchemaregistryacl +func (tr *ProjectDatabaseKafkaSchemaregistryacl) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ProjectDatabaseKafkaSchemaregistryacl +func (tr 
*ProjectDatabaseKafkaSchemaregistryacl) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ProjectDatabaseKafkaSchemaregistryacl +func (tr *ProjectDatabaseKafkaSchemaregistryacl) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// LateInitialize this ProjectDatabaseKafkaSchemaregistryacl using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *ProjectDatabaseKafkaSchemaregistryacl) LateInitialize(attrs []byte) (bool, error) { + params := &ProjectDatabaseKafkaSchemaregistryaclParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ProjectDatabaseKafkaSchemaregistryacl) GetTerraformSchemaVersion() int { + return 0 +} + +// GetTerraformResourceType returns Terraform resource type for this ProjectDatabaseKafkaTopic +func (mg *ProjectDatabaseKafkaTopic) GetTerraformResourceType() string { + return "ovh_cloud_project_database_kafka_topic" +} + +// GetConnectionDetailsMapping for this ProjectDatabaseKafkaTopic +func (tr *ProjectDatabaseKafkaTopic) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ProjectDatabaseKafkaTopic +func (tr *ProjectDatabaseKafkaTopic) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ProjectDatabaseKafkaTopic +func (tr *ProjectDatabaseKafkaTopic) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ProjectDatabaseKafkaTopic +func (tr *ProjectDatabaseKafkaTopic) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ProjectDatabaseKafkaTopic +func (tr *ProjectDatabaseKafkaTopic) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ProjectDatabaseKafkaTopic +func (tr *ProjectDatabaseKafkaTopic) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return 
json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ProjectDatabaseKafkaTopic +func (tr *ProjectDatabaseKafkaTopic) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// LateInitialize this ProjectDatabaseKafkaTopic using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *ProjectDatabaseKafkaTopic) LateInitialize(attrs []byte) (bool, error) { + params := &ProjectDatabaseKafkaTopicParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ProjectDatabaseKafkaTopic) GetTerraformSchemaVersion() int { + return 0 +} + +// GetTerraformResourceType returns Terraform resource type for this ProjectDatabaseM3DbNamespace +func (mg *ProjectDatabaseM3DbNamespace) GetTerraformResourceType() string { + return "ovh_cloud_project_database_m3db_namespace" +} + +// GetConnectionDetailsMapping for this ProjectDatabaseM3DbNamespace +func (tr *ProjectDatabaseM3DbNamespace) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ProjectDatabaseM3DbNamespace +func (tr *ProjectDatabaseM3DbNamespace) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this 
ProjectDatabaseM3DbNamespace +func (tr *ProjectDatabaseM3DbNamespace) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ProjectDatabaseM3DbNamespace +func (tr *ProjectDatabaseM3DbNamespace) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ProjectDatabaseM3DbNamespace +func (tr *ProjectDatabaseM3DbNamespace) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ProjectDatabaseM3DbNamespace +func (tr *ProjectDatabaseM3DbNamespace) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ProjectDatabaseM3DbNamespace +func (tr *ProjectDatabaseM3DbNamespace) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// LateInitialize this ProjectDatabaseM3DbNamespace using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *ProjectDatabaseM3DbNamespace) LateInitialize(attrs []byte) (bool, error) { + params := &ProjectDatabaseM3DbNamespaceParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ProjectDatabaseM3DbNamespace) GetTerraformSchemaVersion() int { + return 0 +} + +// GetTerraformResourceType returns Terraform resource type for this ProjectDatabaseM3DbUser +func (mg *ProjectDatabaseM3DbUser) GetTerraformResourceType() string { + return "ovh_cloud_project_database_m3db_user" +} + +// GetConnectionDetailsMapping for this ProjectDatabaseM3DbUser +func (tr *ProjectDatabaseM3DbUser) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"password": "status.atProvider.password"} +} + +// GetObservation of this ProjectDatabaseM3DbUser +func (tr *ProjectDatabaseM3DbUser) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ProjectDatabaseM3DbUser +func (tr *ProjectDatabaseM3DbUser) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ProjectDatabaseM3DbUser +func (tr *ProjectDatabaseM3DbUser) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this 
ProjectDatabaseM3DbUser +func (tr *ProjectDatabaseM3DbUser) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ProjectDatabaseM3DbUser +func (tr *ProjectDatabaseM3DbUser) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ProjectDatabaseM3DbUser +func (tr *ProjectDatabaseM3DbUser) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// LateInitialize this ProjectDatabaseM3DbUser using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *ProjectDatabaseM3DbUser) LateInitialize(attrs []byte) (bool, error) { + params := &ProjectDatabaseM3DbUserParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ProjectDatabaseM3DbUser) GetTerraformSchemaVersion() int { + return 0 +} + +// GetTerraformResourceType returns Terraform resource type for this ProjectDatabaseMongodbUser +func (mg *ProjectDatabaseMongodbUser) GetTerraformResourceType() string { + return "ovh_cloud_project_database_mongodb_user" +} + +// GetConnectionDetailsMapping for this ProjectDatabaseMongodbUser +func (tr *ProjectDatabaseMongodbUser) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"password": "status.atProvider.password"} +} + +// GetObservation of this ProjectDatabaseMongodbUser +func (tr *ProjectDatabaseMongodbUser) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ProjectDatabaseMongodbUser +func (tr *ProjectDatabaseMongodbUser) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ProjectDatabaseMongodbUser +func (tr *ProjectDatabaseMongodbUser) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ProjectDatabaseMongodbUser +func (tr *ProjectDatabaseMongodbUser) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ProjectDatabaseMongodbUser +func (tr *ProjectDatabaseMongodbUser) SetParameters(params map[string]any) error { + p, err := 
json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ProjectDatabaseMongodbUser +func (tr *ProjectDatabaseMongodbUser) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// LateInitialize this ProjectDatabaseMongodbUser using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *ProjectDatabaseMongodbUser) LateInitialize(attrs []byte) (bool, error) { + params := &ProjectDatabaseMongodbUserParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ProjectDatabaseMongodbUser) GetTerraformSchemaVersion() int { + return 0 +} + +// GetTerraformResourceType returns Terraform resource type for this ProjectDatabaseOpensearchPattern +func (mg *ProjectDatabaseOpensearchPattern) GetTerraformResourceType() string { + return "ovh_cloud_project_database_opensearch_pattern" +} + +// GetConnectionDetailsMapping for this ProjectDatabaseOpensearchPattern +func (tr *ProjectDatabaseOpensearchPattern) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ProjectDatabaseOpensearchPattern +func (tr *ProjectDatabaseOpensearchPattern) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ProjectDatabaseOpensearchPattern +func (tr *ProjectDatabaseOpensearchPattern) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ProjectDatabaseOpensearchPattern +func (tr *ProjectDatabaseOpensearchPattern) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ProjectDatabaseOpensearchPattern +func (tr *ProjectDatabaseOpensearchPattern) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ProjectDatabaseOpensearchPattern +func (tr *ProjectDatabaseOpensearchPattern) SetParameters(params map[string]any) error { + 
p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ProjectDatabaseOpensearchPattern +func (tr *ProjectDatabaseOpensearchPattern) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// LateInitialize this ProjectDatabaseOpensearchPattern using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *ProjectDatabaseOpensearchPattern) LateInitialize(attrs []byte) (bool, error) { + params := &ProjectDatabaseOpensearchPatternParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ProjectDatabaseOpensearchPattern) GetTerraformSchemaVersion() int { + return 0 +} + +// GetTerraformResourceType returns Terraform resource type for this ProjectDatabaseOpensearchUser +func (mg *ProjectDatabaseOpensearchUser) GetTerraformResourceType() string { + return "ovh_cloud_project_database_opensearch_user" +} + +// GetConnectionDetailsMapping for this ProjectDatabaseOpensearchUser +func (tr *ProjectDatabaseOpensearchUser) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"password": "status.atProvider.password"} +} + +// GetObservation of this ProjectDatabaseOpensearchUser +func (tr *ProjectDatabaseOpensearchUser) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ProjectDatabaseOpensearchUser +func (tr *ProjectDatabaseOpensearchUser) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ProjectDatabaseOpensearchUser +func (tr *ProjectDatabaseOpensearchUser) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ProjectDatabaseOpensearchUser +func (tr *ProjectDatabaseOpensearchUser) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ProjectDatabaseOpensearchUser +func (tr *ProjectDatabaseOpensearchUser) SetParameters(params 
map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ProjectDatabaseOpensearchUser +func (tr *ProjectDatabaseOpensearchUser) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// LateInitialize this ProjectDatabaseOpensearchUser using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *ProjectDatabaseOpensearchUser) LateInitialize(attrs []byte) (bool, error) { + params := &ProjectDatabaseOpensearchUserParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ProjectDatabaseOpensearchUser) GetTerraformSchemaVersion() int { + return 0 +} + +// GetTerraformResourceType returns Terraform resource type for this ProjectDatabasePostgresqlUser +func (mg *ProjectDatabasePostgresqlUser) GetTerraformResourceType() string { + return "ovh_cloud_project_database_postgresql_user" +} + +// GetConnectionDetailsMapping for this ProjectDatabasePostgresqlUser +func (tr *ProjectDatabasePostgresqlUser) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"password": "status.atProvider.password"} +} + +// GetObservation of this ProjectDatabasePostgresqlUser +func (tr *ProjectDatabasePostgresqlUser) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ProjectDatabasePostgresqlUser +func (tr *ProjectDatabasePostgresqlUser) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ProjectDatabasePostgresqlUser +func (tr *ProjectDatabasePostgresqlUser) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ProjectDatabasePostgresqlUser +func (tr *ProjectDatabasePostgresqlUser) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ProjectDatabasePostgresqlUser +func (tr *ProjectDatabasePostgresqlUser) SetParameters(params 
map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ProjectDatabasePostgresqlUser +func (tr *ProjectDatabasePostgresqlUser) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// LateInitialize this ProjectDatabasePostgresqlUser using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *ProjectDatabasePostgresqlUser) LateInitialize(attrs []byte) (bool, error) { + params := &ProjectDatabasePostgresqlUserParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ProjectDatabasePostgresqlUser) GetTerraformSchemaVersion() int { + return 0 +} + +// GetTerraformResourceType returns Terraform resource type for this ProjectDatabaseRedisUser +func (mg *ProjectDatabaseRedisUser) GetTerraformResourceType() string { + return "ovh_cloud_project_database_redis_user" +} + +// GetConnectionDetailsMapping for this ProjectDatabaseRedisUser +func (tr *ProjectDatabaseRedisUser) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"password": "status.atProvider.password"} +} + +// GetObservation of this ProjectDatabaseRedisUser +func (tr *ProjectDatabaseRedisUser) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ProjectDatabaseRedisUser +func (tr *ProjectDatabaseRedisUser) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ProjectDatabaseRedisUser +func (tr *ProjectDatabaseRedisUser) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ProjectDatabaseRedisUser +func (tr *ProjectDatabaseRedisUser) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ProjectDatabaseRedisUser +func (tr *ProjectDatabaseRedisUser) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + 
return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ProjectDatabaseRedisUser +func (tr *ProjectDatabaseRedisUser) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// LateInitialize this ProjectDatabaseRedisUser using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *ProjectDatabaseRedisUser) LateInitialize(attrs []byte) (bool, error) { + params := &ProjectDatabaseRedisUserParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ProjectDatabaseRedisUser) GetTerraformSchemaVersion() int { + return 0 +} + +// GetTerraformResourceType returns Terraform resource type for this ProjectDatabaseUser +func (mg *ProjectDatabaseUser) GetTerraformResourceType() string { + return "ovh_cloud_project_database_user" +} + +// GetConnectionDetailsMapping for this ProjectDatabaseUser +func (tr *ProjectDatabaseUser) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"password": "status.atProvider.password"} +} + +// GetObservation of this ProjectDatabaseUser +func (tr *ProjectDatabaseUser) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ProjectDatabaseUser +func (tr *ProjectDatabaseUser) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ProjectDatabaseUser +func (tr *ProjectDatabaseUser) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ProjectDatabaseUser +func (tr *ProjectDatabaseUser) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ProjectDatabaseUser +func (tr *ProjectDatabaseUser) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// 
GetInitParameters of this ProjectDatabaseUser +func (tr *ProjectDatabaseUser) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// LateInitialize this ProjectDatabaseUser using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *ProjectDatabaseUser) LateInitialize(attrs []byte) (bool, error) { + params := &ProjectDatabaseUserParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ProjectDatabaseUser) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/databases/v1alpha1/zz_groupversion_info.go b/apis/databases/v1alpha1/zz_groupversion_info.go new file mode 100755 index 0000000..9d77223 --- /dev/null +++ b/apis/databases/v1alpha1/zz_groupversion_info.go @@ -0,0 +1,36 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=databases.ovh.edixos.io +// +versionName=v1alpha1 +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
+const ( + CRDGroup = "databases.ovh.edixos.io" + CRDVersion = "v1alpha1" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/databases/v1alpha1/zz_projectdatabase_types.go b/apis/databases/v1alpha1/zz_projectdatabase_types.go new file mode 100755 index 0000000..829cdf6 --- /dev/null +++ b/apis/databases/v1alpha1/zz_projectdatabase_types.go @@ -0,0 +1,286 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type EndpointsInitParameters struct { +} + +type EndpointsObservation struct { + Component *string `json:"component,omitempty" tf:"component,omitempty"` + + Domain *string `json:"domain,omitempty" tf:"domain,omitempty"` + + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + SSL *bool `json:"ssl,omitempty" tf:"ssl,omitempty"` + + SSLMode *string `json:"sslMode,omitempty" tf:"ssl_mode,omitempty"` + + Scheme *string `json:"scheme,omitempty" tf:"scheme,omitempty"` + + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type EndpointsParameters struct { +} + +type NodesInitParameters struct { + + // Private network ID in which the node is. It's the regional openstackId of the private network. 
+ NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` + + // Region of the node + Region *string `json:"region,omitempty" tf:"region,omitempty"` + + // Private subnet ID in which the node is + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` +} + +type NodesObservation struct { + + // Private network ID in which the node is. It's the regional openstackId of the private network. + NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` + + // Region of the node + Region *string `json:"region,omitempty" tf:"region,omitempty"` + + // Private subnet ID in which the node is + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` +} + +type NodesParameters struct { + + // Private network ID in which the node is. It's the regional openstackId of the private network. + // +kubebuilder:validation:Optional + NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` + + // Region of the node + // +kubebuilder:validation:Optional + Region *string `json:"region" tf:"region,omitempty"` + + // Private subnet ID in which the node is + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` +} + +type ProjectDatabaseInitParameters struct { + + // Advanced configuration key / value + AdvancedConfiguration map[string]*string `json:"advancedConfiguration,omitempty" tf:"advanced_configuration,omitempty"` + + // Description of the cluster + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Disk size attributes of the cluster + DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"` + + // Name of the engine of the service + Engine *string `json:"engine,omitempty" tf:"engine,omitempty"` + + // The node flavor used for this cluster + Flavor *string `json:"flavor,omitempty" tf:"flavor,omitempty"` + + // Defines whether the REST API is enabled on a Kafka cluster + KafkaRestAPI *bool 
`json:"kafkaRestApi,omitempty" tf:"kafka_rest_api,omitempty"` + + // List of nodes composing the service + Nodes []NodesInitParameters `json:"nodes,omitempty" tf:"nodes,omitempty"` + + // Defines whether the ACLs are enabled on an Opensearch cluster + OpensearchAclsEnabled *bool `json:"opensearchAclsEnabled,omitempty" tf:"opensearch_acls_enabled,omitempty"` + + // Plan of the cluster + Plan *string `json:"plan,omitempty" tf:"plan,omitempty"` + + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // Version of the engine deployed on the cluster + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type ProjectDatabaseObservation struct { + + // Advanced configuration key / value + AdvancedConfiguration map[string]*string `json:"advancedConfiguration,omitempty" tf:"advanced_configuration,omitempty"` + + // Time on which backups start every day + BackupTime *string `json:"backupTime,omitempty" tf:"backup_time,omitempty"` + + // Date of the creation of the cluster + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + + // Description of the cluster + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Disk size attributes of the cluster + DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"` + + // Disk type attributes of the cluster + DiskType *string `json:"diskType,omitempty" tf:"disk_type,omitempty"` + + // List of all endpoints of the service + Endpoints []EndpointsObservation `json:"endpoints,omitempty" tf:"endpoints,omitempty"` + + // Name of the engine of the service + Engine *string `json:"engine,omitempty" tf:"engine,omitempty"` + + // The node flavor used for this cluster + Flavor *string `json:"flavor,omitempty" tf:"flavor,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Defines whether the REST API is enabled on a Kafka cluster + KafkaRestAPI *bool `json:"kafkaRestApi,omitempty" tf:"kafka_rest_api,omitempty"` 
+ + // Time on which maintenances can start every day + MaintenanceTime *string `json:"maintenanceTime,omitempty" tf:"maintenance_time,omitempty"` + + // Type of network of the cluster + NetworkType *string `json:"networkType,omitempty" tf:"network_type,omitempty"` + + // List of nodes composing the service + Nodes []NodesObservation `json:"nodes,omitempty" tf:"nodes,omitempty"` + + // Defines whether the ACLs are enabled on an Opensearch cluster + OpensearchAclsEnabled *bool `json:"opensearchAclsEnabled,omitempty" tf:"opensearch_acls_enabled,omitempty"` + + // Plan of the cluster + Plan *string `json:"plan,omitempty" tf:"plan,omitempty"` + + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // Current status of the cluster + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // Version of the engine deployed on the cluster + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type ProjectDatabaseParameters struct { + + // Advanced configuration key / value + // +kubebuilder:validation:Optional + AdvancedConfiguration map[string]*string `json:"advancedConfiguration,omitempty" tf:"advanced_configuration,omitempty"` + + // Description of the cluster + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Disk size attributes of the cluster + // +kubebuilder:validation:Optional + DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"` + + // Name of the engine of the service + // +kubebuilder:validation:Optional + Engine *string `json:"engine,omitempty" tf:"engine,omitempty"` + + // The node flavor used for this cluster + // +kubebuilder:validation:Optional + Flavor *string `json:"flavor,omitempty" tf:"flavor,omitempty"` + + // Defines whether the REST API is enabled on a Kafka cluster + // +kubebuilder:validation:Optional + KafkaRestAPI *bool `json:"kafkaRestApi,omitempty" tf:"kafka_rest_api,omitempty"` + + // List 
of nodes composing the service + // +kubebuilder:validation:Optional + Nodes []NodesParameters `json:"nodes,omitempty" tf:"nodes,omitempty"` + + // Defines whether the ACLs are enabled on an Opensearch cluster + // +kubebuilder:validation:Optional + OpensearchAclsEnabled *bool `json:"opensearchAclsEnabled,omitempty" tf:"opensearch_acls_enabled,omitempty"` + + // Plan of the cluster + // +kubebuilder:validation:Optional + Plan *string `json:"plan,omitempty" tf:"plan,omitempty"` + + // +kubebuilder:validation:Optional + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // Version of the engine deployed on the cluster + // +kubebuilder:validation:Optional + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +// ProjectDatabaseSpec defines the desired state of ProjectDatabase +type ProjectDatabaseSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ProjectDatabaseParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ProjectDatabaseInitParameters `json:"initProvider,omitempty"` +} + +// ProjectDatabaseStatus defines the observed state of ProjectDatabase. 
+type ProjectDatabaseStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ProjectDatabaseObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true + +// ProjectDatabase is the Schema for the ProjectDatabases API. +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,ovh} +type ProjectDatabase struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.engine) || (has(self.initProvider) && has(self.initProvider.engine))",message="spec.forProvider.engine is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.flavor) || (has(self.initProvider) && has(self.initProvider.flavor))",message="spec.forProvider.flavor is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.nodes) || (has(self.initProvider) && has(self.initProvider.nodes))",message="spec.forProvider.nodes is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in 
self.managementPolicies) || has(self.forProvider.plan) || (has(self.initProvider) && has(self.initProvider.plan))",message="spec.forProvider.plan is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.serviceName) || (has(self.initProvider) && has(self.initProvider.serviceName))",message="spec.forProvider.serviceName is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.version) || (has(self.initProvider) && has(self.initProvider.version))",message="spec.forProvider.version is a required parameter" + Spec ProjectDatabaseSpec `json:"spec"` + Status ProjectDatabaseStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ProjectDatabaseList contains a list of ProjectDatabases +type ProjectDatabaseList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ProjectDatabase `json:"items"` +} + +// Repository type metadata. +var ( + ProjectDatabase_Kind = "ProjectDatabase" + ProjectDatabase_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ProjectDatabase_Kind}.String() + ProjectDatabase_KindAPIVersion = ProjectDatabase_Kind + "." 
+ CRDGroupVersion.String() + ProjectDatabase_GroupVersionKind = CRDGroupVersion.WithKind(ProjectDatabase_Kind) +) + +func init() { + SchemeBuilder.Register(&ProjectDatabase{}, &ProjectDatabaseList{}) +} diff --git a/apis/databases/v1alpha1/zz_projectdatabasedatabase_types.go b/apis/databases/v1alpha1/zz_projectdatabasedatabase_types.go new file mode 100755 index 0000000..357b68a --- /dev/null +++ b/apis/databases/v1alpha1/zz_projectdatabasedatabase_types.go @@ -0,0 +1,122 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ProjectDatabaseDatabaseInitParameters struct { + + // Id of the database cluster + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + + // Name of the engine of the service + Engine *string `json:"engine,omitempty" tf:"engine,omitempty"` + + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` +} + +type ProjectDatabaseDatabaseObservation struct { + + // Id of the database cluster + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + + // Defines if the database has been created by default + Default *bool `json:"default,omitempty" tf:"default,omitempty"` + + // Name of the engine of the service + Engine *string `json:"engine,omitempty" tf:"engine,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` +} + +type ProjectDatabaseDatabaseParameters struct { + + // Id of the database cluster + // +kubebuilder:validation:Optional + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + + // Name of the engine of the service + // 
+kubebuilder:validation:Optional + Engine *string `json:"engine,omitempty" tf:"engine,omitempty"` + + // +kubebuilder:validation:Optional + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` +} + +// ProjectDatabaseDatabaseSpec defines the desired state of ProjectDatabaseDatabase +type ProjectDatabaseDatabaseSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ProjectDatabaseDatabaseParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ProjectDatabaseDatabaseInitParameters `json:"initProvider,omitempty"` +} + +// ProjectDatabaseDatabaseStatus defines the observed state of ProjectDatabaseDatabase. +type ProjectDatabaseDatabaseStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ProjectDatabaseDatabaseObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true + +// ProjectDatabaseDatabase is the Schema for the ProjectDatabaseDatabases API. 
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,ovh} +type ProjectDatabaseDatabase struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.clusterId) || (has(self.initProvider) && has(self.initProvider.clusterId))",message="spec.forProvider.clusterId is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.engine) || (has(self.initProvider) && has(self.initProvider.engine))",message="spec.forProvider.engine is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.serviceName) || (has(self.initProvider) && has(self.initProvider.serviceName))",message="spec.forProvider.serviceName is a required parameter" + Spec ProjectDatabaseDatabaseSpec `json:"spec"` + Status ProjectDatabaseDatabaseStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ProjectDatabaseDatabaseList contains a list of ProjectDatabaseDatabases +type ProjectDatabaseDatabaseList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items 
[]ProjectDatabaseDatabase `json:"items"` +} + +// Repository type metadata. +var ( + ProjectDatabaseDatabase_Kind = "ProjectDatabaseDatabase" + ProjectDatabaseDatabase_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ProjectDatabaseDatabase_Kind}.String() + ProjectDatabaseDatabase_KindAPIVersion = ProjectDatabaseDatabase_Kind + "." + CRDGroupVersion.String() + ProjectDatabaseDatabase_GroupVersionKind = CRDGroupVersion.WithKind(ProjectDatabaseDatabase_Kind) +) + +func init() { + SchemeBuilder.Register(&ProjectDatabaseDatabase{}, &ProjectDatabaseDatabaseList{}) +} diff --git a/apis/databases/v1alpha1/zz_projectdatabaseintegration_types.go b/apis/databases/v1alpha1/zz_projectdatabaseintegration_types.go new file mode 100755 index 0000000..f7ed4b4 --- /dev/null +++ b/apis/databases/v1alpha1/zz_projectdatabaseintegration_types.go @@ -0,0 +1,164 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ProjectDatabaseIntegrationInitParameters struct { + + // Id of the database cluster + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + + // ID of the destination service + DestinationServiceID *string `json:"destinationServiceId,omitempty" tf:"destination_service_id,omitempty"` + + // Name of the engine of the service + Engine *string `json:"engine,omitempty" tf:"engine,omitempty"` + + // Parameters for the integration + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // ID of the source service + SourceServiceID *string `json:"sourceServiceId,omitempty" tf:"source_service_id,omitempty"` + + // Type of the integration + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ProjectDatabaseIntegrationObservation struct { + + // Id of the database cluster + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + + // ID of the destination service + DestinationServiceID *string `json:"destinationServiceId,omitempty" tf:"destination_service_id,omitempty"` + + // Name of the engine of the service + Engine *string `json:"engine,omitempty" tf:"engine,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Parameters for the integration + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // ID of the source service + SourceServiceID *string `json:"sourceServiceId,omitempty" tf:"source_service_id,omitempty"` + + // Current status of the integration + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // Type of the integration + Type *string 
`json:"type,omitempty" tf:"type,omitempty"` +} + +type ProjectDatabaseIntegrationParameters struct { + + // Id of the database cluster + // +kubebuilder:validation:Optional + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + + // ID of the destination service + // +kubebuilder:validation:Optional + DestinationServiceID *string `json:"destinationServiceId,omitempty" tf:"destination_service_id,omitempty"` + + // Name of the engine of the service + // +kubebuilder:validation:Optional + Engine *string `json:"engine,omitempty" tf:"engine,omitempty"` + + // Parameters for the integration + // +kubebuilder:validation:Optional + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // +kubebuilder:validation:Optional + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // ID of the source service + // +kubebuilder:validation:Optional + SourceServiceID *string `json:"sourceServiceId,omitempty" tf:"source_service_id,omitempty"` + + // Type of the integration + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +// ProjectDatabaseIntegrationSpec defines the desired state of ProjectDatabaseIntegration +type ProjectDatabaseIntegrationSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ProjectDatabaseIntegrationParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ProjectDatabaseIntegrationInitParameters `json:"initProvider,omitempty"` +} + +// ProjectDatabaseIntegrationStatus defines the observed state of ProjectDatabaseIntegration. +type ProjectDatabaseIntegrationStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ProjectDatabaseIntegrationObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true + +// ProjectDatabaseIntegration is the Schema for the ProjectDatabaseIntegrations API. +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,ovh} +type ProjectDatabaseIntegration struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.clusterId) || (has(self.initProvider) && has(self.initProvider.clusterId))",message="spec.forProvider.clusterId is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.destinationServiceId) || (has(self.initProvider) && 
has(self.initProvider.destinationServiceId))",message="spec.forProvider.destinationServiceId is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.engine) || (has(self.initProvider) && has(self.initProvider.engine))",message="spec.forProvider.engine is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.serviceName) || (has(self.initProvider) && has(self.initProvider.serviceName))",message="spec.forProvider.serviceName is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.sourceServiceId) || (has(self.initProvider) && has(self.initProvider.sourceServiceId))",message="spec.forProvider.sourceServiceId is a required parameter" + Spec ProjectDatabaseIntegrationSpec `json:"spec"` + Status ProjectDatabaseIntegrationStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ProjectDatabaseIntegrationList contains a list of ProjectDatabaseIntegrations +type ProjectDatabaseIntegrationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ProjectDatabaseIntegration `json:"items"` +} + +// Repository type metadata. +var ( + ProjectDatabaseIntegration_Kind = "ProjectDatabaseIntegration" + ProjectDatabaseIntegration_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ProjectDatabaseIntegration_Kind}.String() + ProjectDatabaseIntegration_KindAPIVersion = ProjectDatabaseIntegration_Kind + "." 
+ CRDGroupVersion.String() + ProjectDatabaseIntegration_GroupVersionKind = CRDGroupVersion.WithKind(ProjectDatabaseIntegration_Kind) +) + +func init() { + SchemeBuilder.Register(&ProjectDatabaseIntegration{}, &ProjectDatabaseIntegrationList{}) +} diff --git a/apis/databases/v1alpha1/zz_projectdatabaseiprestriction_types.go b/apis/databases/v1alpha1/zz_projectdatabaseiprestriction_types.go new file mode 100755 index 0000000..b4b6549 --- /dev/null +++ b/apis/databases/v1alpha1/zz_projectdatabaseiprestriction_types.go @@ -0,0 +1,143 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ProjectDatabaseIPRestrictionInitParameters struct { + + // Id of the database cluster + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + + // Description of the IP restriction + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Name of the engine of the service + Engine *string `json:"engine,omitempty" tf:"engine,omitempty"` + + // Authorized IP + IP *string `json:"ip,omitempty" tf:"ip,omitempty"` + + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` +} + +type ProjectDatabaseIPRestrictionObservation struct { + + // Id of the database cluster + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + + // Description of the IP restriction + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Name of the engine of the service + Engine *string `json:"engine,omitempty" tf:"engine,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Authorized IP + IP *string `json:"ip,omitempty" tf:"ip,omitempty"` + + 
ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // Current status of the IP restriction + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type ProjectDatabaseIPRestrictionParameters struct { + + // Id of the database cluster + // +kubebuilder:validation:Optional + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + + // Description of the IP restriction + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Name of the engine of the service + // +kubebuilder:validation:Optional + Engine *string `json:"engine,omitempty" tf:"engine,omitempty"` + + // Authorized IP + // +kubebuilder:validation:Optional + IP *string `json:"ip,omitempty" tf:"ip,omitempty"` + + // +kubebuilder:validation:Optional + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` +} + +// ProjectDatabaseIPRestrictionSpec defines the desired state of ProjectDatabaseIPRestriction +type ProjectDatabaseIPRestrictionSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ProjectDatabaseIPRestrictionParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider ProjectDatabaseIPRestrictionInitParameters `json:"initProvider,omitempty"` +} + +// ProjectDatabaseIPRestrictionStatus defines the observed state of ProjectDatabaseIPRestriction. +type ProjectDatabaseIPRestrictionStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ProjectDatabaseIPRestrictionObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true + +// ProjectDatabaseIPRestriction is the Schema for the ProjectDatabaseIPRestrictions API. +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,ovh} +type ProjectDatabaseIPRestriction struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.clusterId) || (has(self.initProvider) && has(self.initProvider.clusterId))",message="spec.forProvider.clusterId is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.engine) || (has(self.initProvider) && has(self.initProvider.engine))",message="spec.forProvider.engine is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.ip) 
|| (has(self.initProvider) && has(self.initProvider.ip))",message="spec.forProvider.ip is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.serviceName) || (has(self.initProvider) && has(self.initProvider.serviceName))",message="spec.forProvider.serviceName is a required parameter" + Spec ProjectDatabaseIPRestrictionSpec `json:"spec"` + Status ProjectDatabaseIPRestrictionStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ProjectDatabaseIPRestrictionList contains a list of ProjectDatabaseIPRestrictions +type ProjectDatabaseIPRestrictionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ProjectDatabaseIPRestriction `json:"items"` +} + +// Repository type metadata. +var ( + ProjectDatabaseIPRestriction_Kind = "ProjectDatabaseIPRestriction" + ProjectDatabaseIPRestriction_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ProjectDatabaseIPRestriction_Kind}.String() + ProjectDatabaseIPRestriction_KindAPIVersion = ProjectDatabaseIPRestriction_Kind + "." + CRDGroupVersion.String() + ProjectDatabaseIPRestriction_GroupVersionKind = CRDGroupVersion.WithKind(ProjectDatabaseIPRestriction_Kind) +) + +func init() { + SchemeBuilder.Register(&ProjectDatabaseIPRestriction{}, &ProjectDatabaseIPRestrictionList{}) +} diff --git a/apis/databases/v1alpha1/zz_projectdatabasekafkaacl_types.go b/apis/databases/v1alpha1/zz_projectdatabasekafkaacl_types.go new file mode 100755 index 0000000..cb9231f --- /dev/null +++ b/apis/databases/v1alpha1/zz_projectdatabasekafkaacl_types.go @@ -0,0 +1,141 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ProjectDatabaseKafkaACLInitParameters struct { + + // Id of the database cluster + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + + // Permission to give to this username on this topic + Permission *string `json:"permission,omitempty" tf:"permission,omitempty"` + + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // Topic affected by this acl + Topic *string `json:"topic,omitempty" tf:"topic,omitempty"` + + // Username affected by this acl + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type ProjectDatabaseKafkaACLObservation struct { + + // Id of the database cluster + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Permission to give to this username on this topic + Permission *string `json:"permission,omitempty" tf:"permission,omitempty"` + + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // Topic affected by this acl + Topic *string `json:"topic,omitempty" tf:"topic,omitempty"` + + // Username affected by this acl + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type ProjectDatabaseKafkaACLParameters struct { + + // Id of the database cluster + // +kubebuilder:validation:Optional + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + + // Permission to give to this username on this topic + // +kubebuilder:validation:Optional + Permission *string `json:"permission,omitempty" tf:"permission,omitempty"` + + // +kubebuilder:validation:Optional + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // Topic affected by this acl + // +kubebuilder:validation:Optional + Topic *string 
`json:"topic,omitempty" tf:"topic,omitempty"` + + // Username affected by this acl + // +kubebuilder:validation:Optional + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +// ProjectDatabaseKafkaACLSpec defines the desired state of ProjectDatabaseKafkaACL +type ProjectDatabaseKafkaACLSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ProjectDatabaseKafkaACLParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ProjectDatabaseKafkaACLInitParameters `json:"initProvider,omitempty"` +} + +// ProjectDatabaseKafkaACLStatus defines the observed state of ProjectDatabaseKafkaACL. +type ProjectDatabaseKafkaACLStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ProjectDatabaseKafkaACLObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true + +// ProjectDatabaseKafkaACL is the Schema for the ProjectDatabaseKafkaACLs API. 
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,ovh} +type ProjectDatabaseKafkaACL struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.clusterId) || (has(self.initProvider) && has(self.initProvider.clusterId))",message="spec.forProvider.clusterId is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.permission) || (has(self.initProvider) && has(self.initProvider.permission))",message="spec.forProvider.permission is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.serviceName) || (has(self.initProvider) && has(self.initProvider.serviceName))",message="spec.forProvider.serviceName is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.topic) || (has(self.initProvider) && has(self.initProvider.topic))",message="spec.forProvider.topic is a required parameter" + // 
+kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.username) || (has(self.initProvider) && has(self.initProvider.username))",message="spec.forProvider.username is a required parameter" + Spec ProjectDatabaseKafkaACLSpec `json:"spec"` + Status ProjectDatabaseKafkaACLStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ProjectDatabaseKafkaACLList contains a list of ProjectDatabaseKafkaACLs +type ProjectDatabaseKafkaACLList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ProjectDatabaseKafkaACL `json:"items"` +} + +// Repository type metadata. +var ( + ProjectDatabaseKafkaACL_Kind = "ProjectDatabaseKafkaACL" + ProjectDatabaseKafkaACL_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ProjectDatabaseKafkaACL_Kind}.String() + ProjectDatabaseKafkaACL_KindAPIVersion = ProjectDatabaseKafkaACL_Kind + "." + CRDGroupVersion.String() + ProjectDatabaseKafkaACL_GroupVersionKind = CRDGroupVersion.WithKind(ProjectDatabaseKafkaACL_Kind) +) + +func init() { + SchemeBuilder.Register(&ProjectDatabaseKafkaACL{}, &ProjectDatabaseKafkaACLList{}) +} diff --git a/apis/databases/v1alpha1/zz_projectdatabasekafkaschemaregistryacl_types.go b/apis/databases/v1alpha1/zz_projectdatabasekafkaschemaregistryacl_types.go new file mode 100755 index 0000000..b4b4130 --- /dev/null +++ b/apis/databases/v1alpha1/zz_projectdatabasekafkaschemaregistryacl_types.go @@ -0,0 +1,141 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ProjectDatabaseKafkaSchemaregistryaclInitParameters struct { + + // Id of the database cluster + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + + // Permission to give to this username on this resource + Permission *string `json:"permission,omitempty" tf:"permission,omitempty"` + + // Resource affected by this acl + Resource *string `json:"resource,omitempty" tf:"resource,omitempty"` + + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // Username affected by this acl + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type ProjectDatabaseKafkaSchemaregistryaclObservation struct { + + // Id of the database cluster + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Permission to give to this username on this resource + Permission *string `json:"permission,omitempty" tf:"permission,omitempty"` + + // Resource affected by this acl + Resource *string `json:"resource,omitempty" tf:"resource,omitempty"` + + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // Username affected by this acl + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type ProjectDatabaseKafkaSchemaregistryaclParameters struct { + + // Id of the database cluster + // +kubebuilder:validation:Optional + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + + // Permission to give to this username on this resource + // +kubebuilder:validation:Optional + Permission *string `json:"permission,omitempty" tf:"permission,omitempty"` + + // Resource affected by this acl + // +kubebuilder:validation:Optional + Resource *string `json:"resource,omitempty" 
tf:"resource,omitempty"` + + // +kubebuilder:validation:Optional + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // Username affected by this acl + // +kubebuilder:validation:Optional + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +// ProjectDatabaseKafkaSchemaregistryaclSpec defines the desired state of ProjectDatabaseKafkaSchemaregistryacl +type ProjectDatabaseKafkaSchemaregistryaclSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ProjectDatabaseKafkaSchemaregistryaclParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ProjectDatabaseKafkaSchemaregistryaclInitParameters `json:"initProvider,omitempty"` +} + +// ProjectDatabaseKafkaSchemaregistryaclStatus defines the observed state of ProjectDatabaseKafkaSchemaregistryacl. +type ProjectDatabaseKafkaSchemaregistryaclStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ProjectDatabaseKafkaSchemaregistryaclObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true + +// ProjectDatabaseKafkaSchemaregistryacl is the Schema for the ProjectDatabaseKafkaSchemaregistryacls API. 
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,ovh} +type ProjectDatabaseKafkaSchemaregistryacl struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.clusterId) || (has(self.initProvider) && has(self.initProvider.clusterId))",message="spec.forProvider.clusterId is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.permission) || (has(self.initProvider) && has(self.initProvider.permission))",message="spec.forProvider.permission is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.resource) || (has(self.initProvider) && has(self.initProvider.resource))",message="spec.forProvider.resource is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.serviceName) || (has(self.initProvider) && has(self.initProvider.serviceName))",message="spec.forProvider.serviceName is a required parameter" + // 
+kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.username) || (has(self.initProvider) && has(self.initProvider.username))",message="spec.forProvider.username is a required parameter" + Spec ProjectDatabaseKafkaSchemaregistryaclSpec `json:"spec"` + Status ProjectDatabaseKafkaSchemaregistryaclStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ProjectDatabaseKafkaSchemaregistryaclList contains a list of ProjectDatabaseKafkaSchemaregistryacls +type ProjectDatabaseKafkaSchemaregistryaclList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ProjectDatabaseKafkaSchemaregistryacl `json:"items"` +} + +// Repository type metadata. +var ( + ProjectDatabaseKafkaSchemaregistryacl_Kind = "ProjectDatabaseKafkaSchemaregistryacl" + ProjectDatabaseKafkaSchemaregistryacl_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ProjectDatabaseKafkaSchemaregistryacl_Kind}.String() + ProjectDatabaseKafkaSchemaregistryacl_KindAPIVersion = ProjectDatabaseKafkaSchemaregistryacl_Kind + "." + CRDGroupVersion.String() + ProjectDatabaseKafkaSchemaregistryacl_GroupVersionKind = CRDGroupVersion.WithKind(ProjectDatabaseKafkaSchemaregistryacl_Kind) +) + +func init() { + SchemeBuilder.Register(&ProjectDatabaseKafkaSchemaregistryacl{}, &ProjectDatabaseKafkaSchemaregistryaclList{}) +} diff --git a/apis/databases/v1alpha1/zz_projectdatabasekafkatopic_types.go b/apis/databases/v1alpha1/zz_projectdatabasekafkatopic_types.go new file mode 100755 index 0000000..c8efc6b --- /dev/null +++ b/apis/databases/v1alpha1/zz_projectdatabasekafkatopic_types.go @@ -0,0 +1,158 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ProjectDatabaseKafkaTopicInitParameters struct { + + // Id of the database cluster + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + + // Minimum insync replica accepted for this topic + MinInsyncReplicas *float64 `json:"minInsyncReplicas,omitempty" tf:"min_insync_replicas,omitempty"` + + // Number of partitions for this topic + Partitions *float64 `json:"partitions,omitempty" tf:"partitions,omitempty"` + + // Number of replication for this topic + Replication *float64 `json:"replication,omitempty" tf:"replication,omitempty"` + + // Number of bytes for the retention of the data for this topic + RetentionBytes *float64 `json:"retentionBytes,omitempty" tf:"retention_bytes,omitempty"` + + // Number of hours for the retention of the data for this topic + RetentionHours *float64 `json:"retentionHours,omitempty" tf:"retention_hours,omitempty"` + + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` +} + +type ProjectDatabaseKafkaTopicObservation struct { + + // Id of the database cluster + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Minimum insync replica accepted for this topic + MinInsyncReplicas *float64 `json:"minInsyncReplicas,omitempty" tf:"min_insync_replicas,omitempty"` + + // Number of partitions for this topic + Partitions *float64 `json:"partitions,omitempty" tf:"partitions,omitempty"` + + // Number of replication for this topic + Replication *float64 `json:"replication,omitempty" tf:"replication,omitempty"` + + // Number of bytes for the retention of the data for this topic + RetentionBytes *float64 `json:"retentionBytes,omitempty" tf:"retention_bytes,omitempty"` + + // Number of hours for the retention of the data for 
this topic + RetentionHours *float64 `json:"retentionHours,omitempty" tf:"retention_hours,omitempty"` + + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` +} + +type ProjectDatabaseKafkaTopicParameters struct { + + // Id of the database cluster + // +kubebuilder:validation:Optional + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + + // Minimum insync replica accepted for this topic + // +kubebuilder:validation:Optional + MinInsyncReplicas *float64 `json:"minInsyncReplicas,omitempty" tf:"min_insync_replicas,omitempty"` + + // Number of partitions for this topic + // +kubebuilder:validation:Optional + Partitions *float64 `json:"partitions,omitempty" tf:"partitions,omitempty"` + + // Number of replication for this topic + // +kubebuilder:validation:Optional + Replication *float64 `json:"replication,omitempty" tf:"replication,omitempty"` + + // Number of bytes for the retention of the data for this topic + // +kubebuilder:validation:Optional + RetentionBytes *float64 `json:"retentionBytes,omitempty" tf:"retention_bytes,omitempty"` + + // Number of hours for the retention of the data for this topic + // +kubebuilder:validation:Optional + RetentionHours *float64 `json:"retentionHours,omitempty" tf:"retention_hours,omitempty"` + + // +kubebuilder:validation:Optional + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` +} + +// ProjectDatabaseKafkaTopicSpec defines the desired state of ProjectDatabaseKafkaTopic +type ProjectDatabaseKafkaTopicSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ProjectDatabaseKafkaTopicParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. 
The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ProjectDatabaseKafkaTopicInitParameters `json:"initProvider,omitempty"` +} + +// ProjectDatabaseKafkaTopicStatus defines the observed state of ProjectDatabaseKafkaTopic. +type ProjectDatabaseKafkaTopicStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ProjectDatabaseKafkaTopicObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true + +// ProjectDatabaseKafkaTopic is the Schema for the ProjectDatabaseKafkaTopics API. +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,ovh} +type ProjectDatabaseKafkaTopic struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.clusterId) || (has(self.initProvider) && has(self.initProvider.clusterId))",message="spec.forProvider.clusterId is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in 
self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.serviceName) || (has(self.initProvider) && has(self.initProvider.serviceName))",message="spec.forProvider.serviceName is a required parameter" + Spec ProjectDatabaseKafkaTopicSpec `json:"spec"` + Status ProjectDatabaseKafkaTopicStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ProjectDatabaseKafkaTopicList contains a list of ProjectDatabaseKafkaTopics +type ProjectDatabaseKafkaTopicList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ProjectDatabaseKafkaTopic `json:"items"` +} + +// Repository type metadata. +var ( + ProjectDatabaseKafkaTopic_Kind = "ProjectDatabaseKafkaTopic" + ProjectDatabaseKafkaTopic_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ProjectDatabaseKafkaTopic_Kind}.String() + ProjectDatabaseKafkaTopic_KindAPIVersion = ProjectDatabaseKafkaTopic_Kind + "." + CRDGroupVersion.String() + ProjectDatabaseKafkaTopic_GroupVersionKind = CRDGroupVersion.WithKind(ProjectDatabaseKafkaTopic_Kind) +) + +func init() { + SchemeBuilder.Register(&ProjectDatabaseKafkaTopic{}, &ProjectDatabaseKafkaTopicList{}) +} diff --git a/apis/databases/v1alpha1/zz_projectdatabasem3dbnamespace_types.go b/apis/databases/v1alpha1/zz_projectdatabasem3dbnamespace_types.go new file mode 100755 index 0000000..8a26e70 --- /dev/null +++ b/apis/databases/v1alpha1/zz_projectdatabasem3dbnamespace_types.go @@ -0,0 +1,193 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ProjectDatabaseM3DbNamespaceInitParameters struct { + + // Id of the database cluster + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + + // Resolution for an aggregated namespace + Resolution *string `json:"resolution,omitempty" tf:"resolution,omitempty"` + + // Controls how long we wait before expiring stale data + RetentionBlockDataExpirationDuration *string `json:"retentionBlockDataExpirationDuration,omitempty" tf:"retention_block_data_expiration_duration,omitempty"` + + // Controls how long to keep a block in memory before flushing to a fileset on disk + RetentionBlockSizeDuration *string `json:"retentionBlockSizeDuration,omitempty" tf:"retention_block_size_duration,omitempty"` + + // Controls how far into the future writes to the namespace will be accepted + RetentionBufferFutureDuration *string `json:"retentionBufferFutureDuration,omitempty" tf:"retention_buffer_future_duration,omitempty"` + + // Controls how far into the past writes to the namespace will be accepted + RetentionBufferPastDuration *string `json:"retentionBufferPastDuration,omitempty" tf:"retention_buffer_past_duration,omitempty"` + + // Controls the duration of time that M3DB will retain data for the namespace + RetentionPeriodDuration *string `json:"retentionPeriodDuration,omitempty" tf:"retention_period_duration,omitempty"` + + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // Defines whether M3db will create snapshot files for this namespace + SnapshotEnabled *bool `json:"snapshotEnabled,omitempty" tf:"snapshot_enabled,omitempty"` + + // Defines whether M3db will include writes to this namespace in the commit log + WritesToCommitLogEnabled *bool `json:"writesToCommitLogEnabled,omitempty" tf:"writes_to_commit_log_enabled,omitempty"` 
+} + +type ProjectDatabaseM3DbNamespaceObservation struct { + + // Id of the database cluster + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Resolution for an aggregated namespace + Resolution *string `json:"resolution,omitempty" tf:"resolution,omitempty"` + + // Controls how long we wait before expiring stale data + RetentionBlockDataExpirationDuration *string `json:"retentionBlockDataExpirationDuration,omitempty" tf:"retention_block_data_expiration_duration,omitempty"` + + // Controls how long to keep a block in memory before flushing to a fileset on disk + RetentionBlockSizeDuration *string `json:"retentionBlockSizeDuration,omitempty" tf:"retention_block_size_duration,omitempty"` + + // Controls how far into the future writes to the namespace will be accepted + RetentionBufferFutureDuration *string `json:"retentionBufferFutureDuration,omitempty" tf:"retention_buffer_future_duration,omitempty"` + + // Controls how far into the past writes to the namespace will be accepted + RetentionBufferPastDuration *string `json:"retentionBufferPastDuration,omitempty" tf:"retention_buffer_past_duration,omitempty"` + + // Controls the duration of time that M3DB will retain data for the namespace + RetentionPeriodDuration *string `json:"retentionPeriodDuration,omitempty" tf:"retention_period_duration,omitempty"` + + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // Defines whether M3db will create snapshot files for this namespace + SnapshotEnabled *bool `json:"snapshotEnabled,omitempty" tf:"snapshot_enabled,omitempty"` + + // Type of namespace + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // Defines whether M3db will include writes to this namespace in the commit log + WritesToCommitLogEnabled *bool `json:"writesToCommitLogEnabled,omitempty" tf:"writes_to_commit_log_enabled,omitempty"` +} + +type ProjectDatabaseM3DbNamespaceParameters 
struct { + + // Id of the database cluster + // +kubebuilder:validation:Optional + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + + // Resolution for an aggregated namespace + // +kubebuilder:validation:Optional + Resolution *string `json:"resolution,omitempty" tf:"resolution,omitempty"` + + // Controls how long we wait before expiring stale data + // +kubebuilder:validation:Optional + RetentionBlockDataExpirationDuration *string `json:"retentionBlockDataExpirationDuration,omitempty" tf:"retention_block_data_expiration_duration,omitempty"` + + // Controls how long to keep a block in memory before flushing to a fileset on disk + // +kubebuilder:validation:Optional + RetentionBlockSizeDuration *string `json:"retentionBlockSizeDuration,omitempty" tf:"retention_block_size_duration,omitempty"` + + // Controls how far into the future writes to the namespace will be accepted + // +kubebuilder:validation:Optional + RetentionBufferFutureDuration *string `json:"retentionBufferFutureDuration,omitempty" tf:"retention_buffer_future_duration,omitempty"` + + // Controls how far into the past writes to the namespace will be accepted + // +kubebuilder:validation:Optional + RetentionBufferPastDuration *string `json:"retentionBufferPastDuration,omitempty" tf:"retention_buffer_past_duration,omitempty"` + + // Controls the duration of time that M3DB will retain data for the namespace + // +kubebuilder:validation:Optional + RetentionPeriodDuration *string `json:"retentionPeriodDuration,omitempty" tf:"retention_period_duration,omitempty"` + + // +kubebuilder:validation:Optional + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // Defines whether M3db will create snapshot files for this namespace + // +kubebuilder:validation:Optional + SnapshotEnabled *bool `json:"snapshotEnabled,omitempty" tf:"snapshot_enabled,omitempty"` + + // Defines whether M3db will include writes to this namespace in the commit log + // 
+kubebuilder:validation:Optional + WritesToCommitLogEnabled *bool `json:"writesToCommitLogEnabled,omitempty" tf:"writes_to_commit_log_enabled,omitempty"` +} + +// ProjectDatabaseM3DbNamespaceSpec defines the desired state of ProjectDatabaseM3DbNamespace +type ProjectDatabaseM3DbNamespaceSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ProjectDatabaseM3DbNamespaceParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ProjectDatabaseM3DbNamespaceInitParameters `json:"initProvider,omitempty"` +} + +// ProjectDatabaseM3DbNamespaceStatus defines the observed state of ProjectDatabaseM3DbNamespace. +type ProjectDatabaseM3DbNamespaceStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ProjectDatabaseM3DbNamespaceObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true + +// ProjectDatabaseM3DbNamespace is the Schema for the ProjectDatabaseM3DbNamespaces API. 
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,ovh} +type ProjectDatabaseM3DbNamespace struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.clusterId) || (has(self.initProvider) && has(self.initProvider.clusterId))",message="spec.forProvider.clusterId is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.resolution) || (has(self.initProvider) && has(self.initProvider.resolution))",message="spec.forProvider.resolution is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.retentionPeriodDuration) || (has(self.initProvider) && has(self.initProvider.retentionPeriodDuration))",message="spec.forProvider.retentionPeriodDuration is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.serviceName) || (has(self.initProvider) && has(self.initProvider.serviceName))",message="spec.forProvider.serviceName is a 
required parameter" + Spec ProjectDatabaseM3DbNamespaceSpec `json:"spec"` + Status ProjectDatabaseM3DbNamespaceStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ProjectDatabaseM3DbNamespaceList contains a list of ProjectDatabaseM3DbNamespaces +type ProjectDatabaseM3DbNamespaceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ProjectDatabaseM3DbNamespace `json:"items"` +} + +// Repository type metadata. +var ( + ProjectDatabaseM3DbNamespace_Kind = "ProjectDatabaseM3DbNamespace" + ProjectDatabaseM3DbNamespace_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ProjectDatabaseM3DbNamespace_Kind}.String() + ProjectDatabaseM3DbNamespace_KindAPIVersion = ProjectDatabaseM3DbNamespace_Kind + "." + CRDGroupVersion.String() + ProjectDatabaseM3DbNamespace_GroupVersionKind = CRDGroupVersion.WithKind(ProjectDatabaseM3DbNamespace_Kind) +) + +func init() { + SchemeBuilder.Register(&ProjectDatabaseM3DbNamespace{}, &ProjectDatabaseM3DbNamespaceList{}) +} diff --git a/apis/databases/v1alpha1/zz_projectdatabasem3dbuser_types.go b/apis/databases/v1alpha1/zz_projectdatabasem3dbuser_types.go new file mode 100755 index 0000000..1657809 --- /dev/null +++ b/apis/databases/v1alpha1/zz_projectdatabasem3dbuser_types.go @@ -0,0 +1,134 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ProjectDatabaseM3DbUserInitParameters struct { + + // Id of the database cluster + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + + // Group of the user + Group *string `json:"group,omitempty" tf:"group,omitempty"` + + // Arbitrary string to change to trigger a password update + PasswordReset *string `json:"passwordReset,omitempty" tf:"password_reset,omitempty"` + + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` +} + +type ProjectDatabaseM3DbUserObservation struct { + + // Id of the database cluster + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + + // Date of the creation of the user + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + + // Group of the user + Group *string `json:"group,omitempty" tf:"group,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Arbitrary string to change to trigger a password update + PasswordReset *string `json:"passwordReset,omitempty" tf:"password_reset,omitempty"` + + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // Current status of the user + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type ProjectDatabaseM3DbUserParameters struct { + + // Id of the database cluster + // +kubebuilder:validation:Optional + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + + // Group of the user + // +kubebuilder:validation:Optional + Group *string `json:"group,omitempty" tf:"group,omitempty"` + + // Arbitrary string to change to trigger a password update + // +kubebuilder:validation:Optional + PasswordReset *string `json:"passwordReset,omitempty" tf:"password_reset,omitempty"` + + // +kubebuilder:validation:Optional + ServiceName 
*string `json:"serviceName,omitempty" tf:"service_name,omitempty"` +} + +// ProjectDatabaseM3DbUserSpec defines the desired state of ProjectDatabaseM3DbUser +type ProjectDatabaseM3DbUserSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ProjectDatabaseM3DbUserParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ProjectDatabaseM3DbUserInitParameters `json:"initProvider,omitempty"` +} + +// ProjectDatabaseM3DbUserStatus defines the observed state of ProjectDatabaseM3DbUser. +type ProjectDatabaseM3DbUserStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ProjectDatabaseM3DbUserObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true + +// ProjectDatabaseM3DbUser is the Schema for the ProjectDatabaseM3DbUsers API. 
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,ovh} +type ProjectDatabaseM3DbUser struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.clusterId) || (has(self.initProvider) && has(self.initProvider.clusterId))",message="spec.forProvider.clusterId is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.serviceName) || (has(self.initProvider) && has(self.initProvider.serviceName))",message="spec.forProvider.serviceName is a required parameter" + Spec ProjectDatabaseM3DbUserSpec `json:"spec"` + Status ProjectDatabaseM3DbUserStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ProjectDatabaseM3DbUserList contains a list of ProjectDatabaseM3DbUsers +type ProjectDatabaseM3DbUserList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ProjectDatabaseM3DbUser `json:"items"` +} + +// Repository type metadata. 
+var ( + ProjectDatabaseM3DbUser_Kind = "ProjectDatabaseM3DbUser" + ProjectDatabaseM3DbUser_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ProjectDatabaseM3DbUser_Kind}.String() + ProjectDatabaseM3DbUser_KindAPIVersion = ProjectDatabaseM3DbUser_Kind + "." + CRDGroupVersion.String() + ProjectDatabaseM3DbUser_GroupVersionKind = CRDGroupVersion.WithKind(ProjectDatabaseM3DbUser_Kind) +) + +func init() { + SchemeBuilder.Register(&ProjectDatabaseM3DbUser{}, &ProjectDatabaseM3DbUserList{}) +} diff --git a/apis/databases/v1alpha1/zz_projectdatabasemongodbuser_types.go b/apis/databases/v1alpha1/zz_projectdatabasemongodbuser_types.go new file mode 100755 index 0000000..333c69d --- /dev/null +++ b/apis/databases/v1alpha1/zz_projectdatabasemongodbuser_types.go @@ -0,0 +1,134 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ProjectDatabaseMongodbUserInitParameters struct { + + // Id of the database cluster + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + + // Arbitrary string to change to trigger a password update + PasswordReset *string `json:"passwordReset,omitempty" tf:"password_reset,omitempty"` + + // Roles the user belongs to (without authentication database) + Roles []*string `json:"roles,omitempty" tf:"roles,omitempty"` + + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` +} + +type ProjectDatabaseMongodbUserObservation struct { + + // Id of the database cluster + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + + // Date of the creation of the user + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + + ID *string 
`json:"id,omitempty" tf:"id,omitempty"` + + // Arbitrary string to change to trigger a password update + PasswordReset *string `json:"passwordReset,omitempty" tf:"password_reset,omitempty"` + + // Roles the user belongs to (without authentication database) + Roles []*string `json:"roles,omitempty" tf:"roles,omitempty"` + + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // Current status of the user + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type ProjectDatabaseMongodbUserParameters struct { + + // Id of the database cluster + // +kubebuilder:validation:Optional + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + + // Arbitrary string to change to trigger a password update + // +kubebuilder:validation:Optional + PasswordReset *string `json:"passwordReset,omitempty" tf:"password_reset,omitempty"` + + // Roles the user belongs to (without authentication database) + // +kubebuilder:validation:Optional + Roles []*string `json:"roles,omitempty" tf:"roles,omitempty"` + + // +kubebuilder:validation:Optional + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` +} + +// ProjectDatabaseMongodbUserSpec defines the desired state of ProjectDatabaseMongodbUser +type ProjectDatabaseMongodbUserSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ProjectDatabaseMongodbUserParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ProjectDatabaseMongodbUserInitParameters `json:"initProvider,omitempty"` +} + +// ProjectDatabaseMongodbUserStatus defines the observed state of ProjectDatabaseMongodbUser. +type ProjectDatabaseMongodbUserStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ProjectDatabaseMongodbUserObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true + +// ProjectDatabaseMongodbUser is the Schema for the ProjectDatabaseMongodbUsers API. +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,ovh} +type ProjectDatabaseMongodbUser struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.clusterId) || (has(self.initProvider) && has(self.initProvider.clusterId))",message="spec.forProvider.clusterId is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.serviceName) || (has(self.initProvider) && has(self.initProvider.serviceName))",message="spec.forProvider.serviceName is a 
required parameter" + Spec ProjectDatabaseMongodbUserSpec `json:"spec"` + Status ProjectDatabaseMongodbUserStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ProjectDatabaseMongodbUserList contains a list of ProjectDatabaseMongodbUsers +type ProjectDatabaseMongodbUserList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ProjectDatabaseMongodbUser `json:"items"` +} + +// Repository type metadata. +var ( + ProjectDatabaseMongodbUser_Kind = "ProjectDatabaseMongodbUser" + ProjectDatabaseMongodbUser_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ProjectDatabaseMongodbUser_Kind}.String() + ProjectDatabaseMongodbUser_KindAPIVersion = ProjectDatabaseMongodbUser_Kind + "." + CRDGroupVersion.String() + ProjectDatabaseMongodbUser_GroupVersionKind = CRDGroupVersion.WithKind(ProjectDatabaseMongodbUser_Kind) +) + +func init() { + SchemeBuilder.Register(&ProjectDatabaseMongodbUser{}, &ProjectDatabaseMongodbUserList{}) +} diff --git a/apis/databases/v1alpha1/zz_projectdatabaseopensearchpattern_types.go b/apis/databases/v1alpha1/zz_projectdatabaseopensearchpattern_types.go new file mode 100755 index 0000000..a4f3c97 --- /dev/null +++ b/apis/databases/v1alpha1/zz_projectdatabaseopensearchpattern_types.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ProjectDatabaseOpensearchPatternInitParameters struct { + + // Id of the database cluster + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + + // Maximum number of index for this pattern + MaxIndexCount *float64 `json:"maxIndexCount,omitempty" tf:"max_index_count,omitempty"` + + // Pattern format + Pattern *string `json:"pattern,omitempty" tf:"pattern,omitempty"` + + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` +} + +type ProjectDatabaseOpensearchPatternObservation struct { + + // Id of the database cluster + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Maximum number of index for this pattern + MaxIndexCount *float64 `json:"maxIndexCount,omitempty" tf:"max_index_count,omitempty"` + + // Pattern format + Pattern *string `json:"pattern,omitempty" tf:"pattern,omitempty"` + + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` +} + +type ProjectDatabaseOpensearchPatternParameters struct { + + // Id of the database cluster + // +kubebuilder:validation:Optional + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + + // Maximum number of index for this pattern + // +kubebuilder:validation:Optional + MaxIndexCount *float64 `json:"maxIndexCount,omitempty" tf:"max_index_count,omitempty"` + + // Pattern format + // +kubebuilder:validation:Optional + Pattern *string `json:"pattern,omitempty" tf:"pattern,omitempty"` + + // +kubebuilder:validation:Optional + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` +} + +// ProjectDatabaseOpensearchPatternSpec defines the desired state of ProjectDatabaseOpensearchPattern +type ProjectDatabaseOpensearchPatternSpec 
struct { + v1.ResourceSpec `json:",inline"` + ForProvider ProjectDatabaseOpensearchPatternParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ProjectDatabaseOpensearchPatternInitParameters `json:"initProvider,omitempty"` +} + +// ProjectDatabaseOpensearchPatternStatus defines the observed state of ProjectDatabaseOpensearchPattern. +type ProjectDatabaseOpensearchPatternStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ProjectDatabaseOpensearchPatternObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true + +// ProjectDatabaseOpensearchPattern is the Schema for the ProjectDatabaseOpensearchPatterns API. 
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,ovh} +type ProjectDatabaseOpensearchPattern struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.clusterId) || (has(self.initProvider) && has(self.initProvider.clusterId))",message="spec.forProvider.clusterId is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.pattern) || (has(self.initProvider) && has(self.initProvider.pattern))",message="spec.forProvider.pattern is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.serviceName) || (has(self.initProvider) && has(self.initProvider.serviceName))",message="spec.forProvider.serviceName is a required parameter" + Spec ProjectDatabaseOpensearchPatternSpec `json:"spec"` + Status ProjectDatabaseOpensearchPatternStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ProjectDatabaseOpensearchPatternList contains a list of ProjectDatabaseOpensearchPatterns +type ProjectDatabaseOpensearchPatternList struct { + metav1.TypeMeta 
`json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ProjectDatabaseOpensearchPattern `json:"items"` +} + +// Repository type metadata. +var ( + ProjectDatabaseOpensearchPattern_Kind = "ProjectDatabaseOpensearchPattern" + ProjectDatabaseOpensearchPattern_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ProjectDatabaseOpensearchPattern_Kind}.String() + ProjectDatabaseOpensearchPattern_KindAPIVersion = ProjectDatabaseOpensearchPattern_Kind + "." + CRDGroupVersion.String() + ProjectDatabaseOpensearchPattern_GroupVersionKind = CRDGroupVersion.WithKind(ProjectDatabaseOpensearchPattern_Kind) +) + +func init() { + SchemeBuilder.Register(&ProjectDatabaseOpensearchPattern{}, &ProjectDatabaseOpensearchPatternList{}) +} diff --git a/apis/databases/v1alpha1/zz_projectdatabaseopensearchuser_types.go b/apis/databases/v1alpha1/zz_projectdatabaseopensearchuser_types.go new file mode 100755 index 0000000..addbe17 --- /dev/null +++ b/apis/databases/v1alpha1/zz_projectdatabaseopensearchuser_types.go @@ -0,0 +1,163 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AclsInitParameters struct { + + // Pattern of the ACL + Pattern *string `json:"pattern,omitempty" tf:"pattern,omitempty"` + + // Permission of the ACL + Permission *string `json:"permission,omitempty" tf:"permission,omitempty"` +} + +type AclsObservation struct { + + // Pattern of the ACL + Pattern *string `json:"pattern,omitempty" tf:"pattern,omitempty"` + + // Permission of the ACL + Permission *string `json:"permission,omitempty" tf:"permission,omitempty"` +} + +type AclsParameters struct { + + // Pattern of the ACL + // +kubebuilder:validation:Optional + Pattern *string `json:"pattern" tf:"pattern,omitempty"` + + // Permission of the ACL + // +kubebuilder:validation:Optional + Permission *string `json:"permission" tf:"permission,omitempty"` +} + +type ProjectDatabaseOpensearchUserInitParameters struct { + + // Acls of the user + Acls []AclsInitParameters `json:"acls,omitempty" tf:"acls,omitempty"` + + // Id of the database cluster + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + + // Arbitrary string to change to trigger a password update + PasswordReset *string `json:"passwordReset,omitempty" tf:"password_reset,omitempty"` + + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` +} + +type ProjectDatabaseOpensearchUserObservation struct { + + // Acls of the user + Acls []AclsObservation `json:"acls,omitempty" tf:"acls,omitempty"` + + // Id of the database cluster + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + + // Date of the creation of the user + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Arbitrary string to change to trigger a password update + PasswordReset *string 
`json:"passwordReset,omitempty" tf:"password_reset,omitempty"` + + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // Current status of the user + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type ProjectDatabaseOpensearchUserParameters struct { + + // Acls of the user + // +kubebuilder:validation:Optional + Acls []AclsParameters `json:"acls,omitempty" tf:"acls,omitempty"` + + // Id of the database cluster + // +kubebuilder:validation:Optional + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + + // Arbitrary string to change to trigger a password update + // +kubebuilder:validation:Optional + PasswordReset *string `json:"passwordReset,omitempty" tf:"password_reset,omitempty"` + + // +kubebuilder:validation:Optional + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` +} + +// ProjectDatabaseOpensearchUserSpec defines the desired state of ProjectDatabaseOpensearchUser +type ProjectDatabaseOpensearchUserSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ProjectDatabaseOpensearchUserParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider ProjectDatabaseOpensearchUserInitParameters `json:"initProvider,omitempty"` +} + +// ProjectDatabaseOpensearchUserStatus defines the observed state of ProjectDatabaseOpensearchUser. +type ProjectDatabaseOpensearchUserStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ProjectDatabaseOpensearchUserObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true + +// ProjectDatabaseOpensearchUser is the Schema for the ProjectDatabaseOpensearchUsers API. +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,ovh} +type ProjectDatabaseOpensearchUser struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.clusterId) || (has(self.initProvider) && has(self.initProvider.clusterId))",message="spec.forProvider.clusterId is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.serviceName) || (has(self.initProvider) && has(self.initProvider.serviceName))",message="spec.forProvider.serviceName is a required parameter" + Spec ProjectDatabaseOpensearchUserSpec `json:"spec"` + Status ProjectDatabaseOpensearchUserStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true 
+ +// ProjectDatabaseOpensearchUserList contains a list of ProjectDatabaseOpensearchUsers +type ProjectDatabaseOpensearchUserList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ProjectDatabaseOpensearchUser `json:"items"` +} + +// Repository type metadata. +var ( + ProjectDatabaseOpensearchUser_Kind = "ProjectDatabaseOpensearchUser" + ProjectDatabaseOpensearchUser_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ProjectDatabaseOpensearchUser_Kind}.String() + ProjectDatabaseOpensearchUser_KindAPIVersion = ProjectDatabaseOpensearchUser_Kind + "." + CRDGroupVersion.String() + ProjectDatabaseOpensearchUser_GroupVersionKind = CRDGroupVersion.WithKind(ProjectDatabaseOpensearchUser_Kind) +) + +func init() { + SchemeBuilder.Register(&ProjectDatabaseOpensearchUser{}, &ProjectDatabaseOpensearchUserList{}) +} diff --git a/apis/databases/v1alpha1/zz_projectdatabasepostgresqluser_types.go b/apis/databases/v1alpha1/zz_projectdatabasepostgresqluser_types.go new file mode 100755 index 0000000..af53629 --- /dev/null +++ b/apis/databases/v1alpha1/zz_projectdatabasepostgresqluser_types.go @@ -0,0 +1,134 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ProjectDatabasePostgresqlUserInitParameters struct { + + // Id of the database cluster + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + + // Arbitrary string to change to trigger a password update + PasswordReset *string `json:"passwordReset,omitempty" tf:"password_reset,omitempty"` + + // Roles the user belongs to + Roles []*string `json:"roles,omitempty" tf:"roles,omitempty"` + + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` +} + +type ProjectDatabasePostgresqlUserObservation struct { + + // Id of the database cluster + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + + // Date of the creation of the user + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Arbitrary string to change to trigger a password update + PasswordReset *string `json:"passwordReset,omitempty" tf:"password_reset,omitempty"` + + // Roles the user belongs to + Roles []*string `json:"roles,omitempty" tf:"roles,omitempty"` + + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // Current status of the user + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type ProjectDatabasePostgresqlUserParameters struct { + + // Id of the database cluster + // +kubebuilder:validation:Optional + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + + // Arbitrary string to change to trigger a password update + // +kubebuilder:validation:Optional + PasswordReset *string `json:"passwordReset,omitempty" tf:"password_reset,omitempty"` + + // Roles the user belongs to + // +kubebuilder:validation:Optional + Roles []*string `json:"roles,omitempty" tf:"roles,omitempty"` + + // 
+kubebuilder:validation:Optional + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` +} + +// ProjectDatabasePostgresqlUserSpec defines the desired state of ProjectDatabasePostgresqlUser +type ProjectDatabasePostgresqlUserSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ProjectDatabasePostgresqlUserParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ProjectDatabasePostgresqlUserInitParameters `json:"initProvider,omitempty"` +} + +// ProjectDatabasePostgresqlUserStatus defines the observed state of ProjectDatabasePostgresqlUser. +type ProjectDatabasePostgresqlUserStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ProjectDatabasePostgresqlUserObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true + +// ProjectDatabasePostgresqlUser is the Schema for the ProjectDatabasePostgresqlUsers API. 
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,ovh} +type ProjectDatabasePostgresqlUser struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.clusterId) || (has(self.initProvider) && has(self.initProvider.clusterId))",message="spec.forProvider.clusterId is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.serviceName) || (has(self.initProvider) && has(self.initProvider.serviceName))",message="spec.forProvider.serviceName is a required parameter" + Spec ProjectDatabasePostgresqlUserSpec `json:"spec"` + Status ProjectDatabasePostgresqlUserStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ProjectDatabasePostgresqlUserList contains a list of ProjectDatabasePostgresqlUsers +type ProjectDatabasePostgresqlUserList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ProjectDatabasePostgresqlUser `json:"items"` +} + +// Repository type metadata. 
+var ( + ProjectDatabasePostgresqlUser_Kind = "ProjectDatabasePostgresqlUser" + ProjectDatabasePostgresqlUser_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ProjectDatabasePostgresqlUser_Kind}.String() + ProjectDatabasePostgresqlUser_KindAPIVersion = ProjectDatabasePostgresqlUser_Kind + "." + CRDGroupVersion.String() + ProjectDatabasePostgresqlUser_GroupVersionKind = CRDGroupVersion.WithKind(ProjectDatabasePostgresqlUser_Kind) +) + +func init() { + SchemeBuilder.Register(&ProjectDatabasePostgresqlUser{}, &ProjectDatabasePostgresqlUserList{}) +} diff --git a/apis/databases/v1alpha1/zz_projectdatabaseredisuser_types.go b/apis/databases/v1alpha1/zz_projectdatabaseredisuser_types.go new file mode 100755 index 0000000..992e08e --- /dev/null +++ b/apis/databases/v1alpha1/zz_projectdatabaseredisuser_types.go @@ -0,0 +1,164 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ProjectDatabaseRedisUserInitParameters struct { + + // Categories of the user + Categories []*string `json:"categories,omitempty" tf:"categories,omitempty"` + + // Channels of the user + Channels []*string `json:"channels,omitempty" tf:"channels,omitempty"` + + // Id of the database cluster + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + + // Commands of the user + Commands []*string `json:"commands,omitempty" tf:"commands,omitempty"` + + // Keys of the user + Keys []*string `json:"keys,omitempty" tf:"keys,omitempty"` + + // Arbitrary string to change to trigger a password update + PasswordReset *string `json:"passwordReset,omitempty" tf:"password_reset,omitempty"` + + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` +} + +type ProjectDatabaseRedisUserObservation struct { + + // Categories of the user + Categories []*string `json:"categories,omitempty" tf:"categories,omitempty"` + + // Channels of the user + Channels []*string `json:"channels,omitempty" tf:"channels,omitempty"` + + // Id of the database cluster + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + + // Commands of the user + Commands []*string `json:"commands,omitempty" tf:"commands,omitempty"` + + // Date of the creation of the user + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Keys of the user + Keys []*string `json:"keys,omitempty" tf:"keys,omitempty"` + + // Arbitrary string to change to trigger a password update + PasswordReset *string `json:"passwordReset,omitempty" tf:"password_reset,omitempty"` + + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // Current status of the user + Status *string 
`json:"status,omitempty" tf:"status,omitempty"` +} + +type ProjectDatabaseRedisUserParameters struct { + + // Categories of the user + // +kubebuilder:validation:Optional + Categories []*string `json:"categories,omitempty" tf:"categories,omitempty"` + + // Channels of the user + // +kubebuilder:validation:Optional + Channels []*string `json:"channels,omitempty" tf:"channels,omitempty"` + + // Id of the database cluster + // +kubebuilder:validation:Optional + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + + // Commands of the user + // +kubebuilder:validation:Optional + Commands []*string `json:"commands,omitempty" tf:"commands,omitempty"` + + // Keys of the user + // +kubebuilder:validation:Optional + Keys []*string `json:"keys,omitempty" tf:"keys,omitempty"` + + // Arbitrary string to change to trigger a password update + // +kubebuilder:validation:Optional + PasswordReset *string `json:"passwordReset,omitempty" tf:"password_reset,omitempty"` + + // +kubebuilder:validation:Optional + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` +} + +// ProjectDatabaseRedisUserSpec defines the desired state of ProjectDatabaseRedisUser +type ProjectDatabaseRedisUserSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ProjectDatabaseRedisUserParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ProjectDatabaseRedisUserInitParameters `json:"initProvider,omitempty"` +} + +// ProjectDatabaseRedisUserStatus defines the observed state of ProjectDatabaseRedisUser. +type ProjectDatabaseRedisUserStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ProjectDatabaseRedisUserObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true + +// ProjectDatabaseRedisUser is the Schema for the ProjectDatabaseRedisUsers API. +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,ovh} +type ProjectDatabaseRedisUser struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.clusterId) || (has(self.initProvider) && has(self.initProvider.clusterId))",message="spec.forProvider.clusterId is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.serviceName) || (has(self.initProvider) && has(self.initProvider.serviceName))",message="spec.forProvider.serviceName is a required 
parameter" + Spec ProjectDatabaseRedisUserSpec `json:"spec"` + Status ProjectDatabaseRedisUserStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ProjectDatabaseRedisUserList contains a list of ProjectDatabaseRedisUsers +type ProjectDatabaseRedisUserList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ProjectDatabaseRedisUser `json:"items"` +} + +// Repository type metadata. +var ( + ProjectDatabaseRedisUser_Kind = "ProjectDatabaseRedisUser" + ProjectDatabaseRedisUser_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ProjectDatabaseRedisUser_Kind}.String() + ProjectDatabaseRedisUser_KindAPIVersion = ProjectDatabaseRedisUser_Kind + "." + CRDGroupVersion.String() + ProjectDatabaseRedisUser_GroupVersionKind = CRDGroupVersion.WithKind(ProjectDatabaseRedisUser_Kind) +) + +func init() { + SchemeBuilder.Register(&ProjectDatabaseRedisUser{}, &ProjectDatabaseRedisUserList{}) +} diff --git a/apis/databases/v1alpha1/zz_projectdatabaseuser_types.go b/apis/databases/v1alpha1/zz_projectdatabaseuser_types.go new file mode 100755 index 0000000..8417724 --- /dev/null +++ b/apis/databases/v1alpha1/zz_projectdatabaseuser_types.go @@ -0,0 +1,135 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ProjectDatabaseUserInitParameters struct { + + // Id of the database cluster + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + + // Name of the engine of the service + Engine *string `json:"engine,omitempty" tf:"engine,omitempty"` + + // Arbitrary string to change to trigger a password update + PasswordReset *string `json:"passwordReset,omitempty" tf:"password_reset,omitempty"` + + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` +} + +type ProjectDatabaseUserObservation struct { + + // Id of the database cluster + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + + // Date of the creation of the user + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + + // Name of the engine of the service + Engine *string `json:"engine,omitempty" tf:"engine,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Arbitrary string to change to trigger a password update + PasswordReset *string `json:"passwordReset,omitempty" tf:"password_reset,omitempty"` + + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // Current status of the user + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type ProjectDatabaseUserParameters struct { + + // Id of the database cluster + // +kubebuilder:validation:Optional + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + + // Name of the engine of the service + // +kubebuilder:validation:Optional + Engine *string `json:"engine,omitempty" tf:"engine,omitempty"` + + // Arbitrary string to change to trigger a password update + // +kubebuilder:validation:Optional + PasswordReset *string `json:"passwordReset,omitempty" tf:"password_reset,omitempty"` + + // 
+kubebuilder:validation:Optional + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` +} + +// ProjectDatabaseUserSpec defines the desired state of ProjectDatabaseUser +type ProjectDatabaseUserSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ProjectDatabaseUserParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ProjectDatabaseUserInitParameters `json:"initProvider,omitempty"` +} + +// ProjectDatabaseUserStatus defines the observed state of ProjectDatabaseUser. +type ProjectDatabaseUserStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ProjectDatabaseUserObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true + +// ProjectDatabaseUser is the Schema for the ProjectDatabaseUsers API. 
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,ovh} +type ProjectDatabaseUser struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.clusterId) || (has(self.initProvider) && has(self.initProvider.clusterId))",message="spec.forProvider.clusterId is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.engine) || (has(self.initProvider) && has(self.initProvider.engine))",message="spec.forProvider.engine is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.serviceName) || (has(self.initProvider) && has(self.initProvider.serviceName))",message="spec.forProvider.serviceName is a required parameter" + Spec ProjectDatabaseUserSpec `json:"spec"` + Status ProjectDatabaseUserStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ProjectDatabaseUserList contains a list of ProjectDatabaseUsers +type ProjectDatabaseUserList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ProjectDatabaseUser 
`json:"items"` +} + +// Repository type metadata. +var ( + ProjectDatabaseUser_Kind = "ProjectDatabaseUser" + ProjectDatabaseUser_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ProjectDatabaseUser_Kind}.String() + ProjectDatabaseUser_KindAPIVersion = ProjectDatabaseUser_Kind + "." + CRDGroupVersion.String() + ProjectDatabaseUser_GroupVersionKind = CRDGroupVersion.WithKind(ProjectDatabaseUser_Kind) +) + +func init() { + SchemeBuilder.Register(&ProjectDatabaseUser{}, &ProjectDatabaseUserList{}) +} diff --git a/apis/kube/v1alpha1/zz_generated.deepcopy.go b/apis/kube/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..96dc6ff --- /dev/null +++ b/apis/kube/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,2526 @@ +//go:build !ignore_autogenerated + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdmissionpluginsInitParameters) DeepCopyInto(out *AdmissionpluginsInitParameters) { + *out = *in + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionpluginsInitParameters. 
+func (in *AdmissionpluginsInitParameters) DeepCopy() *AdmissionpluginsInitParameters { + if in == nil { + return nil + } + out := new(AdmissionpluginsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdmissionpluginsObservation) DeepCopyInto(out *AdmissionpluginsObservation) { + *out = *in + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionpluginsObservation. +func (in *AdmissionpluginsObservation) DeepCopy() *AdmissionpluginsObservation { + if in == nil { + return nil + } + out := new(AdmissionpluginsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AdmissionpluginsParameters) DeepCopyInto(out *AdmissionpluginsParameters) { + *out = *in + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionpluginsParameters. +func (in *AdmissionpluginsParameters) DeepCopy() *AdmissionpluginsParameters { + if in == nil { + return nil + } + out := new(AdmissionpluginsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApiserverInitParameters) DeepCopyInto(out *ApiserverInitParameters) { + *out = *in + if in.Admissionplugins != nil { + in, out := &in.Admissionplugins, &out.Admissionplugins + *out = make([]AdmissionpluginsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApiserverInitParameters. +func (in *ApiserverInitParameters) DeepCopy() *ApiserverInitParameters { + if in == nil { + return nil + } + out := new(ApiserverInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ApiserverObservation) DeepCopyInto(out *ApiserverObservation) { + *out = *in + if in.Admissionplugins != nil { + in, out := &in.Admissionplugins, &out.Admissionplugins + *out = make([]AdmissionpluginsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApiserverObservation. +func (in *ApiserverObservation) DeepCopy() *ApiserverObservation { + if in == nil { + return nil + } + out := new(ApiserverObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApiserverParameters) DeepCopyInto(out *ApiserverParameters) { + *out = *in + if in.Admissionplugins != nil { + in, out := &in.Admissionplugins, &out.Admissionplugins + *out = make([]AdmissionpluginsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApiserverParameters. +func (in *ApiserverParameters) DeepCopy() *ApiserverParameters { + if in == nil { + return nil + } + out := new(ApiserverParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomizationApiserverAdmissionpluginsInitParameters) DeepCopyInto(out *CustomizationApiserverAdmissionpluginsInitParameters) { + *out = *in + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomizationApiserverAdmissionpluginsInitParameters. +func (in *CustomizationApiserverAdmissionpluginsInitParameters) DeepCopy() *CustomizationApiserverAdmissionpluginsInitParameters { + if in == nil { + return nil + } + out := new(CustomizationApiserverAdmissionpluginsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomizationApiserverAdmissionpluginsObservation) DeepCopyInto(out *CustomizationApiserverAdmissionpluginsObservation) { + *out = *in + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomizationApiserverAdmissionpluginsObservation. 
+func (in *CustomizationApiserverAdmissionpluginsObservation) DeepCopy() *CustomizationApiserverAdmissionpluginsObservation { + if in == nil { + return nil + } + out := new(CustomizationApiserverAdmissionpluginsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomizationApiserverAdmissionpluginsParameters) DeepCopyInto(out *CustomizationApiserverAdmissionpluginsParameters) { + *out = *in + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomizationApiserverAdmissionpluginsParameters. +func (in *CustomizationApiserverAdmissionpluginsParameters) DeepCopy() *CustomizationApiserverAdmissionpluginsParameters { + if in == nil { + return nil + } + out := new(CustomizationApiserverAdmissionpluginsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomizationApiserverInitParameters) DeepCopyInto(out *CustomizationApiserverInitParameters) { + *out = *in + if in.Admissionplugins != nil { + in, out := &in.Admissionplugins, &out.Admissionplugins + *out = make([]CustomizationApiserverAdmissionpluginsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomizationApiserverInitParameters. 
+func (in *CustomizationApiserverInitParameters) DeepCopy() *CustomizationApiserverInitParameters { + if in == nil { + return nil + } + out := new(CustomizationApiserverInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomizationApiserverObservation) DeepCopyInto(out *CustomizationApiserverObservation) { + *out = *in + if in.Admissionplugins != nil { + in, out := &in.Admissionplugins, &out.Admissionplugins + *out = make([]CustomizationApiserverAdmissionpluginsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomizationApiserverObservation. +func (in *CustomizationApiserverObservation) DeepCopy() *CustomizationApiserverObservation { + if in == nil { + return nil + } + out := new(CustomizationApiserverObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomizationApiserverParameters) DeepCopyInto(out *CustomizationApiserverParameters) { + *out = *in + if in.Admissionplugins != nil { + in, out := &in.Admissionplugins, &out.Admissionplugins + *out = make([]CustomizationApiserverAdmissionpluginsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomizationApiserverParameters. +func (in *CustomizationApiserverParameters) DeepCopy() *CustomizationApiserverParameters { + if in == nil { + return nil + } + out := new(CustomizationApiserverParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomizationInitParameters) DeepCopyInto(out *CustomizationInitParameters) { + *out = *in + if in.Apiserver != nil { + in, out := &in.Apiserver, &out.Apiserver + *out = make([]ApiserverInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomizationInitParameters. +func (in *CustomizationInitParameters) DeepCopy() *CustomizationInitParameters { + if in == nil { + return nil + } + out := new(CustomizationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomizationKubeProxyInitParameters) DeepCopyInto(out *CustomizationKubeProxyInitParameters) { + *out = *in + if in.Iptables != nil { + in, out := &in.Iptables, &out.Iptables + *out = make([]IptablesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Ipvs != nil { + in, out := &in.Ipvs, &out.Ipvs + *out = make([]IpvsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomizationKubeProxyInitParameters. +func (in *CustomizationKubeProxyInitParameters) DeepCopy() *CustomizationKubeProxyInitParameters { + if in == nil { + return nil + } + out := new(CustomizationKubeProxyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomizationKubeProxyObservation) DeepCopyInto(out *CustomizationKubeProxyObservation) { + *out = *in + if in.Iptables != nil { + in, out := &in.Iptables, &out.Iptables + *out = make([]IptablesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Ipvs != nil { + in, out := &in.Ipvs, &out.Ipvs + *out = make([]IpvsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomizationKubeProxyObservation. +func (in *CustomizationKubeProxyObservation) DeepCopy() *CustomizationKubeProxyObservation { + if in == nil { + return nil + } + out := new(CustomizationKubeProxyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomizationKubeProxyParameters) DeepCopyInto(out *CustomizationKubeProxyParameters) { + *out = *in + if in.Iptables != nil { + in, out := &in.Iptables, &out.Iptables + *out = make([]IptablesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Ipvs != nil { + in, out := &in.Ipvs, &out.Ipvs + *out = make([]IpvsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomizationKubeProxyParameters. +func (in *CustomizationKubeProxyParameters) DeepCopy() *CustomizationKubeProxyParameters { + if in == nil { + return nil + } + out := new(CustomizationKubeProxyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomizationObservation) DeepCopyInto(out *CustomizationObservation) { + *out = *in + if in.Apiserver != nil { + in, out := &in.Apiserver, &out.Apiserver + *out = make([]ApiserverObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomizationObservation. +func (in *CustomizationObservation) DeepCopy() *CustomizationObservation { + if in == nil { + return nil + } + out := new(CustomizationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomizationParameters) DeepCopyInto(out *CustomizationParameters) { + *out = *in + if in.Apiserver != nil { + in, out := &in.Apiserver, &out.Apiserver + *out = make([]ApiserverParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomizationParameters. +func (in *CustomizationParameters) DeepCopy() *CustomizationParameters { + if in == nil { + return nil + } + out := new(CustomizationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IptablesInitParameters) DeepCopyInto(out *IptablesInitParameters) { + *out = *in + if in.MinSyncPeriod != nil { + in, out := &in.MinSyncPeriod, &out.MinSyncPeriod + *out = new(string) + **out = **in + } + if in.SyncPeriod != nil { + in, out := &in.SyncPeriod, &out.SyncPeriod + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IptablesInitParameters. 
+func (in *IptablesInitParameters) DeepCopy() *IptablesInitParameters { + if in == nil { + return nil + } + out := new(IptablesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IptablesObservation) DeepCopyInto(out *IptablesObservation) { + *out = *in + if in.MinSyncPeriod != nil { + in, out := &in.MinSyncPeriod, &out.MinSyncPeriod + *out = new(string) + **out = **in + } + if in.SyncPeriod != nil { + in, out := &in.SyncPeriod, &out.SyncPeriod + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IptablesObservation. +func (in *IptablesObservation) DeepCopy() *IptablesObservation { + if in == nil { + return nil + } + out := new(IptablesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IptablesParameters) DeepCopyInto(out *IptablesParameters) { + *out = *in + if in.MinSyncPeriod != nil { + in, out := &in.MinSyncPeriod, &out.MinSyncPeriod + *out = new(string) + **out = **in + } + if in.SyncPeriod != nil { + in, out := &in.SyncPeriod, &out.SyncPeriod + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IptablesParameters. +func (in *IptablesParameters) DeepCopy() *IptablesParameters { + if in == nil { + return nil + } + out := new(IptablesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IpvsInitParameters) DeepCopyInto(out *IpvsInitParameters) { + *out = *in + if in.MinSyncPeriod != nil { + in, out := &in.MinSyncPeriod, &out.MinSyncPeriod + *out = new(string) + **out = **in + } + if in.Scheduler != nil { + in, out := &in.Scheduler, &out.Scheduler + *out = new(string) + **out = **in + } + if in.SyncPeriod != nil { + in, out := &in.SyncPeriod, &out.SyncPeriod + *out = new(string) + **out = **in + } + if in.TCPFinTimeout != nil { + in, out := &in.TCPFinTimeout, &out.TCPFinTimeout + *out = new(string) + **out = **in + } + if in.TCPTimeout != nil { + in, out := &in.TCPTimeout, &out.TCPTimeout + *out = new(string) + **out = **in + } + if in.UDPTimeout != nil { + in, out := &in.UDPTimeout, &out.UDPTimeout + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IpvsInitParameters. +func (in *IpvsInitParameters) DeepCopy() *IpvsInitParameters { + if in == nil { + return nil + } + out := new(IpvsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IpvsObservation) DeepCopyInto(out *IpvsObservation) { + *out = *in + if in.MinSyncPeriod != nil { + in, out := &in.MinSyncPeriod, &out.MinSyncPeriod + *out = new(string) + **out = **in + } + if in.Scheduler != nil { + in, out := &in.Scheduler, &out.Scheduler + *out = new(string) + **out = **in + } + if in.SyncPeriod != nil { + in, out := &in.SyncPeriod, &out.SyncPeriod + *out = new(string) + **out = **in + } + if in.TCPFinTimeout != nil { + in, out := &in.TCPFinTimeout, &out.TCPFinTimeout + *out = new(string) + **out = **in + } + if in.TCPTimeout != nil { + in, out := &in.TCPTimeout, &out.TCPTimeout + *out = new(string) + **out = **in + } + if in.UDPTimeout != nil { + in, out := &in.UDPTimeout, &out.UDPTimeout + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IpvsObservation. +func (in *IpvsObservation) DeepCopy() *IpvsObservation { + if in == nil { + return nil + } + out := new(IpvsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
// NOTE(review): autogenerated deepcopy code; regenerate rather than hand-edit.
func (in *IpvsParameters) DeepCopyInto(out *IpvsParameters) {
	*out = *in
	if in.MinSyncPeriod != nil {
		in, out := &in.MinSyncPeriod, &out.MinSyncPeriod
		*out = new(string)
		**out = **in
	}
	if in.Scheduler != nil {
		in, out := &in.Scheduler, &out.Scheduler
		*out = new(string)
		**out = **in
	}
	if in.SyncPeriod != nil {
		in, out := &in.SyncPeriod, &out.SyncPeriod
		*out = new(string)
		**out = **in
	}
	if in.TCPFinTimeout != nil {
		in, out := &in.TCPFinTimeout, &out.TCPFinTimeout
		*out = new(string)
		**out = **in
	}
	if in.TCPTimeout != nil {
		in, out := &in.TCPTimeout, &out.TCPTimeout
		*out = new(string)
		**out = **in
	}
	if in.UDPTimeout != nil {
		in, out := &in.UDPTimeout, &out.UDPTimeout
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IpvsParameters.
func (in *IpvsParameters) DeepCopy() *IpvsParameters {
	if in == nil {
		return nil
	}
	out := new(IpvsParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeconfigAttributesInitParameters) DeepCopyInto(out *KubeconfigAttributesInitParameters) {
	// Shallow assignment only: the generator emitted no per-field clones here,
	// i.e. this struct has no pointer/reference fields needing a deep copy.
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeconfigAttributesInitParameters.
func (in *KubeconfigAttributesInitParameters) DeepCopy() *KubeconfigAttributesInitParameters {
	if in == nil {
		return nil
	}
	out := new(KubeconfigAttributesInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): autogenerated deepcopy code; regenerate rather than hand-edit.
func (in *KubeconfigAttributesObservation) DeepCopyInto(out *KubeconfigAttributesObservation) {
	*out = *in
	// The observation carries connection credentials as *string fields; each
	// non-nil one is cloned into its own allocation.
	if in.ClientCertificate != nil {
		in, out := &in.ClientCertificate, &out.ClientCertificate
		*out = new(string)
		**out = **in
	}
	if in.ClientKey != nil {
		in, out := &in.ClientKey, &out.ClientKey
		*out = new(string)
		**out = **in
	}
	if in.ClusterCACertificate != nil {
		in, out := &in.ClusterCACertificate, &out.ClusterCACertificate
		*out = new(string)
		**out = **in
	}
	if in.Host != nil {
		in, out := &in.Host, &out.Host
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeconfigAttributesObservation.
func (in *KubeconfigAttributesObservation) DeepCopy() *KubeconfigAttributesObservation {
	if in == nil {
		return nil
	}
	out := new(KubeconfigAttributesObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeconfigAttributesParameters) DeepCopyInto(out *KubeconfigAttributesParameters) {
	// Shallow assignment only — the generator found no fields requiring a deep copy.
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeconfigAttributesParameters.
func (in *KubeconfigAttributesParameters) DeepCopy() *KubeconfigAttributesParameters {
	if in == nil {
		return nil
	}
	out := new(KubeconfigAttributesParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): autogenerated deepcopy code; regenerate rather than hand-edit.
func (in *MetadataInitParameters) DeepCopyInto(out *MetadataInitParameters) {
	*out = *in
	// map[string]*string fields are rebuilt key-by-key, allocating a new string
	// cell per non-nil value so no pointers are shared with the receiver.
	if in.Annotations != nil {
		in, out := &in.Annotations, &out.Annotations
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
	if in.Finalizers != nil {
		in, out := &in.Finalizers, &out.Finalizers
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.Labels != nil {
		in, out := &in.Labels, &out.Labels
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetadataInitParameters.
func (in *MetadataInitParameters) DeepCopy() *MetadataInitParameters {
	if in == nil {
		return nil
	}
	out := new(MetadataInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): autogenerated deepcopy code; same map/slice cloning pattern as
// MetadataInitParameters — regenerate rather than hand-edit.
func (in *MetadataObservation) DeepCopyInto(out *MetadataObservation) {
	*out = *in
	if in.Annotations != nil {
		in, out := &in.Annotations, &out.Annotations
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
	if in.Finalizers != nil {
		in, out := &in.Finalizers, &out.Finalizers
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.Labels != nil {
		in, out := &in.Labels, &out.Labels
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetadataObservation.
func (in *MetadataObservation) DeepCopy() *MetadataObservation {
	if in == nil {
		return nil
	}
	out := new(MetadataObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): autogenerated deepcopy code (MetadataParameters, the
// PrivateNetworkConfiguration variants, and the ProjectKube resource root);
// regenerate rather than hand-edit.
func (in *MetadataParameters) DeepCopyInto(out *MetadataParameters) {
	*out = *in
	if in.Annotations != nil {
		in, out := &in.Annotations, &out.Annotations
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
	if in.Finalizers != nil {
		in, out := &in.Finalizers, &out.Finalizers
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.Labels != nil {
		in, out := &in.Labels, &out.Labels
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetadataParameters.
func (in *MetadataParameters) DeepCopy() *MetadataParameters {
	if in == nil {
		return nil
	}
	out := new(MetadataParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PrivateNetworkConfigurationInitParameters) DeepCopyInto(out *PrivateNetworkConfigurationInitParameters) {
	*out = *in
	if in.DefaultVrackGateway != nil {
		in, out := &in.DefaultVrackGateway, &out.DefaultVrackGateway
		*out = new(string)
		**out = **in
	}
	if in.PrivateNetworkRoutingAsDefault != nil {
		in, out := &in.PrivateNetworkRoutingAsDefault, &out.PrivateNetworkRoutingAsDefault
		*out = new(bool)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateNetworkConfigurationInitParameters.
func (in *PrivateNetworkConfigurationInitParameters) DeepCopy() *PrivateNetworkConfigurationInitParameters {
	if in == nil {
		return nil
	}
	out := new(PrivateNetworkConfigurationInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PrivateNetworkConfigurationObservation) DeepCopyInto(out *PrivateNetworkConfigurationObservation) {
	*out = *in
	if in.DefaultVrackGateway != nil {
		in, out := &in.DefaultVrackGateway, &out.DefaultVrackGateway
		*out = new(string)
		**out = **in
	}
	if in.PrivateNetworkRoutingAsDefault != nil {
		in, out := &in.PrivateNetworkRoutingAsDefault, &out.PrivateNetworkRoutingAsDefault
		*out = new(bool)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateNetworkConfigurationObservation.
func (in *PrivateNetworkConfigurationObservation) DeepCopy() *PrivateNetworkConfigurationObservation {
	if in == nil {
		return nil
	}
	out := new(PrivateNetworkConfigurationObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PrivateNetworkConfigurationParameters) DeepCopyInto(out *PrivateNetworkConfigurationParameters) {
	*out = *in
	if in.DefaultVrackGateway != nil {
		in, out := &in.DefaultVrackGateway, &out.DefaultVrackGateway
		*out = new(string)
		**out = **in
	}
	if in.PrivateNetworkRoutingAsDefault != nil {
		in, out := &in.PrivateNetworkRoutingAsDefault, &out.PrivateNetworkRoutingAsDefault
		*out = new(bool)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateNetworkConfigurationParameters.
func (in *PrivateNetworkConfigurationParameters) DeepCopy() *PrivateNetworkConfigurationParameters {
	if in == nil {
		return nil
	}
	out := new(PrivateNetworkConfigurationParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ProjectKube) DeepCopyInto(out *ProjectKube) {
	*out = *in
	// TypeMeta is a plain value; ObjectMeta/Spec/Status delegate to their own
	// generated deep-copy helpers.
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectKube.
func (in *ProjectKube) DeepCopy() *ProjectKube {
	if in == nil {
		return nil
	}
	out := new(ProjectKube)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ProjectKube) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): autogenerated deepcopy code (ProjectKubeInitParameters plus the
// ProjectKubeIprestrictions resource, its InitParameters and its List); regenerate
// rather than hand-edit.
func (in *ProjectKubeInitParameters) DeepCopyInto(out *ProjectKubeInitParameters) {
	*out = *in
	// Slice-of-struct fields are rebuilt element-wise via each element's own
	// DeepCopyInto; *string fields get fresh allocations.
	if in.Customization != nil {
		in, out := &in.Customization, &out.Customization
		*out = make([]CustomizationInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.CustomizationApiserver != nil {
		in, out := &in.CustomizationApiserver, &out.CustomizationApiserver
		*out = make([]CustomizationApiserverInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.CustomizationKubeProxy != nil {
		in, out := &in.CustomizationKubeProxy, &out.CustomizationKubeProxy
		*out = make([]CustomizationKubeProxyInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.KubeProxyMode != nil {
		in, out := &in.KubeProxyMode, &out.KubeProxyMode
		*out = new(string)
		**out = **in
	}
	if in.PrivateNetworkConfiguration != nil {
		in, out := &in.PrivateNetworkConfiguration, &out.PrivateNetworkConfiguration
		*out = make([]PrivateNetworkConfigurationInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.PrivateNetworkID != nil {
		in, out := &in.PrivateNetworkID, &out.PrivateNetworkID
		*out = new(string)
		**out = **in
	}
	if in.Region != nil {
		in, out := &in.Region, &out.Region
		*out = new(string)
		**out = **in
	}
	if in.ServiceName != nil {
		in, out := &in.ServiceName, &out.ServiceName
		*out = new(string)
		**out = **in
	}
	if in.UpdatePolicy != nil {
		in, out := &in.UpdatePolicy, &out.UpdatePolicy
		*out = new(string)
		**out = **in
	}
	if in.Version != nil {
		in, out := &in.Version, &out.Version
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectKubeInitParameters.
func (in *ProjectKubeInitParameters) DeepCopy() *ProjectKubeInitParameters {
	if in == nil {
		return nil
	}
	out := new(ProjectKubeInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ProjectKubeIprestrictions) DeepCopyInto(out *ProjectKubeIprestrictions) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectKubeIprestrictions.
func (in *ProjectKubeIprestrictions) DeepCopy() *ProjectKubeIprestrictions {
	if in == nil {
		return nil
	}
	out := new(ProjectKubeIprestrictions)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ProjectKubeIprestrictions) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ProjectKubeIprestrictionsInitParameters) DeepCopyInto(out *ProjectKubeIprestrictionsInitParameters) {
	*out = *in
	if in.Ips != nil {
		in, out := &in.Ips, &out.Ips
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.KubeID != nil {
		in, out := &in.KubeID, &out.KubeID
		*out = new(string)
		**out = **in
	}
	if in.ServiceName != nil {
		in, out := &in.ServiceName, &out.ServiceName
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectKubeIprestrictionsInitParameters.
func (in *ProjectKubeIprestrictionsInitParameters) DeepCopy() *ProjectKubeIprestrictionsInitParameters {
	if in == nil {
		return nil
	}
	out := new(ProjectKubeIprestrictionsInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ProjectKubeIprestrictionsList) DeepCopyInto(out *ProjectKubeIprestrictionsList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]ProjectKubeIprestrictions, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectKubeIprestrictionsList.
func (in *ProjectKubeIprestrictionsList) DeepCopy() *ProjectKubeIprestrictionsList {
	if in == nil {
		return nil
	}
	out := new(ProjectKubeIprestrictionsList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ProjectKubeIprestrictionsList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): autogenerated deepcopy code (Iprestrictions Observation/Parameters
// plus the Spec/Status wrappers); regenerate rather than hand-edit.
func (in *ProjectKubeIprestrictionsObservation) DeepCopyInto(out *ProjectKubeIprestrictionsObservation) {
	*out = *in
	if in.ID != nil {
		in, out := &in.ID, &out.ID
		*out = new(string)
		**out = **in
	}
	if in.Ips != nil {
		in, out := &in.Ips, &out.Ips
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.KubeID != nil {
		in, out := &in.KubeID, &out.KubeID
		*out = new(string)
		**out = **in
	}
	if in.ServiceName != nil {
		in, out := &in.ServiceName, &out.ServiceName
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectKubeIprestrictionsObservation.
func (in *ProjectKubeIprestrictionsObservation) DeepCopy() *ProjectKubeIprestrictionsObservation {
	if in == nil {
		return nil
	}
	out := new(ProjectKubeIprestrictionsObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ProjectKubeIprestrictionsParameters) DeepCopyInto(out *ProjectKubeIprestrictionsParameters) {
	*out = *in
	if in.Ips != nil {
		in, out := &in.Ips, &out.Ips
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.KubeID != nil {
		in, out := &in.KubeID, &out.KubeID
		*out = new(string)
		**out = **in
	}
	if in.ServiceName != nil {
		in, out := &in.ServiceName, &out.ServiceName
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectKubeIprestrictionsParameters.
func (in *ProjectKubeIprestrictionsParameters) DeepCopy() *ProjectKubeIprestrictionsParameters {
	if in == nil {
		return nil
	}
	out := new(ProjectKubeIprestrictionsParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ProjectKubeIprestrictionsSpec) DeepCopyInto(out *ProjectKubeIprestrictionsSpec) {
	*out = *in
	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
	in.ForProvider.DeepCopyInto(&out.ForProvider)
	in.InitProvider.DeepCopyInto(&out.InitProvider)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectKubeIprestrictionsSpec.
func (in *ProjectKubeIprestrictionsSpec) DeepCopy() *ProjectKubeIprestrictionsSpec {
	if in == nil {
		return nil
	}
	out := new(ProjectKubeIprestrictionsSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ProjectKubeIprestrictionsStatus) DeepCopyInto(out *ProjectKubeIprestrictionsStatus) {
	*out = *in
	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
	in.AtProvider.DeepCopyInto(&out.AtProvider)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectKubeIprestrictionsStatus.
func (in *ProjectKubeIprestrictionsStatus) DeepCopy() *ProjectKubeIprestrictionsStatus {
	if in == nil {
		return nil
	}
	out := new(ProjectKubeIprestrictionsStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): autogenerated deepcopy code (list type and the ProjectKubeNodepool
// resource root); regenerate rather than hand-edit.
func (in *ProjectKubeList) DeepCopyInto(out *ProjectKubeList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]ProjectKube, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectKubeList.
func (in *ProjectKubeList) DeepCopy() *ProjectKubeList {
	if in == nil {
		return nil
	}
	out := new(ProjectKubeList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ProjectKubeList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ProjectKubeNodepool) DeepCopyInto(out *ProjectKubeNodepool) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectKubeNodepool.
func (in *ProjectKubeNodepool) DeepCopy() *ProjectKubeNodepool {
	if in == nil {
		return nil
	}
	out := new(ProjectKubeNodepool)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ProjectKubeNodepool) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): autogenerated deepcopy code; regenerate rather than hand-edit.
func (in *ProjectKubeNodepoolInitParameters) DeepCopyInto(out *ProjectKubeNodepoolInitParameters) {
	*out = *in
	if in.AntiAffinity != nil {
		in, out := &in.AntiAffinity, &out.AntiAffinity
		*out = new(bool)
		**out = **in
	}
	if in.Autoscale != nil {
		in, out := &in.Autoscale, &out.Autoscale
		*out = new(bool)
		**out = **in
	}
	if in.DesiredNodes != nil {
		in, out := &in.DesiredNodes, &out.DesiredNodes
		*out = new(float64)
		**out = **in
	}
	if in.FlavorName != nil {
		in, out := &in.FlavorName, &out.FlavorName
		*out = new(string)
		**out = **in
	}
	if in.KubeID != nil {
		in, out := &in.KubeID, &out.KubeID
		*out = new(string)
		**out = **in
	}
	if in.MaxNodes != nil {
		in, out := &in.MaxNodes, &out.MaxNodes
		*out = new(float64)
		**out = **in
	}
	if in.MinNodes != nil {
		in, out := &in.MinNodes, &out.MinNodes
		*out = new(float64)
		**out = **in
	}
	if in.MonthlyBilled != nil {
		in, out := &in.MonthlyBilled, &out.MonthlyBilled
		*out = new(bool)
		**out = **in
	}
	if in.ServiceName != nil {
		in, out := &in.ServiceName, &out.ServiceName
		*out = new(string)
		**out = **in
	}
	if in.Template != nil {
		in, out := &in.Template, &out.Template
		*out = make([]TemplateInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectKubeNodepoolInitParameters.
func (in *ProjectKubeNodepoolInitParameters) DeepCopy() *ProjectKubeNodepoolInitParameters {
	if in == nil {
		return nil
	}
	out := new(ProjectKubeNodepoolInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): autogenerated deepcopy code; regenerate rather than hand-edit.
func (in *ProjectKubeNodepoolList) DeepCopyInto(out *ProjectKubeNodepoolList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]ProjectKubeNodepool, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectKubeNodepoolList.
func (in *ProjectKubeNodepoolList) DeepCopy() *ProjectKubeNodepoolList {
	if in == nil {
		return nil
	}
	out := new(ProjectKubeNodepoolList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ProjectKubeNodepoolList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): autogenerated deepcopy code (Nodepool Observation/Parameters plus
// Spec/Status wrappers); regenerate rather than hand-edit.
func (in *ProjectKubeNodepoolObservation) DeepCopyInto(out *ProjectKubeNodepoolObservation) {
	*out = *in
	if in.AntiAffinity != nil {
		in, out := &in.AntiAffinity, &out.AntiAffinity
		*out = new(bool)
		**out = **in
	}
	if in.Autoscale != nil {
		in, out := &in.Autoscale, &out.Autoscale
		*out = new(bool)
		**out = **in
	}
	if in.AvailableNodes != nil {
		in, out := &in.AvailableNodes, &out.AvailableNodes
		*out = new(float64)
		**out = **in
	}
	if in.CreatedAt != nil {
		in, out := &in.CreatedAt, &out.CreatedAt
		*out = new(string)
		**out = **in
	}
	if in.CurrentNodes != nil {
		in, out := &in.CurrentNodes, &out.CurrentNodes
		*out = new(float64)
		**out = **in
	}
	if in.DesiredNodes != nil {
		in, out := &in.DesiredNodes, &out.DesiredNodes
		*out = new(float64)
		**out = **in
	}
	if in.Flavor != nil {
		in, out := &in.Flavor, &out.Flavor
		*out = new(string)
		**out = **in
	}
	if in.FlavorName != nil {
		in, out := &in.FlavorName, &out.FlavorName
		*out = new(string)
		**out = **in
	}
	if in.ID != nil {
		in, out := &in.ID, &out.ID
		*out = new(string)
		**out = **in
	}
	if in.KubeID != nil {
		in, out := &in.KubeID, &out.KubeID
		*out = new(string)
		**out = **in
	}
	if in.MaxNodes != nil {
		in, out := &in.MaxNodes, &out.MaxNodes
		*out = new(float64)
		**out = **in
	}
	if in.MinNodes != nil {
		in, out := &in.MinNodes, &out.MinNodes
		*out = new(float64)
		**out = **in
	}
	if in.MonthlyBilled != nil {
		in, out := &in.MonthlyBilled, &out.MonthlyBilled
		*out = new(bool)
		**out = **in
	}
	if in.ProjectID != nil {
		in, out := &in.ProjectID, &out.ProjectID
		*out = new(string)
		**out = **in
	}
	if in.ServiceName != nil {
		in, out := &in.ServiceName, &out.ServiceName
		*out = new(string)
		**out = **in
	}
	if in.SizeStatus != nil {
		in, out := &in.SizeStatus, &out.SizeStatus
		*out = new(string)
		**out = **in
	}
	if in.Status != nil {
		in, out := &in.Status, &out.Status
		*out = new(string)
		**out = **in
	}
	if in.Template != nil {
		in, out := &in.Template, &out.Template
		*out = make([]TemplateObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.UpToDateNodes != nil {
		in, out := &in.UpToDateNodes, &out.UpToDateNodes
		*out = new(float64)
		**out = **in
	}
	if in.UpdatedAt != nil {
		in, out := &in.UpdatedAt, &out.UpdatedAt
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectKubeNodepoolObservation.
func (in *ProjectKubeNodepoolObservation) DeepCopy() *ProjectKubeNodepoolObservation {
	if in == nil {
		return nil
	}
	out := new(ProjectKubeNodepoolObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ProjectKubeNodepoolParameters) DeepCopyInto(out *ProjectKubeNodepoolParameters) {
	*out = *in
	if in.AntiAffinity != nil {
		in, out := &in.AntiAffinity, &out.AntiAffinity
		*out = new(bool)
		**out = **in
	}
	if in.Autoscale != nil {
		in, out := &in.Autoscale, &out.Autoscale
		*out = new(bool)
		**out = **in
	}
	if in.DesiredNodes != nil {
		in, out := &in.DesiredNodes, &out.DesiredNodes
		*out = new(float64)
		**out = **in
	}
	if in.FlavorName != nil {
		in, out := &in.FlavorName, &out.FlavorName
		*out = new(string)
		**out = **in
	}
	if in.KubeID != nil {
		in, out := &in.KubeID, &out.KubeID
		*out = new(string)
		**out = **in
	}
	if in.MaxNodes != nil {
		in, out := &in.MaxNodes, &out.MaxNodes
		*out = new(float64)
		**out = **in
	}
	if in.MinNodes != nil {
		in, out := &in.MinNodes, &out.MinNodes
		*out = new(float64)
		**out = **in
	}
	if in.MonthlyBilled != nil {
		in, out := &in.MonthlyBilled, &out.MonthlyBilled
		*out = new(bool)
		**out = **in
	}
	if in.ServiceName != nil {
		in, out := &in.ServiceName, &out.ServiceName
		*out = new(string)
		**out = **in
	}
	if in.Template != nil {
		in, out := &in.Template, &out.Template
		*out = make([]TemplateParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectKubeNodepoolParameters.
func (in *ProjectKubeNodepoolParameters) DeepCopy() *ProjectKubeNodepoolParameters {
	if in == nil {
		return nil
	}
	out := new(ProjectKubeNodepoolParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ProjectKubeNodepoolSpec) DeepCopyInto(out *ProjectKubeNodepoolSpec) {
	*out = *in
	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
	in.ForProvider.DeepCopyInto(&out.ForProvider)
	in.InitProvider.DeepCopyInto(&out.InitProvider)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectKubeNodepoolSpec.
func (in *ProjectKubeNodepoolSpec) DeepCopy() *ProjectKubeNodepoolSpec {
	if in == nil {
		return nil
	}
	out := new(ProjectKubeNodepoolSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ProjectKubeNodepoolStatus) DeepCopyInto(out *ProjectKubeNodepoolStatus) {
	*out = *in
	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
	in.AtProvider.DeepCopyInto(&out.AtProvider)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectKubeNodepoolStatus.
func (in *ProjectKubeNodepoolStatus) DeepCopy() *ProjectKubeNodepoolStatus {
	if in == nil {
		return nil
	}
	out := new(ProjectKubeNodepoolStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): autogenerated deepcopy code (ProjectKubeObservation and the
// ProjectKubeOidc resource root); regenerate rather than hand-edit.
func (in *ProjectKubeObservation) DeepCopyInto(out *ProjectKubeObservation) {
	*out = *in
	if in.ControlPlaneIsUpToDate != nil {
		in, out := &in.ControlPlaneIsUpToDate, &out.ControlPlaneIsUpToDate
		*out = new(bool)
		**out = **in
	}
	if in.Customization != nil {
		in, out := &in.Customization, &out.Customization
		*out = make([]CustomizationObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.CustomizationApiserver != nil {
		in, out := &in.CustomizationApiserver, &out.CustomizationApiserver
		*out = make([]CustomizationApiserverObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.CustomizationKubeProxy != nil {
		in, out := &in.CustomizationKubeProxy, &out.CustomizationKubeProxy
		*out = make([]CustomizationKubeProxyObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ID != nil {
		in, out := &in.ID, &out.ID
		*out = new(string)
		**out = **in
	}
	if in.IsUpToDate != nil {
		in, out := &in.IsUpToDate, &out.IsUpToDate
		*out = new(bool)
		**out = **in
	}
	if in.KubeProxyMode != nil {
		in, out := &in.KubeProxyMode, &out.KubeProxyMode
		*out = new(string)
		**out = **in
	}
	if in.NextUpgradeVersions != nil {
		in, out := &in.NextUpgradeVersions, &out.NextUpgradeVersions
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.NodesURL != nil {
		in, out := &in.NodesURL, &out.NodesURL
		*out = new(string)
		**out = **in
	}
	if in.PrivateNetworkConfiguration != nil {
		in, out := &in.PrivateNetworkConfiguration, &out.PrivateNetworkConfiguration
		*out = make([]PrivateNetworkConfigurationObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.PrivateNetworkID != nil {
		in, out := &in.PrivateNetworkID, &out.PrivateNetworkID
		*out = new(string)
		**out = **in
	}
	if in.Region != nil {
		in, out := &in.Region, &out.Region
		*out = new(string)
		**out = **in
	}
	if in.ServiceName != nil {
		in, out := &in.ServiceName, &out.ServiceName
		*out = new(string)
		**out = **in
	}
	if in.Status != nil {
		in, out := &in.Status, &out.Status
		*out = new(string)
		**out = **in
	}
	if in.URL != nil {
		in, out := &in.URL, &out.URL
		*out = new(string)
		**out = **in
	}
	if in.UpdatePolicy != nil {
		in, out := &in.UpdatePolicy, &out.UpdatePolicy
		*out = new(string)
		**out = **in
	}
	if in.Version != nil {
		in, out := &in.Version, &out.Version
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectKubeObservation.
func (in *ProjectKubeObservation) DeepCopy() *ProjectKubeObservation {
	if in == nil {
		return nil
	}
	out := new(ProjectKubeObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ProjectKubeOidc) DeepCopyInto(out *ProjectKubeOidc) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectKubeOidc.
func (in *ProjectKubeOidc) DeepCopy() *ProjectKubeOidc {
	if in == nil {
		return nil
	}
	out := new(ProjectKubeOidc)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ProjectKubeOidc) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProjectKubeOidcInitParameters) DeepCopyInto(out *ProjectKubeOidcInitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.IssuerURL != nil { + in, out := &in.IssuerURL, &out.IssuerURL + *out = new(string) + **out = **in + } + if in.KubeID != nil { + in, out := &in.KubeID, &out.KubeID + *out = new(string) + **out = **in + } + if in.OidcCAContent != nil { + in, out := &in.OidcCAContent, &out.OidcCAContent + *out = new(string) + **out = **in + } + if in.OidcGroupsClaim != nil { + in, out := &in.OidcGroupsClaim, &out.OidcGroupsClaim + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OidcGroupsPrefix != nil { + in, out := &in.OidcGroupsPrefix, &out.OidcGroupsPrefix + *out = new(string) + **out = **in + } + if in.OidcRequiredClaim != nil { + in, out := &in.OidcRequiredClaim, &out.OidcRequiredClaim + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OidcSigningAlgs != nil { + in, out := &in.OidcSigningAlgs, &out.OidcSigningAlgs + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OidcUsernameClaim != nil { + in, out := &in.OidcUsernameClaim, &out.OidcUsernameClaim + *out = new(string) + **out = **in + } + if in.OidcUsernamePrefix != nil { + in, out := &in.OidcUsernamePrefix, &out.OidcUsernamePrefix + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectKubeOidcInitParameters. 
+func (in *ProjectKubeOidcInitParameters) DeepCopy() *ProjectKubeOidcInitParameters { + if in == nil { + return nil + } + out := new(ProjectKubeOidcInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectKubeOidcList) DeepCopyInto(out *ProjectKubeOidcList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ProjectKubeOidc, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectKubeOidcList. +func (in *ProjectKubeOidcList) DeepCopy() *ProjectKubeOidcList { + if in == nil { + return nil + } + out := new(ProjectKubeOidcList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProjectKubeOidcList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProjectKubeOidcObservation) DeepCopyInto(out *ProjectKubeOidcObservation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IssuerURL != nil { + in, out := &in.IssuerURL, &out.IssuerURL + *out = new(string) + **out = **in + } + if in.KubeID != nil { + in, out := &in.KubeID, &out.KubeID + *out = new(string) + **out = **in + } + if in.OidcCAContent != nil { + in, out := &in.OidcCAContent, &out.OidcCAContent + *out = new(string) + **out = **in + } + if in.OidcGroupsClaim != nil { + in, out := &in.OidcGroupsClaim, &out.OidcGroupsClaim + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OidcGroupsPrefix != nil { + in, out := &in.OidcGroupsPrefix, &out.OidcGroupsPrefix + *out = new(string) + **out = **in + } + if in.OidcRequiredClaim != nil { + in, out := &in.OidcRequiredClaim, &out.OidcRequiredClaim + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OidcSigningAlgs != nil { + in, out := &in.OidcSigningAlgs, &out.OidcSigningAlgs + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OidcUsernameClaim != nil { + in, out := &in.OidcUsernameClaim, &out.OidcUsernameClaim + *out = new(string) + **out = **in + } + if in.OidcUsernamePrefix != nil { + in, out := &in.OidcUsernamePrefix, &out.OidcUsernamePrefix + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a 
new ProjectKubeOidcObservation. +func (in *ProjectKubeOidcObservation) DeepCopy() *ProjectKubeOidcObservation { + if in == nil { + return nil + } + out := new(ProjectKubeOidcObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectKubeOidcParameters) DeepCopyInto(out *ProjectKubeOidcParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.IssuerURL != nil { + in, out := &in.IssuerURL, &out.IssuerURL + *out = new(string) + **out = **in + } + if in.KubeID != nil { + in, out := &in.KubeID, &out.KubeID + *out = new(string) + **out = **in + } + if in.OidcCAContent != nil { + in, out := &in.OidcCAContent, &out.OidcCAContent + *out = new(string) + **out = **in + } + if in.OidcGroupsClaim != nil { + in, out := &in.OidcGroupsClaim, &out.OidcGroupsClaim + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OidcGroupsPrefix != nil { + in, out := &in.OidcGroupsPrefix, &out.OidcGroupsPrefix + *out = new(string) + **out = **in + } + if in.OidcRequiredClaim != nil { + in, out := &in.OidcRequiredClaim, &out.OidcRequiredClaim + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OidcSigningAlgs != nil { + in, out := &in.OidcSigningAlgs, &out.OidcSigningAlgs + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OidcUsernameClaim != nil { + in, out := &in.OidcUsernameClaim, &out.OidcUsernameClaim + *out = new(string) + **out = **in + } + if in.OidcUsernamePrefix != nil { + in, out := &in.OidcUsernamePrefix, 
&out.OidcUsernamePrefix + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectKubeOidcParameters. +func (in *ProjectKubeOidcParameters) DeepCopy() *ProjectKubeOidcParameters { + if in == nil { + return nil + } + out := new(ProjectKubeOidcParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectKubeOidcSpec) DeepCopyInto(out *ProjectKubeOidcSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectKubeOidcSpec. +func (in *ProjectKubeOidcSpec) DeepCopy() *ProjectKubeOidcSpec { + if in == nil { + return nil + } + out := new(ProjectKubeOidcSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectKubeOidcStatus) DeepCopyInto(out *ProjectKubeOidcStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectKubeOidcStatus. +func (in *ProjectKubeOidcStatus) DeepCopy() *ProjectKubeOidcStatus { + if in == nil { + return nil + } + out := new(ProjectKubeOidcStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProjectKubeParameters) DeepCopyInto(out *ProjectKubeParameters) { + *out = *in + if in.Customization != nil { + in, out := &in.Customization, &out.Customization + *out = make([]CustomizationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CustomizationApiserver != nil { + in, out := &in.CustomizationApiserver, &out.CustomizationApiserver + *out = make([]CustomizationApiserverParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CustomizationKubeProxy != nil { + in, out := &in.CustomizationKubeProxy, &out.CustomizationKubeProxy + *out = make([]CustomizationKubeProxyParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.KubeProxyMode != nil { + in, out := &in.KubeProxyMode, &out.KubeProxyMode + *out = new(string) + **out = **in + } + if in.PrivateNetworkConfiguration != nil { + in, out := &in.PrivateNetworkConfiguration, &out.PrivateNetworkConfiguration + *out = make([]PrivateNetworkConfigurationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PrivateNetworkID != nil { + in, out := &in.PrivateNetworkID, &out.PrivateNetworkID + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.UpdatePolicy != nil { + in, out := &in.UpdatePolicy, &out.UpdatePolicy + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectKubeParameters. 
+func (in *ProjectKubeParameters) DeepCopy() *ProjectKubeParameters { + if in == nil { + return nil + } + out := new(ProjectKubeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectKubeSpec) DeepCopyInto(out *ProjectKubeSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectKubeSpec. +func (in *ProjectKubeSpec) DeepCopy() *ProjectKubeSpec { + if in == nil { + return nil + } + out := new(ProjectKubeSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectKubeStatus) DeepCopyInto(out *ProjectKubeStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectKubeStatus. +func (in *ProjectKubeStatus) DeepCopy() *ProjectKubeStatus { + if in == nil { + return nil + } + out := new(ProjectKubeStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpecInitParameters) DeepCopyInto(out *SpecInitParameters) { + *out = *in + if in.Taints != nil { + in, out := &in.Taints, &out.Taints + *out = make([]map[string]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + } + } + if in.Unschedulable != nil { + in, out := &in.Unschedulable, &out.Unschedulable + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecInitParameters. +func (in *SpecInitParameters) DeepCopy() *SpecInitParameters { + if in == nil { + return nil + } + out := new(SpecInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpecObservation) DeepCopyInto(out *SpecObservation) { + *out = *in + if in.Taints != nil { + in, out := &in.Taints, &out.Taints + *out = make([]map[string]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + } + } + if in.Unschedulable != nil { + in, out := &in.Unschedulable, &out.Unschedulable + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecObservation. 
+func (in *SpecObservation) DeepCopy() *SpecObservation { + if in == nil { + return nil + } + out := new(SpecObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpecParameters) DeepCopyInto(out *SpecParameters) { + *out = *in + if in.Taints != nil { + in, out := &in.Taints, &out.Taints + *out = make([]map[string]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + } + } + if in.Unschedulable != nil { + in, out := &in.Unschedulable, &out.Unschedulable + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecParameters. +func (in *SpecParameters) DeepCopy() *SpecParameters { + if in == nil { + return nil + } + out := new(SpecParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TemplateInitParameters) DeepCopyInto(out *TemplateInitParameters) { + *out = *in + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = make([]MetadataInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Spec != nil { + in, out := &in.Spec, &out.Spec + *out = make([]SpecInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateInitParameters. 
+func (in *TemplateInitParameters) DeepCopy() *TemplateInitParameters { + if in == nil { + return nil + } + out := new(TemplateInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TemplateObservation) DeepCopyInto(out *TemplateObservation) { + *out = *in + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = make([]MetadataObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Spec != nil { + in, out := &in.Spec, &out.Spec + *out = make([]SpecObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateObservation. +func (in *TemplateObservation) DeepCopy() *TemplateObservation { + if in == nil { + return nil + } + out := new(TemplateObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TemplateParameters) DeepCopyInto(out *TemplateParameters) { + *out = *in + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = make([]MetadataParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Spec != nil { + in, out := &in.Spec, &out.Spec + *out = make([]SpecParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateParameters. 
+func (in *TemplateParameters) DeepCopy() *TemplateParameters { + if in == nil { + return nil + } + out := new(TemplateParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/kube/v1alpha1/zz_generated.managed.go b/apis/kube/v1alpha1/zz_generated.managed.go new file mode 100644 index 0000000..1a95010 --- /dev/null +++ b/apis/kube/v1alpha1/zz_generated.managed.go @@ -0,0 +1,248 @@ +/* +Copyright 2022 Upbound Inc. +*/ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this ProjectKube. +func (mg *ProjectKube) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ProjectKube. +func (mg *ProjectKube) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ProjectKube. +func (mg *ProjectKube) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ProjectKube. +func (mg *ProjectKube) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ProjectKube. +func (mg *ProjectKube) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ProjectKube. +func (mg *ProjectKube) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ProjectKube. +func (mg *ProjectKube) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ProjectKube. +func (mg *ProjectKube) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ProjectKube. 
+func (mg *ProjectKube) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ProjectKube. +func (mg *ProjectKube) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ProjectKube. +func (mg *ProjectKube) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ProjectKube. +func (mg *ProjectKube) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this ProjectKubeIprestrictions. +func (mg *ProjectKubeIprestrictions) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ProjectKubeIprestrictions. +func (mg *ProjectKubeIprestrictions) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ProjectKubeIprestrictions. +func (mg *ProjectKubeIprestrictions) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ProjectKubeIprestrictions. +func (mg *ProjectKubeIprestrictions) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ProjectKubeIprestrictions. +func (mg *ProjectKubeIprestrictions) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ProjectKubeIprestrictions. +func (mg *ProjectKubeIprestrictions) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ProjectKubeIprestrictions. 
+func (mg *ProjectKubeIprestrictions) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ProjectKubeIprestrictions. +func (mg *ProjectKubeIprestrictions) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ProjectKubeIprestrictions. +func (mg *ProjectKubeIprestrictions) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ProjectKubeIprestrictions. +func (mg *ProjectKubeIprestrictions) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ProjectKubeIprestrictions. +func (mg *ProjectKubeIprestrictions) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ProjectKubeIprestrictions. +func (mg *ProjectKubeIprestrictions) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this ProjectKubeNodepool. +func (mg *ProjectKubeNodepool) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ProjectKubeNodepool. +func (mg *ProjectKubeNodepool) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ProjectKubeNodepool. +func (mg *ProjectKubeNodepool) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ProjectKubeNodepool. +func (mg *ProjectKubeNodepool) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ProjectKubeNodepool. 
+func (mg *ProjectKubeNodepool) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ProjectKubeNodepool. +func (mg *ProjectKubeNodepool) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ProjectKubeNodepool. +func (mg *ProjectKubeNodepool) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ProjectKubeNodepool. +func (mg *ProjectKubeNodepool) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ProjectKubeNodepool. +func (mg *ProjectKubeNodepool) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ProjectKubeNodepool. +func (mg *ProjectKubeNodepool) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ProjectKubeNodepool. +func (mg *ProjectKubeNodepool) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ProjectKubeNodepool. +func (mg *ProjectKubeNodepool) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this ProjectKubeOidc. +func (mg *ProjectKubeOidc) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ProjectKubeOidc. +func (mg *ProjectKubeOidc) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ProjectKubeOidc. 
+func (mg *ProjectKubeOidc) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ProjectKubeOidc. +func (mg *ProjectKubeOidc) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ProjectKubeOidc. +func (mg *ProjectKubeOidc) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ProjectKubeOidc. +func (mg *ProjectKubeOidc) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ProjectKubeOidc. +func (mg *ProjectKubeOidc) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ProjectKubeOidc. +func (mg *ProjectKubeOidc) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ProjectKubeOidc. +func (mg *ProjectKubeOidc) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ProjectKubeOidc. +func (mg *ProjectKubeOidc) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ProjectKubeOidc. +func (mg *ProjectKubeOidc) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ProjectKubeOidc. 
+func (mg *ProjectKubeOidc) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/kube/v1alpha1/zz_generated.managedlist.go b/apis/kube/v1alpha1/zz_generated.managedlist.go new file mode 100644 index 0000000..a624898 --- /dev/null +++ b/apis/kube/v1alpha1/zz_generated.managedlist.go @@ -0,0 +1,44 @@ +/* +Copyright 2022 Upbound Inc. +*/ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this ProjectKubeIprestrictionsList. +func (l *ProjectKubeIprestrictionsList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ProjectKubeList. +func (l *ProjectKubeList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ProjectKubeNodepoolList. +func (l *ProjectKubeNodepoolList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ProjectKubeOidcList. +func (l *ProjectKubeOidcList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/kube/v1alpha1/zz_generated_terraformed.go b/apis/kube/v1alpha1/zz_generated_terraformed.go new file mode 100755 index 0000000..9374202 --- /dev/null +++ b/apis/kube/v1alpha1/zz_generated_terraformed.go @@ -0,0 +1,354 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ProjectKube +func (mg *ProjectKube) GetTerraformResourceType() string { + return "ovh_cloud_project_kube" +} + +// GetConnectionDetailsMapping for this ProjectKube +func (tr *ProjectKube) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"kubeconfig": "status.atProvider.kubeconfig", "kubeconfig_attributes[*]": "status.atProvider.kubeconfigAttributes[*]"} +} + +// GetObservation of this ProjectKube +func (tr *ProjectKube) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ProjectKube +func (tr *ProjectKube) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ProjectKube +func (tr *ProjectKube) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ProjectKube +func (tr *ProjectKube) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ProjectKube +func (tr *ProjectKube) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ProjectKube +func (tr *ProjectKube) GetInitParameters() (map[string]any, error) { + p, 
err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// LateInitialize this ProjectKube using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *ProjectKube) LateInitialize(attrs []byte) (bool, error) { + params := &ProjectKubeParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ProjectKube) GetTerraformSchemaVersion() int { + return 0 +} + +// GetTerraformResourceType returns Terraform resource type for this ProjectKubeIprestrictions +func (mg *ProjectKubeIprestrictions) GetTerraformResourceType() string { + return "ovh_cloud_project_kube_iprestrictions" +} + +// GetConnectionDetailsMapping for this ProjectKubeIprestrictions +func (tr *ProjectKubeIprestrictions) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ProjectKubeIprestrictions +func (tr *ProjectKubeIprestrictions) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ProjectKubeIprestrictions +func (tr *ProjectKubeIprestrictions) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform 
resource of this ProjectKubeIprestrictions +func (tr *ProjectKubeIprestrictions) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ProjectKubeIprestrictions +func (tr *ProjectKubeIprestrictions) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ProjectKubeIprestrictions +func (tr *ProjectKubeIprestrictions) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ProjectKubeIprestrictions +func (tr *ProjectKubeIprestrictions) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// LateInitialize this ProjectKubeIprestrictions using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *ProjectKubeIprestrictions) LateInitialize(attrs []byte) (bool, error) { + params := &ProjectKubeIprestrictionsParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ProjectKubeIprestrictions) GetTerraformSchemaVersion() int { + return 0 +} + +// GetTerraformResourceType returns Terraform resource type for this ProjectKubeNodepool +func (mg *ProjectKubeNodepool) GetTerraformResourceType() string { + return "ovh_cloud_project_kube_nodepool" +} + +// GetConnectionDetailsMapping for this ProjectKubeNodepool +func (tr *ProjectKubeNodepool) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ProjectKubeNodepool +func (tr *ProjectKubeNodepool) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ProjectKubeNodepool +func (tr *ProjectKubeNodepool) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ProjectKubeNodepool +func (tr *ProjectKubeNodepool) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ProjectKubeNodepool +func (tr *ProjectKubeNodepool) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ProjectKubeNodepool +func (tr *ProjectKubeNodepool) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ProjectKubeNodepool +func (tr 
*ProjectKubeNodepool) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// LateInitialize this ProjectKubeNodepool using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *ProjectKubeNodepool) LateInitialize(attrs []byte) (bool, error) { + params := &ProjectKubeNodepoolParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ProjectKubeNodepool) GetTerraformSchemaVersion() int { + return 0 +} + +// GetTerraformResourceType returns Terraform resource type for this ProjectKubeOidc +func (mg *ProjectKubeOidc) GetTerraformResourceType() string { + return "ovh_cloud_project_kube_oidc" +} + +// GetConnectionDetailsMapping for this ProjectKubeOidc +func (tr *ProjectKubeOidc) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ProjectKubeOidc +func (tr *ProjectKubeOidc) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ProjectKubeOidc +func (tr *ProjectKubeOidc) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying 
Terraform resource of this ProjectKubeOidc +func (tr *ProjectKubeOidc) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ProjectKubeOidc +func (tr *ProjectKubeOidc) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ProjectKubeOidc +func (tr *ProjectKubeOidc) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ProjectKubeOidc +func (tr *ProjectKubeOidc) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// LateInitialize this ProjectKubeOidc using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *ProjectKubeOidc) LateInitialize(attrs []byte) (bool, error) { + params := &ProjectKubeOidcParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ProjectKubeOidc) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/kube/v1alpha1/zz_groupversion_info.go b/apis/kube/v1alpha1/zz_groupversion_info.go new file mode 100755 index 0000000..101b7ef --- /dev/null +++ b/apis/kube/v1alpha1/zz_groupversion_info.go @@ -0,0 +1,36 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=kube.ovh.edixos.io +// +versionName=v1alpha1 +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "kube.ovh.edixos.io" + CRDVersion = "v1alpha1" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/kube/v1alpha1/zz_projectkube_types.go b/apis/kube/v1alpha1/zz_projectkube_types.go new file mode 100755 index 0000000..b5b5c00 --- /dev/null +++ b/apis/kube/v1alpha1/zz_projectkube_types.go @@ -0,0 +1,391 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AdmissionpluginsInitParameters struct { + Disabled []*string `json:"disabled,omitempty" tf:"disabled,omitempty"` + + Enabled []*string `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type AdmissionpluginsObservation struct { + Disabled []*string `json:"disabled,omitempty" tf:"disabled,omitempty"` + + Enabled []*string `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type AdmissionpluginsParameters struct { + + // +kubebuilder:validation:Optional + Disabled []*string `json:"disabled,omitempty" tf:"disabled,omitempty"` + + // +kubebuilder:validation:Optional + Enabled []*string `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type ApiserverInitParameters struct { + Admissionplugins []AdmissionpluginsInitParameters `json:"admissionplugins,omitempty" tf:"admissionplugins,omitempty"` +} + +type ApiserverObservation struct { + Admissionplugins []AdmissionpluginsObservation `json:"admissionplugins,omitempty" tf:"admissionplugins,omitempty"` +} + +type ApiserverParameters struct { + + // +kubebuilder:validation:Optional + Admissionplugins []AdmissionpluginsParameters `json:"admissionplugins,omitempty" tf:"admissionplugins,omitempty"` +} + +type CustomizationApiserverAdmissionpluginsInitParameters struct { + Disabled []*string `json:"disabled,omitempty" tf:"disabled,omitempty"` + + Enabled []*string `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type CustomizationApiserverAdmissionpluginsObservation struct { + Disabled []*string `json:"disabled,omitempty" tf:"disabled,omitempty"` + + Enabled []*string `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type CustomizationApiserverAdmissionpluginsParameters struct { + + // +kubebuilder:validation:Optional + Disabled []*string `json:"disabled,omitempty" tf:"disabled,omitempty"` + + // 
+kubebuilder:validation:Optional + Enabled []*string `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type CustomizationApiserverInitParameters struct { + Admissionplugins []CustomizationApiserverAdmissionpluginsInitParameters `json:"admissionplugins,omitempty" tf:"admissionplugins,omitempty"` +} + +type CustomizationApiserverObservation struct { + Admissionplugins []CustomizationApiserverAdmissionpluginsObservation `json:"admissionplugins,omitempty" tf:"admissionplugins,omitempty"` +} + +type CustomizationApiserverParameters struct { + + // +kubebuilder:validation:Optional + Admissionplugins []CustomizationApiserverAdmissionpluginsParameters `json:"admissionplugins,omitempty" tf:"admissionplugins,omitempty"` +} + +type CustomizationInitParameters struct { + Apiserver []ApiserverInitParameters `json:"apiserver,omitempty" tf:"apiserver,omitempty"` +} + +type CustomizationKubeProxyInitParameters struct { + Iptables []IptablesInitParameters `json:"iptables,omitempty" tf:"iptables,omitempty"` + + Ipvs []IpvsInitParameters `json:"ipvs,omitempty" tf:"ipvs,omitempty"` +} + +type CustomizationKubeProxyObservation struct { + Iptables []IptablesObservation `json:"iptables,omitempty" tf:"iptables,omitempty"` + + Ipvs []IpvsObservation `json:"ipvs,omitempty" tf:"ipvs,omitempty"` +} + +type CustomizationKubeProxyParameters struct { + + // +kubebuilder:validation:Optional + Iptables []IptablesParameters `json:"iptables,omitempty" tf:"iptables,omitempty"` + + // +kubebuilder:validation:Optional + Ipvs []IpvsParameters `json:"ipvs,omitempty" tf:"ipvs,omitempty"` +} + +type CustomizationObservation struct { + Apiserver []ApiserverObservation `json:"apiserver,omitempty" tf:"apiserver,omitempty"` +} + +type CustomizationParameters struct { + + // +kubebuilder:validation:Optional + Apiserver []ApiserverParameters `json:"apiserver,omitempty" tf:"apiserver,omitempty"` +} + +type IptablesInitParameters struct { + MinSyncPeriod *string `json:"minSyncPeriod,omitempty" 
tf:"min_sync_period,omitempty"` + + SyncPeriod *string `json:"syncPeriod,omitempty" tf:"sync_period,omitempty"` +} + +type IptablesObservation struct { + MinSyncPeriod *string `json:"minSyncPeriod,omitempty" tf:"min_sync_period,omitempty"` + + SyncPeriod *string `json:"syncPeriod,omitempty" tf:"sync_period,omitempty"` +} + +type IptablesParameters struct { + + // +kubebuilder:validation:Optional + MinSyncPeriod *string `json:"minSyncPeriod,omitempty" tf:"min_sync_period,omitempty"` + + // +kubebuilder:validation:Optional + SyncPeriod *string `json:"syncPeriod,omitempty" tf:"sync_period,omitempty"` +} + +type IpvsInitParameters struct { + MinSyncPeriod *string `json:"minSyncPeriod,omitempty" tf:"min_sync_period,omitempty"` + + Scheduler *string `json:"scheduler,omitempty" tf:"scheduler,omitempty"` + + SyncPeriod *string `json:"syncPeriod,omitempty" tf:"sync_period,omitempty"` + + TCPFinTimeout *string `json:"tcpFinTimeout,omitempty" tf:"tcp_fin_timeout,omitempty"` + + TCPTimeout *string `json:"tcpTimeout,omitempty" tf:"tcp_timeout,omitempty"` + + UDPTimeout *string `json:"udpTimeout,omitempty" tf:"udp_timeout,omitempty"` +} + +type IpvsObservation struct { + MinSyncPeriod *string `json:"minSyncPeriod,omitempty" tf:"min_sync_period,omitempty"` + + Scheduler *string `json:"scheduler,omitempty" tf:"scheduler,omitempty"` + + SyncPeriod *string `json:"syncPeriod,omitempty" tf:"sync_period,omitempty"` + + TCPFinTimeout *string `json:"tcpFinTimeout,omitempty" tf:"tcp_fin_timeout,omitempty"` + + TCPTimeout *string `json:"tcpTimeout,omitempty" tf:"tcp_timeout,omitempty"` + + UDPTimeout *string `json:"udpTimeout,omitempty" tf:"udp_timeout,omitempty"` +} + +type IpvsParameters struct { + + // +kubebuilder:validation:Optional + MinSyncPeriod *string `json:"minSyncPeriod,omitempty" tf:"min_sync_period,omitempty"` + + // +kubebuilder:validation:Optional + Scheduler *string `json:"scheduler,omitempty" tf:"scheduler,omitempty"` + + // +kubebuilder:validation:Optional + SyncPeriod 
*string `json:"syncPeriod,omitempty" tf:"sync_period,omitempty"` + + // +kubebuilder:validation:Optional + TCPFinTimeout *string `json:"tcpFinTimeout,omitempty" tf:"tcp_fin_timeout,omitempty"` + + // +kubebuilder:validation:Optional + TCPTimeout *string `json:"tcpTimeout,omitempty" tf:"tcp_timeout,omitempty"` + + // +kubebuilder:validation:Optional + UDPTimeout *string `json:"udpTimeout,omitempty" tf:"udp_timeout,omitempty"` +} + +type KubeconfigAttributesInitParameters struct { +} + +type KubeconfigAttributesObservation struct { + ClientCertificate *string `json:"clientCertificate,omitempty" tf:"client_certificate,omitempty"` + + ClientKey *string `json:"clientKey,omitempty" tf:"client_key,omitempty"` + + ClusterCACertificate *string `json:"clusterCaCertificate,omitempty" tf:"cluster_ca_certificate,omitempty"` + + Host *string `json:"host,omitempty" tf:"host,omitempty"` +} + +type KubeconfigAttributesParameters struct { +} + +type PrivateNetworkConfigurationInitParameters struct { + + // If defined, all egress traffic will be routed towards this IP address, which should belong to the private network. Empty string means disabled. + DefaultVrackGateway *string `json:"defaultVrackGateway,omitempty" tf:"default_vrack_gateway,omitempty"` + + // Defines whether routing should default to using the nodes' private interface, instead of their public interface. Default is false. + PrivateNetworkRoutingAsDefault *bool `json:"privateNetworkRoutingAsDefault,omitempty" tf:"private_network_routing_as_default,omitempty"` +} + +type PrivateNetworkConfigurationObservation struct { + + // If defined, all egress traffic will be routed towards this IP address, which should belong to the private network. Empty string means disabled. + DefaultVrackGateway *string `json:"defaultVrackGateway,omitempty" tf:"default_vrack_gateway,omitempty"` + + // Defines whether routing should default to using the nodes' private interface, instead of their public interface. Default is false. 
+ PrivateNetworkRoutingAsDefault *bool `json:"privateNetworkRoutingAsDefault,omitempty" tf:"private_network_routing_as_default,omitempty"` +} + +type PrivateNetworkConfigurationParameters struct { + + // If defined, all egress traffic will be routed towards this IP address, which should belong to the private network. Empty string means disabled. + // +kubebuilder:validation:Optional + DefaultVrackGateway *string `json:"defaultVrackGateway" tf:"default_vrack_gateway,omitempty"` + + // Defines whether routing should default to using the nodes' private interface, instead of their public interface. Default is false. + // +kubebuilder:validation:Optional + PrivateNetworkRoutingAsDefault *bool `json:"privateNetworkRoutingAsDefault" tf:"private_network_routing_as_default,omitempty"` +} + +type ProjectKubeInitParameters struct { + Customization []CustomizationInitParameters `json:"customization,omitempty" tf:"customization,omitempty"` + + CustomizationApiserver []CustomizationApiserverInitParameters `json:"customizationApiserver,omitempty" tf:"customization_apiserver,omitempty"` + + CustomizationKubeProxy []CustomizationKubeProxyInitParameters `json:"customizationKubeProxy,omitempty" tf:"customization_kube_proxy,omitempty"` + + KubeProxyMode *string `json:"kubeProxyMode,omitempty" tf:"kube_proxy_mode,omitempty"` + + PrivateNetworkConfiguration []PrivateNetworkConfigurationInitParameters `json:"privateNetworkConfiguration,omitempty" tf:"private_network_configuration,omitempty"` + + PrivateNetworkID *string `json:"privateNetworkId,omitempty" tf:"private_network_id,omitempty"` + + Region *string `json:"region,omitempty" tf:"region,omitempty"` + + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + UpdatePolicy *string `json:"updatePolicy,omitempty" tf:"update_policy,omitempty"` + + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type ProjectKubeObservation struct { + ControlPlaneIsUpToDate *bool 
`json:"controlPlaneIsUpToDate,omitempty" tf:"control_plane_is_up_to_date,omitempty"` + + Customization []CustomizationObservation `json:"customization,omitempty" tf:"customization,omitempty"` + + CustomizationApiserver []CustomizationApiserverObservation `json:"customizationApiserver,omitempty" tf:"customization_apiserver,omitempty"` + + CustomizationKubeProxy []CustomizationKubeProxyObservation `json:"customizationKubeProxy,omitempty" tf:"customization_kube_proxy,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + IsUpToDate *bool `json:"isUpToDate,omitempty" tf:"is_up_to_date,omitempty"` + + KubeProxyMode *string `json:"kubeProxyMode,omitempty" tf:"kube_proxy_mode,omitempty"` + + NextUpgradeVersions []*string `json:"nextUpgradeVersions,omitempty" tf:"next_upgrade_versions,omitempty"` + + NodesURL *string `json:"nodesUrl,omitempty" tf:"nodes_url,omitempty"` + + PrivateNetworkConfiguration []PrivateNetworkConfigurationObservation `json:"privateNetworkConfiguration,omitempty" tf:"private_network_configuration,omitempty"` + + PrivateNetworkID *string `json:"privateNetworkId,omitempty" tf:"private_network_id,omitempty"` + + Region *string `json:"region,omitempty" tf:"region,omitempty"` + + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + URL *string `json:"url,omitempty" tf:"url,omitempty"` + + UpdatePolicy *string `json:"updatePolicy,omitempty" tf:"update_policy,omitempty"` + + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type ProjectKubeParameters struct { + + // +kubebuilder:validation:Optional + Customization []CustomizationParameters `json:"customization,omitempty" tf:"customization,omitempty"` + + // +kubebuilder:validation:Optional + CustomizationApiserver []CustomizationApiserverParameters `json:"customizationApiserver,omitempty" tf:"customization_apiserver,omitempty"` + + // +kubebuilder:validation:Optional + 
CustomizationKubeProxy []CustomizationKubeProxyParameters `json:"customizationKubeProxy,omitempty" tf:"customization_kube_proxy,omitempty"` + + // +kubebuilder:validation:Optional + KubeProxyMode *string `json:"kubeProxyMode,omitempty" tf:"kube_proxy_mode,omitempty"` + + // +kubebuilder:validation:Optional + PrivateNetworkConfiguration []PrivateNetworkConfigurationParameters `json:"privateNetworkConfiguration,omitempty" tf:"private_network_configuration,omitempty"` + + // +kubebuilder:validation:Optional + PrivateNetworkID *string `json:"privateNetworkId,omitempty" tf:"private_network_id,omitempty"` + + // +kubebuilder:validation:Optional + Region *string `json:"region,omitempty" tf:"region,omitempty"` + + // +kubebuilder:validation:Optional + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // +kubebuilder:validation:Optional + UpdatePolicy *string `json:"updatePolicy,omitempty" tf:"update_policy,omitempty"` + + // +kubebuilder:validation:Optional + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +// ProjectKubeSpec defines the desired state of ProjectKube +type ProjectKubeSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ProjectKubeParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider ProjectKubeInitParameters `json:"initProvider,omitempty"` +} + +// ProjectKubeStatus defines the observed state of ProjectKube. +type ProjectKubeStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ProjectKubeObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true + +// ProjectKube is the Schema for the ProjectKubes API. +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,ovh} +type ProjectKube struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.region) || (has(self.initProvider) && has(self.initProvider.region))",message="spec.forProvider.region is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.serviceName) || (has(self.initProvider) && has(self.initProvider.serviceName))",message="spec.forProvider.serviceName is a required parameter" + Spec ProjectKubeSpec `json:"spec"` + Status ProjectKubeStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ProjectKubeList contains a list of ProjectKubes +type ProjectKubeList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ProjectKube 
`json:"items"` +} + +// Repository type metadata. +var ( + ProjectKube_Kind = "ProjectKube" + ProjectKube_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ProjectKube_Kind}.String() + ProjectKube_KindAPIVersion = ProjectKube_Kind + "." + CRDGroupVersion.String() + ProjectKube_GroupVersionKind = CRDGroupVersion.WithKind(ProjectKube_Kind) +) + +func init() { + SchemeBuilder.Register(&ProjectKube{}, &ProjectKubeList{}) +} diff --git a/apis/kube/v1alpha1/zz_projectkubeiprestrictions_types.go b/apis/kube/v1alpha1/zz_projectkubeiprestrictions_types.go new file mode 100755 index 0000000..71ad610 --- /dev/null +++ b/apis/kube/v1alpha1/zz_projectkubeiprestrictions_types.go @@ -0,0 +1,121 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ProjectKubeIprestrictionsInitParameters struct { + + // List of IP restrictions for the cluster + Ips []*string `json:"ips,omitempty" tf:"ips,omitempty"` + + // Kube ID + KubeID *string `json:"kubeId,omitempty" tf:"kube_id,omitempty"` + + // Service name + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` +} + +type ProjectKubeIprestrictionsObservation struct { + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // List of IP restrictions for the cluster + Ips []*string `json:"ips,omitempty" tf:"ips,omitempty"` + + // Kube ID + KubeID *string `json:"kubeId,omitempty" tf:"kube_id,omitempty"` + + // Service name + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` +} + +type ProjectKubeIprestrictionsParameters struct { + + // List of IP restrictions for the cluster + // +kubebuilder:validation:Optional + Ips []*string `json:"ips,omitempty" 
tf:"ips,omitempty"` + + // Kube ID + // +kubebuilder:validation:Optional + KubeID *string `json:"kubeId,omitempty" tf:"kube_id,omitempty"` + + // Service name + // +kubebuilder:validation:Optional + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` +} + +// ProjectKubeIprestrictionsSpec defines the desired state of ProjectKubeIprestrictions +type ProjectKubeIprestrictionsSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ProjectKubeIprestrictionsParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ProjectKubeIprestrictionsInitParameters `json:"initProvider,omitempty"` +} + +// ProjectKubeIprestrictionsStatus defines the observed state of ProjectKubeIprestrictions. +type ProjectKubeIprestrictionsStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ProjectKubeIprestrictionsObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true + +// ProjectKubeIprestrictions is the Schema for the ProjectKubeIprestrictionss API. 
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,ovh} +type ProjectKubeIprestrictions struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.ips) || (has(self.initProvider) && has(self.initProvider.ips))",message="spec.forProvider.ips is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.kubeId) || (has(self.initProvider) && has(self.initProvider.kubeId))",message="spec.forProvider.kubeId is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.serviceName) || (has(self.initProvider) && has(self.initProvider.serviceName))",message="spec.forProvider.serviceName is a required parameter" + Spec ProjectKubeIprestrictionsSpec `json:"spec"` + Status ProjectKubeIprestrictionsStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ProjectKubeIprestrictionsList contains a list of ProjectKubeIprestrictionss +type ProjectKubeIprestrictionsList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items 
[]ProjectKubeIprestrictions `json:"items"` +} + +// Repository type metadata. +var ( + ProjectKubeIprestrictions_Kind = "ProjectKubeIprestrictions" + ProjectKubeIprestrictions_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ProjectKubeIprestrictions_Kind}.String() + ProjectKubeIprestrictions_KindAPIVersion = ProjectKubeIprestrictions_Kind + "." + CRDGroupVersion.String() + ProjectKubeIprestrictions_GroupVersionKind = CRDGroupVersion.WithKind(ProjectKubeIprestrictions_Kind) +) + +func init() { + SchemeBuilder.Register(&ProjectKubeIprestrictions{}, &ProjectKubeIprestrictionsList{}) +} diff --git a/apis/kube/v1alpha1/zz_projectkubenodepool_types.go b/apis/kube/v1alpha1/zz_projectkubenodepool_types.go new file mode 100755 index 0000000..ddf83b0 --- /dev/null +++ b/apis/kube/v1alpha1/zz_projectkubenodepool_types.go @@ -0,0 +1,316 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type MetadataInitParameters struct { + + // annotations + Annotations map[string]*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // finalizers + Finalizers []*string `json:"finalizers,omitempty" tf:"finalizers,omitempty"` + + // labels + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` +} + +type MetadataObservation struct { + + // annotations + Annotations map[string]*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // finalizers + Finalizers []*string `json:"finalizers,omitempty" tf:"finalizers,omitempty"` + + // labels + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` +} + +type MetadataParameters struct { + + // annotations + // +kubebuilder:validation:Optional + Annotations map[string]*string `json:"annotations" tf:"annotations,omitempty"` + + // finalizers + // +kubebuilder:validation:Optional + Finalizers []*string `json:"finalizers" tf:"finalizers,omitempty"` + + // labels + // +kubebuilder:validation:Optional + Labels map[string]*string `json:"labels" tf:"labels,omitempty"` +} + +type ProjectKubeNodepoolInitParameters struct { + + // Enable anti affinity groups for nodes in the pool + AntiAffinity *bool `json:"antiAffinity,omitempty" tf:"anti_affinity,omitempty"` + + // Enable auto-scaling for the pool + Autoscale *bool `json:"autoscale,omitempty" tf:"autoscale,omitempty"` + + // Number of nodes you desire in the pool + DesiredNodes *float64 `json:"desiredNodes,omitempty" tf:"desired_nodes,omitempty"` + + // Flavor name + FlavorName *string `json:"flavorName,omitempty" tf:"flavor_name,omitempty"` + + // Kube ID + KubeID *string `json:"kubeId,omitempty" tf:"kube_id,omitempty"` + + // Number of nodes you desire in the pool + MaxNodes *float64 `json:"maxNodes,omitempty" 
tf:"max_nodes,omitempty"` + + // Number of nodes you desire in the pool + MinNodes *float64 `json:"minNodes,omitempty" tf:"min_nodes,omitempty"` + + // Enable monthly billing on all nodes in the pool + MonthlyBilled *bool `json:"monthlyBilled,omitempty" tf:"monthly_billed,omitempty"` + + // Service name + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // Node pool template + Template []TemplateInitParameters `json:"template,omitempty" tf:"template,omitempty"` +} + +type ProjectKubeNodepoolObservation struct { + + // Enable anti affinity groups for nodes in the pool + AntiAffinity *bool `json:"antiAffinity,omitempty" tf:"anti_affinity,omitempty"` + + // Enable auto-scaling for the pool + Autoscale *bool `json:"autoscale,omitempty" tf:"autoscale,omitempty"` + + // Number of nodes which are actually ready in the pool + AvailableNodes *float64 `json:"availableNodes,omitempty" tf:"available_nodes,omitempty"` + + // Creation date + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + + // Number of nodes present in the pool + CurrentNodes *float64 `json:"currentNodes,omitempty" tf:"current_nodes,omitempty"` + + // Number of nodes you desire in the pool + DesiredNodes *float64 `json:"desiredNodes,omitempty" tf:"desired_nodes,omitempty"` + + // Flavor name + Flavor *string `json:"flavor,omitempty" tf:"flavor,omitempty"` + + // Flavor name + FlavorName *string `json:"flavorName,omitempty" tf:"flavor_name,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Kube ID + KubeID *string `json:"kubeId,omitempty" tf:"kube_id,omitempty"` + + // Number of nodes you desire in the pool + MaxNodes *float64 `json:"maxNodes,omitempty" tf:"max_nodes,omitempty"` + + // Number of nodes you desire in the pool + MinNodes *float64 `json:"minNodes,omitempty" tf:"min_nodes,omitempty"` + + // Enable monthly billing on all nodes in the pool + MonthlyBilled *bool `json:"monthlyBilled,omitempty" 
tf:"monthly_billed,omitempty"` + + // Project id + ProjectID *string `json:"projectId,omitempty" tf:"project_id,omitempty"` + + // Service name + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // Status describing the state between number of nodes wanted and available ones + SizeStatus *string `json:"sizeStatus,omitempty" tf:"size_status,omitempty"` + + // Current status + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // Node pool template + Template []TemplateObservation `json:"template,omitempty" tf:"template,omitempty"` + + // Number of nodes with latest version installed in the pool + UpToDateNodes *float64 `json:"upToDateNodes,omitempty" tf:"up_to_date_nodes,omitempty"` + + // Last update date + UpdatedAt *string `json:"updatedAt,omitempty" tf:"updated_at,omitempty"` +} + +type ProjectKubeNodepoolParameters struct { + + // Enable anti affinity groups for nodes in the pool + // +kubebuilder:validation:Optional + AntiAffinity *bool `json:"antiAffinity,omitempty" tf:"anti_affinity,omitempty"` + + // Enable auto-scaling for the pool + // +kubebuilder:validation:Optional + Autoscale *bool `json:"autoscale,omitempty" tf:"autoscale,omitempty"` + + // Number of nodes you desire in the pool + // +kubebuilder:validation:Optional + DesiredNodes *float64 `json:"desiredNodes,omitempty" tf:"desired_nodes,omitempty"` + + // Flavor name + // +kubebuilder:validation:Optional + FlavorName *string `json:"flavorName,omitempty" tf:"flavor_name,omitempty"` + + // Kube ID + // +kubebuilder:validation:Optional + KubeID *string `json:"kubeId,omitempty" tf:"kube_id,omitempty"` + + // Number of nodes you desire in the pool + // +kubebuilder:validation:Optional + MaxNodes *float64 `json:"maxNodes,omitempty" tf:"max_nodes,omitempty"` + + // Number of nodes you desire in the pool + // +kubebuilder:validation:Optional + MinNodes *float64 `json:"minNodes,omitempty" tf:"min_nodes,omitempty"` + + // Enable monthly billing on all nodes in 
the pool + // +kubebuilder:validation:Optional + MonthlyBilled *bool `json:"monthlyBilled,omitempty" tf:"monthly_billed,omitempty"` + + // Service name + // +kubebuilder:validation:Optional + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // Node pool template + // +kubebuilder:validation:Optional + Template []TemplateParameters `json:"template,omitempty" tf:"template,omitempty"` +} + +type SpecInitParameters struct { + + // taints + Taints []map[string]*string `json:"taints,omitempty" tf:"taints,omitempty"` + + // unschedulable + Unschedulable *bool `json:"unschedulable,omitempty" tf:"unschedulable,omitempty"` +} + +type SpecObservation struct { + + // taints + Taints []map[string]*string `json:"taints,omitempty" tf:"taints,omitempty"` + + // unschedulable + Unschedulable *bool `json:"unschedulable,omitempty" tf:"unschedulable,omitempty"` +} + +type SpecParameters struct { + + // taints + // +kubebuilder:validation:Optional + Taints []map[string]*string `json:"taints" tf:"taints,omitempty"` + + // unschedulable + // +kubebuilder:validation:Optional + Unschedulable *bool `json:"unschedulable" tf:"unschedulable,omitempty"` +} + +type TemplateInitParameters struct { + + // metadata + Metadata []MetadataInitParameters `json:"metadata,omitempty" tf:"metadata,omitempty"` + + // spec + Spec []SpecInitParameters `json:"spec,omitempty" tf:"spec,omitempty"` +} + +type TemplateObservation struct { + + // metadata + Metadata []MetadataObservation `json:"metadata,omitempty" tf:"metadata,omitempty"` + + // spec + Spec []SpecObservation `json:"spec,omitempty" tf:"spec,omitempty"` +} + +type TemplateParameters struct { + + // metadata + // +kubebuilder:validation:Optional + Metadata []MetadataParameters `json:"metadata" tf:"metadata,omitempty"` + + // spec + // +kubebuilder:validation:Optional + Spec []SpecParameters `json:"spec" tf:"spec,omitempty"` +} + +// ProjectKubeNodepoolSpec defines the desired state of ProjectKubeNodepool +type 
ProjectKubeNodepoolSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ProjectKubeNodepoolParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ProjectKubeNodepoolInitParameters `json:"initProvider,omitempty"` +} + +// ProjectKubeNodepoolStatus defines the observed state of ProjectKubeNodepool. +type ProjectKubeNodepoolStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ProjectKubeNodepoolObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true + +// ProjectKubeNodepool is the Schema for the ProjectKubeNodepools API. 
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,ovh} +type ProjectKubeNodepool struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.flavorName) || (has(self.initProvider) && has(self.initProvider.flavorName))",message="spec.forProvider.flavorName is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.kubeId) || (has(self.initProvider) && has(self.initProvider.kubeId))",message="spec.forProvider.kubeId is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.serviceName) || (has(self.initProvider) && has(self.initProvider.serviceName))",message="spec.forProvider.serviceName is a required parameter" + Spec ProjectKubeNodepoolSpec `json:"spec"` + Status ProjectKubeNodepoolStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ProjectKubeNodepoolList contains a list of ProjectKubeNodepools +type ProjectKubeNodepoolList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items 
[]ProjectKubeNodepool `json:"items"` +} + +// Repository type metadata. +var ( + ProjectKubeNodepool_Kind = "ProjectKubeNodepool" + ProjectKubeNodepool_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ProjectKubeNodepool_Kind}.String() + ProjectKubeNodepool_KindAPIVersion = ProjectKubeNodepool_Kind + "." + CRDGroupVersion.String() + ProjectKubeNodepool_GroupVersionKind = CRDGroupVersion.WithKind(ProjectKubeNodepool_Kind) +) + +func init() { + SchemeBuilder.Register(&ProjectKubeNodepool{}, &ProjectKubeNodepoolList{}) +} diff --git a/apis/kube/v1alpha1/zz_projectkubeoidc_types.go b/apis/kube/v1alpha1/zz_projectkubeoidc_types.go new file mode 100755 index 0000000..1aead4c --- /dev/null +++ b/apis/kube/v1alpha1/zz_projectkubeoidc_types.go @@ -0,0 +1,168 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ProjectKubeOidcInitParameters struct { + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + IssuerURL *string `json:"issuerUrl,omitempty" tf:"issuer_url,omitempty"` + + KubeID *string `json:"kubeId,omitempty" tf:"kube_id,omitempty"` + + OidcCAContent *string `json:"oidcCaContent,omitempty" tf:"oidc_ca_content,omitempty"` + + OidcGroupsClaim []*string `json:"oidcGroupsClaim,omitempty" tf:"oidc_groups_claim,omitempty"` + + OidcGroupsPrefix *string `json:"oidcGroupsPrefix,omitempty" tf:"oidc_groups_prefix,omitempty"` + + OidcRequiredClaim []*string `json:"oidcRequiredClaim,omitempty" tf:"oidc_required_claim,omitempty"` + + OidcSigningAlgs []*string `json:"oidcSigningAlgs,omitempty" tf:"oidc_signing_algs,omitempty"` + + OidcUsernameClaim *string `json:"oidcUsernameClaim,omitempty" tf:"oidc_username_claim,omitempty"` + + 
OidcUsernamePrefix *string `json:"oidcUsernamePrefix,omitempty" tf:"oidc_username_prefix,omitempty"` + + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` +} + +type ProjectKubeOidcObservation struct { + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + IssuerURL *string `json:"issuerUrl,omitempty" tf:"issuer_url,omitempty"` + + KubeID *string `json:"kubeId,omitempty" tf:"kube_id,omitempty"` + + OidcCAContent *string `json:"oidcCaContent,omitempty" tf:"oidc_ca_content,omitempty"` + + OidcGroupsClaim []*string `json:"oidcGroupsClaim,omitempty" tf:"oidc_groups_claim,omitempty"` + + OidcGroupsPrefix *string `json:"oidcGroupsPrefix,omitempty" tf:"oidc_groups_prefix,omitempty"` + + OidcRequiredClaim []*string `json:"oidcRequiredClaim,omitempty" tf:"oidc_required_claim,omitempty"` + + OidcSigningAlgs []*string `json:"oidcSigningAlgs,omitempty" tf:"oidc_signing_algs,omitempty"` + + OidcUsernameClaim *string `json:"oidcUsernameClaim,omitempty" tf:"oidc_username_claim,omitempty"` + + OidcUsernamePrefix *string `json:"oidcUsernamePrefix,omitempty" tf:"oidc_username_prefix,omitempty"` + + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` +} + +type ProjectKubeOidcParameters struct { + + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // +kubebuilder:validation:Optional + IssuerURL *string `json:"issuerUrl,omitempty" tf:"issuer_url,omitempty"` + + // +kubebuilder:validation:Optional + KubeID *string `json:"kubeId,omitempty" tf:"kube_id,omitempty"` + + // +kubebuilder:validation:Optional + OidcCAContent *string `json:"oidcCaContent,omitempty" tf:"oidc_ca_content,omitempty"` + + // +kubebuilder:validation:Optional + OidcGroupsClaim []*string `json:"oidcGroupsClaim,omitempty" tf:"oidc_groups_claim,omitempty"` + + // +kubebuilder:validation:Optional + OidcGroupsPrefix *string 
`json:"oidcGroupsPrefix,omitempty" tf:"oidc_groups_prefix,omitempty"` + + // +kubebuilder:validation:Optional + OidcRequiredClaim []*string `json:"oidcRequiredClaim,omitempty" tf:"oidc_required_claim,omitempty"` + + // +kubebuilder:validation:Optional + OidcSigningAlgs []*string `json:"oidcSigningAlgs,omitempty" tf:"oidc_signing_algs,omitempty"` + + // +kubebuilder:validation:Optional + OidcUsernameClaim *string `json:"oidcUsernameClaim,omitempty" tf:"oidc_username_claim,omitempty"` + + // +kubebuilder:validation:Optional + OidcUsernamePrefix *string `json:"oidcUsernamePrefix,omitempty" tf:"oidc_username_prefix,omitempty"` + + // +kubebuilder:validation:Optional + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` +} + +// ProjectKubeOidcSpec defines the desired state of ProjectKubeOidc +type ProjectKubeOidcSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ProjectKubeOidcParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ProjectKubeOidcInitParameters `json:"initProvider,omitempty"` +} + +// ProjectKubeOidcStatus defines the observed state of ProjectKubeOidc. 
+type ProjectKubeOidcStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ProjectKubeOidcObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true + +// ProjectKubeOidc is the Schema for the ProjectKubeOidcs API. +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,ovh} +type ProjectKubeOidc struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.clientId) || (has(self.initProvider) && has(self.initProvider.clientId))",message="spec.forProvider.clientId is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.issuerUrl) || (has(self.initProvider) && has(self.initProvider.issuerUrl))",message="spec.forProvider.issuerUrl is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.kubeId) || (has(self.initProvider) && has(self.initProvider.kubeId))",message="spec.forProvider.kubeId is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' 
in self.managementPolicies) || has(self.forProvider.serviceName) || (has(self.initProvider) && has(self.initProvider.serviceName))",message="spec.forProvider.serviceName is a required parameter" + Spec ProjectKubeOidcSpec `json:"spec"` + Status ProjectKubeOidcStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ProjectKubeOidcList contains a list of ProjectKubeOidcs +type ProjectKubeOidcList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ProjectKubeOidc `json:"items"` +} + +// Repository type metadata. +var ( + ProjectKubeOidc_Kind = "ProjectKubeOidc" + ProjectKubeOidc_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ProjectKubeOidc_Kind}.String() + ProjectKubeOidc_KindAPIVersion = ProjectKubeOidc_Kind + "." + CRDGroupVersion.String() + ProjectKubeOidc_GroupVersionKind = CRDGroupVersion.WithKind(ProjectKubeOidc_Kind) +) + +func init() { + SchemeBuilder.Register(&ProjectKubeOidc{}, &ProjectKubeOidcList{}) +} diff --git a/apis/lb/v1alpha1/zz_generated.deepcopy.go b/apis/lb/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..585e137 --- /dev/null +++ b/apis/lb/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,4990 @@ +//go:build !ignore_autogenerated + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ActionInitParameters) DeepCopyInto(out *ActionInitParameters) { + *out = *in + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(float64) + **out = **in + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionInitParameters. +func (in *ActionInitParameters) DeepCopy() *ActionInitParameters { + if in == nil { + return nil + } + out := new(ActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionObservation) DeepCopyInto(out *ActionObservation) { + *out = *in + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(float64) + **out = **in + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionObservation. +func (in *ActionObservation) DeepCopy() *ActionObservation { + if in == nil { + return nil + } + out := new(ActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ActionParameters) DeepCopyInto(out *ActionParameters) { + *out = *in + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(float64) + **out = **in + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionParameters. +func (in *ActionParameters) DeepCopy() *ActionParameters { + if in == nil { + return nil + } + out := new(ActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigurationInitParameters) DeepCopyInto(out *ConfigurationInitParameters) { + *out = *in + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationInitParameters. +func (in *ConfigurationInitParameters) DeepCopy() *ConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigurationObservation) DeepCopyInto(out *ConfigurationObservation) { + *out = *in + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationObservation. 
+func (in *ConfigurationObservation) DeepCopy() *ConfigurationObservation { + if in == nil { + return nil + } + out := new(ConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigurationParameters) DeepCopyInto(out *ConfigurationParameters) { + *out = *in + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationParameters. +func (in *ConfigurationParameters) DeepCopy() *ConfigurationParameters { + if in == nil { + return nil + } + out := new(ConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DetailsInitParameters) DeepCopyInto(out *DetailsInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DetailsInitParameters. +func (in *DetailsInitParameters) DeepCopy() *DetailsInitParameters { + if in == nil { + return nil + } + out := new(DetailsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DetailsObservation) DeepCopyInto(out *DetailsObservation) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Domain != nil { + in, out := &in.Domain, &out.Domain + *out = new(string) + **out = **in + } + if in.OrderDetailID != nil { + in, out := &in.OrderDetailID, &out.OrderDetailID + *out = new(float64) + **out = **in + } + if in.Quantity != nil { + in, out := &in.Quantity, &out.Quantity + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DetailsObservation. +func (in *DetailsObservation) DeepCopy() *DetailsObservation { + if in == nil { + return nil + } + out := new(DetailsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DetailsParameters) DeepCopyInto(out *DetailsParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DetailsParameters. +func (in *DetailsParameters) DeepCopy() *DetailsParameters { + if in == nil { + return nil + } + out := new(DetailsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPFarm) DeepCopyInto(out *HTTPFarm) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPFarm. +func (in *HTTPFarm) DeepCopy() *HTTPFarm { + if in == nil { + return nil + } + out := new(HTTPFarm) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *HTTPFarm) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPFarmInitParameters) DeepCopyInto(out *HTTPFarmInitParameters) { + *out = *in + if in.Balance != nil { + in, out := &in.Balance, &out.Balance + *out = new(string) + **out = **in + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.Probe != nil { + in, out := &in.Probe, &out.Probe + *out = make([]ProbeInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.Stickiness != nil { + in, out := &in.Stickiness, &out.Stickiness + *out = new(string) + **out = **in + } + if in.VrackNetworkID != nil { + in, out := &in.VrackNetworkID, &out.VrackNetworkID + *out = new(float64) + **out = **in + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPFarmInitParameters. +func (in *HTTPFarmInitParameters) DeepCopy() *HTTPFarmInitParameters { + if in == nil { + return nil + } + out := new(HTTPFarmInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
// NOTE(review): autogenerated deepcopy code (zz_generated.deepcopy.go) —
// regenerate with controller-gen rather than editing by hand.
func (in *HTTPFarmList) DeepCopyInto(out *HTTPFarmList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		// Deep-copy each list item; a shallow copy would alias Spec/Status pointers.
		in, out := &in.Items, &out.Items
		*out = make([]HTTPFarm, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPFarmList.
func (in *HTTPFarmList) DeepCopy() *HTTPFarmList {
	if in == nil {
		return nil
	}
	out := new(HTTPFarmList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *HTTPFarmList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPFarmObservation) DeepCopyInto(out *HTTPFarmObservation) {
	*out = *in
	if in.Balance != nil {
		in, out := &in.Balance, &out.Balance
		*out = new(string)
		**out = **in
	}
	if in.DisplayName != nil {
		in, out := &in.DisplayName, &out.DisplayName
		*out = new(string)
		**out = **in
	}
	if in.ID != nil {
		in, out := &in.ID, &out.ID
		*out = new(string)
		**out = **in
	}
	if in.Port != nil {
		in, out := &in.Port, &out.Port
		*out = new(float64)
		**out = **in
	}
	if in.Probe != nil {
		in, out := &in.Probe, &out.Probe
		*out = make([]ProbeObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ServiceName != nil {
		in, out := &in.ServiceName, &out.ServiceName
		*out = new(string)
		**out = **in
	}
	if in.Stickiness != nil {
		in, out := &in.Stickiness, &out.Stickiness
		*out = new(string)
		**out = **in
	}
	if in.VrackNetworkID != nil {
		in, out := &in.VrackNetworkID, &out.VrackNetworkID
		*out = new(float64)
		**out = **in
	}
	if in.Zone != nil {
		in, out := &in.Zone, &out.Zone
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPFarmObservation.
func (in *HTTPFarmObservation) DeepCopy() *HTTPFarmObservation {
	if in == nil {
		return nil
	}
	out := new(HTTPFarmObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPFarmParameters) DeepCopyInto(out *HTTPFarmParameters) {
	*out = *in
	if in.Balance != nil {
		in, out := &in.Balance, &out.Balance
		*out = new(string)
		**out = **in
	}
	if in.DisplayName != nil {
		in, out := &in.DisplayName, &out.DisplayName
		*out = new(string)
		**out = **in
	}
	if in.Port != nil {
		in, out := &in.Port, &out.Port
		*out = new(float64)
		**out = **in
	}
	if in.Probe != nil {
		in, out := &in.Probe, &out.Probe
		*out = make([]ProbeParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ServiceName != nil {
		in, out := &in.ServiceName, &out.ServiceName
		*out = new(string)
		**out = **in
	}
	if in.Stickiness != nil {
		in, out := &in.Stickiness, &out.Stickiness
		*out = new(string)
		**out = **in
	}
	if in.VrackNetworkID != nil {
		in, out := &in.VrackNetworkID, &out.VrackNetworkID
		*out = new(float64)
		**out = **in
	}
	if in.Zone != nil {
		in, out := &in.Zone, &out.Zone
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPFarmParameters.
func (in *HTTPFarmParameters) DeepCopy() *HTTPFarmParameters {
	if in == nil {
		return nil
	}
	out := new(HTTPFarmParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): autogenerated deepcopy code — regenerate with controller-gen
// rather than editing by hand.
func (in *HTTPFarmServer) DeepCopyInto(out *HTTPFarmServer) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPFarmServer.
func (in *HTTPFarmServer) DeepCopy() *HTTPFarmServer {
	if in == nil {
		return nil
	}
	out := new(HTTPFarmServer)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *HTTPFarmServer) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPFarmServerInitParameters) DeepCopyInto(out *HTTPFarmServerInitParameters) {
	*out = *in
	if in.Address != nil {
		in, out := &in.Address, &out.Address
		*out = new(string)
		**out = **in
	}
	if in.Backup != nil {
		in, out := &in.Backup, &out.Backup
		*out = new(bool)
		**out = **in
	}
	if in.Chain != nil {
		in, out := &in.Chain, &out.Chain
		*out = new(string)
		**out = **in
	}
	if in.DisplayName != nil {
		in, out := &in.DisplayName, &out.DisplayName
		*out = new(string)
		**out = **in
	}
	if in.FarmID != nil {
		in, out := &in.FarmID, &out.FarmID
		*out = new(float64)
		**out = **in
	}
	if in.OnMarkedDown != nil {
		in, out := &in.OnMarkedDown, &out.OnMarkedDown
		*out = new(string)
		**out = **in
	}
	if in.Port != nil {
		in, out := &in.Port, &out.Port
		*out = new(float64)
		**out = **in
	}
	if in.Probe != nil {
		in, out := &in.Probe, &out.Probe
		*out = new(bool)
		**out = **in
	}
	if in.ProxyProtocolVersion != nil {
		in, out := &in.ProxyProtocolVersion, &out.ProxyProtocolVersion
		*out = new(string)
		**out = **in
	}
	if in.SSL != nil {
		in, out := &in.SSL, &out.SSL
		*out = new(bool)
		**out = **in
	}
	if in.ServiceName != nil {
		in, out := &in.ServiceName, &out.ServiceName
		*out = new(string)
		**out = **in
	}
	if in.Status != nil {
		in, out := &in.Status, &out.Status
		*out = new(string)
		**out = **in
	}
	if in.Weight != nil {
		in, out := &in.Weight, &out.Weight
		*out = new(float64)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPFarmServerInitParameters.
func (in *HTTPFarmServerInitParameters) DeepCopy() *HTTPFarmServerInitParameters {
	if in == nil {
		return nil
	}
	out := new(HTTPFarmServerInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPFarmServerList) DeepCopyInto(out *HTTPFarmServerList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]HTTPFarmServer, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPFarmServerList.
func (in *HTTPFarmServerList) DeepCopy() *HTTPFarmServerList {
	if in == nil {
		return nil
	}
	out := new(HTTPFarmServerList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *HTTPFarmServerList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): autogenerated deepcopy code — regenerate with controller-gen
// rather than editing by hand.
func (in *HTTPFarmServerObservation) DeepCopyInto(out *HTTPFarmServerObservation) {
	*out = *in
	if in.Address != nil {
		in, out := &in.Address, &out.Address
		*out = new(string)
		**out = **in
	}
	if in.Backup != nil {
		in, out := &in.Backup, &out.Backup
		*out = new(bool)
		**out = **in
	}
	if in.Chain != nil {
		in, out := &in.Chain, &out.Chain
		*out = new(string)
		**out = **in
	}
	if in.Cookie != nil {
		in, out := &in.Cookie, &out.Cookie
		*out = new(string)
		**out = **in
	}
	if in.DisplayName != nil {
		in, out := &in.DisplayName, &out.DisplayName
		*out = new(string)
		**out = **in
	}
	if in.FarmID != nil {
		in, out := &in.FarmID, &out.FarmID
		*out = new(float64)
		**out = **in
	}
	if in.ID != nil {
		in, out := &in.ID, &out.ID
		*out = new(string)
		**out = **in
	}
	if in.OnMarkedDown != nil {
		in, out := &in.OnMarkedDown, &out.OnMarkedDown
		*out = new(string)
		**out = **in
	}
	if in.Port != nil {
		in, out := &in.Port, &out.Port
		*out = new(float64)
		**out = **in
	}
	if in.Probe != nil {
		in, out := &in.Probe, &out.Probe
		*out = new(bool)
		**out = **in
	}
	if in.ProxyProtocolVersion != nil {
		in, out := &in.ProxyProtocolVersion, &out.ProxyProtocolVersion
		*out = new(string)
		**out = **in
	}
	if in.SSL != nil {
		in, out := &in.SSL, &out.SSL
		*out = new(bool)
		**out = **in
	}
	if in.ServiceName != nil {
		in, out := &in.ServiceName, &out.ServiceName
		*out = new(string)
		**out = **in
	}
	if in.Status != nil {
		in, out := &in.Status, &out.Status
		*out = new(string)
		**out = **in
	}
	if in.Weight != nil {
		in, out := &in.Weight, &out.Weight
		*out = new(float64)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPFarmServerObservation.
func (in *HTTPFarmServerObservation) DeepCopy() *HTTPFarmServerObservation {
	if in == nil {
		return nil
	}
	out := new(HTTPFarmServerObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPFarmServerParameters) DeepCopyInto(out *HTTPFarmServerParameters) {
	*out = *in
	if in.Address != nil {
		in, out := &in.Address, &out.Address
		*out = new(string)
		**out = **in
	}
	if in.Backup != nil {
		in, out := &in.Backup, &out.Backup
		*out = new(bool)
		**out = **in
	}
	if in.Chain != nil {
		in, out := &in.Chain, &out.Chain
		*out = new(string)
		**out = **in
	}
	if in.DisplayName != nil {
		in, out := &in.DisplayName, &out.DisplayName
		*out = new(string)
		**out = **in
	}
	if in.FarmID != nil {
		in, out := &in.FarmID, &out.FarmID
		*out = new(float64)
		**out = **in
	}
	if in.OnMarkedDown != nil {
		in, out := &in.OnMarkedDown, &out.OnMarkedDown
		*out = new(string)
		**out = **in
	}
	if in.Port != nil {
		in, out := &in.Port, &out.Port
		*out = new(float64)
		**out = **in
	}
	if in.Probe != nil {
		in, out := &in.Probe, &out.Probe
		*out = new(bool)
		**out = **in
	}
	if in.ProxyProtocolVersion != nil {
		in, out := &in.ProxyProtocolVersion, &out.ProxyProtocolVersion
		*out = new(string)
		**out = **in
	}
	if in.SSL != nil {
		in, out := &in.SSL, &out.SSL
		*out = new(bool)
		**out = **in
	}
	if in.ServiceName != nil {
		in, out := &in.ServiceName, &out.ServiceName
		*out = new(string)
		**out = **in
	}
	if in.Status != nil {
		in, out := &in.Status, &out.Status
		*out = new(string)
		**out = **in
	}
	if in.Weight != nil {
		in, out := &in.Weight, &out.Weight
		*out = new(float64)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPFarmServerParameters.
// NOTE(review): autogenerated deepcopy code — regenerate with controller-gen
// rather than editing by hand.
func (in *HTTPFarmServerParameters) DeepCopy() *HTTPFarmServerParameters {
	if in == nil {
		return nil
	}
	out := new(HTTPFarmServerParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPFarmServerSpec) DeepCopyInto(out *HTTPFarmServerSpec) {
	*out = *in
	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
	in.ForProvider.DeepCopyInto(&out.ForProvider)
	in.InitProvider.DeepCopyInto(&out.InitProvider)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPFarmServerSpec.
func (in *HTTPFarmServerSpec) DeepCopy() *HTTPFarmServerSpec {
	if in == nil {
		return nil
	}
	out := new(HTTPFarmServerSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPFarmServerStatus) DeepCopyInto(out *HTTPFarmServerStatus) {
	*out = *in
	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
	in.AtProvider.DeepCopyInto(&out.AtProvider)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPFarmServerStatus.
func (in *HTTPFarmServerStatus) DeepCopy() *HTTPFarmServerStatus {
	if in == nil {
		return nil
	}
	out := new(HTTPFarmServerStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPFarmSpec) DeepCopyInto(out *HTTPFarmSpec) {
	*out = *in
	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
	in.ForProvider.DeepCopyInto(&out.ForProvider)
	in.InitProvider.DeepCopyInto(&out.InitProvider)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPFarmSpec.
func (in *HTTPFarmSpec) DeepCopy() *HTTPFarmSpec {
	if in == nil {
		return nil
	}
	out := new(HTTPFarmSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPFarmStatus) DeepCopyInto(out *HTTPFarmStatus) {
	*out = *in
	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
	in.AtProvider.DeepCopyInto(&out.AtProvider)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPFarmStatus.
func (in *HTTPFarmStatus) DeepCopy() *HTTPFarmStatus {
	if in == nil {
		return nil
	}
	out := new(HTTPFarmStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPFrontend) DeepCopyInto(out *HTTPFrontend) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPFrontend.
func (in *HTTPFrontend) DeepCopy() *HTTPFrontend {
	if in == nil {
		return nil
	}
	out := new(HTTPFrontend)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *HTTPFrontend) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): autogenerated deepcopy code — regenerate with controller-gen
// rather than editing by hand.
func (in *HTTPFrontendInitParameters) DeepCopyInto(out *HTTPFrontendInitParameters) {
	*out = *in
	if in.AllowedSource != nil {
		// Slice of *string: allocate a new slice and a new string per non-nil element.
		in, out := &in.AllowedSource, &out.AllowedSource
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.DedicatedIpfo != nil {
		in, out := &in.DedicatedIpfo, &out.DedicatedIpfo
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.DefaultFarmID != nil {
		in, out := &in.DefaultFarmID, &out.DefaultFarmID
		*out = new(float64)
		**out = **in
	}
	if in.DefaultSSLID != nil {
		in, out := &in.DefaultSSLID, &out.DefaultSSLID
		*out = new(float64)
		**out = **in
	}
	if in.Disabled != nil {
		in, out := &in.Disabled, &out.Disabled
		*out = new(bool)
		**out = **in
	}
	if in.DisplayName != nil {
		in, out := &in.DisplayName, &out.DisplayName
		*out = new(string)
		**out = **in
	}
	if in.HTTPHeader != nil {
		in, out := &in.HTTPHeader, &out.HTTPHeader
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.Hsts != nil {
		in, out := &in.Hsts, &out.Hsts
		*out = new(bool)
		**out = **in
	}
	if in.Port != nil {
		in, out := &in.Port, &out.Port
		*out = new(string)
		**out = **in
	}
	if in.RedirectLocation != nil {
		in, out := &in.RedirectLocation, &out.RedirectLocation
		*out = new(string)
		**out = **in
	}
	if in.SSL != nil {
		in, out := &in.SSL, &out.SSL
		*out = new(bool)
		**out = **in
	}
	if in.ServiceName != nil {
		in, out := &in.ServiceName, &out.ServiceName
		*out = new(string)
		**out = **in
	}
	if in.Zone != nil {
		in, out := &in.Zone, &out.Zone
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPFrontendInitParameters.
func (in *HTTPFrontendInitParameters) DeepCopy() *HTTPFrontendInitParameters {
	if in == nil {
		return nil
	}
	out := new(HTTPFrontendInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPFrontendList) DeepCopyInto(out *HTTPFrontendList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]HTTPFrontend, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPFrontendList.
func (in *HTTPFrontendList) DeepCopy() *HTTPFrontendList {
	if in == nil {
		return nil
	}
	out := new(HTTPFrontendList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *HTTPFrontendList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): autogenerated deepcopy code — regenerate with controller-gen
// rather than editing by hand.
func (in *HTTPFrontendObservation) DeepCopyInto(out *HTTPFrontendObservation) {
	*out = *in
	if in.AllowedSource != nil {
		in, out := &in.AllowedSource, &out.AllowedSource
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.DedicatedIpfo != nil {
		in, out := &in.DedicatedIpfo, &out.DedicatedIpfo
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.DefaultFarmID != nil {
		in, out := &in.DefaultFarmID, &out.DefaultFarmID
		*out = new(float64)
		**out = **in
	}
	if in.DefaultSSLID != nil {
		in, out := &in.DefaultSSLID, &out.DefaultSSLID
		*out = new(float64)
		**out = **in
	}
	if in.Disabled != nil {
		in, out := &in.Disabled, &out.Disabled
		*out = new(bool)
		**out = **in
	}
	if in.DisplayName != nil {
		in, out := &in.DisplayName, &out.DisplayName
		*out = new(string)
		**out = **in
	}
	if in.HTTPHeader != nil {
		in, out := &in.HTTPHeader, &out.HTTPHeader
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.Hsts != nil {
		in, out := &in.Hsts, &out.Hsts
		*out = new(bool)
		**out = **in
	}
	if in.ID != nil {
		in, out := &in.ID, &out.ID
		*out = new(string)
		**out = **in
	}
	if in.Port != nil {
		in, out := &in.Port, &out.Port
		*out = new(string)
		**out = **in
	}
	if in.RedirectLocation != nil {
		in, out := &in.RedirectLocation, &out.RedirectLocation
		*out = new(string)
		**out = **in
	}
	if in.SSL != nil {
		in, out := &in.SSL, &out.SSL
		*out = new(bool)
		**out = **in
	}
	if in.ServiceName != nil {
		in, out := &in.ServiceName, &out.ServiceName
		*out = new(string)
		**out = **in
	}
	if in.Zone != nil {
		in, out := &in.Zone, &out.Zone
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPFrontendObservation.
func (in *HTTPFrontendObservation) DeepCopy() *HTTPFrontendObservation {
	if in == nil {
		return nil
	}
	out := new(HTTPFrontendObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPFrontendParameters) DeepCopyInto(out *HTTPFrontendParameters) {
	*out = *in
	if in.AllowedSource != nil {
		in, out := &in.AllowedSource, &out.AllowedSource
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.DedicatedIpfo != nil {
		in, out := &in.DedicatedIpfo, &out.DedicatedIpfo
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.DefaultFarmID != nil {
		in, out := &in.DefaultFarmID, &out.DefaultFarmID
		*out = new(float64)
		**out = **in
	}
	if in.DefaultSSLID != nil {
		in, out := &in.DefaultSSLID, &out.DefaultSSLID
		*out = new(float64)
		**out = **in
	}
	if in.Disabled != nil {
		in, out := &in.Disabled, &out.Disabled
		*out = new(bool)
		**out = **in
	}
	if in.DisplayName != nil {
		in, out := &in.DisplayName, &out.DisplayName
		*out = new(string)
		**out = **in
	}
	if in.HTTPHeader != nil {
		in, out := &in.HTTPHeader, &out.HTTPHeader
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.Hsts != nil {
		in, out := &in.Hsts, &out.Hsts
		*out = new(bool)
		**out = **in
	}
	if in.Port != nil {
		in, out := &in.Port, &out.Port
		*out = new(string)
		**out = **in
	}
	if in.RedirectLocation != nil {
		in, out := &in.RedirectLocation, &out.RedirectLocation
		*out = new(string)
		**out = **in
	}
	if in.SSL != nil {
		in, out := &in.SSL, &out.SSL
		*out = new(bool)
		**out = **in
	}
	if in.ServiceName != nil {
		in, out := &in.ServiceName, &out.ServiceName
		*out = new(string)
		**out = **in
	}
	if in.Zone != nil {
		in, out := &in.Zone, &out.Zone
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPFrontendParameters.
func (in *HTTPFrontendParameters) DeepCopy() *HTTPFrontendParameters {
	if in == nil {
		return nil
	}
	out := new(HTTPFrontendParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPFrontendSpec) DeepCopyInto(out *HTTPFrontendSpec) {
	*out = *in
	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
	in.ForProvider.DeepCopyInto(&out.ForProvider)
	in.InitProvider.DeepCopyInto(&out.InitProvider)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPFrontendSpec.
func (in *HTTPFrontendSpec) DeepCopy() *HTTPFrontendSpec {
	if in == nil {
		return nil
	}
	out := new(HTTPFrontendSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPFrontendStatus) DeepCopyInto(out *HTTPFrontendStatus) {
	*out = *in
	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
	in.AtProvider.DeepCopyInto(&out.AtProvider)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPFrontendStatus.
func (in *HTTPFrontendStatus) DeepCopy() *HTTPFrontendStatus {
	if in == nil {
		return nil
	}
	out := new(HTTPFrontendStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): autogenerated deepcopy code — regenerate with controller-gen
// rather than editing by hand.
func (in *HTTPRoute) DeepCopyInto(out *HTTPRoute) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRoute.
func (in *HTTPRoute) DeepCopy() *HTTPRoute {
	if in == nil {
		return nil
	}
	out := new(HTTPRoute)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *HTTPRoute) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPRouteInitParameters) DeepCopyInto(out *HTTPRouteInitParameters) {
	*out = *in
	if in.Action != nil {
		in, out := &in.Action, &out.Action
		*out = make([]ActionInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.DisplayName != nil {
		in, out := &in.DisplayName, &out.DisplayName
		*out = new(string)
		**out = **in
	}
	if in.FrontendID != nil {
		in, out := &in.FrontendID, &out.FrontendID
		*out = new(float64)
		**out = **in
	}
	if in.ServiceName != nil {
		in, out := &in.ServiceName, &out.ServiceName
		*out = new(string)
		**out = **in
	}
	if in.Weight != nil {
		in, out := &in.Weight, &out.Weight
		*out = new(float64)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteInitParameters.
func (in *HTTPRouteInitParameters) DeepCopy() *HTTPRouteInitParameters {
	if in == nil {
		return nil
	}
	out := new(HTTPRouteInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): autogenerated deepcopy code — regenerate with controller-gen
// rather than editing by hand.
func (in *HTTPRouteList) DeepCopyInto(out *HTTPRouteList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]HTTPRoute, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteList.
func (in *HTTPRouteList) DeepCopy() *HTTPRouteList {
	if in == nil {
		return nil
	}
	out := new(HTTPRouteList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *HTTPRouteList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPRouteObservation) DeepCopyInto(out *HTTPRouteObservation) {
	*out = *in
	if in.Action != nil {
		in, out := &in.Action, &out.Action
		*out = make([]ActionObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.DisplayName != nil {
		in, out := &in.DisplayName, &out.DisplayName
		*out = new(string)
		**out = **in
	}
	if in.FrontendID != nil {
		in, out := &in.FrontendID, &out.FrontendID
		*out = new(float64)
		**out = **in
	}
	if in.ID != nil {
		in, out := &in.ID, &out.ID
		*out = new(string)
		**out = **in
	}
	if in.Rules != nil {
		in, out := &in.Rules, &out.Rules
		*out = make([]RulesObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ServiceName != nil {
		in, out := &in.ServiceName, &out.ServiceName
		*out = new(string)
		**out = **in
	}
	if in.Status != nil {
		in, out := &in.Status, &out.Status
		*out = new(string)
		**out = **in
	}
	if in.Weight != nil {
		in, out := &in.Weight, &out.Weight
		*out = new(float64)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteObservation.
func (in *HTTPRouteObservation) DeepCopy() *HTTPRouteObservation {
	if in == nil {
		return nil
	}
	out := new(HTTPRouteObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPRouteParameters) DeepCopyInto(out *HTTPRouteParameters) {
	*out = *in
	if in.Action != nil {
		in, out := &in.Action, &out.Action
		*out = make([]ActionParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.DisplayName != nil {
		in, out := &in.DisplayName, &out.DisplayName
		*out = new(string)
		**out = **in
	}
	if in.FrontendID != nil {
		in, out := &in.FrontendID, &out.FrontendID
		*out = new(float64)
		**out = **in
	}
	if in.ServiceName != nil {
		in, out := &in.ServiceName, &out.ServiceName
		*out = new(string)
		**out = **in
	}
	if in.Weight != nil {
		in, out := &in.Weight, &out.Weight
		*out = new(float64)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteParameters.
func (in *HTTPRouteParameters) DeepCopy() *HTTPRouteParameters {
	if in == nil {
		return nil
	}
	out := new(HTTPRouteParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPRouteRule) DeepCopyInto(out *HTTPRouteRule) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteRule.
// NOTE(review): autogenerated deepcopy code — regenerate with controller-gen
// rather than editing by hand.
func (in *HTTPRouteRule) DeepCopy() *HTTPRouteRule {
	if in == nil {
		return nil
	}
	out := new(HTTPRouteRule)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *HTTPRouteRule) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPRouteRuleInitParameters) DeepCopyInto(out *HTTPRouteRuleInitParameters) {
	*out = *in
	if in.DisplayName != nil {
		in, out := &in.DisplayName, &out.DisplayName
		*out = new(string)
		**out = **in
	}
	if in.Field != nil {
		in, out := &in.Field, &out.Field
		*out = new(string)
		**out = **in
	}
	if in.Match != nil {
		in, out := &in.Match, &out.Match
		*out = new(string)
		**out = **in
	}
	if in.Negate != nil {
		in, out := &in.Negate, &out.Negate
		*out = new(bool)
		**out = **in
	}
	if in.Pattern != nil {
		in, out := &in.Pattern, &out.Pattern
		*out = new(string)
		**out = **in
	}
	if in.RouteID != nil {
		in, out := &in.RouteID, &out.RouteID
		*out = new(string)
		**out = **in
	}
	if in.ServiceName != nil {
		in, out := &in.ServiceName, &out.ServiceName
		*out = new(string)
		**out = **in
	}
	if in.SubField != nil {
		in, out := &in.SubField, &out.SubField
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteRuleInitParameters.
func (in *HTTPRouteRuleInitParameters) DeepCopy() *HTTPRouteRuleInitParameters {
	if in == nil {
		return nil
	}
	out := new(HTTPRouteRuleInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): autogenerated deepcopy code — regenerate with controller-gen
// rather than editing by hand.
func (in *HTTPRouteRuleList) DeepCopyInto(out *HTTPRouteRuleList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]HTTPRouteRule, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteRuleList.
func (in *HTTPRouteRuleList) DeepCopy() *HTTPRouteRuleList {
	if in == nil {
		return nil
	}
	out := new(HTTPRouteRuleList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *HTTPRouteRuleList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPRouteRuleObservation) DeepCopyInto(out *HTTPRouteRuleObservation) {
	*out = *in
	if in.DisplayName != nil {
		in, out := &in.DisplayName, &out.DisplayName
		*out = new(string)
		**out = **in
	}
	if in.Field != nil {
		in, out := &in.Field, &out.Field
		*out = new(string)
		**out = **in
	}
	if in.ID != nil {
		in, out := &in.ID, &out.ID
		*out = new(string)
		**out = **in
	}
	if in.Match != nil {
		in, out := &in.Match, &out.Match
		*out = new(string)
		**out = **in
	}
	if in.Negate != nil {
		in, out := &in.Negate, &out.Negate
		*out = new(bool)
		**out = **in
	}
	if in.Pattern != nil {
		in, out := &in.Pattern, &out.Pattern
		*out = new(string)
		**out = **in
	}
	if in.RouteID != nil {
		in, out := &in.RouteID, &out.RouteID
		*out = new(string)
		**out = **in
	}
	if in.ServiceName != nil {
		in, out := &in.ServiceName, &out.ServiceName
		*out = new(string)
		**out = **in
	}
	if in.SubField != nil {
		in, out := &in.SubField, &out.SubField
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteRuleObservation.
func (in *HTTPRouteRuleObservation) DeepCopy() *HTTPRouteRuleObservation {
	if in == nil {
		return nil
	}
	out := new(HTTPRouteRuleObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPRouteRuleParameters) DeepCopyInto(out *HTTPRouteRuleParameters) {
	*out = *in
	if in.DisplayName != nil {
		in, out := &in.DisplayName, &out.DisplayName
		*out = new(string)
		**out = **in
	}
	if in.Field != nil {
		in, out := &in.Field, &out.Field
		*out = new(string)
		**out = **in
	}
	if in.Match != nil {
		in, out := &in.Match, &out.Match
		*out = new(string)
		**out = **in
	}
	if in.Negate != nil {
		in, out := &in.Negate, &out.Negate
		*out = new(bool)
		**out = **in
	}
	if in.Pattern != nil {
		in, out := &in.Pattern, &out.Pattern
		*out = new(string)
		**out = **in
	}
	if in.RouteID != nil {
		in, out := &in.RouteID, &out.RouteID
		*out = new(string)
		**out = **in
	}
	if in.ServiceName != nil {
		in, out := &in.ServiceName, &out.ServiceName
		*out = new(string)
		**out = **in
	}
	if in.SubField != nil {
		in, out := &in.SubField, &out.SubField
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteRuleParameters.
func (in *HTTPRouteRuleParameters) DeepCopy() *HTTPRouteRuleParameters {
	if in == nil {
		return nil
	}
	out := new(HTTPRouteRuleParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HTTPRouteRuleSpec) DeepCopyInto(out *HTTPRouteRuleSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteRuleSpec. +func (in *HTTPRouteRuleSpec) DeepCopy() *HTTPRouteRuleSpec { + if in == nil { + return nil + } + out := new(HTTPRouteRuleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPRouteRuleStatus) DeepCopyInto(out *HTTPRouteRuleStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteRuleStatus. +func (in *HTTPRouteRuleStatus) DeepCopy() *HTTPRouteRuleStatus { + if in == nil { + return nil + } + out := new(HTTPRouteRuleStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPRouteSpec) DeepCopyInto(out *HTTPRouteSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteSpec. +func (in *HTTPRouteSpec) DeepCopy() *HTTPRouteSpec { + if in == nil { + return nil + } + out := new(HTTPRouteSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPRouteStatus) DeepCopyInto(out *HTTPRouteStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteStatus. +func (in *HTTPRouteStatus) DeepCopy() *HTTPRouteStatus { + if in == nil { + return nil + } + out := new(HTTPRouteStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Iploadbalancing) DeepCopyInto(out *Iploadbalancing) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Iploadbalancing. +func (in *Iploadbalancing) DeepCopy() *Iploadbalancing { + if in == nil { + return nil + } + out := new(Iploadbalancing) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Iploadbalancing) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IploadbalancingInitParameters) DeepCopyInto(out *IploadbalancingInitParameters) { + *out = *in + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.OvhSubsidiary != nil { + in, out := &in.OvhSubsidiary, &out.OvhSubsidiary + *out = new(string) + **out = **in + } + if in.PaymentMean != nil { + in, out := &in.PaymentMean, &out.PaymentMean + *out = new(string) + **out = **in + } + if in.Plan != nil { + in, out := &in.Plan, &out.Plan + *out = make([]PlanInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PlanOption != nil { + in, out := &in.PlanOption, &out.PlanOption + *out = make([]PlanOptionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SSLConfiguration != nil { + in, out := &in.SSLConfiguration, &out.SSLConfiguration + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IploadbalancingInitParameters. +func (in *IploadbalancingInitParameters) DeepCopy() *IploadbalancingInitParameters { + if in == nil { + return nil + } + out := new(IploadbalancingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IploadbalancingList) DeepCopyInto(out *IploadbalancingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Iploadbalancing, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IploadbalancingList. 
+func (in *IploadbalancingList) DeepCopy() *IploadbalancingList { + if in == nil { + return nil + } + out := new(IploadbalancingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IploadbalancingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IploadbalancingObservation) DeepCopyInto(out *IploadbalancingObservation) { + *out = *in + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IPLoadbalancing != nil { + in, out := &in.IPLoadbalancing, &out.IPLoadbalancing + *out = new(string) + **out = **in + } + if in.IPv4 != nil { + in, out := &in.IPv4, &out.IPv4 + *out = new(string) + **out = **in + } + if in.IPv6 != nil { + in, out := &in.IPv6, &out.IPv6 + *out = new(string) + **out = **in + } + if in.Offer != nil { + in, out := &in.Offer, &out.Offer + *out = new(string) + **out = **in + } + if in.Order != nil { + in, out := &in.Order, &out.Order + *out = make([]OrderObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OrderableZone != nil { + in, out := &in.OrderableZone, &out.OrderableZone + *out = make([]OrderableZoneObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OvhSubsidiary != nil { + in, out := &in.OvhSubsidiary, &out.OvhSubsidiary + *out = new(string) + **out = **in + } + if in.PaymentMean != nil { + in, out := &in.PaymentMean, &out.PaymentMean + *out = new(string) + **out = **in + } + if in.Plan != nil { + in, out := &in.Plan, &out.Plan + *out = make([]PlanObservation, len(*in)) + for i := range *in { + 
(*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PlanOption != nil { + in, out := &in.PlanOption, &out.PlanOption + *out = make([]PlanOptionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SSLConfiguration != nil { + in, out := &in.SSLConfiguration, &out.SSLConfiguration + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.State != nil { + in, out := &in.State, &out.State + *out = new(string) + **out = **in + } + if in.Urn != nil { + in, out := &in.Urn, &out.Urn + *out = new(string) + **out = **in + } + if in.VrackEligibility != nil { + in, out := &in.VrackEligibility, &out.VrackEligibility + *out = new(bool) + **out = **in + } + if in.VrackName != nil { + in, out := &in.VrackName, &out.VrackName + *out = new(string) + **out = **in + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IploadbalancingObservation. +func (in *IploadbalancingObservation) DeepCopy() *IploadbalancingObservation { + if in == nil { + return nil + } + out := new(IploadbalancingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IploadbalancingParameters) DeepCopyInto(out *IploadbalancingParameters) { + *out = *in + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.OvhSubsidiary != nil { + in, out := &in.OvhSubsidiary, &out.OvhSubsidiary + *out = new(string) + **out = **in + } + if in.PaymentMean != nil { + in, out := &in.PaymentMean, &out.PaymentMean + *out = new(string) + **out = **in + } + if in.Plan != nil { + in, out := &in.Plan, &out.Plan + *out = make([]PlanParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PlanOption != nil { + in, out := &in.PlanOption, &out.PlanOption + *out = make([]PlanOptionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SSLConfiguration != nil { + in, out := &in.SSLConfiguration, &out.SSLConfiguration + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IploadbalancingParameters. +func (in *IploadbalancingParameters) DeepCopy() *IploadbalancingParameters { + if in == nil { + return nil + } + out := new(IploadbalancingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IploadbalancingSpec) DeepCopyInto(out *IploadbalancingSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IploadbalancingSpec. +func (in *IploadbalancingSpec) DeepCopy() *IploadbalancingSpec { + if in == nil { + return nil + } + out := new(IploadbalancingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *IploadbalancingStatus) DeepCopyInto(out *IploadbalancingStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IploadbalancingStatus. +func (in *IploadbalancingStatus) DeepCopy() *IploadbalancingStatus { + if in == nil { + return nil + } + out := new(IploadbalancingStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrderInitParameters) DeepCopyInto(out *OrderInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrderInitParameters. +func (in *OrderInitParameters) DeepCopy() *OrderInitParameters { + if in == nil { + return nil + } + out := new(OrderInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrderObservation) DeepCopyInto(out *OrderObservation) { + *out = *in + if in.Date != nil { + in, out := &in.Date, &out.Date + *out = new(string) + **out = **in + } + if in.Details != nil { + in, out := &in.Details, &out.Details + *out = make([]DetailsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ExpirationDate != nil { + in, out := &in.ExpirationDate, &out.ExpirationDate + *out = new(string) + **out = **in + } + if in.OrderID != nil { + in, out := &in.OrderID, &out.OrderID + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrderObservation. 
+func (in *OrderObservation) DeepCopy() *OrderObservation { + if in == nil { + return nil + } + out := new(OrderObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrderParameters) DeepCopyInto(out *OrderParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrderParameters. +func (in *OrderParameters) DeepCopy() *OrderParameters { + if in == nil { + return nil + } + out := new(OrderParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrderableZoneInitParameters) DeepCopyInto(out *OrderableZoneInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrderableZoneInitParameters. +func (in *OrderableZoneInitParameters) DeepCopy() *OrderableZoneInitParameters { + if in == nil { + return nil + } + out := new(OrderableZoneInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrderableZoneObservation) DeepCopyInto(out *OrderableZoneObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PlanCode != nil { + in, out := &in.PlanCode, &out.PlanCode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrderableZoneObservation. +func (in *OrderableZoneObservation) DeepCopy() *OrderableZoneObservation { + if in == nil { + return nil + } + out := new(OrderableZoneObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *OrderableZoneParameters) DeepCopyInto(out *OrderableZoneParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrderableZoneParameters. +func (in *OrderableZoneParameters) DeepCopy() *OrderableZoneParameters { + if in == nil { + return nil + } + out := new(OrderableZoneParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlanInitParameters) DeepCopyInto(out *PlanInitParameters) { + *out = *in + if in.CatalogName != nil { + in, out := &in.CatalogName, &out.CatalogName + *out = new(string) + **out = **in + } + if in.Configuration != nil { + in, out := &in.Configuration, &out.Configuration + *out = make([]ConfigurationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(string) + **out = **in + } + if in.PlanCode != nil { + in, out := &in.PlanCode, &out.PlanCode + *out = new(string) + **out = **in + } + if in.PricingMode != nil { + in, out := &in.PricingMode, &out.PricingMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlanInitParameters. +func (in *PlanInitParameters) DeepCopy() *PlanInitParameters { + if in == nil { + return nil + } + out := new(PlanInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PlanObservation) DeepCopyInto(out *PlanObservation) { + *out = *in + if in.CatalogName != nil { + in, out := &in.CatalogName, &out.CatalogName + *out = new(string) + **out = **in + } + if in.Configuration != nil { + in, out := &in.Configuration, &out.Configuration + *out = make([]ConfigurationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(string) + **out = **in + } + if in.PlanCode != nil { + in, out := &in.PlanCode, &out.PlanCode + *out = new(string) + **out = **in + } + if in.PricingMode != nil { + in, out := &in.PricingMode, &out.PricingMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlanObservation. +func (in *PlanObservation) DeepCopy() *PlanObservation { + if in == nil { + return nil + } + out := new(PlanObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlanOptionConfigurationInitParameters) DeepCopyInto(out *PlanOptionConfigurationInitParameters) { + *out = *in + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlanOptionConfigurationInitParameters. +func (in *PlanOptionConfigurationInitParameters) DeepCopy() *PlanOptionConfigurationInitParameters { + if in == nil { + return nil + } + out := new(PlanOptionConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PlanOptionConfigurationObservation) DeepCopyInto(out *PlanOptionConfigurationObservation) { + *out = *in + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlanOptionConfigurationObservation. +func (in *PlanOptionConfigurationObservation) DeepCopy() *PlanOptionConfigurationObservation { + if in == nil { + return nil + } + out := new(PlanOptionConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlanOptionConfigurationParameters) DeepCopyInto(out *PlanOptionConfigurationParameters) { + *out = *in + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlanOptionConfigurationParameters. +func (in *PlanOptionConfigurationParameters) DeepCopy() *PlanOptionConfigurationParameters { + if in == nil { + return nil + } + out := new(PlanOptionConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PlanOptionInitParameters) DeepCopyInto(out *PlanOptionInitParameters) { + *out = *in + if in.CatalogName != nil { + in, out := &in.CatalogName, &out.CatalogName + *out = new(string) + **out = **in + } + if in.Configuration != nil { + in, out := &in.Configuration, &out.Configuration + *out = make([]PlanOptionConfigurationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(string) + **out = **in + } + if in.PlanCode != nil { + in, out := &in.PlanCode, &out.PlanCode + *out = new(string) + **out = **in + } + if in.PricingMode != nil { + in, out := &in.PricingMode, &out.PricingMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlanOptionInitParameters. +func (in *PlanOptionInitParameters) DeepCopy() *PlanOptionInitParameters { + if in == nil { + return nil + } + out := new(PlanOptionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PlanOptionObservation) DeepCopyInto(out *PlanOptionObservation) { + *out = *in + if in.CatalogName != nil { + in, out := &in.CatalogName, &out.CatalogName + *out = new(string) + **out = **in + } + if in.Configuration != nil { + in, out := &in.Configuration, &out.Configuration + *out = make([]PlanOptionConfigurationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(string) + **out = **in + } + if in.PlanCode != nil { + in, out := &in.PlanCode, &out.PlanCode + *out = new(string) + **out = **in + } + if in.PricingMode != nil { + in, out := &in.PricingMode, &out.PricingMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlanOptionObservation. +func (in *PlanOptionObservation) DeepCopy() *PlanOptionObservation { + if in == nil { + return nil + } + out := new(PlanOptionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PlanOptionParameters) DeepCopyInto(out *PlanOptionParameters) { + *out = *in + if in.CatalogName != nil { + in, out := &in.CatalogName, &out.CatalogName + *out = new(string) + **out = **in + } + if in.Configuration != nil { + in, out := &in.Configuration, &out.Configuration + *out = make([]PlanOptionConfigurationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(string) + **out = **in + } + if in.PlanCode != nil { + in, out := &in.PlanCode, &out.PlanCode + *out = new(string) + **out = **in + } + if in.PricingMode != nil { + in, out := &in.PricingMode, &out.PricingMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlanOptionParameters. +func (in *PlanOptionParameters) DeepCopy() *PlanOptionParameters { + if in == nil { + return nil + } + out := new(PlanOptionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlanParameters) DeepCopyInto(out *PlanParameters) { + *out = *in + if in.CatalogName != nil { + in, out := &in.CatalogName, &out.CatalogName + *out = new(string) + **out = **in + } + if in.Configuration != nil { + in, out := &in.Configuration, &out.Configuration + *out = make([]ConfigurationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(string) + **out = **in + } + if in.PlanCode != nil { + in, out := &in.PlanCode, &out.PlanCode + *out = new(string) + **out = **in + } + if in.PricingMode != nil { + in, out := &in.PricingMode, &out.PricingMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlanParameters. 
+func (in *PlanParameters) DeepCopy() *PlanParameters { + if in == nil { + return nil + } + out := new(PlanParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProbeInitParameters) DeepCopyInto(out *ProbeInitParameters) { + *out = *in + if in.ForceSSL != nil { + in, out := &in.ForceSSL, &out.ForceSSL + *out = new(bool) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(string) + **out = **in + } + if in.Method != nil { + in, out := &in.Method, &out.Method + *out = new(string) + **out = **in + } + if in.Negate != nil { + in, out := &in.Negate, &out.Negate + *out = new(bool) + **out = **in + } + if in.Pattern != nil { + in, out := &in.Pattern, &out.Pattern + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProbeInitParameters. +func (in *ProbeInitParameters) DeepCopy() *ProbeInitParameters { + if in == nil { + return nil + } + out := new(ProbeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProbeObservation) DeepCopyInto(out *ProbeObservation) { + *out = *in + if in.ForceSSL != nil { + in, out := &in.ForceSSL, &out.ForceSSL + *out = new(bool) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(string) + **out = **in + } + if in.Method != nil { + in, out := &in.Method, &out.Method + *out = new(string) + **out = **in + } + if in.Negate != nil { + in, out := &in.Negate, &out.Negate + *out = new(bool) + **out = **in + } + if in.Pattern != nil { + in, out := &in.Pattern, &out.Pattern + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProbeObservation. +func (in *ProbeObservation) DeepCopy() *ProbeObservation { + if in == nil { + return nil + } + out := new(ProbeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProbeParameters) DeepCopyInto(out *ProbeParameters) { + *out = *in + if in.ForceSSL != nil { + in, out := &in.ForceSSL, &out.ForceSSL + *out = new(bool) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(string) + **out = **in + } + if in.Method != nil { + in, out := &in.Method, &out.Method + *out = new(string) + **out = **in + } + if in.Negate != nil { + in, out := &in.Negate, &out.Negate + *out = new(bool) + **out = **in + } + if in.Pattern != nil { + in, out := &in.Pattern, &out.Pattern + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProbeParameters. +func (in *ProbeParameters) DeepCopy() *ProbeParameters { + if in == nil { + return nil + } + out := new(ProbeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Refresh) DeepCopyInto(out *Refresh) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Refresh. +func (in *Refresh) DeepCopy() *Refresh { + if in == nil { + return nil + } + out := new(Refresh) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *Refresh) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RefreshInitParameters) DeepCopyInto(out *RefreshInitParameters) { + *out = *in + if in.Keepers != nil { + in, out := &in.Keepers, &out.Keepers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RefreshInitParameters. +func (in *RefreshInitParameters) DeepCopy() *RefreshInitParameters { + if in == nil { + return nil + } + out := new(RefreshInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RefreshList) DeepCopyInto(out *RefreshList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Refresh, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RefreshList. +func (in *RefreshList) DeepCopy() *RefreshList { + if in == nil { + return nil + } + out := new(RefreshList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RefreshList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RefreshObservation) DeepCopyInto(out *RefreshObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Keepers != nil { + in, out := &in.Keepers, &out.Keepers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RefreshObservation. +func (in *RefreshObservation) DeepCopy() *RefreshObservation { + if in == nil { + return nil + } + out := new(RefreshObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RefreshParameters) DeepCopyInto(out *RefreshParameters) { + *out = *in + if in.Keepers != nil { + in, out := &in.Keepers, &out.Keepers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RefreshParameters. +func (in *RefreshParameters) DeepCopy() *RefreshParameters { + if in == nil { + return nil + } + out := new(RefreshParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RefreshSpec) DeepCopyInto(out *RefreshSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RefreshSpec. +func (in *RefreshSpec) DeepCopy() *RefreshSpec { + if in == nil { + return nil + } + out := new(RefreshSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RefreshStatus) DeepCopyInto(out *RefreshStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RefreshStatus. +func (in *RefreshStatus) DeepCopy() *RefreshStatus { + if in == nil { + return nil + } + out := new(RefreshStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RulesInitParameters) DeepCopyInto(out *RulesInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RulesInitParameters. +func (in *RulesInitParameters) DeepCopy() *RulesInitParameters { + if in == nil { + return nil + } + out := new(RulesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RulesObservation) DeepCopyInto(out *RulesObservation) { + *out = *in + if in.Field != nil { + in, out := &in.Field, &out.Field + *out = new(string) + **out = **in + } + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(string) + **out = **in + } + if in.Negate != nil { + in, out := &in.Negate, &out.Negate + *out = new(bool) + **out = **in + } + if in.Pattern != nil { + in, out := &in.Pattern, &out.Pattern + *out = new(string) + **out = **in + } + if in.RuleID != nil { + in, out := &in.RuleID, &out.RuleID + *out = new(float64) + **out = **in + } + if in.SubField != nil { + in, out := &in.SubField, &out.SubField + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RulesObservation. +func (in *RulesObservation) DeepCopy() *RulesObservation { + if in == nil { + return nil + } + out := new(RulesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RulesParameters) DeepCopyInto(out *RulesParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RulesParameters. +func (in *RulesParameters) DeepCopy() *RulesParameters { + if in == nil { + return nil + } + out := new(RulesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPFarm) DeepCopyInto(out *TCPFarm) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPFarm. 
+func (in *TCPFarm) DeepCopy() *TCPFarm { + if in == nil { + return nil + } + out := new(TCPFarm) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TCPFarm) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPFarmInitParameters) DeepCopyInto(out *TCPFarmInitParameters) { + *out = *in + if in.Balance != nil { + in, out := &in.Balance, &out.Balance + *out = new(string) + **out = **in + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.Probe != nil { + in, out := &in.Probe, &out.Probe + *out = make([]TCPFarmProbeInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.Stickiness != nil { + in, out := &in.Stickiness, &out.Stickiness + *out = new(string) + **out = **in + } + if in.VrackNetworkID != nil { + in, out := &in.VrackNetworkID, &out.VrackNetworkID + *out = new(float64) + **out = **in + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPFarmInitParameters. +func (in *TCPFarmInitParameters) DeepCopy() *TCPFarmInitParameters { + if in == nil { + return nil + } + out := new(TCPFarmInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TCPFarmList) DeepCopyInto(out *TCPFarmList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]TCPFarm, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPFarmList. +func (in *TCPFarmList) DeepCopy() *TCPFarmList { + if in == nil { + return nil + } + out := new(TCPFarmList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TCPFarmList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPFarmObservation) DeepCopyInto(out *TCPFarmObservation) { + *out = *in + if in.Balance != nil { + in, out := &in.Balance, &out.Balance + *out = new(string) + **out = **in + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.Probe != nil { + in, out := &in.Probe, &out.Probe + *out = make([]TCPFarmProbeObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.Stickiness != nil { + in, out := &in.Stickiness, &out.Stickiness + *out = new(string) + **out = **in + } + if in.VrackNetworkID != nil { + in, out := &in.VrackNetworkID, &out.VrackNetworkID + *out = new(float64) + **out = **in + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = 
new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPFarmObservation. +func (in *TCPFarmObservation) DeepCopy() *TCPFarmObservation { + if in == nil { + return nil + } + out := new(TCPFarmObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPFarmParameters) DeepCopyInto(out *TCPFarmParameters) { + *out = *in + if in.Balance != nil { + in, out := &in.Balance, &out.Balance + *out = new(string) + **out = **in + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.Probe != nil { + in, out := &in.Probe, &out.Probe + *out = make([]TCPFarmProbeParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.Stickiness != nil { + in, out := &in.Stickiness, &out.Stickiness + *out = new(string) + **out = **in + } + if in.VrackNetworkID != nil { + in, out := &in.VrackNetworkID, &out.VrackNetworkID + *out = new(float64) + **out = **in + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPFarmParameters. +func (in *TCPFarmParameters) DeepCopy() *TCPFarmParameters { + if in == nil { + return nil + } + out := new(TCPFarmParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TCPFarmProbeInitParameters) DeepCopyInto(out *TCPFarmProbeInitParameters) { + *out = *in + if in.ForceSSL != nil { + in, out := &in.ForceSSL, &out.ForceSSL + *out = new(bool) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(string) + **out = **in + } + if in.Method != nil { + in, out := &in.Method, &out.Method + *out = new(string) + **out = **in + } + if in.Negate != nil { + in, out := &in.Negate, &out.Negate + *out = new(bool) + **out = **in + } + if in.Pattern != nil { + in, out := &in.Pattern, &out.Pattern + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPFarmProbeInitParameters. +func (in *TCPFarmProbeInitParameters) DeepCopy() *TCPFarmProbeInitParameters { + if in == nil { + return nil + } + out := new(TCPFarmProbeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TCPFarmProbeObservation) DeepCopyInto(out *TCPFarmProbeObservation) { + *out = *in + if in.ForceSSL != nil { + in, out := &in.ForceSSL, &out.ForceSSL + *out = new(bool) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(string) + **out = **in + } + if in.Method != nil { + in, out := &in.Method, &out.Method + *out = new(string) + **out = **in + } + if in.Negate != nil { + in, out := &in.Negate, &out.Negate + *out = new(bool) + **out = **in + } + if in.Pattern != nil { + in, out := &in.Pattern, &out.Pattern + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPFarmProbeObservation. +func (in *TCPFarmProbeObservation) DeepCopy() *TCPFarmProbeObservation { + if in == nil { + return nil + } + out := new(TCPFarmProbeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TCPFarmProbeParameters) DeepCopyInto(out *TCPFarmProbeParameters) { + *out = *in + if in.ForceSSL != nil { + in, out := &in.ForceSSL, &out.ForceSSL + *out = new(bool) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(string) + **out = **in + } + if in.Method != nil { + in, out := &in.Method, &out.Method + *out = new(string) + **out = **in + } + if in.Negate != nil { + in, out := &in.Negate, &out.Negate + *out = new(bool) + **out = **in + } + if in.Pattern != nil { + in, out := &in.Pattern, &out.Pattern + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPFarmProbeParameters. +func (in *TCPFarmProbeParameters) DeepCopy() *TCPFarmProbeParameters { + if in == nil { + return nil + } + out := new(TCPFarmProbeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPFarmServer) DeepCopyInto(out *TCPFarmServer) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPFarmServer. 
+func (in *TCPFarmServer) DeepCopy() *TCPFarmServer { + if in == nil { + return nil + } + out := new(TCPFarmServer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TCPFarmServer) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPFarmServerInitParameters) DeepCopyInto(out *TCPFarmServerInitParameters) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } + if in.Backup != nil { + in, out := &in.Backup, &out.Backup + *out = new(bool) + **out = **in + } + if in.Chain != nil { + in, out := &in.Chain, &out.Chain + *out = new(string) + **out = **in + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.FarmID != nil { + in, out := &in.FarmID, &out.FarmID + *out = new(float64) + **out = **in + } + if in.OnMarkedDown != nil { + in, out := &in.OnMarkedDown, &out.OnMarkedDown + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.Probe != nil { + in, out := &in.Probe, &out.Probe + *out = new(bool) + **out = **in + } + if in.ProxyProtocolVersion != nil { + in, out := &in.ProxyProtocolVersion, &out.ProxyProtocolVersion + *out = new(string) + **out = **in + } + if in.SSL != nil { + in, out := &in.SSL, &out.SSL + *out = new(bool) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(float64) + **out = **in + } +} 
+ +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPFarmServerInitParameters. +func (in *TCPFarmServerInitParameters) DeepCopy() *TCPFarmServerInitParameters { + if in == nil { + return nil + } + out := new(TCPFarmServerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPFarmServerList) DeepCopyInto(out *TCPFarmServerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]TCPFarmServer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPFarmServerList. +func (in *TCPFarmServerList) DeepCopy() *TCPFarmServerList { + if in == nil { + return nil + } + out := new(TCPFarmServerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TCPFarmServerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TCPFarmServerObservation) DeepCopyInto(out *TCPFarmServerObservation) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } + if in.Backup != nil { + in, out := &in.Backup, &out.Backup + *out = new(bool) + **out = **in + } + if in.Chain != nil { + in, out := &in.Chain, &out.Chain + *out = new(string) + **out = **in + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.FarmID != nil { + in, out := &in.FarmID, &out.FarmID + *out = new(float64) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.OnMarkedDown != nil { + in, out := &in.OnMarkedDown, &out.OnMarkedDown + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.Probe != nil { + in, out := &in.Probe, &out.Probe + *out = new(bool) + **out = **in + } + if in.ProxyProtocolVersion != nil { + in, out := &in.ProxyProtocolVersion, &out.ProxyProtocolVersion + *out = new(string) + **out = **in + } + if in.SSL != nil { + in, out := &in.SSL, &out.SSL + *out = new(bool) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPFarmServerObservation. +func (in *TCPFarmServerObservation) DeepCopy() *TCPFarmServerObservation { + if in == nil { + return nil + } + out := new(TCPFarmServerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TCPFarmServerParameters) DeepCopyInto(out *TCPFarmServerParameters) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } + if in.Backup != nil { + in, out := &in.Backup, &out.Backup + *out = new(bool) + **out = **in + } + if in.Chain != nil { + in, out := &in.Chain, &out.Chain + *out = new(string) + **out = **in + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.FarmID != nil { + in, out := &in.FarmID, &out.FarmID + *out = new(float64) + **out = **in + } + if in.OnMarkedDown != nil { + in, out := &in.OnMarkedDown, &out.OnMarkedDown + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.Probe != nil { + in, out := &in.Probe, &out.Probe + *out = new(bool) + **out = **in + } + if in.ProxyProtocolVersion != nil { + in, out := &in.ProxyProtocolVersion, &out.ProxyProtocolVersion + *out = new(string) + **out = **in + } + if in.SSL != nil { + in, out := &in.SSL, &out.SSL + *out = new(bool) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPFarmServerParameters. +func (in *TCPFarmServerParameters) DeepCopy() *TCPFarmServerParameters { + if in == nil { + return nil + } + out := new(TCPFarmServerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TCPFarmServerSpec) DeepCopyInto(out *TCPFarmServerSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPFarmServerSpec. +func (in *TCPFarmServerSpec) DeepCopy() *TCPFarmServerSpec { + if in == nil { + return nil + } + out := new(TCPFarmServerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPFarmServerStatus) DeepCopyInto(out *TCPFarmServerStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPFarmServerStatus. +func (in *TCPFarmServerStatus) DeepCopy() *TCPFarmServerStatus { + if in == nil { + return nil + } + out := new(TCPFarmServerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPFarmSpec) DeepCopyInto(out *TCPFarmSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPFarmSpec. +func (in *TCPFarmSpec) DeepCopy() *TCPFarmSpec { + if in == nil { + return nil + } + out := new(TCPFarmSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TCPFarmStatus) DeepCopyInto(out *TCPFarmStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPFarmStatus. +func (in *TCPFarmStatus) DeepCopy() *TCPFarmStatus { + if in == nil { + return nil + } + out := new(TCPFarmStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPFrontend) DeepCopyInto(out *TCPFrontend) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPFrontend. +func (in *TCPFrontend) DeepCopy() *TCPFrontend { + if in == nil { + return nil + } + out := new(TCPFrontend) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TCPFrontend) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TCPFrontendInitParameters) DeepCopyInto(out *TCPFrontendInitParameters) { + *out = *in + if in.AllowedSource != nil { + in, out := &in.AllowedSource, &out.AllowedSource + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DedicatedIpfo != nil { + in, out := &in.DedicatedIpfo, &out.DedicatedIpfo + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DefaultFarmID != nil { + in, out := &in.DefaultFarmID, &out.DefaultFarmID + *out = new(float64) + **out = **in + } + if in.DefaultSSLID != nil { + in, out := &in.DefaultSSLID, &out.DefaultSSLID + *out = new(float64) + **out = **in + } + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = new(bool) + **out = **in + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(string) + **out = **in + } + if in.SSL != nil { + in, out := &in.SSL, &out.SSL + *out = new(bool) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPFrontendInitParameters. +func (in *TCPFrontendInitParameters) DeepCopy() *TCPFrontendInitParameters { + if in == nil { + return nil + } + out := new(TCPFrontendInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TCPFrontendList) DeepCopyInto(out *TCPFrontendList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]TCPFrontend, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPFrontendList. +func (in *TCPFrontendList) DeepCopy() *TCPFrontendList { + if in == nil { + return nil + } + out := new(TCPFrontendList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TCPFrontendList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPFrontendObservation) DeepCopyInto(out *TCPFrontendObservation) { + *out = *in + if in.AllowedSource != nil { + in, out := &in.AllowedSource, &out.AllowedSource + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DedicatedIpfo != nil { + in, out := &in.DedicatedIpfo, &out.DedicatedIpfo + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DefaultFarmID != nil { + in, out := &in.DefaultFarmID, &out.DefaultFarmID + *out = new(float64) + **out = **in + } + if in.DefaultSSLID != nil { + in, out := &in.DefaultSSLID, &out.DefaultSSLID + *out = new(float64) + **out = **in + } + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = new(bool) + **out = **in + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if 
in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(string) + **out = **in + } + if in.SSL != nil { + in, out := &in.SSL, &out.SSL + *out = new(bool) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPFrontendObservation. +func (in *TCPFrontendObservation) DeepCopy() *TCPFrontendObservation { + if in == nil { + return nil + } + out := new(TCPFrontendObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPFrontendParameters) DeepCopyInto(out *TCPFrontendParameters) { + *out = *in + if in.AllowedSource != nil { + in, out := &in.AllowedSource, &out.AllowedSource + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DedicatedIpfo != nil { + in, out := &in.DedicatedIpfo, &out.DedicatedIpfo + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DefaultFarmID != nil { + in, out := &in.DefaultFarmID, &out.DefaultFarmID + *out = new(float64) + **out = **in + } + if in.DefaultSSLID != nil { + in, out := &in.DefaultSSLID, &out.DefaultSSLID + *out = new(float64) + **out = **in + } + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = new(bool) + **out = **in + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port 
+ *out = new(string) + **out = **in + } + if in.SSL != nil { + in, out := &in.SSL, &out.SSL + *out = new(bool) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPFrontendParameters. +func (in *TCPFrontendParameters) DeepCopy() *TCPFrontendParameters { + if in == nil { + return nil + } + out := new(TCPFrontendParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPFrontendSpec) DeepCopyInto(out *TCPFrontendSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPFrontendSpec. +func (in *TCPFrontendSpec) DeepCopy() *TCPFrontendSpec { + if in == nil { + return nil + } + out := new(TCPFrontendSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPFrontendStatus) DeepCopyInto(out *TCPFrontendStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPFrontendStatus. +func (in *TCPFrontendStatus) DeepCopy() *TCPFrontendStatus { + if in == nil { + return nil + } + out := new(TCPFrontendStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TCPRoute) DeepCopyInto(out *TCPRoute) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPRoute. +func (in *TCPRoute) DeepCopy() *TCPRoute { + if in == nil { + return nil + } + out := new(TCPRoute) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TCPRoute) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPRouteActionInitParameters) DeepCopyInto(out *TCPRouteActionInitParameters) { + *out = *in + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPRouteActionInitParameters. +func (in *TCPRouteActionInitParameters) DeepCopy() *TCPRouteActionInitParameters { + if in == nil { + return nil + } + out := new(TCPRouteActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPRouteActionObservation) DeepCopyInto(out *TCPRouteActionObservation) { + *out = *in + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPRouteActionObservation. 
+func (in *TCPRouteActionObservation) DeepCopy() *TCPRouteActionObservation { + if in == nil { + return nil + } + out := new(TCPRouteActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPRouteActionParameters) DeepCopyInto(out *TCPRouteActionParameters) { + *out = *in + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPRouteActionParameters. +func (in *TCPRouteActionParameters) DeepCopy() *TCPRouteActionParameters { + if in == nil { + return nil + } + out := new(TCPRouteActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPRouteInitParameters) DeepCopyInto(out *TCPRouteInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = make([]TCPRouteActionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.FrontendID != nil { + in, out := &in.FrontendID, &out.FrontendID + *out = new(float64) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPRouteInitParameters. 
+func (in *TCPRouteInitParameters) DeepCopy() *TCPRouteInitParameters { + if in == nil { + return nil + } + out := new(TCPRouteInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPRouteList) DeepCopyInto(out *TCPRouteList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]TCPRoute, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPRouteList. +func (in *TCPRouteList) DeepCopy() *TCPRouteList { + if in == nil { + return nil + } + out := new(TCPRouteList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TCPRouteList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TCPRouteObservation) DeepCopyInto(out *TCPRouteObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = make([]TCPRouteActionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.FrontendID != nil { + in, out := &in.FrontendID, &out.FrontendID + *out = new(float64) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]TCPRouteRulesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPRouteObservation. +func (in *TCPRouteObservation) DeepCopy() *TCPRouteObservation { + if in == nil { + return nil + } + out := new(TCPRouteObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TCPRouteParameters) DeepCopyInto(out *TCPRouteParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = make([]TCPRouteActionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.FrontendID != nil { + in, out := &in.FrontendID, &out.FrontendID + *out = new(float64) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPRouteParameters. +func (in *TCPRouteParameters) DeepCopy() *TCPRouteParameters { + if in == nil { + return nil + } + out := new(TCPRouteParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPRouteRule) DeepCopyInto(out *TCPRouteRule) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPRouteRule. +func (in *TCPRouteRule) DeepCopy() *TCPRouteRule { + if in == nil { + return nil + } + out := new(TCPRouteRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TCPRouteRule) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TCPRouteRuleInitParameters) DeepCopyInto(out *TCPRouteRuleInitParameters) { + *out = *in + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.Field != nil { + in, out := &in.Field, &out.Field + *out = new(string) + **out = **in + } + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(string) + **out = **in + } + if in.Negate != nil { + in, out := &in.Negate, &out.Negate + *out = new(bool) + **out = **in + } + if in.Pattern != nil { + in, out := &in.Pattern, &out.Pattern + *out = new(string) + **out = **in + } + if in.RouteID != nil { + in, out := &in.RouteID, &out.RouteID + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.SubField != nil { + in, out := &in.SubField, &out.SubField + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPRouteRuleInitParameters. +func (in *TCPRouteRuleInitParameters) DeepCopy() *TCPRouteRuleInitParameters { + if in == nil { + return nil + } + out := new(TCPRouteRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPRouteRuleList) DeepCopyInto(out *TCPRouteRuleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]TCPRouteRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPRouteRuleList. 
+func (in *TCPRouteRuleList) DeepCopy() *TCPRouteRuleList { + if in == nil { + return nil + } + out := new(TCPRouteRuleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TCPRouteRuleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPRouteRuleObservation) DeepCopyInto(out *TCPRouteRuleObservation) { + *out = *in + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.Field != nil { + in, out := &in.Field, &out.Field + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(string) + **out = **in + } + if in.Negate != nil { + in, out := &in.Negate, &out.Negate + *out = new(bool) + **out = **in + } + if in.Pattern != nil { + in, out := &in.Pattern, &out.Pattern + *out = new(string) + **out = **in + } + if in.RouteID != nil { + in, out := &in.RouteID, &out.RouteID + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.SubField != nil { + in, out := &in.SubField, &out.SubField + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPRouteRuleObservation. +func (in *TCPRouteRuleObservation) DeepCopy() *TCPRouteRuleObservation { + if in == nil { + return nil + } + out := new(TCPRouteRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TCPRouteRuleParameters) DeepCopyInto(out *TCPRouteRuleParameters) { + *out = *in + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.Field != nil { + in, out := &in.Field, &out.Field + *out = new(string) + **out = **in + } + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(string) + **out = **in + } + if in.Negate != nil { + in, out := &in.Negate, &out.Negate + *out = new(bool) + **out = **in + } + if in.Pattern != nil { + in, out := &in.Pattern, &out.Pattern + *out = new(string) + **out = **in + } + if in.RouteID != nil { + in, out := &in.RouteID, &out.RouteID + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.SubField != nil { + in, out := &in.SubField, &out.SubField + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPRouteRuleParameters. +func (in *TCPRouteRuleParameters) DeepCopy() *TCPRouteRuleParameters { + if in == nil { + return nil + } + out := new(TCPRouteRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPRouteRuleSpec) DeepCopyInto(out *TCPRouteRuleSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPRouteRuleSpec. +func (in *TCPRouteRuleSpec) DeepCopy() *TCPRouteRuleSpec { + if in == nil { + return nil + } + out := new(TCPRouteRuleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TCPRouteRuleStatus) DeepCopyInto(out *TCPRouteRuleStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPRouteRuleStatus. +func (in *TCPRouteRuleStatus) DeepCopy() *TCPRouteRuleStatus { + if in == nil { + return nil + } + out := new(TCPRouteRuleStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPRouteRulesInitParameters) DeepCopyInto(out *TCPRouteRulesInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPRouteRulesInitParameters. +func (in *TCPRouteRulesInitParameters) DeepCopy() *TCPRouteRulesInitParameters { + if in == nil { + return nil + } + out := new(TCPRouteRulesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPRouteRulesObservation) DeepCopyInto(out *TCPRouteRulesObservation) { + *out = *in + if in.Field != nil { + in, out := &in.Field, &out.Field + *out = new(string) + **out = **in + } + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(string) + **out = **in + } + if in.Negate != nil { + in, out := &in.Negate, &out.Negate + *out = new(bool) + **out = **in + } + if in.Pattern != nil { + in, out := &in.Pattern, &out.Pattern + *out = new(string) + **out = **in + } + if in.RuleID != nil { + in, out := &in.RuleID, &out.RuleID + *out = new(float64) + **out = **in + } + if in.SubField != nil { + in, out := &in.SubField, &out.SubField + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPRouteRulesObservation. 
+func (in *TCPRouteRulesObservation) DeepCopy() *TCPRouteRulesObservation { + if in == nil { + return nil + } + out := new(TCPRouteRulesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPRouteRulesParameters) DeepCopyInto(out *TCPRouteRulesParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPRouteRulesParameters. +func (in *TCPRouteRulesParameters) DeepCopy() *TCPRouteRulesParameters { + if in == nil { + return nil + } + out := new(TCPRouteRulesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPRouteSpec) DeepCopyInto(out *TCPRouteSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPRouteSpec. +func (in *TCPRouteSpec) DeepCopy() *TCPRouteSpec { + if in == nil { + return nil + } + out := new(TCPRouteSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPRouteStatus) DeepCopyInto(out *TCPRouteStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPRouteStatus. +func (in *TCPRouteStatus) DeepCopy() *TCPRouteStatus { + if in == nil { + return nil + } + out := new(TCPRouteStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VrackNetwork) DeepCopyInto(out *VrackNetwork) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VrackNetwork. +func (in *VrackNetwork) DeepCopy() *VrackNetwork { + if in == nil { + return nil + } + out := new(VrackNetwork) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VrackNetwork) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VrackNetworkInitParameters) DeepCopyInto(out *VrackNetworkInitParameters) { + *out = *in + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.FarmID != nil { + in, out := &in.FarmID, &out.FarmID + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.NATIP != nil { + in, out := &in.NATIP, &out.NATIP + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.Subnet != nil { + in, out := &in.Subnet, &out.Subnet + *out = new(string) + **out = **in + } + if in.Vlan != nil { + in, out := &in.Vlan, &out.Vlan + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VrackNetworkInitParameters. 
+func (in *VrackNetworkInitParameters) DeepCopy() *VrackNetworkInitParameters { + if in == nil { + return nil + } + out := new(VrackNetworkInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VrackNetworkList) DeepCopyInto(out *VrackNetworkList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VrackNetwork, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VrackNetworkList. +func (in *VrackNetworkList) DeepCopy() *VrackNetworkList { + if in == nil { + return nil + } + out := new(VrackNetworkList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VrackNetworkList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VrackNetworkObservation) DeepCopyInto(out *VrackNetworkObservation) { + *out = *in + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.FarmID != nil { + in, out := &in.FarmID, &out.FarmID + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.NATIP != nil { + in, out := &in.NATIP, &out.NATIP + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.Subnet != nil { + in, out := &in.Subnet, &out.Subnet + *out = new(string) + **out = **in + } + if in.Vlan != nil { + in, out := &in.Vlan, &out.Vlan + *out = new(float64) + **out = **in + } + if in.VrackNetworkID != nil { + in, out := &in.VrackNetworkID, &out.VrackNetworkID + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VrackNetworkObservation. +func (in *VrackNetworkObservation) DeepCopy() *VrackNetworkObservation { + if in == nil { + return nil + } + out := new(VrackNetworkObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VrackNetworkParameters) DeepCopyInto(out *VrackNetworkParameters) { + *out = *in + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.FarmID != nil { + in, out := &in.FarmID, &out.FarmID + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.NATIP != nil { + in, out := &in.NATIP, &out.NATIP + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.Subnet != nil { + in, out := &in.Subnet, &out.Subnet + *out = new(string) + **out = **in + } + if in.Vlan != nil { + in, out := &in.Vlan, &out.Vlan + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VrackNetworkParameters. +func (in *VrackNetworkParameters) DeepCopy() *VrackNetworkParameters { + if in == nil { + return nil + } + out := new(VrackNetworkParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VrackNetworkSpec) DeepCopyInto(out *VrackNetworkSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VrackNetworkSpec. +func (in *VrackNetworkSpec) DeepCopy() *VrackNetworkSpec { + if in == nil { + return nil + } + out := new(VrackNetworkSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VrackNetworkStatus) DeepCopyInto(out *VrackNetworkStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VrackNetworkStatus. +func (in *VrackNetworkStatus) DeepCopy() *VrackNetworkStatus { + if in == nil { + return nil + } + out := new(VrackNetworkStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/lb/v1alpha1/zz_generated.managed.go b/apis/lb/v1alpha1/zz_generated.managed.go new file mode 100644 index 0000000..87b8589 --- /dev/null +++ b/apis/lb/v1alpha1/zz_generated.managed.go @@ -0,0 +1,788 @@ +/* +Copyright 2022 Upbound Inc. +*/ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this HTTPFarm. +func (mg *HTTPFarm) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this HTTPFarm. +func (mg *HTTPFarm) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this HTTPFarm. +func (mg *HTTPFarm) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this HTTPFarm. +func (mg *HTTPFarm) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this HTTPFarm. +func (mg *HTTPFarm) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this HTTPFarm. +func (mg *HTTPFarm) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this HTTPFarm. +func (mg *HTTPFarm) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) 
+} + +// SetDeletionPolicy of this HTTPFarm. +func (mg *HTTPFarm) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this HTTPFarm. +func (mg *HTTPFarm) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this HTTPFarm. +func (mg *HTTPFarm) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this HTTPFarm. +func (mg *HTTPFarm) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this HTTPFarm. +func (mg *HTTPFarm) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this HTTPFarmServer. +func (mg *HTTPFarmServer) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this HTTPFarmServer. +func (mg *HTTPFarmServer) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this HTTPFarmServer. +func (mg *HTTPFarmServer) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this HTTPFarmServer. +func (mg *HTTPFarmServer) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this HTTPFarmServer. +func (mg *HTTPFarmServer) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this HTTPFarmServer. +func (mg *HTTPFarmServer) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this HTTPFarmServer. 
+func (mg *HTTPFarmServer) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this HTTPFarmServer. +func (mg *HTTPFarmServer) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this HTTPFarmServer. +func (mg *HTTPFarmServer) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this HTTPFarmServer. +func (mg *HTTPFarmServer) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this HTTPFarmServer. +func (mg *HTTPFarmServer) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this HTTPFarmServer. +func (mg *HTTPFarmServer) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this HTTPFrontend. +func (mg *HTTPFrontend) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this HTTPFrontend. +func (mg *HTTPFrontend) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this HTTPFrontend. +func (mg *HTTPFrontend) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this HTTPFrontend. +func (mg *HTTPFrontend) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this HTTPFrontend. +func (mg *HTTPFrontend) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this HTTPFrontend. 
+func (mg *HTTPFrontend) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this HTTPFrontend. +func (mg *HTTPFrontend) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this HTTPFrontend. +func (mg *HTTPFrontend) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this HTTPFrontend. +func (mg *HTTPFrontend) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this HTTPFrontend. +func (mg *HTTPFrontend) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this HTTPFrontend. +func (mg *HTTPFrontend) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this HTTPFrontend. +func (mg *HTTPFrontend) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this HTTPRoute. +func (mg *HTTPRoute) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this HTTPRoute. +func (mg *HTTPRoute) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this HTTPRoute. +func (mg *HTTPRoute) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this HTTPRoute. +func (mg *HTTPRoute) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this HTTPRoute. 
+func (mg *HTTPRoute) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this HTTPRoute. +func (mg *HTTPRoute) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this HTTPRoute. +func (mg *HTTPRoute) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this HTTPRoute. +func (mg *HTTPRoute) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this HTTPRoute. +func (mg *HTTPRoute) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this HTTPRoute. +func (mg *HTTPRoute) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this HTTPRoute. +func (mg *HTTPRoute) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this HTTPRoute. +func (mg *HTTPRoute) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this HTTPRouteRule. +func (mg *HTTPRouteRule) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this HTTPRouteRule. +func (mg *HTTPRouteRule) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this HTTPRouteRule. +func (mg *HTTPRouteRule) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this HTTPRouteRule. 
+func (mg *HTTPRouteRule) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this HTTPRouteRule. +func (mg *HTTPRouteRule) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this HTTPRouteRule. +func (mg *HTTPRouteRule) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this HTTPRouteRule. +func (mg *HTTPRouteRule) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this HTTPRouteRule. +func (mg *HTTPRouteRule) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this HTTPRouteRule. +func (mg *HTTPRouteRule) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this HTTPRouteRule. +func (mg *HTTPRouteRule) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this HTTPRouteRule. +func (mg *HTTPRouteRule) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this HTTPRouteRule. +func (mg *HTTPRouteRule) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Iploadbalancing. +func (mg *Iploadbalancing) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Iploadbalancing. +func (mg *Iploadbalancing) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Iploadbalancing. 
+func (mg *Iploadbalancing) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Iploadbalancing. +func (mg *Iploadbalancing) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Iploadbalancing. +func (mg *Iploadbalancing) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Iploadbalancing. +func (mg *Iploadbalancing) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Iploadbalancing. +func (mg *Iploadbalancing) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Iploadbalancing. +func (mg *Iploadbalancing) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Iploadbalancing. +func (mg *Iploadbalancing) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Iploadbalancing. +func (mg *Iploadbalancing) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Iploadbalancing. +func (mg *Iploadbalancing) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Iploadbalancing. +func (mg *Iploadbalancing) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Refresh. +func (mg *Refresh) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Refresh. 
+func (mg *Refresh) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Refresh. +func (mg *Refresh) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Refresh. +func (mg *Refresh) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Refresh. +func (mg *Refresh) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Refresh. +func (mg *Refresh) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Refresh. +func (mg *Refresh) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Refresh. +func (mg *Refresh) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Refresh. +func (mg *Refresh) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Refresh. +func (mg *Refresh) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Refresh. +func (mg *Refresh) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Refresh. +func (mg *Refresh) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this TCPFarm. +func (mg *TCPFarm) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this TCPFarm. 
+func (mg *TCPFarm) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this TCPFarm. +func (mg *TCPFarm) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this TCPFarm. +func (mg *TCPFarm) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this TCPFarm. +func (mg *TCPFarm) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this TCPFarm. +func (mg *TCPFarm) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this TCPFarm. +func (mg *TCPFarm) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this TCPFarm. +func (mg *TCPFarm) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this TCPFarm. +func (mg *TCPFarm) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this TCPFarm. +func (mg *TCPFarm) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this TCPFarm. +func (mg *TCPFarm) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this TCPFarm. +func (mg *TCPFarm) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this TCPFarmServer. +func (mg *TCPFarmServer) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this TCPFarmServer. 
+func (mg *TCPFarmServer) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this TCPFarmServer. +func (mg *TCPFarmServer) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this TCPFarmServer. +func (mg *TCPFarmServer) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this TCPFarmServer. +func (mg *TCPFarmServer) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this TCPFarmServer. +func (mg *TCPFarmServer) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this TCPFarmServer. +func (mg *TCPFarmServer) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this TCPFarmServer. +func (mg *TCPFarmServer) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this TCPFarmServer. +func (mg *TCPFarmServer) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this TCPFarmServer. +func (mg *TCPFarmServer) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this TCPFarmServer. +func (mg *TCPFarmServer) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this TCPFarmServer. +func (mg *TCPFarmServer) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this TCPFrontend. 
+func (mg *TCPFrontend) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this TCPFrontend. +func (mg *TCPFrontend) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this TCPFrontend. +func (mg *TCPFrontend) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this TCPFrontend. +func (mg *TCPFrontend) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this TCPFrontend. +func (mg *TCPFrontend) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this TCPFrontend. +func (mg *TCPFrontend) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this TCPFrontend. +func (mg *TCPFrontend) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this TCPFrontend. +func (mg *TCPFrontend) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this TCPFrontend. +func (mg *TCPFrontend) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this TCPFrontend. +func (mg *TCPFrontend) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this TCPFrontend. +func (mg *TCPFrontend) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this TCPFrontend. 
+func (mg *TCPFrontend) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this TCPRoute. +func (mg *TCPRoute) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this TCPRoute. +func (mg *TCPRoute) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this TCPRoute. +func (mg *TCPRoute) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this TCPRoute. +func (mg *TCPRoute) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this TCPRoute. +func (mg *TCPRoute) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this TCPRoute. +func (mg *TCPRoute) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this TCPRoute. +func (mg *TCPRoute) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this TCPRoute. +func (mg *TCPRoute) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this TCPRoute. +func (mg *TCPRoute) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this TCPRoute. +func (mg *TCPRoute) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this TCPRoute. +func (mg *TCPRoute) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this TCPRoute. 
+func (mg *TCPRoute) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this TCPRouteRule. +func (mg *TCPRouteRule) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this TCPRouteRule. +func (mg *TCPRouteRule) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this TCPRouteRule. +func (mg *TCPRouteRule) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this TCPRouteRule. +func (mg *TCPRouteRule) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this TCPRouteRule. +func (mg *TCPRouteRule) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this TCPRouteRule. +func (mg *TCPRouteRule) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this TCPRouteRule. +func (mg *TCPRouteRule) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this TCPRouteRule. +func (mg *TCPRouteRule) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this TCPRouteRule. +func (mg *TCPRouteRule) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this TCPRouteRule. +func (mg *TCPRouteRule) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this TCPRouteRule. 
+func (mg *TCPRouteRule) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this TCPRouteRule. +func (mg *TCPRouteRule) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this VrackNetwork. +func (mg *VrackNetwork) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this VrackNetwork. +func (mg *VrackNetwork) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this VrackNetwork. +func (mg *VrackNetwork) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this VrackNetwork. +func (mg *VrackNetwork) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this VrackNetwork. +func (mg *VrackNetwork) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this VrackNetwork. +func (mg *VrackNetwork) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this VrackNetwork. +func (mg *VrackNetwork) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this VrackNetwork. +func (mg *VrackNetwork) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this VrackNetwork. +func (mg *VrackNetwork) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this VrackNetwork. 
+func (mg *VrackNetwork) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this VrackNetwork. +func (mg *VrackNetwork) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this VrackNetwork. +func (mg *VrackNetwork) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/lb/v1alpha1/zz_generated.managedlist.go b/apis/lb/v1alpha1/zz_generated.managedlist.go new file mode 100644 index 0000000..295d6b9 --- /dev/null +++ b/apis/lb/v1alpha1/zz_generated.managedlist.go @@ -0,0 +1,125 @@ +/* +Copyright 2022 Upbound Inc. +*/ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this HTTPFarmList. +func (l *HTTPFarmList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this HTTPFarmServerList. +func (l *HTTPFarmServerList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this HTTPFrontendList. +func (l *HTTPFrontendList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this HTTPRouteList. +func (l *HTTPRouteList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this HTTPRouteRuleList. 
+func (l *HTTPRouteRuleList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this IploadbalancingList. +func (l *IploadbalancingList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this RefreshList. +func (l *RefreshList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this TCPFarmList. +func (l *TCPFarmList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this TCPFarmServerList. +func (l *TCPFarmServerList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this TCPFrontendList. +func (l *TCPFrontendList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this TCPRouteList. +func (l *TCPRouteList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this TCPRouteRuleList. +func (l *TCPRouteRuleList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this VrackNetworkList. 
+func (l *VrackNetworkList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/lb/v1alpha1/zz_generated_terraformed.go b/apis/lb/v1alpha1/zz_generated_terraformed.go new file mode 100755 index 0000000..49b64b0 --- /dev/null +++ b/apis/lb/v1alpha1/zz_generated_terraformed.go @@ -0,0 +1,1110 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Iploadbalancing +func (mg *Iploadbalancing) GetTerraformResourceType() string { + return "ovh_iploadbalancing" +} + +// GetConnectionDetailsMapping for this Iploadbalancing +func (tr *Iploadbalancing) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"metrics_token": "status.atProvider.metricsToken"} +} + +// GetObservation of this Iploadbalancing +func (tr *Iploadbalancing) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Iploadbalancing +func (tr *Iploadbalancing) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Iploadbalancing +func (tr *Iploadbalancing) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Iploadbalancing +func (tr *Iploadbalancing) 
GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Iploadbalancing +func (tr *Iploadbalancing) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Iploadbalancing +func (tr *Iploadbalancing) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// LateInitialize this Iploadbalancing using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Iploadbalancing) LateInitialize(attrs []byte) (bool, error) { + params := &IploadbalancingParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Iploadbalancing) GetTerraformSchemaVersion() int { + return 0 +} + +// GetTerraformResourceType returns Terraform resource type for this HTTPFarm +func (mg *HTTPFarm) GetTerraformResourceType() string { + return "ovh_iploadbalancing_http_farm" +} + +// GetConnectionDetailsMapping for this HTTPFarm +func (tr *HTTPFarm) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this HTTPFarm +func (tr *HTTPFarm) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this HTTPFarm +func (tr *HTTPFarm) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this HTTPFarm +func (tr *HTTPFarm) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this HTTPFarm +func (tr *HTTPFarm) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this HTTPFarm +func (tr *HTTPFarm) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this HTTPFarm +func (tr *HTTPFarm) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := 
map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// LateInitialize this HTTPFarm using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *HTTPFarm) LateInitialize(attrs []byte) (bool, error) { + params := &HTTPFarmParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *HTTPFarm) GetTerraformSchemaVersion() int { + return 0 +} + +// GetTerraformResourceType returns Terraform resource type for this HTTPFarmServer +func (mg *HTTPFarmServer) GetTerraformResourceType() string { + return "ovh_iploadbalancing_http_farm_server" +} + +// GetConnectionDetailsMapping for this HTTPFarmServer +func (tr *HTTPFarmServer) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this HTTPFarmServer +func (tr *HTTPFarmServer) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this HTTPFarmServer +func (tr *HTTPFarmServer) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this HTTPFarmServer +func (tr *HTTPFarmServer) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this 
HTTPFarmServer +func (tr *HTTPFarmServer) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this HTTPFarmServer +func (tr *HTTPFarmServer) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this HTTPFarmServer +func (tr *HTTPFarmServer) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// LateInitialize this HTTPFarmServer using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *HTTPFarmServer) LateInitialize(attrs []byte) (bool, error) { + params := &HTTPFarmServerParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *HTTPFarmServer) GetTerraformSchemaVersion() int { + return 0 +} + +// GetTerraformResourceType returns Terraform resource type for this HTTPFrontend +func (mg *HTTPFrontend) GetTerraformResourceType() string { + return "ovh_iploadbalancing_http_frontend" +} + +// GetConnectionDetailsMapping for this HTTPFrontend +func (tr *HTTPFrontend) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this HTTPFrontend +func (tr *HTTPFrontend) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this HTTPFrontend +func (tr *HTTPFrontend) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this HTTPFrontend +func (tr *HTTPFrontend) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this HTTPFrontend +func (tr *HTTPFrontend) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this HTTPFrontend +func (tr *HTTPFrontend) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this HTTPFrontend +func (tr *HTTPFrontend) GetInitParameters() (map[string]any, error) { + p, err := 
json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// LateInitialize this HTTPFrontend using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *HTTPFrontend) LateInitialize(attrs []byte) (bool, error) { + params := &HTTPFrontendParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *HTTPFrontend) GetTerraformSchemaVersion() int { + return 0 +} + +// GetTerraformResourceType returns Terraform resource type for this HTTPRoute +func (mg *HTTPRoute) GetTerraformResourceType() string { + return "ovh_iploadbalancing_http_route" +} + +// GetConnectionDetailsMapping for this HTTPRoute +func (tr *HTTPRoute) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this HTTPRoute +func (tr *HTTPRoute) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this HTTPRoute +func (tr *HTTPRoute) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this HTTPRoute +func (tr *HTTPRoute) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return 
*tr.Status.AtProvider.ID +} + +// GetParameters of this HTTPRoute +func (tr *HTTPRoute) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this HTTPRoute +func (tr *HTTPRoute) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this HTTPRoute +func (tr *HTTPRoute) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// LateInitialize this HTTPRoute using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *HTTPRoute) LateInitialize(attrs []byte) (bool, error) { + params := &HTTPRouteParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *HTTPRoute) GetTerraformSchemaVersion() int { + return 0 +} + +// GetTerraformResourceType returns Terraform resource type for this HTTPRouteRule +func (mg *HTTPRouteRule) GetTerraformResourceType() string { + return "ovh_iploadbalancing_http_route_rule" +} + +// GetConnectionDetailsMapping for this HTTPRouteRule +func (tr *HTTPRouteRule) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this HTTPRouteRule +func (tr *HTTPRouteRule) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this HTTPRouteRule +func (tr *HTTPRouteRule) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this HTTPRouteRule +func (tr *HTTPRouteRule) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this HTTPRouteRule +func (tr *HTTPRouteRule) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this HTTPRouteRule +func (tr *HTTPRouteRule) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this HTTPRouteRule +func (tr *HTTPRouteRule) GetInitParameters() (map[string]any, error) { + p, err := 
json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// LateInitialize this HTTPRouteRule using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *HTTPRouteRule) LateInitialize(attrs []byte) (bool, error) { + params := &HTTPRouteRuleParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *HTTPRouteRule) GetTerraformSchemaVersion() int { + return 0 +} + +// GetTerraformResourceType returns Terraform resource type for this Refresh +func (mg *Refresh) GetTerraformResourceType() string { + return "ovh_iploadbalancing_refresh" +} + +// GetConnectionDetailsMapping for this Refresh +func (tr *Refresh) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Refresh +func (tr *Refresh) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Refresh +func (tr *Refresh) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Refresh +func (tr *Refresh) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// 
GetParameters of this Refresh +func (tr *Refresh) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Refresh +func (tr *Refresh) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Refresh +func (tr *Refresh) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// LateInitialize this Refresh using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Refresh) LateInitialize(attrs []byte) (bool, error) { + params := &RefreshParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Refresh) GetTerraformSchemaVersion() int { + return 0 +} + +// GetTerraformResourceType returns Terraform resource type for this TCPFarm +func (mg *TCPFarm) GetTerraformResourceType() string { + return "ovh_iploadbalancing_tcp_farm" +} + +// GetConnectionDetailsMapping for this TCPFarm +func (tr *TCPFarm) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this TCPFarm +func (tr *TCPFarm) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this TCPFarm +func (tr *TCPFarm) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this TCPFarm +func (tr *TCPFarm) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this TCPFarm +func (tr *TCPFarm) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this TCPFarm +func (tr *TCPFarm) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this TCPFarm +func (tr *TCPFarm) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, 
json.TFParser.Unmarshal(p, &base) +} + +// LateInitialize this TCPFarm using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *TCPFarm) LateInitialize(attrs []byte) (bool, error) { + params := &TCPFarmParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *TCPFarm) GetTerraformSchemaVersion() int { + return 0 +} + +// GetTerraformResourceType returns Terraform resource type for this TCPFarmServer +func (mg *TCPFarmServer) GetTerraformResourceType() string { + return "ovh_iploadbalancing_tcp_farm_server" +} + +// GetConnectionDetailsMapping for this TCPFarmServer +func (tr *TCPFarmServer) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this TCPFarmServer +func (tr *TCPFarmServer) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this TCPFarmServer +func (tr *TCPFarmServer) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this TCPFarmServer +func (tr *TCPFarmServer) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this TCPFarmServer +func (tr *TCPFarmServer) GetParameters() 
(map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this TCPFarmServer +func (tr *TCPFarmServer) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this TCPFarmServer +func (tr *TCPFarmServer) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// LateInitialize this TCPFarmServer using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *TCPFarmServer) LateInitialize(attrs []byte) (bool, error) { + params := &TCPFarmServerParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *TCPFarmServer) GetTerraformSchemaVersion() int { + return 0 +} + +// GetTerraformResourceType returns Terraform resource type for this TCPFrontend +func (mg *TCPFrontend) GetTerraformResourceType() string { + return "ovh_iploadbalancing_tcp_frontend" +} + +// GetConnectionDetailsMapping for this TCPFrontend +func (tr *TCPFrontend) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this TCPFrontend +func (tr *TCPFrontend) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this TCPFrontend +func (tr *TCPFrontend) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this TCPFrontend +func (tr *TCPFrontend) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this TCPFrontend +func (tr *TCPFrontend) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this TCPFrontend +func (tr *TCPFrontend) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this TCPFrontend +func (tr *TCPFrontend) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil 
{ + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// LateInitialize this TCPFrontend using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *TCPFrontend) LateInitialize(attrs []byte) (bool, error) { + params := &TCPFrontendParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *TCPFrontend) GetTerraformSchemaVersion() int { + return 0 +} + +// GetTerraformResourceType returns Terraform resource type for this TCPRoute +func (mg *TCPRoute) GetTerraformResourceType() string { + return "ovh_iploadbalancing_tcp_route" +} + +// GetConnectionDetailsMapping for this TCPRoute +func (tr *TCPRoute) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this TCPRoute +func (tr *TCPRoute) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this TCPRoute +func (tr *TCPRoute) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this TCPRoute +func (tr *TCPRoute) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this TCPRoute +func (tr *TCPRoute) 
GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this TCPRoute +func (tr *TCPRoute) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this TCPRoute +func (tr *TCPRoute) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// LateInitialize this TCPRoute using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *TCPRoute) LateInitialize(attrs []byte) (bool, error) { + params := &TCPRouteParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *TCPRoute) GetTerraformSchemaVersion() int { + return 0 +} + +// GetTerraformResourceType returns Terraform resource type for this TCPRouteRule +func (mg *TCPRouteRule) GetTerraformResourceType() string { + return "ovh_iploadbalancing_tcp_route_rule" +} + +// GetConnectionDetailsMapping for this TCPRouteRule +func (tr *TCPRouteRule) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this TCPRouteRule +func (tr *TCPRouteRule) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this TCPRouteRule +func (tr *TCPRouteRule) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this TCPRouteRule +func (tr *TCPRouteRule) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this TCPRouteRule +func (tr *TCPRouteRule) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this TCPRouteRule +func (tr *TCPRouteRule) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this TCPRouteRule +func (tr *TCPRouteRule) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + 
if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// LateInitialize this TCPRouteRule using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *TCPRouteRule) LateInitialize(attrs []byte) (bool, error) { + params := &TCPRouteRuleParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *TCPRouteRule) GetTerraformSchemaVersion() int { + return 0 +} + +// GetTerraformResourceType returns Terraform resource type for this VrackNetwork +func (mg *VrackNetwork) GetTerraformResourceType() string { + return "ovh_iploadbalancing_vrack_network" +} + +// GetConnectionDetailsMapping for this VrackNetwork +func (tr *VrackNetwork) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this VrackNetwork +func (tr *VrackNetwork) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this VrackNetwork +func (tr *VrackNetwork) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this VrackNetwork +func (tr *VrackNetwork) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} 
+ +// GetParameters of this VrackNetwork +func (tr *VrackNetwork) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this VrackNetwork +func (tr *VrackNetwork) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this VrackNetwork +func (tr *VrackNetwork) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// LateInitialize this VrackNetwork using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *VrackNetwork) LateInitialize(attrs []byte) (bool, error) { + params := &VrackNetworkParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *VrackNetwork) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/lb/v1alpha1/zz_groupversion_info.go b/apis/lb/v1alpha1/zz_groupversion_info.go new file mode 100755 index 0000000..a3d50fd --- /dev/null +++ b/apis/lb/v1alpha1/zz_groupversion_info.go @@ -0,0 +1,36 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. 
+*/ + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=lb.ovh.edixos.io +// +versionName=v1alpha1 +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "lb.ovh.edixos.io" + CRDVersion = "v1alpha1" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/lb/v1alpha1/zz_httpfarm_types.go b/apis/lb/v1alpha1/zz_httpfarm_types.go new file mode 100755 index 0000000..2df2421 --- /dev/null +++ b/apis/lb/v1alpha1/zz_httpfarm_types.go @@ -0,0 +1,215 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type HTTPFarmInitParameters struct { + Balance *string `json:"balance,omitempty" tf:"balance,omitempty"` + + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + Probe []ProbeInitParameters `json:"probe,omitempty" tf:"probe,omitempty"` + + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + Stickiness *string `json:"stickiness,omitempty" tf:"stickiness,omitempty"` + + VrackNetworkID *float64 `json:"vrackNetworkId,omitempty" tf:"vrack_network_id,omitempty"` + + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` +} + +type HTTPFarmObservation struct { + Balance *string `json:"balance,omitempty" tf:"balance,omitempty"` + + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + Probe []ProbeObservation `json:"probe,omitempty" tf:"probe,omitempty"` + + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + Stickiness *string `json:"stickiness,omitempty" tf:"stickiness,omitempty"` + + VrackNetworkID *float64 `json:"vrackNetworkId,omitempty" tf:"vrack_network_id,omitempty"` + + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` +} + +type HTTPFarmParameters struct { + + // +kubebuilder:validation:Optional + Balance *string `json:"balance,omitempty" tf:"balance,omitempty"` + + // +kubebuilder:validation:Optional + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // +kubebuilder:validation:Optional + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // +kubebuilder:validation:Optional + Probe []ProbeParameters `json:"probe,omitempty" 
tf:"probe,omitempty"` + + // +kubebuilder:validation:Optional + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // +kubebuilder:validation:Optional + Stickiness *string `json:"stickiness,omitempty" tf:"stickiness,omitempty"` + + // +kubebuilder:validation:Optional + VrackNetworkID *float64 `json:"vrackNetworkId,omitempty" tf:"vrack_network_id,omitempty"` + + // +kubebuilder:validation:Optional + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` +} + +type ProbeInitParameters struct { + ForceSSL *bool `json:"forceSsl,omitempty" tf:"force_ssl,omitempty"` + + Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` + + Match *string `json:"match,omitempty" tf:"match,omitempty"` + + Method *string `json:"method,omitempty" tf:"method,omitempty"` + + Negate *bool `json:"negate,omitempty" tf:"negate,omitempty"` + + Pattern *string `json:"pattern,omitempty" tf:"pattern,omitempty"` + + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +type ProbeObservation struct { + ForceSSL *bool `json:"forceSsl,omitempty" tf:"force_ssl,omitempty"` + + Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` + + Match *string `json:"match,omitempty" tf:"match,omitempty"` + + Method *string `json:"method,omitempty" tf:"method,omitempty"` + + Negate *bool `json:"negate,omitempty" tf:"negate,omitempty"` + + Pattern *string `json:"pattern,omitempty" tf:"pattern,omitempty"` + + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +type ProbeParameters struct { + + // +kubebuilder:validation:Optional + ForceSSL *bool `json:"forceSsl,omitempty" tf:"force_ssl,omitempty"` + + // +kubebuilder:validation:Optional + Interval *float64 `json:"interval,omitempty" 
tf:"interval,omitempty"` + + // +kubebuilder:validation:Optional + Match *string `json:"match,omitempty" tf:"match,omitempty"` + + // +kubebuilder:validation:Optional + Method *string `json:"method,omitempty" tf:"method,omitempty"` + + // +kubebuilder:validation:Optional + Negate *bool `json:"negate,omitempty" tf:"negate,omitempty"` + + // +kubebuilder:validation:Optional + Pattern *string `json:"pattern,omitempty" tf:"pattern,omitempty"` + + // +kubebuilder:validation:Optional + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` + + // +kubebuilder:validation:Optional + URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +// HTTPFarmSpec defines the desired state of HTTPFarm +type HTTPFarmSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider HTTPFarmParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider HTTPFarmInitParameters `json:"initProvider,omitempty"` +} + +// HTTPFarmStatus defines the observed state of HTTPFarm. +type HTTPFarmStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider HTTPFarmObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true + +// HTTPFarm is the Schema for the HTTPFarms API. 
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,ovh} +type HTTPFarm struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.serviceName) || (has(self.initProvider) && has(self.initProvider.serviceName))",message="spec.forProvider.serviceName is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.zone) || (has(self.initProvider) && has(self.initProvider.zone))",message="spec.forProvider.zone is a required parameter" + Spec HTTPFarmSpec `json:"spec"` + Status HTTPFarmStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// HTTPFarmList contains a list of HTTPFarms +type HTTPFarmList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []HTTPFarm `json:"items"` +} + +// Repository type metadata. +var ( + HTTPFarm_Kind = "HTTPFarm" + HTTPFarm_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: HTTPFarm_Kind}.String() + HTTPFarm_KindAPIVersion = HTTPFarm_Kind + "." 
+ CRDGroupVersion.String() + HTTPFarm_GroupVersionKind = CRDGroupVersion.WithKind(HTTPFarm_Kind) +) + +func init() { + SchemeBuilder.Register(&HTTPFarm{}, &HTTPFarmList{}) +} diff --git a/apis/lb/v1alpha1/zz_httpfarmserver_types.go b/apis/lb/v1alpha1/zz_httpfarmserver_types.go new file mode 100755 index 0000000..6346b2e --- /dev/null +++ b/apis/lb/v1alpha1/zz_httpfarmserver_types.go @@ -0,0 +1,184 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type HTTPFarmServerInitParameters struct { + Address *string `json:"address,omitempty" tf:"address,omitempty"` + + Backup *bool `json:"backup,omitempty" tf:"backup,omitempty"` + + Chain *string `json:"chain,omitempty" tf:"chain,omitempty"` + + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + FarmID *float64 `json:"farmId,omitempty" tf:"farm_id,omitempty"` + + OnMarkedDown *string `json:"onMarkedDown,omitempty" tf:"on_marked_down,omitempty"` + + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + Probe *bool `json:"probe,omitempty" tf:"probe,omitempty"` + + ProxyProtocolVersion *string `json:"proxyProtocolVersion,omitempty" tf:"proxy_protocol_version,omitempty"` + + SSL *bool `json:"ssl,omitempty" tf:"ssl,omitempty"` + + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` +} + +type HTTPFarmServerObservation struct { + Address *string `json:"address,omitempty" tf:"address,omitempty"` + + Backup *bool `json:"backup,omitempty" tf:"backup,omitempty"` + + Chain *string `json:"chain,omitempty" 
tf:"chain,omitempty"` + + Cookie *string `json:"cookie,omitempty" tf:"cookie,omitempty"` + + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + FarmID *float64 `json:"farmId,omitempty" tf:"farm_id,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + OnMarkedDown *string `json:"onMarkedDown,omitempty" tf:"on_marked_down,omitempty"` + + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + Probe *bool `json:"probe,omitempty" tf:"probe,omitempty"` + + ProxyProtocolVersion *string `json:"proxyProtocolVersion,omitempty" tf:"proxy_protocol_version,omitempty"` + + SSL *bool `json:"ssl,omitempty" tf:"ssl,omitempty"` + + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` +} + +type HTTPFarmServerParameters struct { + + // +kubebuilder:validation:Optional + Address *string `json:"address,omitempty" tf:"address,omitempty"` + + // +kubebuilder:validation:Optional + Backup *bool `json:"backup,omitempty" tf:"backup,omitempty"` + + // +kubebuilder:validation:Optional + Chain *string `json:"chain,omitempty" tf:"chain,omitempty"` + + // +kubebuilder:validation:Optional + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // +kubebuilder:validation:Optional + FarmID *float64 `json:"farmId,omitempty" tf:"farm_id,omitempty"` + + // +kubebuilder:validation:Optional + OnMarkedDown *string `json:"onMarkedDown,omitempty" tf:"on_marked_down,omitempty"` + + // +kubebuilder:validation:Optional + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // +kubebuilder:validation:Optional + Probe *bool `json:"probe,omitempty" tf:"probe,omitempty"` + + // +kubebuilder:validation:Optional + ProxyProtocolVersion *string `json:"proxyProtocolVersion,omitempty" tf:"proxy_protocol_version,omitempty"` + + // +kubebuilder:validation:Optional + SSL 
*bool `json:"ssl,omitempty" tf:"ssl,omitempty"` + + // +kubebuilder:validation:Optional + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // +kubebuilder:validation:Optional + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // +kubebuilder:validation:Optional + Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` +} + +// HTTPFarmServerSpec defines the desired state of HTTPFarmServer +type HTTPFarmServerSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider HTTPFarmServerParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider HTTPFarmServerInitParameters `json:"initProvider,omitempty"` +} + +// HTTPFarmServerStatus defines the observed state of HTTPFarmServer. +type HTTPFarmServerStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider HTTPFarmServerObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true + +// HTTPFarmServer is the Schema for the HTTPFarmServers API. 
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,ovh} +type HTTPFarmServer struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.address) || (has(self.initProvider) && has(self.initProvider.address))",message="spec.forProvider.address is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.farmId) || (has(self.initProvider) && has(self.initProvider.farmId))",message="spec.forProvider.farmId is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.serviceName) || (has(self.initProvider) && has(self.initProvider.serviceName))",message="spec.forProvider.serviceName is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.status) || (has(self.initProvider) && has(self.initProvider.status))",message="spec.forProvider.status is a required parameter" + Spec HTTPFarmServerSpec `json:"spec"` + Status 
HTTPFarmServerStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// HTTPFarmServerList contains a list of HTTPFarmServers +type HTTPFarmServerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []HTTPFarmServer `json:"items"` +} + +// Repository type metadata. +var ( + HTTPFarmServer_Kind = "HTTPFarmServer" + HTTPFarmServer_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: HTTPFarmServer_Kind}.String() + HTTPFarmServer_KindAPIVersion = HTTPFarmServer_Kind + "." + CRDGroupVersion.String() + HTTPFarmServer_GroupVersionKind = CRDGroupVersion.WithKind(HTTPFarmServer_Kind) +) + +func init() { + SchemeBuilder.Register(&HTTPFarmServer{}, &HTTPFarmServerList{}) +} diff --git a/apis/lb/v1alpha1/zz_httpfrontend_types.go b/apis/lb/v1alpha1/zz_httpfrontend_types.go new file mode 100755 index 0000000..eb957bd --- /dev/null +++ b/apis/lb/v1alpha1/zz_httpfrontend_types.go @@ -0,0 +1,181 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type HTTPFrontendInitParameters struct { + AllowedSource []*string `json:"allowedSource,omitempty" tf:"allowed_source,omitempty"` + + DedicatedIpfo []*string `json:"dedicatedIpfo,omitempty" tf:"dedicated_ipfo,omitempty"` + + DefaultFarmID *float64 `json:"defaultFarmId,omitempty" tf:"default_farm_id,omitempty"` + + DefaultSSLID *float64 `json:"defaultSslId,omitempty" tf:"default_ssl_id,omitempty"` + + Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"` + + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + HTTPHeader []*string `json:"httpHeader,omitempty" tf:"http_header,omitempty"` + + Hsts *bool `json:"hsts,omitempty" tf:"hsts,omitempty"` + + Port *string `json:"port,omitempty" tf:"port,omitempty"` + + RedirectLocation *string `json:"redirectLocation,omitempty" tf:"redirect_location,omitempty"` + + SSL *bool `json:"ssl,omitempty" tf:"ssl,omitempty"` + + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` +} + +type HTTPFrontendObservation struct { + AllowedSource []*string `json:"allowedSource,omitempty" tf:"allowed_source,omitempty"` + + DedicatedIpfo []*string `json:"dedicatedIpfo,omitempty" tf:"dedicated_ipfo,omitempty"` + + DefaultFarmID *float64 `json:"defaultFarmId,omitempty" tf:"default_farm_id,omitempty"` + + DefaultSSLID *float64 `json:"defaultSslId,omitempty" tf:"default_ssl_id,omitempty"` + + Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"` + + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + HTTPHeader []*string `json:"httpHeader,omitempty" tf:"http_header,omitempty"` + + Hsts *bool `json:"hsts,omitempty" tf:"hsts,omitempty"` + + ID *string `json:"id,omitempty" 
tf:"id,omitempty"` + + Port *string `json:"port,omitempty" tf:"port,omitempty"` + + RedirectLocation *string `json:"redirectLocation,omitempty" tf:"redirect_location,omitempty"` + + SSL *bool `json:"ssl,omitempty" tf:"ssl,omitempty"` + + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` +} + +type HTTPFrontendParameters struct { + + // +kubebuilder:validation:Optional + AllowedSource []*string `json:"allowedSource,omitempty" tf:"allowed_source,omitempty"` + + // +kubebuilder:validation:Optional + DedicatedIpfo []*string `json:"dedicatedIpfo,omitempty" tf:"dedicated_ipfo,omitempty"` + + // +kubebuilder:validation:Optional + DefaultFarmID *float64 `json:"defaultFarmId,omitempty" tf:"default_farm_id,omitempty"` + + // +kubebuilder:validation:Optional + DefaultSSLID *float64 `json:"defaultSslId,omitempty" tf:"default_ssl_id,omitempty"` + + // +kubebuilder:validation:Optional + Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"` + + // +kubebuilder:validation:Optional + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // +kubebuilder:validation:Optional + HTTPHeader []*string `json:"httpHeader,omitempty" tf:"http_header,omitempty"` + + // +kubebuilder:validation:Optional + Hsts *bool `json:"hsts,omitempty" tf:"hsts,omitempty"` + + // +kubebuilder:validation:Optional + Port *string `json:"port,omitempty" tf:"port,omitempty"` + + // +kubebuilder:validation:Optional + RedirectLocation *string `json:"redirectLocation,omitempty" tf:"redirect_location,omitempty"` + + // +kubebuilder:validation:Optional + SSL *bool `json:"ssl,omitempty" tf:"ssl,omitempty"` + + // +kubebuilder:validation:Optional + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // +kubebuilder:validation:Optional + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` +} + +// HTTPFrontendSpec defines the desired state of HTTPFrontend 
+type HTTPFrontendSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider HTTPFrontendParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider HTTPFrontendInitParameters `json:"initProvider,omitempty"` +} + +// HTTPFrontendStatus defines the observed state of HTTPFrontend. +type HTTPFrontendStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider HTTPFrontendObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true + +// HTTPFrontend is the Schema for the HTTPFrontends API. 
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,ovh} +type HTTPFrontend struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.port) || (has(self.initProvider) && has(self.initProvider.port))",message="spec.forProvider.port is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.serviceName) || (has(self.initProvider) && has(self.initProvider.serviceName))",message="spec.forProvider.serviceName is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.zone) || (has(self.initProvider) && has(self.initProvider.zone))",message="spec.forProvider.zone is a required parameter" + Spec HTTPFrontendSpec `json:"spec"` + Status HTTPFrontendStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// HTTPFrontendList contains a list of HTTPFrontends +type HTTPFrontendList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []HTTPFrontend `json:"items"` +} + +// Repository type metadata. 
+var ( + HTTPFrontend_Kind = "HTTPFrontend" + HTTPFrontend_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: HTTPFrontend_Kind}.String() + HTTPFrontend_KindAPIVersion = HTTPFrontend_Kind + "." + CRDGroupVersion.String() + HTTPFrontend_GroupVersionKind = CRDGroupVersion.WithKind(HTTPFrontend_Kind) +) + +func init() { + SchemeBuilder.Register(&HTTPFrontend{}, &HTTPFrontendList{}) +} diff --git a/apis/lb/v1alpha1/zz_httproute_types.go b/apis/lb/v1alpha1/zz_httproute_types.go new file mode 100755 index 0000000..d7524d9 --- /dev/null +++ b/apis/lb/v1alpha1/zz_httproute_types.go @@ -0,0 +1,206 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ActionInitParameters struct { + + // HTTP status code for "redirect" and "reject" actions + Status *float64 `json:"status,omitempty" tf:"status,omitempty"` + + // Farm ID for "farm" action type or URL template for "redirect" action. You may use ${uri}, ${protocol}, ${host}, ${port} and ${path} variables in redirect target + Target *string `json:"target,omitempty" tf:"target,omitempty"` + + // Action to trigger if all the rules of this route matches + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ActionObservation struct { + + // HTTP status code for "redirect" and "reject" actions + Status *float64 `json:"status,omitempty" tf:"status,omitempty"` + + // Farm ID for "farm" action type or URL template for "redirect" action. 
You may use ${uri}, ${protocol}, ${host}, ${port} and ${path} variables in redirect target + Target *string `json:"target,omitempty" tf:"target,omitempty"` + + // Action to trigger if all the rules of this route matches + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ActionParameters struct { + + // HTTP status code for "redirect" and "reject" actions + // +kubebuilder:validation:Optional + Status *float64 `json:"status,omitempty" tf:"status,omitempty"` + + // Farm ID for "farm" action type or URL template for "redirect" action. You may use ${uri}, ${protocol}, ${host}, ${port} and ${path} variables in redirect target + // +kubebuilder:validation:Optional + Target *string `json:"target,omitempty" tf:"target,omitempty"` + + // Action to trigger if all the rules of this route matches + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type HTTPRouteInitParameters struct { + + // Action triggered when all rules match + Action []ActionInitParameters `json:"action,omitempty" tf:"action,omitempty"` + + // Human readable name for your route, this field is for you + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // Route traffic for this frontend + FrontendID *float64 `json:"frontendId,omitempty" tf:"frontend_id,omitempty"` + + // The internal name of your IP load balancing + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // Route priority ([0..255]). 0 if null. Highest priority routes are evaluated last. 
Only the first matching route will trigger an action + Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` +} + +type HTTPRouteObservation struct { + + // Action triggered when all rules match + Action []ActionObservation `json:"action,omitempty" tf:"action,omitempty"` + + // Human readable name for your route, this field is for you + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // Route traffic for this frontend + FrontendID *float64 `json:"frontendId,omitempty" tf:"frontend_id,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // List of rules to match to trigger action + Rules []RulesObservation `json:"rules,omitempty" tf:"rules,omitempty"` + + // The internal name of your IP load balancing + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // Route status. Routes in "ok" state are ready to operate + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // Route priority ([0..255]). 0 if null. Highest priority routes are evaluated last. Only the first matching route will trigger an action + Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` +} + +type HTTPRouteParameters struct { + + // Action triggered when all rules match + // +kubebuilder:validation:Optional + Action []ActionParameters `json:"action,omitempty" tf:"action,omitempty"` + + // Human readable name for your route, this field is for you + // +kubebuilder:validation:Optional + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // Route traffic for this frontend + // +kubebuilder:validation:Optional + FrontendID *float64 `json:"frontendId,omitempty" tf:"frontend_id,omitempty"` + + // The internal name of your IP load balancing + // +kubebuilder:validation:Optional + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // Route priority ([0..255]). 0 if null. Highest priority routes are evaluated last. 
Only the first matching route will trigger an action + // +kubebuilder:validation:Optional + Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` +} + +type RulesInitParameters struct { +} + +type RulesObservation struct { + Field *string `json:"field,omitempty" tf:"field,omitempty"` + + Match *string `json:"match,omitempty" tf:"match,omitempty"` + + Negate *bool `json:"negate,omitempty" tf:"negate,omitempty"` + + Pattern *string `json:"pattern,omitempty" tf:"pattern,omitempty"` + + RuleID *float64 `json:"ruleId,omitempty" tf:"rule_id,omitempty"` + + SubField *string `json:"subField,omitempty" tf:"sub_field,omitempty"` +} + +type RulesParameters struct { +} + +// HTTPRouteSpec defines the desired state of HTTPRoute +type HTTPRouteSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider HTTPRouteParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider HTTPRouteInitParameters `json:"initProvider,omitempty"` +} + +// HTTPRouteStatus defines the observed state of HTTPRoute. +type HTTPRouteStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider HTTPRouteObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true + +// HTTPRoute is the Schema for the HTTPRoutes API. 
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,ovh} +type HTTPRoute struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.action) || (has(self.initProvider) && has(self.initProvider.action))",message="spec.forProvider.action is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.serviceName) || (has(self.initProvider) && has(self.initProvider.serviceName))",message="spec.forProvider.serviceName is a required parameter" + Spec HTTPRouteSpec `json:"spec"` + Status HTTPRouteStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// HTTPRouteList contains a list of HTTPRoutes +type HTTPRouteList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []HTTPRoute `json:"items"` +} + +// Repository type metadata. +var ( + HTTPRoute_Kind = "HTTPRoute" + HTTPRoute_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: HTTPRoute_Kind}.String() + HTTPRoute_KindAPIVersion = HTTPRoute_Kind + "." 
+ CRDGroupVersion.String() + HTTPRoute_GroupVersionKind = CRDGroupVersion.WithKind(HTTPRoute_Kind) +) + +func init() { + SchemeBuilder.Register(&HTTPRoute{}, &HTTPRouteList{}) +} diff --git a/apis/lb/v1alpha1/zz_httprouterule_types.go b/apis/lb/v1alpha1/zz_httprouterule_types.go new file mode 100755 index 0000000..d33861b --- /dev/null +++ b/apis/lb/v1alpha1/zz_httprouterule_types.go @@ -0,0 +1,147 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type HTTPRouteRuleInitParameters struct { + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + Field *string `json:"field,omitempty" tf:"field,omitempty"` + + Match *string `json:"match,omitempty" tf:"match,omitempty"` + + Negate *bool `json:"negate,omitempty" tf:"negate,omitempty"` + + Pattern *string `json:"pattern,omitempty" tf:"pattern,omitempty"` + + RouteID *string `json:"routeId,omitempty" tf:"route_id,omitempty"` + + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + SubField *string `json:"subField,omitempty" tf:"sub_field,omitempty"` +} + +type HTTPRouteRuleObservation struct { + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + Field *string `json:"field,omitempty" tf:"field,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + Match *string `json:"match,omitempty" tf:"match,omitempty"` + + Negate *bool `json:"negate,omitempty" tf:"negate,omitempty"` + + Pattern *string `json:"pattern,omitempty" tf:"pattern,omitempty"` + + RouteID *string `json:"routeId,omitempty" tf:"route_id,omitempty"` + + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + 
SubField *string `json:"subField,omitempty" tf:"sub_field,omitempty"` +} + +type HTTPRouteRuleParameters struct { + + // +kubebuilder:validation:Optional + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // +kubebuilder:validation:Optional + Field *string `json:"field,omitempty" tf:"field,omitempty"` + + // +kubebuilder:validation:Optional + Match *string `json:"match,omitempty" tf:"match,omitempty"` + + // +kubebuilder:validation:Optional + Negate *bool `json:"negate,omitempty" tf:"negate,omitempty"` + + // +kubebuilder:validation:Optional + Pattern *string `json:"pattern,omitempty" tf:"pattern,omitempty"` + + // +kubebuilder:validation:Optional + RouteID *string `json:"routeId,omitempty" tf:"route_id,omitempty"` + + // +kubebuilder:validation:Optional + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // +kubebuilder:validation:Optional + SubField *string `json:"subField,omitempty" tf:"sub_field,omitempty"` +} + +// HTTPRouteRuleSpec defines the desired state of HTTPRouteRule +type HTTPRouteRuleSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider HTTPRouteRuleParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider HTTPRouteRuleInitParameters `json:"initProvider,omitempty"` +} + +// HTTPRouteRuleStatus defines the observed state of HTTPRouteRule. +type HTTPRouteRuleStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider HTTPRouteRuleObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true + +// HTTPRouteRule is the Schema for the HTTPRouteRules API. +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,ovh} +type HTTPRouteRule struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.field) || (has(self.initProvider) && has(self.initProvider.field))",message="spec.forProvider.field is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.match) || (has(self.initProvider) && has(self.initProvider.match))",message="spec.forProvider.match is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.routeId) || (has(self.initProvider) && has(self.initProvider.routeId))",message="spec.forProvider.routeId is a required parameter" + // 
+kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.serviceName) || (has(self.initProvider) && has(self.initProvider.serviceName))",message="spec.forProvider.serviceName is a required parameter" + Spec HTTPRouteRuleSpec `json:"spec"` + Status HTTPRouteRuleStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// HTTPRouteRuleList contains a list of HTTPRouteRules +type HTTPRouteRuleList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []HTTPRouteRule `json:"items"` +} + +// Repository type metadata. +var ( + HTTPRouteRule_Kind = "HTTPRouteRule" + HTTPRouteRule_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: HTTPRouteRule_Kind}.String() + HTTPRouteRule_KindAPIVersion = HTTPRouteRule_Kind + "." + CRDGroupVersion.String() + HTTPRouteRule_GroupVersionKind = CRDGroupVersion.WithKind(HTTPRouteRule_Kind) +) + +func init() { + SchemeBuilder.Register(&HTTPRouteRule{}, &HTTPRouteRuleList{}) +} diff --git a/apis/lb/v1alpha1/zz_iploadbalancing_types.go b/apis/lb/v1alpha1/zz_iploadbalancing_types.go new file mode 100755 index 0000000..0c985ee --- /dev/null +++ b/apis/lb/v1alpha1/zz_iploadbalancing_types.go @@ -0,0 +1,406 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ConfigurationInitParameters struct { + + // Identifier of the resource + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // Path to the resource in API.OVH.COM + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ConfigurationObservation struct { + + // Identifier of the resource + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // Path to the resource in API.OVH.COM + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ConfigurationParameters struct { + + // Identifier of the resource + // +kubebuilder:validation:Optional + Label *string `json:"label" tf:"label,omitempty"` + + // Path to the resource in API.OVH.COM + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type DetailsInitParameters struct { +} + +type DetailsObservation struct { + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + Domain *string `json:"domain,omitempty" tf:"domain,omitempty"` + + OrderDetailID *float64 `json:"orderDetailId,omitempty" tf:"order_detail_id,omitempty"` + + Quantity *string `json:"quantity,omitempty" tf:"quantity,omitempty"` +} + +type DetailsParameters struct { +} + +type IploadbalancingInitParameters struct { + + // Set the name displayed in ManagerV6 for your iplb (max 50 chars) + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // Ovh Subsidiary + OvhSubsidiary *string `json:"ovhSubsidiary,omitempty" tf:"ovh_subsidiary,omitempty"` + + // Ovh payment mode + PaymentMean *string `json:"paymentMean,omitempty" tf:"payment_mean,omitempty"` + + // Product Plan to order + Plan []PlanInitParameters `json:"plan,omitempty" tf:"plan,omitempty"` + + // Product Plan to order + PlanOption 
[]PlanOptionInitParameters `json:"planOption,omitempty" tf:"plan_option,omitempty"` + + // Modern oldest compatible clients : Firefox 27, Chrome 30, IE 11 on Windows 7, Edge, Opera 17, Safari 9, Android 5.0, and Java 8. Intermediate oldest compatible clients : Firefox 1, Chrome 1, IE 7, Opera 5, Safari 1, Windows XP IE8, Android 2.3, Java 7. Intermediate if null. + SSLConfiguration *string `json:"sslConfiguration,omitempty" tf:"ssl_configuration,omitempty"` +} + +type IploadbalancingObservation struct { + + // Set the name displayed in ManagerV6 for your iplb (max 50 chars) + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Your IP load balancing + IPLoadbalancing *string `json:"ipLoadbalancing,omitempty" tf:"ip_loadbalancing,omitempty"` + + // The IPV4 associated to your IP load balancing + IPv4 *string `json:"ipv4,omitempty" tf:"ipv4,omitempty"` + + // The IPV6 associated to your IP load balancing. DEPRECATED. 
+ IPv6 *string `json:"ipv6,omitempty" tf:"ipv6,omitempty"` + + // The offer of your IP load balancing + Offer *string `json:"offer,omitempty" tf:"offer,omitempty"` + + // Details about an Order + Order []OrderObservation `json:"order,omitempty" tf:"order,omitempty"` + + // Available additional zone for your Load Balancer + OrderableZone []OrderableZoneObservation `json:"orderableZone,omitempty" tf:"orderable_zone,omitempty"` + + // Ovh Subsidiary + OvhSubsidiary *string `json:"ovhSubsidiary,omitempty" tf:"ovh_subsidiary,omitempty"` + + // Ovh payment mode + PaymentMean *string `json:"paymentMean,omitempty" tf:"payment_mean,omitempty"` + + // Product Plan to order + Plan []PlanObservation `json:"plan,omitempty" tf:"plan,omitempty"` + + // Product Plan to order + PlanOption []PlanOptionObservation `json:"planOption,omitempty" tf:"plan_option,omitempty"` + + // Modern oldest compatible clients : Firefox 27, Chrome 30, IE 11 on Windows 7, Edge, Opera 17, Safari 9, Android 5.0, and Java 8. Intermediate oldest compatible clients : Firefox 1, Chrome 1, IE 7, Opera 5, Safari 1, Windows XP IE8, Android 2.3, Java 7. Intermediate if null. 
+ SSLConfiguration *string `json:"sslConfiguration,omitempty" tf:"ssl_configuration,omitempty"` + + // The internal name of your IP load balancing + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // Current state of your IP + State *string `json:"state,omitempty" tf:"state,omitempty"` + + Urn *string `json:"urn,omitempty" tf:"urn,omitempty"` + + // Vrack eligibility + VrackEligibility *bool `json:"vrackEligibility,omitempty" tf:"vrack_eligibility,omitempty"` + + // Name of the vRack on which the current Load Balancer is attached to, as it is named on vRack product + VrackName *string `json:"vrackName,omitempty" tf:"vrack_name,omitempty"` + + // Location where your service is + Zone []*string `json:"zone,omitempty" tf:"zone,omitempty"` +} + +type IploadbalancingParameters struct { + + // Set the name displayed in ManagerV6 for your iplb (max 50 chars) + // +kubebuilder:validation:Optional + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // Ovh Subsidiary + // +kubebuilder:validation:Optional + OvhSubsidiary *string `json:"ovhSubsidiary,omitempty" tf:"ovh_subsidiary,omitempty"` + + // Ovh payment mode + // +kubebuilder:validation:Optional + PaymentMean *string `json:"paymentMean,omitempty" tf:"payment_mean,omitempty"` + + // Product Plan to order + // +kubebuilder:validation:Optional + Plan []PlanParameters `json:"plan,omitempty" tf:"plan,omitempty"` + + // Product Plan to order + // +kubebuilder:validation:Optional + PlanOption []PlanOptionParameters `json:"planOption,omitempty" tf:"plan_option,omitempty"` + + // Modern oldest compatible clients : Firefox 27, Chrome 30, IE 11 on Windows 7, Edge, Opera 17, Safari 9, Android 5.0, and Java 8. Intermediate oldest compatible clients : Firefox 1, Chrome 1, IE 7, Opera 5, Safari 1, Windows XP IE8, Android 2.3, Java 7. Intermediate if null. 
+ // +kubebuilder:validation:Optional + SSLConfiguration *string `json:"sslConfiguration,omitempty" tf:"ssl_configuration,omitempty"` +} + +type OrderInitParameters struct { +} + +type OrderObservation struct { + Date *string `json:"date,omitempty" tf:"date,omitempty"` + + Details []DetailsObservation `json:"details,omitempty" tf:"details,omitempty"` + + ExpirationDate *string `json:"expirationDate,omitempty" tf:"expiration_date,omitempty"` + + OrderID *float64 `json:"orderId,omitempty" tf:"order_id,omitempty"` +} + +type OrderParameters struct { +} + +type OrderableZoneInitParameters struct { +} + +type OrderableZoneObservation struct { + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + PlanCode *string `json:"planCode,omitempty" tf:"plan_code,omitempty"` +} + +type OrderableZoneParameters struct { +} + +type PlanInitParameters struct { + + // Catalog name + CatalogName *string `json:"catalogName,omitempty" tf:"catalog_name,omitempty"` + + // Representation of a configuration item for personalizing product + Configuration []ConfigurationInitParameters `json:"configuration,omitempty" tf:"configuration,omitempty"` + + // duration + Duration *string `json:"duration,omitempty" tf:"duration,omitempty"` + + // Plan code + PlanCode *string `json:"planCode,omitempty" tf:"plan_code,omitempty"` + + // Pricing model identifier + PricingMode *string `json:"pricingMode,omitempty" tf:"pricing_mode,omitempty"` +} + +type PlanObservation struct { + + // Catalog name + CatalogName *string `json:"catalogName,omitempty" tf:"catalog_name,omitempty"` + + // Representation of a configuration item for personalizing product + Configuration []ConfigurationObservation `json:"configuration,omitempty" tf:"configuration,omitempty"` + + // duration + Duration *string `json:"duration,omitempty" tf:"duration,omitempty"` + + // Plan code + PlanCode *string `json:"planCode,omitempty" tf:"plan_code,omitempty"` + + // Pricing model identifier + PricingMode *string 
`json:"pricingMode,omitempty" tf:"pricing_mode,omitempty"` +} + +type PlanOptionConfigurationInitParameters struct { + + // Identifier of the resource + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // Path to the resource in API.OVH.COM + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type PlanOptionConfigurationObservation struct { + + // Identifier of the resource + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // Path to the resource in API.OVH.COM + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type PlanOptionConfigurationParameters struct { + + // Identifier of the resource + // +kubebuilder:validation:Optional + Label *string `json:"label" tf:"label,omitempty"` + + // Path to the resource in API.OVH.COM + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type PlanOptionInitParameters struct { + + // Catalog name + CatalogName *string `json:"catalogName,omitempty" tf:"catalog_name,omitempty"` + + // Representation of a configuration item for personalizing product + Configuration []PlanOptionConfigurationInitParameters `json:"configuration,omitempty" tf:"configuration,omitempty"` + + // duration + Duration *string `json:"duration,omitempty" tf:"duration,omitempty"` + + // Plan code + PlanCode *string `json:"planCode,omitempty" tf:"plan_code,omitempty"` + + // Pricing model identifier + PricingMode *string `json:"pricingMode,omitempty" tf:"pricing_mode,omitempty"` +} + +type PlanOptionObservation struct { + + // Catalog name + CatalogName *string `json:"catalogName,omitempty" tf:"catalog_name,omitempty"` + + // Representation of a configuration item for personalizing product + Configuration []PlanOptionConfigurationObservation `json:"configuration,omitempty" tf:"configuration,omitempty"` + + // duration + Duration *string `json:"duration,omitempty" tf:"duration,omitempty"` + + // Plan code + PlanCode *string `json:"planCode,omitempty" 
tf:"plan_code,omitempty"` + + // Pricing model identifier + PricingMode *string `json:"pricingMode,omitempty" tf:"pricing_mode,omitempty"` +} + +type PlanOptionParameters struct { + + // Catalog name + // +kubebuilder:validation:Optional + CatalogName *string `json:"catalogName,omitempty" tf:"catalog_name,omitempty"` + + // Representation of a configuration item for personalizing product + // +kubebuilder:validation:Optional + Configuration []PlanOptionConfigurationParameters `json:"configuration,omitempty" tf:"configuration,omitempty"` + + // duration + // +kubebuilder:validation:Optional + Duration *string `json:"duration" tf:"duration,omitempty"` + + // Plan code + // +kubebuilder:validation:Optional + PlanCode *string `json:"planCode" tf:"plan_code,omitempty"` + + // Pricing model identifier + // +kubebuilder:validation:Optional + PricingMode *string `json:"pricingMode" tf:"pricing_mode,omitempty"` +} + +type PlanParameters struct { + + // Catalog name + // +kubebuilder:validation:Optional + CatalogName *string `json:"catalogName,omitempty" tf:"catalog_name,omitempty"` + + // Representation of a configuration item for personalizing product + // +kubebuilder:validation:Optional + Configuration []ConfigurationParameters `json:"configuration,omitempty" tf:"configuration,omitempty"` + + // duration + // +kubebuilder:validation:Optional + Duration *string `json:"duration" tf:"duration,omitempty"` + + // Plan code + // +kubebuilder:validation:Optional + PlanCode *string `json:"planCode" tf:"plan_code,omitempty"` + + // Pricing model identifier + // +kubebuilder:validation:Optional + PricingMode *string `json:"pricingMode" tf:"pricing_mode,omitempty"` +} + +// IploadbalancingSpec defines the desired state of Iploadbalancing +type IploadbalancingSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider IploadbalancingParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. 
+ // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider IploadbalancingInitParameters `json:"initProvider,omitempty"` +} + +// IploadbalancingStatus defines the observed state of Iploadbalancing. +type IploadbalancingStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider IploadbalancingObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true + +// Iploadbalancing is the Schema for the Iploadbalancings API. +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,ovh} +type Iploadbalancing struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.ovhSubsidiary) || (has(self.initProvider) && has(self.initProvider.ovhSubsidiary))",message="spec.forProvider.ovhSubsidiary is a required parameter" + // 
+kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.plan) || (has(self.initProvider) && has(self.initProvider.plan))",message="spec.forProvider.plan is a required parameter" + Spec IploadbalancingSpec `json:"spec"` + Status IploadbalancingStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// IploadbalancingList contains a list of Iploadbalancings +type IploadbalancingList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Iploadbalancing `json:"items"` +} + +// Repository type metadata. +var ( + Iploadbalancing_Kind = "Iploadbalancing" + Iploadbalancing_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Iploadbalancing_Kind}.String() + Iploadbalancing_KindAPIVersion = Iploadbalancing_Kind + "." + CRDGroupVersion.String() + Iploadbalancing_GroupVersionKind = CRDGroupVersion.WithKind(Iploadbalancing_Kind) +) + +func init() { + SchemeBuilder.Register(&Iploadbalancing{}, &IploadbalancingList{}) +} diff --git a/apis/lb/v1alpha1/zz_refresh_types.go b/apis/lb/v1alpha1/zz_refresh_types.go new file mode 100755 index 0000000..fc9eb13 --- /dev/null +++ b/apis/lb/v1alpha1/zz_refresh_types.go @@ -0,0 +1,103 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type RefreshInitParameters struct { + Keepers []*string `json:"keepers,omitempty" tf:"keepers,omitempty"` + + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` +} + +type RefreshObservation struct { + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + Keepers []*string `json:"keepers,omitempty" tf:"keepers,omitempty"` + + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` +} + +type RefreshParameters struct { + + // +kubebuilder:validation:Optional + Keepers []*string `json:"keepers,omitempty" tf:"keepers,omitempty"` + + // +kubebuilder:validation:Optional + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` +} + +// RefreshSpec defines the desired state of Refresh +type RefreshSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider RefreshParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider RefreshInitParameters `json:"initProvider,omitempty"` +} + +// RefreshStatus defines the observed state of Refresh. 
+type RefreshStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider RefreshObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true + +// Refresh is the Schema for the Refreshs API. +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,ovh} +type Refresh struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.keepers) || (has(self.initProvider) && has(self.initProvider.keepers))",message="spec.forProvider.keepers is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.serviceName) || (has(self.initProvider) && has(self.initProvider.serviceName))",message="spec.forProvider.serviceName is a required parameter" + Spec RefreshSpec `json:"spec"` + Status RefreshStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// RefreshList contains a list of Refreshs +type RefreshList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Refresh `json:"items"` +} + +// Repository type metadata. 
+var ( + Refresh_Kind = "Refresh" + Refresh_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Refresh_Kind}.String() + Refresh_KindAPIVersion = Refresh_Kind + "." + CRDGroupVersion.String() + Refresh_GroupVersionKind = CRDGroupVersion.WithKind(Refresh_Kind) +) + +func init() { + SchemeBuilder.Register(&Refresh{}, &RefreshList{}) +} diff --git a/apis/lb/v1alpha1/zz_tcpfarm_types.go b/apis/lb/v1alpha1/zz_tcpfarm_types.go new file mode 100755 index 0000000..9814c09 --- /dev/null +++ b/apis/lb/v1alpha1/zz_tcpfarm_types.go @@ -0,0 +1,215 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type TCPFarmInitParameters struct { + Balance *string `json:"balance,omitempty" tf:"balance,omitempty"` + + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + Probe []TCPFarmProbeInitParameters `json:"probe,omitempty" tf:"probe,omitempty"` + + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + Stickiness *string `json:"stickiness,omitempty" tf:"stickiness,omitempty"` + + VrackNetworkID *float64 `json:"vrackNetworkId,omitempty" tf:"vrack_network_id,omitempty"` + + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` +} + +type TCPFarmObservation struct { + Balance *string `json:"balance,omitempty" tf:"balance,omitempty"` + + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + Probe []TCPFarmProbeObservation `json:"probe,omitempty" tf:"probe,omitempty"` + + ServiceName *string 
`json:"serviceName,omitempty" tf:"service_name,omitempty"` + + Stickiness *string `json:"stickiness,omitempty" tf:"stickiness,omitempty"` + + VrackNetworkID *float64 `json:"vrackNetworkId,omitempty" tf:"vrack_network_id,omitempty"` + + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` +} + +type TCPFarmParameters struct { + + // +kubebuilder:validation:Optional + Balance *string `json:"balance,omitempty" tf:"balance,omitempty"` + + // +kubebuilder:validation:Optional + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // +kubebuilder:validation:Optional + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // +kubebuilder:validation:Optional + Probe []TCPFarmProbeParameters `json:"probe,omitempty" tf:"probe,omitempty"` + + // +kubebuilder:validation:Optional + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // +kubebuilder:validation:Optional + Stickiness *string `json:"stickiness,omitempty" tf:"stickiness,omitempty"` + + // +kubebuilder:validation:Optional + VrackNetworkID *float64 `json:"vrackNetworkId,omitempty" tf:"vrack_network_id,omitempty"` + + // +kubebuilder:validation:Optional + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` +} + +type TCPFarmProbeInitParameters struct { + ForceSSL *bool `json:"forceSsl,omitempty" tf:"force_ssl,omitempty"` + + Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` + + Match *string `json:"match,omitempty" tf:"match,omitempty"` + + Method *string `json:"method,omitempty" tf:"method,omitempty"` + + Negate *bool `json:"negate,omitempty" tf:"negate,omitempty"` + + Pattern *string `json:"pattern,omitempty" tf:"pattern,omitempty"` + + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +type TCPFarmProbeObservation struct { + ForceSSL *bool `json:"forceSsl,omitempty" tf:"force_ssl,omitempty"` 
+ + Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` + + Match *string `json:"match,omitempty" tf:"match,omitempty"` + + Method *string `json:"method,omitempty" tf:"method,omitempty"` + + Negate *bool `json:"negate,omitempty" tf:"negate,omitempty"` + + Pattern *string `json:"pattern,omitempty" tf:"pattern,omitempty"` + + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +type TCPFarmProbeParameters struct { + + // +kubebuilder:validation:Optional + ForceSSL *bool `json:"forceSsl,omitempty" tf:"force_ssl,omitempty"` + + // +kubebuilder:validation:Optional + Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` + + // +kubebuilder:validation:Optional + Match *string `json:"match,omitempty" tf:"match,omitempty"` + + // +kubebuilder:validation:Optional + Method *string `json:"method,omitempty" tf:"method,omitempty"` + + // +kubebuilder:validation:Optional + Negate *bool `json:"negate,omitempty" tf:"negate,omitempty"` + + // +kubebuilder:validation:Optional + Pattern *string `json:"pattern,omitempty" tf:"pattern,omitempty"` + + // +kubebuilder:validation:Optional + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` + + // +kubebuilder:validation:Optional + URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +// TCPFarmSpec defines the desired state of TCPFarm +type TCPFarmSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider TCPFarmParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. 
+ // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider TCPFarmInitParameters `json:"initProvider,omitempty"` +} + +// TCPFarmStatus defines the observed state of TCPFarm. +type TCPFarmStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider TCPFarmObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true + +// TCPFarm is the Schema for the TCPFarms API. +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,ovh} +type TCPFarm struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.serviceName) || (has(self.initProvider) && has(self.initProvider.serviceName))",message="spec.forProvider.serviceName is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.zone) || (has(self.initProvider) && has(self.initProvider.zone))",message="spec.forProvider.zone is a required parameter" + Spec TCPFarmSpec 
`json:"spec"` + Status TCPFarmStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// TCPFarmList contains a list of TCPFarms +type TCPFarmList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []TCPFarm `json:"items"` +} + +// Repository type metadata. +var ( + TCPFarm_Kind = "TCPFarm" + TCPFarm_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: TCPFarm_Kind}.String() + TCPFarm_KindAPIVersion = TCPFarm_Kind + "." + CRDGroupVersion.String() + TCPFarm_GroupVersionKind = CRDGroupVersion.WithKind(TCPFarm_Kind) +) + +func init() { + SchemeBuilder.Register(&TCPFarm{}, &TCPFarmList{}) +} diff --git a/apis/lb/v1alpha1/zz_tcpfarmserver_types.go b/apis/lb/v1alpha1/zz_tcpfarmserver_types.go new file mode 100755 index 0000000..97a8cdd --- /dev/null +++ b/apis/lb/v1alpha1/zz_tcpfarmserver_types.go @@ -0,0 +1,182 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type TCPFarmServerInitParameters struct { + Address *string `json:"address,omitempty" tf:"address,omitempty"` + + Backup *bool `json:"backup,omitempty" tf:"backup,omitempty"` + + Chain *string `json:"chain,omitempty" tf:"chain,omitempty"` + + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + FarmID *float64 `json:"farmId,omitempty" tf:"farm_id,omitempty"` + + OnMarkedDown *string `json:"onMarkedDown,omitempty" tf:"on_marked_down,omitempty"` + + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + Probe *bool `json:"probe,omitempty" tf:"probe,omitempty"` + + ProxyProtocolVersion *string `json:"proxyProtocolVersion,omitempty" tf:"proxy_protocol_version,omitempty"` + + SSL *bool `json:"ssl,omitempty" tf:"ssl,omitempty"` + + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` +} + +type TCPFarmServerObservation struct { + Address *string `json:"address,omitempty" tf:"address,omitempty"` + + Backup *bool `json:"backup,omitempty" tf:"backup,omitempty"` + + Chain *string `json:"chain,omitempty" tf:"chain,omitempty"` + + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + FarmID *float64 `json:"farmId,omitempty" tf:"farm_id,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + OnMarkedDown *string `json:"onMarkedDown,omitempty" tf:"on_marked_down,omitempty"` + + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + Probe *bool `json:"probe,omitempty" tf:"probe,omitempty"` + + ProxyProtocolVersion *string `json:"proxyProtocolVersion,omitempty" tf:"proxy_protocol_version,omitempty"` + + SSL *bool `json:"ssl,omitempty" 
tf:"ssl,omitempty"` + + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` +} + +type TCPFarmServerParameters struct { + + // +kubebuilder:validation:Optional + Address *string `json:"address,omitempty" tf:"address,omitempty"` + + // +kubebuilder:validation:Optional + Backup *bool `json:"backup,omitempty" tf:"backup,omitempty"` + + // +kubebuilder:validation:Optional + Chain *string `json:"chain,omitempty" tf:"chain,omitempty"` + + // +kubebuilder:validation:Optional + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // +kubebuilder:validation:Optional + FarmID *float64 `json:"farmId,omitempty" tf:"farm_id,omitempty"` + + // +kubebuilder:validation:Optional + OnMarkedDown *string `json:"onMarkedDown,omitempty" tf:"on_marked_down,omitempty"` + + // +kubebuilder:validation:Optional + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // +kubebuilder:validation:Optional + Probe *bool `json:"probe,omitempty" tf:"probe,omitempty"` + + // +kubebuilder:validation:Optional + ProxyProtocolVersion *string `json:"proxyProtocolVersion,omitempty" tf:"proxy_protocol_version,omitempty"` + + // +kubebuilder:validation:Optional + SSL *bool `json:"ssl,omitempty" tf:"ssl,omitempty"` + + // +kubebuilder:validation:Optional + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // +kubebuilder:validation:Optional + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // +kubebuilder:validation:Optional + Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` +} + +// TCPFarmServerSpec defines the desired state of TCPFarmServer +type TCPFarmServerSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider TCPFarmServerParameters `json:"forProvider"` + // THIS IS A BETA FIELD. 
It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider TCPFarmServerInitParameters `json:"initProvider,omitempty"` +} + +// TCPFarmServerStatus defines the observed state of TCPFarmServer. +type TCPFarmServerStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider TCPFarmServerObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true + +// TCPFarmServer is the Schema for the TCPFarmServers API. 
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,ovh} +type TCPFarmServer struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.address) || (has(self.initProvider) && has(self.initProvider.address))",message="spec.forProvider.address is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.farmId) || (has(self.initProvider) && has(self.initProvider.farmId))",message="spec.forProvider.farmId is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.serviceName) || (has(self.initProvider) && has(self.initProvider.serviceName))",message="spec.forProvider.serviceName is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.status) || (has(self.initProvider) && has(self.initProvider.status))",message="spec.forProvider.status is a required parameter" + Spec TCPFarmServerSpec `json:"spec"` + Status TCPFarmServerStatus 
`json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// TCPFarmServerList contains a list of TCPFarmServers +type TCPFarmServerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []TCPFarmServer `json:"items"` +} + +// Repository type metadata. +var ( + TCPFarmServer_Kind = "TCPFarmServer" + TCPFarmServer_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: TCPFarmServer_Kind}.String() + TCPFarmServer_KindAPIVersion = TCPFarmServer_Kind + "." + CRDGroupVersion.String() + TCPFarmServer_GroupVersionKind = CRDGroupVersion.WithKind(TCPFarmServer_Kind) +) + +func init() { + SchemeBuilder.Register(&TCPFarmServer{}, &TCPFarmServerList{}) +} diff --git a/apis/lb/v1alpha1/zz_tcpfrontend_types.go b/apis/lb/v1alpha1/zz_tcpfrontend_types.go new file mode 100755 index 0000000..eea84b2 --- /dev/null +++ b/apis/lb/v1alpha1/zz_tcpfrontend_types.go @@ -0,0 +1,160 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type TCPFrontendInitParameters struct { + AllowedSource []*string `json:"allowedSource,omitempty" tf:"allowed_source,omitempty"` + + DedicatedIpfo []*string `json:"dedicatedIpfo,omitempty" tf:"dedicated_ipfo,omitempty"` + + DefaultFarmID *float64 `json:"defaultFarmId,omitempty" tf:"default_farm_id,omitempty"` + + DefaultSSLID *float64 `json:"defaultSslId,omitempty" tf:"default_ssl_id,omitempty"` + + Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"` + + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + Port *string `json:"port,omitempty" tf:"port,omitempty"` + + SSL *bool `json:"ssl,omitempty" tf:"ssl,omitempty"` + + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` +} + +type TCPFrontendObservation struct { + AllowedSource []*string `json:"allowedSource,omitempty" tf:"allowed_source,omitempty"` + + DedicatedIpfo []*string `json:"dedicatedIpfo,omitempty" tf:"dedicated_ipfo,omitempty"` + + DefaultFarmID *float64 `json:"defaultFarmId,omitempty" tf:"default_farm_id,omitempty"` + + DefaultSSLID *float64 `json:"defaultSslId,omitempty" tf:"default_ssl_id,omitempty"` + + Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"` + + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + Port *string `json:"port,omitempty" tf:"port,omitempty"` + + SSL *bool `json:"ssl,omitempty" tf:"ssl,omitempty"` + + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` +} + +type TCPFrontendParameters struct { + + // +kubebuilder:validation:Optional + AllowedSource []*string 
`json:"allowedSource,omitempty" tf:"allowed_source,omitempty"` + + // +kubebuilder:validation:Optional + DedicatedIpfo []*string `json:"dedicatedIpfo,omitempty" tf:"dedicated_ipfo,omitempty"` + + // +kubebuilder:validation:Optional + DefaultFarmID *float64 `json:"defaultFarmId,omitempty" tf:"default_farm_id,omitempty"` + + // +kubebuilder:validation:Optional + DefaultSSLID *float64 `json:"defaultSslId,omitempty" tf:"default_ssl_id,omitempty"` + + // +kubebuilder:validation:Optional + Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"` + + // +kubebuilder:validation:Optional + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // +kubebuilder:validation:Optional + Port *string `json:"port,omitempty" tf:"port,omitempty"` + + // +kubebuilder:validation:Optional + SSL *bool `json:"ssl,omitempty" tf:"ssl,omitempty"` + + // +kubebuilder:validation:Optional + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // +kubebuilder:validation:Optional + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` +} + +// TCPFrontendSpec defines the desired state of TCPFrontend +type TCPFrontendSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider TCPFrontendParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider TCPFrontendInitParameters `json:"initProvider,omitempty"` +} + +// TCPFrontendStatus defines the observed state of TCPFrontend. +type TCPFrontendStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider TCPFrontendObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true + +// TCPFrontend is the Schema for the TCPFrontends API. +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,ovh} +type TCPFrontend struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.port) || (has(self.initProvider) && has(self.initProvider.port))",message="spec.forProvider.port is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.serviceName) || (has(self.initProvider) && has(self.initProvider.serviceName))",message="spec.forProvider.serviceName is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.zone) || (has(self.initProvider) && has(self.initProvider.zone))",message="spec.forProvider.zone is a required parameter" + Spec 
TCPFrontendSpec `json:"spec"` + Status TCPFrontendStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// TCPFrontendList contains a list of TCPFrontends +type TCPFrontendList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []TCPFrontend `json:"items"` +} + +// Repository type metadata. +var ( + TCPFrontend_Kind = "TCPFrontend" + TCPFrontend_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: TCPFrontend_Kind}.String() + TCPFrontend_KindAPIVersion = TCPFrontend_Kind + "." + CRDGroupVersion.String() + TCPFrontend_GroupVersionKind = CRDGroupVersion.WithKind(TCPFrontend_Kind) +) + +func init() { + SchemeBuilder.Register(&TCPFrontend{}, &TCPFrontendList{}) +} diff --git a/apis/lb/v1alpha1/zz_tcproute_types.go b/apis/lb/v1alpha1/zz_tcproute_types.go new file mode 100755 index 0000000..16e08dd --- /dev/null +++ b/apis/lb/v1alpha1/zz_tcproute_types.go @@ -0,0 +1,196 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type TCPRouteActionInitParameters struct { + + // Farm ID for "farm" action type, empty for others + Target *string `json:"target,omitempty" tf:"target,omitempty"` + + // Action to trigger if all the rules of this route matches + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type TCPRouteActionObservation struct { + + // Farm ID for "farm" action type, empty for others + Target *string `json:"target,omitempty" tf:"target,omitempty"` + + // Action to trigger if all the rules of this route matches + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type TCPRouteActionParameters struct { + + // Farm ID for "farm" action type, empty for others + // +kubebuilder:validation:Optional + Target *string `json:"target,omitempty" tf:"target,omitempty"` + + // Action to trigger if all the rules of this route matches + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type TCPRouteInitParameters struct { + + // Action triggered when all rules match + Action []TCPRouteActionInitParameters `json:"action,omitempty" tf:"action,omitempty"` + + // Human readable name for your route, this field is for you + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // Route traffic for this frontend + FrontendID *float64 `json:"frontendId,omitempty" tf:"frontend_id,omitempty"` + + // The internal name of your IP load balancing + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // Route priority ([0..255]). 0 if null. Highest priority routes are evaluated last. 
Only the first matching route will trigger an action + Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` +} + +type TCPRouteObservation struct { + + // Action triggered when all rules match + Action []TCPRouteActionObservation `json:"action,omitempty" tf:"action,omitempty"` + + // Human readable name for your route, this field is for you + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // Route traffic for this frontend + FrontendID *float64 `json:"frontendId,omitempty" tf:"frontend_id,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // List of rules to match to trigger action + Rules []TCPRouteRulesObservation `json:"rules,omitempty" tf:"rules,omitempty"` + + // The internal name of your IP load balancing + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // Route status. Routes in "ok" state are ready to operate + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // Route priority ([0..255]). 0 if null. Highest priority routes are evaluated last. Only the first matching route will trigger an action + Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` +} + +type TCPRouteParameters struct { + + // Action triggered when all rules match + // +kubebuilder:validation:Optional + Action []TCPRouteActionParameters `json:"action,omitempty" tf:"action,omitempty"` + + // Human readable name for your route, this field is for you + // +kubebuilder:validation:Optional + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // Route traffic for this frontend + // +kubebuilder:validation:Optional + FrontendID *float64 `json:"frontendId,omitempty" tf:"frontend_id,omitempty"` + + // The internal name of your IP load balancing + // +kubebuilder:validation:Optional + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // Route priority ([0..255]). 0 if null. 
Highest priority routes are evaluated last. Only the first matching route will trigger an action + // +kubebuilder:validation:Optional + Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` +} + +type TCPRouteRulesInitParameters struct { +} + +type TCPRouteRulesObservation struct { + Field *string `json:"field,omitempty" tf:"field,omitempty"` + + Match *string `json:"match,omitempty" tf:"match,omitempty"` + + Negate *bool `json:"negate,omitempty" tf:"negate,omitempty"` + + Pattern *string `json:"pattern,omitempty" tf:"pattern,omitempty"` + + RuleID *float64 `json:"ruleId,omitempty" tf:"rule_id,omitempty"` + + SubField *string `json:"subField,omitempty" tf:"sub_field,omitempty"` +} + +type TCPRouteRulesParameters struct { +} + +// TCPRouteSpec defines the desired state of TCPRoute +type TCPRouteSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider TCPRouteParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider TCPRouteInitParameters `json:"initProvider,omitempty"` +} + +// TCPRouteStatus defines the observed state of TCPRoute. +type TCPRouteStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider TCPRouteObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true + +// TCPRoute is the Schema for the TCPRoutes API. 
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,ovh} +type TCPRoute struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.action) || (has(self.initProvider) && has(self.initProvider.action))",message="spec.forProvider.action is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.serviceName) || (has(self.initProvider) && has(self.initProvider.serviceName))",message="spec.forProvider.serviceName is a required parameter" + Spec TCPRouteSpec `json:"spec"` + Status TCPRouteStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// TCPRouteList contains a list of TCPRoutes +type TCPRouteList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []TCPRoute `json:"items"` +} + +// Repository type metadata. +var ( + TCPRoute_Kind = "TCPRoute" + TCPRoute_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: TCPRoute_Kind}.String() + TCPRoute_KindAPIVersion = TCPRoute_Kind + "." 
+ CRDGroupVersion.String() + TCPRoute_GroupVersionKind = CRDGroupVersion.WithKind(TCPRoute_Kind) +) + +func init() { + SchemeBuilder.Register(&TCPRoute{}, &TCPRouteList{}) +} diff --git a/apis/lb/v1alpha1/zz_tcprouterule_types.go b/apis/lb/v1alpha1/zz_tcprouterule_types.go new file mode 100755 index 0000000..0cc2fa0 --- /dev/null +++ b/apis/lb/v1alpha1/zz_tcprouterule_types.go @@ -0,0 +1,147 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type TCPRouteRuleInitParameters struct { + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + Field *string `json:"field,omitempty" tf:"field,omitempty"` + + Match *string `json:"match,omitempty" tf:"match,omitempty"` + + Negate *bool `json:"negate,omitempty" tf:"negate,omitempty"` + + Pattern *string `json:"pattern,omitempty" tf:"pattern,omitempty"` + + RouteID *string `json:"routeId,omitempty" tf:"route_id,omitempty"` + + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + SubField *string `json:"subField,omitempty" tf:"sub_field,omitempty"` +} + +type TCPRouteRuleObservation struct { + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + Field *string `json:"field,omitempty" tf:"field,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + Match *string `json:"match,omitempty" tf:"match,omitempty"` + + Negate *bool `json:"negate,omitempty" tf:"negate,omitempty"` + + Pattern *string `json:"pattern,omitempty" tf:"pattern,omitempty"` + + RouteID *string `json:"routeId,omitempty" tf:"route_id,omitempty"` + + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + SubField 
*string `json:"subField,omitempty" tf:"sub_field,omitempty"` +} + +type TCPRouteRuleParameters struct { + + // +kubebuilder:validation:Optional + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // +kubebuilder:validation:Optional + Field *string `json:"field,omitempty" tf:"field,omitempty"` + + // +kubebuilder:validation:Optional + Match *string `json:"match,omitempty" tf:"match,omitempty"` + + // +kubebuilder:validation:Optional + Negate *bool `json:"negate,omitempty" tf:"negate,omitempty"` + + // +kubebuilder:validation:Optional + Pattern *string `json:"pattern,omitempty" tf:"pattern,omitempty"` + + // +kubebuilder:validation:Optional + RouteID *string `json:"routeId,omitempty" tf:"route_id,omitempty"` + + // +kubebuilder:validation:Optional + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // +kubebuilder:validation:Optional + SubField *string `json:"subField,omitempty" tf:"sub_field,omitempty"` +} + +// TCPRouteRuleSpec defines the desired state of TCPRouteRule +type TCPRouteRuleSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider TCPRouteRuleParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider TCPRouteRuleInitParameters `json:"initProvider,omitempty"` +} + +// TCPRouteRuleStatus defines the observed state of TCPRouteRule. 
+type TCPRouteRuleStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider TCPRouteRuleObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true + +// TCPRouteRule is the Schema for the TCPRouteRules API. +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,ovh} +type TCPRouteRule struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.field) || (has(self.initProvider) && has(self.initProvider.field))",message="spec.forProvider.field is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.match) || (has(self.initProvider) && has(self.initProvider.match))",message="spec.forProvider.match is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.routeId) || (has(self.initProvider) && has(self.initProvider.routeId))",message="spec.forProvider.routeId is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || 
has(self.forProvider.serviceName) || (has(self.initProvider) && has(self.initProvider.serviceName))",message="spec.forProvider.serviceName is a required parameter" + Spec TCPRouteRuleSpec `json:"spec"` + Status TCPRouteRuleStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// TCPRouteRuleList contains a list of TCPRouteRules +type TCPRouteRuleList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []TCPRouteRule `json:"items"` +} + +// Repository type metadata. +var ( + TCPRouteRule_Kind = "TCPRouteRule" + TCPRouteRule_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: TCPRouteRule_Kind}.String() + TCPRouteRule_KindAPIVersion = TCPRouteRule_Kind + "." + CRDGroupVersion.String() + TCPRouteRule_GroupVersionKind = CRDGroupVersion.WithKind(TCPRouteRule_Kind) +) + +func init() { + SchemeBuilder.Register(&TCPRouteRule{}, &TCPRouteRuleList{}) +} diff --git a/apis/lb/v1alpha1/zz_vracknetwork_types.go b/apis/lb/v1alpha1/zz_vracknetwork_types.go new file mode 100755 index 0000000..723be40 --- /dev/null +++ b/apis/lb/v1alpha1/zz_vracknetwork_types.go @@ -0,0 +1,155 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type VrackNetworkInitParameters struct { + + // Human readable name for your vrack network + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // This attribute is there for documentation purpose only and isnt passed to the OVH API as it may conflicts with http/tcp farms `vrack_network_id` attribute + FarmID []*float64 `json:"farmId,omitempty" tf:"farm_id,omitempty"` + + // An IP block used as a pool of IPs by this Load Balancer to connect to the servers in this private network. The blck must be in the private network and reserved for the Load Balancer + NATIP *string `json:"natIp,omitempty" tf:"nat_ip,omitempty"` + + // The internal name of your IPloadbalancer + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // IP block of the private network in the vRack + Subnet *string `json:"subnet,omitempty" tf:"subnet,omitempty"` + + // VLAN of the private network in the vRack. 0 if the private network is not in a VLAN + Vlan *float64 `json:"vlan,omitempty" tf:"vlan,omitempty"` +} + +type VrackNetworkObservation struct { + + // Human readable name for your vrack network + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // This attribute is there for documentation purpose only and isnt passed to the OVH API as it may conflicts with http/tcp farms `vrack_network_id` attribute + FarmID []*float64 `json:"farmId,omitempty" tf:"farm_id,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An IP block used as a pool of IPs by this Load Balancer to connect to the servers in this private network. 
The blck must be in the private network and reserved for the Load Balancer + NATIP *string `json:"natIp,omitempty" tf:"nat_ip,omitempty"` + + // The internal name of your IPloadbalancer + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // IP block of the private network in the vRack + Subnet *string `json:"subnet,omitempty" tf:"subnet,omitempty"` + + // VLAN of the private network in the vRack. 0 if the private network is not in a VLAN + Vlan *float64 `json:"vlan,omitempty" tf:"vlan,omitempty"` + + // Internal Load Balancer identifier of the vRack private network + VrackNetworkID *float64 `json:"vrackNetworkId,omitempty" tf:"vrack_network_id,omitempty"` +} + +type VrackNetworkParameters struct { + + // Human readable name for your vrack network + // +kubebuilder:validation:Optional + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // This attribute is there for documentation purpose only and isnt passed to the OVH API as it may conflicts with http/tcp farms `vrack_network_id` attribute + // +kubebuilder:validation:Optional + FarmID []*float64 `json:"farmId,omitempty" tf:"farm_id,omitempty"` + + // An IP block used as a pool of IPs by this Load Balancer to connect to the servers in this private network. The blck must be in the private network and reserved for the Load Balancer + // +kubebuilder:validation:Optional + NATIP *string `json:"natIp,omitempty" tf:"nat_ip,omitempty"` + + // The internal name of your IPloadbalancer + // +kubebuilder:validation:Optional + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // IP block of the private network in the vRack + // +kubebuilder:validation:Optional + Subnet *string `json:"subnet,omitempty" tf:"subnet,omitempty"` + + // VLAN of the private network in the vRack. 
0 if the private network is not in a VLAN + // +kubebuilder:validation:Optional + Vlan *float64 `json:"vlan,omitempty" tf:"vlan,omitempty"` +} + +// VrackNetworkSpec defines the desired state of VrackNetwork +type VrackNetworkSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider VrackNetworkParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider VrackNetworkInitParameters `json:"initProvider,omitempty"` +} + +// VrackNetworkStatus defines the observed state of VrackNetwork. +type VrackNetworkStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider VrackNetworkObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true + +// VrackNetwork is the Schema for the VrackNetworks API. 
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,ovh} +type VrackNetwork struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.natIp) || (has(self.initProvider) && has(self.initProvider.natIp))",message="spec.forProvider.natIp is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.serviceName) || (has(self.initProvider) && has(self.initProvider.serviceName))",message="spec.forProvider.serviceName is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.subnet) || (has(self.initProvider) && has(self.initProvider.subnet))",message="spec.forProvider.subnet is a required parameter" + Spec VrackNetworkSpec `json:"spec"` + Status VrackNetworkStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// VrackNetworkList contains a list of VrackNetworks +type VrackNetworkList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []VrackNetwork `json:"items"` +} + +// Repository type metadata. 
+var ( + VrackNetwork_Kind = "VrackNetwork" + VrackNetwork_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: VrackNetwork_Kind}.String() + VrackNetwork_KindAPIVersion = VrackNetwork_Kind + "." + CRDGroupVersion.String() + VrackNetwork_GroupVersionKind = CRDGroupVersion.WithKind(VrackNetwork_Kind) +) + +func init() { + SchemeBuilder.Register(&VrackNetwork{}, &VrackNetworkList{}) +} diff --git a/apis/logs/v1alpha1/zz_generated.deepcopy.go b/apis/logs/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..fdae8a8 --- /dev/null +++ b/apis/logs/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,886 @@ +//go:build !ignore_autogenerated + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigurationInitParameters) DeepCopyInto(out *ConfigurationInitParameters) { + *out = *in + if in.Flowgger != nil { + in, out := &in.Flowgger, &out.Flowgger + *out = make([]FlowggerInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Logstash != nil { + in, out := &in.Logstash, &out.Logstash + *out = make([]LogstashInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationInitParameters. +func (in *ConfigurationInitParameters) DeepCopy() *ConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfigurationObservation) DeepCopyInto(out *ConfigurationObservation) { + *out = *in + if in.Flowgger != nil { + in, out := &in.Flowgger, &out.Flowgger + *out = make([]FlowggerObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Logstash != nil { + in, out := &in.Logstash, &out.Logstash + *out = make([]LogstashObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationObservation. +func (in *ConfigurationObservation) DeepCopy() *ConfigurationObservation { + if in == nil { + return nil + } + out := new(ConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigurationParameters) DeepCopyInto(out *ConfigurationParameters) { + *out = *in + if in.Flowgger != nil { + in, out := &in.Flowgger, &out.Flowgger + *out = make([]FlowggerParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Logstash != nil { + in, out := &in.Logstash, &out.Logstash + *out = make([]LogstashParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationParameters. +func (in *ConfigurationParameters) DeepCopy() *ConfigurationParameters { + if in == nil { + return nil + } + out := new(ConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FlowggerInitParameters) DeepCopyInto(out *FlowggerInitParameters) { + *out = *in + if in.LogFormat != nil { + in, out := &in.LogFormat, &out.LogFormat + *out = new(string) + **out = **in + } + if in.LogFraming != nil { + in, out := &in.LogFraming, &out.LogFraming + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowggerInitParameters. +func (in *FlowggerInitParameters) DeepCopy() *FlowggerInitParameters { + if in == nil { + return nil + } + out := new(FlowggerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowggerObservation) DeepCopyInto(out *FlowggerObservation) { + *out = *in + if in.LogFormat != nil { + in, out := &in.LogFormat, &out.LogFormat + *out = new(string) + **out = **in + } + if in.LogFraming != nil { + in, out := &in.LogFraming, &out.LogFraming + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowggerObservation. +func (in *FlowggerObservation) DeepCopy() *FlowggerObservation { + if in == nil { + return nil + } + out := new(FlowggerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowggerParameters) DeepCopyInto(out *FlowggerParameters) { + *out = *in + if in.LogFormat != nil { + in, out := &in.LogFormat, &out.LogFormat + *out = new(string) + **out = **in + } + if in.LogFraming != nil { + in, out := &in.LogFraming, &out.LogFraming + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowggerParameters. 
+func (in *FlowggerParameters) DeepCopy() *FlowggerParameters { + if in == nil { + return nil + } + out := new(FlowggerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogsCluster) DeepCopyInto(out *LogsCluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogsCluster. +func (in *LogsCluster) DeepCopy() *LogsCluster { + if in == nil { + return nil + } + out := new(LogsCluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LogsCluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LogsClusterInitParameters) DeepCopyInto(out *LogsClusterInitParameters) { + *out = *in + if in.ArchiveAllowedNetworks != nil { + in, out := &in.ArchiveAllowedNetworks, &out.ArchiveAllowedNetworks + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DirectInputAllowedNetworks != nil { + in, out := &in.DirectInputAllowedNetworks, &out.DirectInputAllowedNetworks + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.QueryAllowedNetworks != nil { + in, out := &in.QueryAllowedNetworks, &out.QueryAllowedNetworks + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogsClusterInitParameters. +func (in *LogsClusterInitParameters) DeepCopy() *LogsClusterInitParameters { + if in == nil { + return nil + } + out := new(LogsClusterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogsClusterList) DeepCopyInto(out *LogsClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LogsCluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogsClusterList. 
+func (in *LogsClusterList) DeepCopy() *LogsClusterList { + if in == nil { + return nil + } + out := new(LogsClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LogsClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogsClusterObservation) DeepCopyInto(out *LogsClusterObservation) { + *out = *in + if in.ArchiveAllowedNetworks != nil { + in, out := &in.ArchiveAllowedNetworks, &out.ArchiveAllowedNetworks + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClusterType != nil { + in, out := &in.ClusterType, &out.ClusterType + *out = new(string) + **out = **in + } + if in.DirectInputAllowedNetworks != nil { + in, out := &in.DirectInputAllowedNetworks, &out.DirectInputAllowedNetworks + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Hostname != nil { + in, out := &in.Hostname, &out.Hostname + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IsDefault != nil { + in, out := &in.IsDefault, &out.IsDefault + *out = new(bool) + **out = **in + } + if in.IsUnlocked != nil { + in, out := &in.IsUnlocked, &out.IsUnlocked + *out = new(bool) + **out = **in + } + if in.QueryAllowedNetworks != nil { + in, out := &in.QueryAllowedNetworks, &out.QueryAllowedNetworks + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Region != nil { + in, 
out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogsClusterObservation. +func (in *LogsClusterObservation) DeepCopy() *LogsClusterObservation { + if in == nil { + return nil + } + out := new(LogsClusterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogsClusterParameters) DeepCopyInto(out *LogsClusterParameters) { + *out = *in + if in.ArchiveAllowedNetworks != nil { + in, out := &in.ArchiveAllowedNetworks, &out.ArchiveAllowedNetworks + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DirectInputAllowedNetworks != nil { + in, out := &in.DirectInputAllowedNetworks, &out.DirectInputAllowedNetworks + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.QueryAllowedNetworks != nil { + in, out := &in.QueryAllowedNetworks, &out.QueryAllowedNetworks + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogsClusterParameters. 
+func (in *LogsClusterParameters) DeepCopy() *LogsClusterParameters { + if in == nil { + return nil + } + out := new(LogsClusterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogsClusterSpec) DeepCopyInto(out *LogsClusterSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogsClusterSpec. +func (in *LogsClusterSpec) DeepCopy() *LogsClusterSpec { + if in == nil { + return nil + } + out := new(LogsClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogsClusterStatus) DeepCopyInto(out *LogsClusterStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogsClusterStatus. +func (in *LogsClusterStatus) DeepCopy() *LogsClusterStatus { + if in == nil { + return nil + } + out := new(LogsClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogsInput) DeepCopyInto(out *LogsInput) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogsInput. 
+func (in *LogsInput) DeepCopy() *LogsInput { + if in == nil { + return nil + } + out := new(LogsInput) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LogsInput) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogsInputInitParameters) DeepCopyInto(out *LogsInputInitParameters) { + *out = *in + if in.AllowedNetworks != nil { + in, out := &in.AllowedNetworks, &out.AllowedNetworks + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Configuration != nil { + in, out := &in.Configuration, &out.Configuration + *out = make([]ConfigurationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.EngineID != nil { + in, out := &in.EngineID, &out.EngineID + *out = new(string) + **out = **in + } + if in.ExposedPort != nil { + in, out := &in.ExposedPort, &out.ExposedPort + *out = new(string) + **out = **in + } + if in.NbInstance != nil { + in, out := &in.NbInstance, &out.NbInstance + *out = new(float64) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.StreamID != nil { + in, out := &in.StreamID, &out.StreamID + *out = new(string) + **out = **in + } + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogsInputInitParameters. 
+func (in *LogsInputInitParameters) DeepCopy() *LogsInputInitParameters { + if in == nil { + return nil + } + out := new(LogsInputInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogsInputList) DeepCopyInto(out *LogsInputList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LogsInput, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogsInputList. +func (in *LogsInputList) DeepCopy() *LogsInputList { + if in == nil { + return nil + } + out := new(LogsInputList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LogsInputList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LogsInputObservation) DeepCopyInto(out *LogsInputObservation) { + *out = *in + if in.AllowedNetworks != nil { + in, out := &in.AllowedNetworks, &out.AllowedNetworks + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Configuration != nil { + in, out := &in.Configuration, &out.Configuration + *out = make([]ConfigurationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.EngineID != nil { + in, out := &in.EngineID, &out.EngineID + *out = new(string) + **out = **in + } + if in.ExposedPort != nil { + in, out := &in.ExposedPort, &out.ExposedPort + *out = new(string) + **out = **in + } + if in.Hostname != nil { + in, out := &in.Hostname, &out.Hostname + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InputID != nil { + in, out := &in.InputID, &out.InputID + *out = new(string) + **out = **in + } + if in.IsRestartRequired != nil { + in, out := &in.IsRestartRequired, &out.IsRestartRequired + *out = new(bool) + **out = **in + } + if in.NbInstance != nil { + in, out := &in.NbInstance, &out.NbInstance + *out = new(float64) + **out = **in + } + if in.PublicAddress != nil { + in, out := &in.PublicAddress, &out.PublicAddress + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.StreamID != nil { + in, out := &in.StreamID, &out.StreamID + *out = new(string) + **out = **in + } + if in.Title != 
nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } + if in.UpdatedAt != nil { + in, out := &in.UpdatedAt, &out.UpdatedAt + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogsInputObservation. +func (in *LogsInputObservation) DeepCopy() *LogsInputObservation { + if in == nil { + return nil + } + out := new(LogsInputObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogsInputParameters) DeepCopyInto(out *LogsInputParameters) { + *out = *in + if in.AllowedNetworks != nil { + in, out := &in.AllowedNetworks, &out.AllowedNetworks + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Configuration != nil { + in, out := &in.Configuration, &out.Configuration + *out = make([]ConfigurationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.EngineID != nil { + in, out := &in.EngineID, &out.EngineID + *out = new(string) + **out = **in + } + if in.ExposedPort != nil { + in, out := &in.ExposedPort, &out.ExposedPort + *out = new(string) + **out = **in + } + if in.NbInstance != nil { + in, out := &in.NbInstance, &out.NbInstance + *out = new(float64) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.StreamID != nil { + in, out := &in.StreamID, &out.StreamID + *out = new(string) + **out = **in + } + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, 
creating a new LogsInputParameters. +func (in *LogsInputParameters) DeepCopy() *LogsInputParameters { + if in == nil { + return nil + } + out := new(LogsInputParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogsInputSpec) DeepCopyInto(out *LogsInputSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogsInputSpec. +func (in *LogsInputSpec) DeepCopy() *LogsInputSpec { + if in == nil { + return nil + } + out := new(LogsInputSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogsInputStatus) DeepCopyInto(out *LogsInputStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogsInputStatus. +func (in *LogsInputStatus) DeepCopy() *LogsInputStatus { + if in == nil { + return nil + } + out := new(LogsInputStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LogstashInitParameters) DeepCopyInto(out *LogstashInitParameters) { + *out = *in + if in.FilterSection != nil { + in, out := &in.FilterSection, &out.FilterSection + *out = new(string) + **out = **in + } + if in.InputSection != nil { + in, out := &in.InputSection, &out.InputSection + *out = new(string) + **out = **in + } + if in.PatternSection != nil { + in, out := &in.PatternSection, &out.PatternSection + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogstashInitParameters. +func (in *LogstashInitParameters) DeepCopy() *LogstashInitParameters { + if in == nil { + return nil + } + out := new(LogstashInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogstashObservation) DeepCopyInto(out *LogstashObservation) { + *out = *in + if in.FilterSection != nil { + in, out := &in.FilterSection, &out.FilterSection + *out = new(string) + **out = **in + } + if in.InputSection != nil { + in, out := &in.InputSection, &out.InputSection + *out = new(string) + **out = **in + } + if in.PatternSection != nil { + in, out := &in.PatternSection, &out.PatternSection + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogstashObservation. +func (in *LogstashObservation) DeepCopy() *LogstashObservation { + if in == nil { + return nil + } + out := new(LogstashObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LogstashParameters) DeepCopyInto(out *LogstashParameters) { + *out = *in + if in.FilterSection != nil { + in, out := &in.FilterSection, &out.FilterSection + *out = new(string) + **out = **in + } + if in.InputSection != nil { + in, out := &in.InputSection, &out.InputSection + *out = new(string) + **out = **in + } + if in.PatternSection != nil { + in, out := &in.PatternSection, &out.PatternSection + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogstashParameters. +func (in *LogstashParameters) DeepCopy() *LogstashParameters { + if in == nil { + return nil + } + out := new(LogstashParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/logs/v1alpha1/zz_generated.managed.go b/apis/logs/v1alpha1/zz_generated.managed.go new file mode 100644 index 0000000..42ce166 --- /dev/null +++ b/apis/logs/v1alpha1/zz_generated.managed.go @@ -0,0 +1,128 @@ +/* +Copyright 2022 Upbound Inc. +*/ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this LogsCluster. +func (mg *LogsCluster) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this LogsCluster. +func (mg *LogsCluster) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this LogsCluster. +func (mg *LogsCluster) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this LogsCluster. +func (mg *LogsCluster) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this LogsCluster. 
+func (mg *LogsCluster) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this LogsCluster. +func (mg *LogsCluster) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this LogsCluster. +func (mg *LogsCluster) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this LogsCluster. +func (mg *LogsCluster) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this LogsCluster. +func (mg *LogsCluster) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this LogsCluster. +func (mg *LogsCluster) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this LogsCluster. +func (mg *LogsCluster) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this LogsCluster. +func (mg *LogsCluster) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this LogsInput. +func (mg *LogsInput) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this LogsInput. +func (mg *LogsInput) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this LogsInput. +func (mg *LogsInput) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this LogsInput. 
+func (mg *LogsInput) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this LogsInput. +func (mg *LogsInput) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this LogsInput. +func (mg *LogsInput) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this LogsInput. +func (mg *LogsInput) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this LogsInput. +func (mg *LogsInput) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this LogsInput. +func (mg *LogsInput) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this LogsInput. +func (mg *LogsInput) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this LogsInput. +func (mg *LogsInput) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this LogsInput. +func (mg *LogsInput) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/logs/v1alpha1/zz_generated.managedlist.go b/apis/logs/v1alpha1/zz_generated.managedlist.go new file mode 100644 index 0000000..ea86014 --- /dev/null +++ b/apis/logs/v1alpha1/zz_generated.managedlist.go @@ -0,0 +1,26 @@ +/* +Copyright 2022 Upbound Inc. +*/ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this LogsClusterList. 
+func (l *LogsClusterList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this LogsInputList. +func (l *LogsInputList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/logs/v1alpha1/zz_generated_terraformed.go b/apis/logs/v1alpha1/zz_generated_terraformed.go new file mode 100755 index 0000000..8f94e7c --- /dev/null +++ b/apis/logs/v1alpha1/zz_generated_terraformed.go @@ -0,0 +1,186 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this LogsCluster +func (mg *LogsCluster) GetTerraformResourceType() string { + return "ovh_dbaas_logs_cluster" +} + +// GetConnectionDetailsMapping for this LogsCluster +func (tr *LogsCluster) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"dedicated_input_pem": "status.atProvider.dedicatedInputPem", "direct_input_pem": "status.atProvider.directInputPem", "initial_archive_allowed_networks[*]": "status.atProvider.initialArchiveAllowedNetworks[*]", "initial_direct_input_allowed_networks[*]": "status.atProvider.initialDirectInputAllowedNetworks[*]", "initial_query_allowed_networks[*]": "status.atProvider.initialQueryAllowedNetworks[*]"} +} + +// GetObservation of this LogsCluster +func (tr *LogsCluster) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, 
&base) +} + +// SetObservation for this LogsCluster +func (tr *LogsCluster) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this LogsCluster +func (tr *LogsCluster) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this LogsCluster +func (tr *LogsCluster) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this LogsCluster +func (tr *LogsCluster) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this LogsCluster +func (tr *LogsCluster) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// LateInitialize this LogsCluster using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *LogsCluster) LateInitialize(attrs []byte) (bool, error) { + params := &LogsClusterParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *LogsCluster) GetTerraformSchemaVersion() int { + return 0 +} + +// GetTerraformResourceType returns Terraform resource type for this LogsInput +func (mg *LogsInput) GetTerraformResourceType() string { + return "ovh_dbaas_logs_input" +} + +// GetConnectionDetailsMapping for this LogsInput +func (tr *LogsInput) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"ssl_certificate": "status.atProvider.sslCertificate"} +} + +// GetObservation of this LogsInput +func (tr *LogsInput) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this LogsInput +func (tr *LogsInput) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this LogsInput +func (tr *LogsInput) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this LogsInput +func (tr *LogsInput) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this LogsInput +func (tr *LogsInput) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this LogsInput +func (tr *LogsInput) GetInitParameters() (map[string]any, error) { + p, err := 
json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// LateInitialize this LogsInput using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *LogsInput) LateInitialize(attrs []byte) (bool, error) { + params := &LogsInputParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *LogsInput) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/logs/v1alpha1/zz_groupversion_info.go b/apis/logs/v1alpha1/zz_groupversion_info.go new file mode 100755 index 0000000..17e1a72 --- /dev/null +++ b/apis/logs/v1alpha1/zz_groupversion_info.go @@ -0,0 +1,36 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=logs.ovh.edixos.io +// +versionName=v1alpha1 +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
+const ( + CRDGroup = "logs.ovh.edixos.io" + CRDVersion = "v1alpha1" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/logs/v1alpha1/zz_logscluster_types.go b/apis/logs/v1alpha1/zz_logscluster_types.go new file mode 100755 index 0000000..2ee7538 --- /dev/null +++ b/apis/logs/v1alpha1/zz_logscluster_types.go @@ -0,0 +1,142 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type LogsClusterInitParameters struct { + + // Allowed networks for ARCHIVE flow type + ArchiveAllowedNetworks []*string `json:"archiveAllowedNetworks,omitempty" tf:"archive_allowed_networks,omitempty"` + + // Allowed networks for DIRECT_INPUT flow type + DirectInputAllowedNetworks []*string `json:"directInputAllowedNetworks,omitempty" tf:"direct_input_allowed_networks,omitempty"` + + // Allowed networks for QUERY flow type + QueryAllowedNetworks []*string `json:"queryAllowedNetworks,omitempty" tf:"query_allowed_networks,omitempty"` + + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` +} + +type LogsClusterObservation struct { + + // Allowed networks for ARCHIVE flow type + ArchiveAllowedNetworks []*string `json:"archiveAllowedNetworks,omitempty" tf:"archive_allowed_networks,omitempty"` + + // Cluster type + ClusterType *string `json:"clusterType,omitempty" 
tf:"cluster_type,omitempty"` + + // Allowed networks for DIRECT_INPUT flow type + DirectInputAllowedNetworks []*string `json:"directInputAllowedNetworks,omitempty" tf:"direct_input_allowed_networks,omitempty"` + + // hostname + Hostname *string `json:"hostname,omitempty" tf:"hostname,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // All content generated by given service will be placed on this cluster + IsDefault *bool `json:"isDefault,omitempty" tf:"is_default,omitempty"` + + // Allow given service to perform advanced operations on cluster + IsUnlocked *bool `json:"isUnlocked,omitempty" tf:"is_unlocked,omitempty"` + + // Allowed networks for QUERY flow type + QueryAllowedNetworks []*string `json:"queryAllowedNetworks,omitempty" tf:"query_allowed_networks,omitempty"` + + // Data center localization + Region *string `json:"region,omitempty" tf:"region,omitempty"` + + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` +} + +type LogsClusterParameters struct { + + // Allowed networks for ARCHIVE flow type + // +kubebuilder:validation:Optional + ArchiveAllowedNetworks []*string `json:"archiveAllowedNetworks,omitempty" tf:"archive_allowed_networks,omitempty"` + + // Allowed networks for DIRECT_INPUT flow type + // +kubebuilder:validation:Optional + DirectInputAllowedNetworks []*string `json:"directInputAllowedNetworks,omitempty" tf:"direct_input_allowed_networks,omitempty"` + + // Allowed networks for QUERY flow type + // +kubebuilder:validation:Optional + QueryAllowedNetworks []*string `json:"queryAllowedNetworks,omitempty" tf:"query_allowed_networks,omitempty"` + + // +kubebuilder:validation:Optional + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` +} + +// LogsClusterSpec defines the desired state of LogsCluster +type LogsClusterSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider LogsClusterParameters `json:"forProvider"` + // THIS IS A BETA FIELD. 
It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider LogsClusterInitParameters `json:"initProvider,omitempty"` +} + +// LogsClusterStatus defines the observed state of LogsCluster. +type LogsClusterStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider LogsClusterObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true + +// LogsCluster is the Schema for the LogsClusters API. 
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,ovh} +type LogsCluster struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.serviceName) || (has(self.initProvider) && has(self.initProvider.serviceName))",message="spec.forProvider.serviceName is a required parameter" + Spec LogsClusterSpec `json:"spec"` + Status LogsClusterStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// LogsClusterList contains a list of LogsClusters +type LogsClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []LogsCluster `json:"items"` +} + +// Repository type metadata. +var ( + LogsCluster_Kind = "LogsCluster" + LogsCluster_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: LogsCluster_Kind}.String() + LogsCluster_KindAPIVersion = LogsCluster_Kind + "." 
+ CRDGroupVersion.String() + LogsCluster_GroupVersionKind = CRDGroupVersion.WithKind(LogsCluster_Kind) +) + +func init() { + SchemeBuilder.Register(&LogsCluster{}, &LogsClusterList{}) +} diff --git a/apis/logs/v1alpha1/zz_logsinput_types.go b/apis/logs/v1alpha1/zz_logsinput_types.go new file mode 100755 index 0000000..4e919aa --- /dev/null +++ b/apis/logs/v1alpha1/zz_logsinput_types.go @@ -0,0 +1,300 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ConfigurationInitParameters struct { + + // Flowgger configuration + Flowgger []FlowggerInitParameters `json:"flowgger,omitempty" tf:"flowgger,omitempty"` + + // Logstash configuration + Logstash []LogstashInitParameters `json:"logstash,omitempty" tf:"logstash,omitempty"` +} + +type ConfigurationObservation struct { + + // Flowgger configuration + Flowgger []FlowggerObservation `json:"flowgger,omitempty" tf:"flowgger,omitempty"` + + // Logstash configuration + Logstash []LogstashObservation `json:"logstash,omitempty" tf:"logstash,omitempty"` +} + +type ConfigurationParameters struct { + + // Flowgger configuration + // +kubebuilder:validation:Optional + Flowgger []FlowggerParameters `json:"flowgger,omitempty" tf:"flowgger,omitempty"` + + // Logstash configuration + // +kubebuilder:validation:Optional + Logstash []LogstashParameters `json:"logstash,omitempty" tf:"logstash,omitempty"` +} + +type FlowggerInitParameters struct { + + // Type of format to decode + LogFormat *string `json:"logFormat,omitempty" tf:"log_format,omitempty"` + + // Indicates how messages are delimited + LogFraming *string `json:"logFraming,omitempty" tf:"log_framing,omitempty"` +} + +type FlowggerObservation struct { + 
+ // Type of format to decode + LogFormat *string `json:"logFormat,omitempty" tf:"log_format,omitempty"` + + // Indicates how messages are delimited + LogFraming *string `json:"logFraming,omitempty" tf:"log_framing,omitempty"` +} + +type FlowggerParameters struct { + + // Type of format to decode + // +kubebuilder:validation:Optional + LogFormat *string `json:"logFormat" tf:"log_format,omitempty"` + + // Indicates how messages are delimited + // +kubebuilder:validation:Optional + LogFraming *string `json:"logFraming" tf:"log_framing,omitempty"` +} + +type LogsInputInitParameters struct { + + // IP blocks + AllowedNetworks []*string `json:"allowedNetworks,omitempty" tf:"allowed_networks,omitempty"` + + // Input configuration + Configuration []ConfigurationInitParameters `json:"configuration,omitempty" tf:"configuration,omitempty"` + + // Input description + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Input engine ID + EngineID *string `json:"engineId,omitempty" tf:"engine_id,omitempty"` + + // Port + ExposedPort *string `json:"exposedPort,omitempty" tf:"exposed_port,omitempty"` + + // Number of instance running + NbInstance *float64 `json:"nbInstance,omitempty" tf:"nb_instance,omitempty"` + + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // Associated Graylog stream + StreamID *string `json:"streamId,omitempty" tf:"stream_id,omitempty"` + + // Input title + Title *string `json:"title,omitempty" tf:"title,omitempty"` +} + +type LogsInputObservation struct { + + // IP blocks + AllowedNetworks []*string `json:"allowedNetworks,omitempty" tf:"allowed_networks,omitempty"` + + // Input configuration + Configuration []ConfigurationObservation `json:"configuration,omitempty" tf:"configuration,omitempty"` + + // Input creation + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + + // Input description + Description *string `json:"description,omitempty" 
tf:"description,omitempty"` + + // Input engine ID + EngineID *string `json:"engineId,omitempty" tf:"engine_id,omitempty"` + + // Port + ExposedPort *string `json:"exposedPort,omitempty" tf:"exposed_port,omitempty"` + + // Hostname + Hostname *string `json:"hostname,omitempty" tf:"hostname,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Input ID + InputID *string `json:"inputId,omitempty" tf:"input_id,omitempty"` + + // Indicate if input need to be restarted + IsRestartRequired *bool `json:"isRestartRequired,omitempty" tf:"is_restart_required,omitempty"` + + // Number of instance running + NbInstance *float64 `json:"nbInstance,omitempty" tf:"nb_instance,omitempty"` + + // Input IP address + PublicAddress *string `json:"publicAddress,omitempty" tf:"public_address,omitempty"` + + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // init: configuration required, pending: ready to start, running: available + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // Associated Graylog stream + StreamID *string `json:"streamId,omitempty" tf:"stream_id,omitempty"` + + // Input title + Title *string `json:"title,omitempty" tf:"title,omitempty"` + + // Input last update + UpdatedAt *string `json:"updatedAt,omitempty" tf:"updated_at,omitempty"` +} + +type LogsInputParameters struct { + + // IP blocks + // +kubebuilder:validation:Optional + AllowedNetworks []*string `json:"allowedNetworks,omitempty" tf:"allowed_networks,omitempty"` + + // Input configuration + // +kubebuilder:validation:Optional + Configuration []ConfigurationParameters `json:"configuration,omitempty" tf:"configuration,omitempty"` + + // Input description + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Input engine ID + // +kubebuilder:validation:Optional + EngineID *string `json:"engineId,omitempty" tf:"engine_id,omitempty"` + + // Port + // 
+kubebuilder:validation:Optional + ExposedPort *string `json:"exposedPort,omitempty" tf:"exposed_port,omitempty"` + + // Number of instance running + // +kubebuilder:validation:Optional + NbInstance *float64 `json:"nbInstance,omitempty" tf:"nb_instance,omitempty"` + + // +kubebuilder:validation:Optional + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // Associated Graylog stream + // +kubebuilder:validation:Optional + StreamID *string `json:"streamId,omitempty" tf:"stream_id,omitempty"` + + // Input title + // +kubebuilder:validation:Optional + Title *string `json:"title,omitempty" tf:"title,omitempty"` +} + +type LogstashInitParameters struct { + + // The filter section of logstash.conf + FilterSection *string `json:"filterSection,omitempty" tf:"filter_section,omitempty"` + + // The filter section of logstash.conf + InputSection *string `json:"inputSection,omitempty" tf:"input_section,omitempty"` + + // The list of customs Grok patterns + PatternSection *string `json:"patternSection,omitempty" tf:"pattern_section,omitempty"` +} + +type LogstashObservation struct { + + // The filter section of logstash.conf + FilterSection *string `json:"filterSection,omitempty" tf:"filter_section,omitempty"` + + // The filter section of logstash.conf + InputSection *string `json:"inputSection,omitempty" tf:"input_section,omitempty"` + + // The list of customs Grok patterns + PatternSection *string `json:"patternSection,omitempty" tf:"pattern_section,omitempty"` +} + +type LogstashParameters struct { + + // The filter section of logstash.conf + // +kubebuilder:validation:Optional + FilterSection *string `json:"filterSection,omitempty" tf:"filter_section,omitempty"` + + // The filter section of logstash.conf + // +kubebuilder:validation:Optional + InputSection *string `json:"inputSection" tf:"input_section,omitempty"` + + // The list of customs Grok patterns + // +kubebuilder:validation:Optional + PatternSection *string 
`json:"patternSection,omitempty" tf:"pattern_section,omitempty"` +} + +// LogsInputSpec defines the desired state of LogsInput +type LogsInputSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider LogsInputParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider LogsInputInitParameters `json:"initProvider,omitempty"` +} + +// LogsInputStatus defines the observed state of LogsInput. +type LogsInputStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider LogsInputObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true + +// LogsInput is the Schema for the LogsInputs API. 
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,ovh} +type LogsInput struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.configuration) || (has(self.initProvider) && has(self.initProvider.configuration))",message="spec.forProvider.configuration is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.description) || (has(self.initProvider) && has(self.initProvider.description))",message="spec.forProvider.description is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.engineId) || (has(self.initProvider) && has(self.initProvider.engineId))",message="spec.forProvider.engineId is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.serviceName) || (has(self.initProvider) && has(self.initProvider.serviceName))",message="spec.forProvider.serviceName is a required parameter" + // 
+kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.streamId) || (has(self.initProvider) && has(self.initProvider.streamId))",message="spec.forProvider.streamId is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.title) || (has(self.initProvider) && has(self.initProvider.title))",message="spec.forProvider.title is a required parameter" + Spec LogsInputSpec `json:"spec"` + Status LogsInputStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// LogsInputList contains a list of LogsInputs +type LogsInputList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []LogsInput `json:"items"` +} + +// Repository type metadata. +var ( + LogsInput_Kind = "LogsInput" + LogsInput_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: LogsInput_Kind}.String() + LogsInput_KindAPIVersion = LogsInput_Kind + "." 
+ CRDGroupVersion.String() + LogsInput_GroupVersionKind = CRDGroupVersion.WithKind(LogsInput_Kind) +) + +func init() { + SchemeBuilder.Register(&LogsInput{}, &LogsInputList{}) +} diff --git a/apis/zz_register.go b/apis/zz_register.go index 71abb48..f29c3d2 100755 --- a/apis/zz_register.go +++ b/apis/zz_register.go @@ -20,6 +20,7 @@ import ( v1alpha1databases "github.com/edixos/provider-ovh/apis/databases/v1alpha1" v1alpha1dedicatedserver "github.com/edixos/provider-ovh/apis/dedicatedserver/v1alpha1" v1alpha1dns "github.com/edixos/provider-ovh/apis/dns/v1alpha1" + v1alpha1kube "github.com/edixos/provider-ovh/apis/kube/v1alpha1" v1alpha1lb "github.com/edixos/provider-ovh/apis/lb/v1alpha1" v1alpha1logs "github.com/edixos/provider-ovh/apis/logs/v1alpha1" v1alpha1publiccloudnetwork "github.com/edixos/provider-ovh/apis/publiccloudnetwork/v1alpha1" @@ -37,6 +38,7 @@ func init() { v1alpha1databases.SchemeBuilder.AddToScheme, v1alpha1dedicatedserver.SchemeBuilder.AddToScheme, v1alpha1dns.SchemeBuilder.AddToScheme, + v1alpha1kube.SchemeBuilder.AddToScheme, v1alpha1lb.SchemeBuilder.AddToScheme, v1alpha1logs.SchemeBuilder.AddToScheme, v1alpha1publiccloudnetwork.SchemeBuilder.AddToScheme, diff --git a/config/external_name.go b/config/external_name.go index fe0f2f0..e46fe30 100644 --- a/config/external_name.go +++ b/config/external_name.go @@ -68,6 +68,10 @@ var ExternalNameConfigs = map[string]config.ExternalName{ "ovh_cloud_project_database_postgresql_user": config.NameAsIdentifier, "ovh_cloud_project_database_redis_user": config.NameAsIdentifier, "ovh_cloud_project_database_user": config.NameAsIdentifier, + "ovh_cloud_project_kube": config.NameAsIdentifier, + "ovh_cloud_project_kube_iprestrictions": config.NameAsIdentifier, + "ovh_cloud_project_kube_nodepool": config.NameAsIdentifier, + "ovh_cloud_project_kube_oidc": config.NameAsIdentifier, } // ExternalNameConfigurations applies all external name configs listed in the diff --git a/config/kube/config.go 
b/config/kube/config.go new file mode 100644 index 0000000..102b976 --- /dev/null +++ b/config/kube/config.go @@ -0,0 +1,23 @@ +package kube + +import "github.com/crossplane/upjet/pkg/config" + +const ( + shortGroup = "kube" +) + +// Configure configures individual resources by adding custom ResourceConfigurators. +func Configure(p *config.Provider) { + p.AddResourceConfigurator("ovh_cloud_project_kube", func(r *config.Resource) { + r.ShortGroup = shortGroup + }) + p.AddResourceConfigurator("ovh_cloud_project_kube_iprestrictions", func(r *config.Resource) { + r.ShortGroup = shortGroup + }) + p.AddResourceConfigurator("ovh_cloud_project_kube_nodepool", func(r *config.Resource) { + r.ShortGroup = shortGroup + }) + p.AddResourceConfigurator("ovh_cloud_project_kube_oidc", func(r *config.Resource) { + r.ShortGroup = shortGroup + }) +} diff --git a/config/provider.go b/config/provider.go index 24e71b1..ad62447 100644 --- a/config/provider.go +++ b/config/provider.go @@ -8,6 +8,8 @@ import ( // Note(turkenh): we are importing this to embed provider schema document _ "embed" + "github.com/edixos/provider-ovh/config/kube" + "github.com/edixos/provider-ovh/config/databases" "github.com/edixos/provider-ovh/config/logs" @@ -62,6 +64,7 @@ func GetProvider() *ujconfig.Provider { lb.Configure, logs.Configure, databases.Configure, + kube.Configure, } { configure(pc) } diff --git a/internal/controller/databases/projectdatabase/zz_controller.go b/internal/controller/databases/projectdatabase/zz_controller.go new file mode 100755 index 0000000..390b02b --- /dev/null +++ b/internal/controller/databases/projectdatabase/zz_controller.go @@ -0,0 +1,67 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. 
+ +package projectdatabase + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/terraform" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/edixos/provider-ovh/apis/databases/v1alpha1" + features "github.com/edixos/provider-ovh/internal/features" +) + +// Setup adds a controller that reconciles ProjectDatabase managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.ProjectDatabase_GroupVersionKind.String()) + var initializers managed.InitializerChain + initializers = append(initializers, managed.NewNameAsExternalName(mgr.GetClient())) + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.ProjectDatabase_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.ProjectDatabase_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["ovh_cloud_project_database"], tjcontroller.WithLogger(o.Logger), tjcontroller.WithConnectorEventHandler(eventHandler), + tjcontroller.WithCallbackProvider(ac), + )), + 
managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.ProjectDatabase_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.ProjectDatabase{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/databases/projectdatabasedatabase/zz_controller.go b/internal/controller/databases/projectdatabasedatabase/zz_controller.go new file mode 100755 index 0000000..95d7bba --- /dev/null +++ b/internal/controller/databases/projectdatabasedatabase/zz_controller.go @@ -0,0 +1,67 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. 
+ +package projectdatabasedatabase + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/terraform" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/edixos/provider-ovh/apis/databases/v1alpha1" + features "github.com/edixos/provider-ovh/internal/features" +) + +// Setup adds a controller that reconciles ProjectDatabaseDatabase managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.ProjectDatabaseDatabase_GroupVersionKind.String()) + var initializers managed.InitializerChain + initializers = append(initializers, managed.NewNameAsExternalName(mgr.GetClient())) + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.ProjectDatabaseDatabase_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.ProjectDatabaseDatabase_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["ovh_cloud_project_database_database"], tjcontroller.WithLogger(o.Logger), tjcontroller.WithConnectorEventHandler(eventHandler), + 
tjcontroller.WithCallbackProvider(ac), + )), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.ProjectDatabaseDatabase_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.ProjectDatabaseDatabase{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/databases/projectdatabaseintegration/zz_controller.go b/internal/controller/databases/projectdatabaseintegration/zz_controller.go new file mode 100755 index 0000000..538cdfa --- /dev/null +++ b/internal/controller/databases/projectdatabaseintegration/zz_controller.go @@ -0,0 +1,67 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. 
+ +package projectdatabaseintegration + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/terraform" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/edixos/provider-ovh/apis/databases/v1alpha1" + features "github.com/edixos/provider-ovh/internal/features" +) + +// Setup adds a controller that reconciles ProjectDatabaseIntegration managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.ProjectDatabaseIntegration_GroupVersionKind.String()) + var initializers managed.InitializerChain + initializers = append(initializers, managed.NewNameAsExternalName(mgr.GetClient())) + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.ProjectDatabaseIntegration_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.ProjectDatabaseIntegration_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["ovh_cloud_project_database_integration"], tjcontroller.WithLogger(o.Logger), tjcontroller.WithConnectorEventHandler(eventHandler), + 
tjcontroller.WithCallbackProvider(ac), + )), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.ProjectDatabaseIntegration_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.ProjectDatabaseIntegration{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/databases/projectdatabaseiprestriction/zz_controller.go b/internal/controller/databases/projectdatabaseiprestriction/zz_controller.go new file mode 100755 index 0000000..54244e7 --- /dev/null +++ b/internal/controller/databases/projectdatabaseiprestriction/zz_controller.go @@ -0,0 +1,67 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. 
+ +package projectdatabaseiprestriction + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/terraform" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/edixos/provider-ovh/apis/databases/v1alpha1" + features "github.com/edixos/provider-ovh/internal/features" +) + +// Setup adds a controller that reconciles ProjectDatabaseIPRestriction managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.ProjectDatabaseIPRestriction_GroupVersionKind.String()) + var initializers managed.InitializerChain + initializers = append(initializers, managed.NewNameAsExternalName(mgr.GetClient())) + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.ProjectDatabaseIPRestriction_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.ProjectDatabaseIPRestriction_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["ovh_cloud_project_database_ip_restriction"], tjcontroller.WithLogger(o.Logger), tjcontroller.WithConnectorEventHandler(eventHandler), + 
tjcontroller.WithCallbackProvider(ac), + )), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.ProjectDatabaseIPRestriction_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.ProjectDatabaseIPRestriction{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/databases/projectdatabasekafkaacl/zz_controller.go b/internal/controller/databases/projectdatabasekafkaacl/zz_controller.go new file mode 100755 index 0000000..bcd3c08 --- /dev/null +++ b/internal/controller/databases/projectdatabasekafkaacl/zz_controller.go @@ -0,0 +1,67 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. 
+ +package projectdatabasekafkaacl + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/terraform" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/edixos/provider-ovh/apis/databases/v1alpha1" + features "github.com/edixos/provider-ovh/internal/features" +) + +// Setup adds a controller that reconciles ProjectDatabaseKafkaACL managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.ProjectDatabaseKafkaACL_GroupVersionKind.String()) + var initializers managed.InitializerChain + initializers = append(initializers, managed.NewNameAsExternalName(mgr.GetClient())) + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.ProjectDatabaseKafkaACL_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.ProjectDatabaseKafkaACL_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["ovh_cloud_project_database_kafka_acl"], tjcontroller.WithLogger(o.Logger), tjcontroller.WithConnectorEventHandler(eventHandler), + 
tjcontroller.WithCallbackProvider(ac), + )), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.ProjectDatabaseKafkaACL_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.ProjectDatabaseKafkaACL{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/databases/projectdatabasekafkaschemaregistryacl/zz_controller.go b/internal/controller/databases/projectdatabasekafkaschemaregistryacl/zz_controller.go new file mode 100755 index 0000000..d438a8a --- /dev/null +++ b/internal/controller/databases/projectdatabasekafkaschemaregistryacl/zz_controller.go @@ -0,0 +1,67 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. 
+ +package projectdatabasekafkaschemaregistryacl + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/terraform" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/edixos/provider-ovh/apis/databases/v1alpha1" + features "github.com/edixos/provider-ovh/internal/features" +) + +// Setup adds a controller that reconciles ProjectDatabaseKafkaSchemaregistryacl managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.ProjectDatabaseKafkaSchemaregistryacl_GroupVersionKind.String()) + var initializers managed.InitializerChain + initializers = append(initializers, managed.NewNameAsExternalName(mgr.GetClient())) + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.ProjectDatabaseKafkaSchemaregistryacl_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.ProjectDatabaseKafkaSchemaregistryacl_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["ovh_cloud_project_database_kafka_schemaregistryacl"], tjcontroller.WithLogger(o.Logger), 
tjcontroller.WithConnectorEventHandler(eventHandler), + tjcontroller.WithCallbackProvider(ac), + )), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.ProjectDatabaseKafkaSchemaregistryacl_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.ProjectDatabaseKafkaSchemaregistryacl{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/databases/projectdatabasekafkatopic/zz_controller.go b/internal/controller/databases/projectdatabasekafkatopic/zz_controller.go new file mode 100755 index 0000000..a87d523 --- /dev/null +++ b/internal/controller/databases/projectdatabasekafkatopic/zz_controller.go @@ -0,0 +1,67 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. 
+ +package projectdatabasekafkatopic + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/terraform" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/edixos/provider-ovh/apis/databases/v1alpha1" + features "github.com/edixos/provider-ovh/internal/features" +) + +// Setup adds a controller that reconciles ProjectDatabaseKafkaTopic managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.ProjectDatabaseKafkaTopic_GroupVersionKind.String()) + var initializers managed.InitializerChain + initializers = append(initializers, managed.NewNameAsExternalName(mgr.GetClient())) + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.ProjectDatabaseKafkaTopic_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.ProjectDatabaseKafkaTopic_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["ovh_cloud_project_database_kafka_topic"], tjcontroller.WithLogger(o.Logger), tjcontroller.WithConnectorEventHandler(eventHandler), + 
tjcontroller.WithCallbackProvider(ac), + )), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.ProjectDatabaseKafkaTopic_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.ProjectDatabaseKafkaTopic{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/databases/projectdatabasem3dbnamespace/zz_controller.go b/internal/controller/databases/projectdatabasem3dbnamespace/zz_controller.go new file mode 100755 index 0000000..8fc6bd2 --- /dev/null +++ b/internal/controller/databases/projectdatabasem3dbnamespace/zz_controller.go @@ -0,0 +1,67 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. 
+ +package projectdatabasem3dbnamespace + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/terraform" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/edixos/provider-ovh/apis/databases/v1alpha1" + features "github.com/edixos/provider-ovh/internal/features" +) + +// Setup adds a controller that reconciles ProjectDatabaseM3DbNamespace managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.ProjectDatabaseM3DbNamespace_GroupVersionKind.String()) + var initializers managed.InitializerChain + initializers = append(initializers, managed.NewNameAsExternalName(mgr.GetClient())) + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.ProjectDatabaseM3DbNamespace_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.ProjectDatabaseM3DbNamespace_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["ovh_cloud_project_database_m3db_namespace"], tjcontroller.WithLogger(o.Logger), tjcontroller.WithConnectorEventHandler(eventHandler), + 
tjcontroller.WithCallbackProvider(ac), + )), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.ProjectDatabaseM3DbNamespace_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.ProjectDatabaseM3DbNamespace{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/databases/projectdatabasem3dbuser/zz_controller.go b/internal/controller/databases/projectdatabasem3dbuser/zz_controller.go new file mode 100755 index 0000000..d2d5eba --- /dev/null +++ b/internal/controller/databases/projectdatabasem3dbuser/zz_controller.go @@ -0,0 +1,67 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. 
+ +package projectdatabasem3dbuser + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/terraform" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/edixos/provider-ovh/apis/databases/v1alpha1" + features "github.com/edixos/provider-ovh/internal/features" +) + +// Setup adds a controller that reconciles ProjectDatabaseM3DbUser managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.ProjectDatabaseM3DbUser_GroupVersionKind.String()) + var initializers managed.InitializerChain + initializers = append(initializers, managed.NewNameAsExternalName(mgr.GetClient())) + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.ProjectDatabaseM3DbUser_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.ProjectDatabaseM3DbUser_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["ovh_cloud_project_database_m3db_user"], tjcontroller.WithLogger(o.Logger), tjcontroller.WithConnectorEventHandler(eventHandler), + 
tjcontroller.WithCallbackProvider(ac), + )), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.ProjectDatabaseM3DbUser_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.ProjectDatabaseM3DbUser{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/databases/projectdatabasemongodbuser/zz_controller.go b/internal/controller/databases/projectdatabasemongodbuser/zz_controller.go new file mode 100755 index 0000000..3289f6c --- /dev/null +++ b/internal/controller/databases/projectdatabasemongodbuser/zz_controller.go @@ -0,0 +1,67 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. 
+ +package projectdatabasemongodbuser + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/terraform" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/edixos/provider-ovh/apis/databases/v1alpha1" + features "github.com/edixos/provider-ovh/internal/features" +) + +// Setup adds a controller that reconciles ProjectDatabaseMongodbUser managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.ProjectDatabaseMongodbUser_GroupVersionKind.String()) + var initializers managed.InitializerChain + initializers = append(initializers, managed.NewNameAsExternalName(mgr.GetClient())) + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.ProjectDatabaseMongodbUser_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.ProjectDatabaseMongodbUser_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["ovh_cloud_project_database_mongodb_user"], tjcontroller.WithLogger(o.Logger), tjcontroller.WithConnectorEventHandler(eventHandler), + 
tjcontroller.WithCallbackProvider(ac), + )), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.ProjectDatabaseMongodbUser_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.ProjectDatabaseMongodbUser{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/databases/projectdatabaseopensearchpattern/zz_controller.go b/internal/controller/databases/projectdatabaseopensearchpattern/zz_controller.go new file mode 100755 index 0000000..b41e522 --- /dev/null +++ b/internal/controller/databases/projectdatabaseopensearchpattern/zz_controller.go @@ -0,0 +1,67 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. 
+ +package projectdatabaseopensearchpattern + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/terraform" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/edixos/provider-ovh/apis/databases/v1alpha1" + features "github.com/edixos/provider-ovh/internal/features" +) + +// Setup adds a controller that reconciles ProjectDatabaseOpensearchPattern managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.ProjectDatabaseOpensearchPattern_GroupVersionKind.String()) + var initializers managed.InitializerChain + initializers = append(initializers, managed.NewNameAsExternalName(mgr.GetClient())) + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.ProjectDatabaseOpensearchPattern_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.ProjectDatabaseOpensearchPattern_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["ovh_cloud_project_database_opensearch_pattern"], tjcontroller.WithLogger(o.Logger), 
tjcontroller.WithConnectorEventHandler(eventHandler), + tjcontroller.WithCallbackProvider(ac), + )), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.ProjectDatabaseOpensearchPattern_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.ProjectDatabaseOpensearchPattern{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/databases/projectdatabaseopensearchuser/zz_controller.go b/internal/controller/databases/projectdatabaseopensearchuser/zz_controller.go new file mode 100755 index 0000000..0410a2f --- /dev/null +++ b/internal/controller/databases/projectdatabaseopensearchuser/zz_controller.go @@ -0,0 +1,67 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. 
+ +package projectdatabaseopensearchuser + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/terraform" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/edixos/provider-ovh/apis/databases/v1alpha1" + features "github.com/edixos/provider-ovh/internal/features" +) + +// Setup adds a controller that reconciles ProjectDatabaseOpensearchUser managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.ProjectDatabaseOpensearchUser_GroupVersionKind.String()) + var initializers managed.InitializerChain + initializers = append(initializers, managed.NewNameAsExternalName(mgr.GetClient())) + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.ProjectDatabaseOpensearchUser_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.ProjectDatabaseOpensearchUser_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["ovh_cloud_project_database_opensearch_user"], tjcontroller.WithLogger(o.Logger), tjcontroller.WithConnectorEventHandler(eventHandler), 
+ tjcontroller.WithCallbackProvider(ac), + )), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.ProjectDatabaseOpensearchUser_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.ProjectDatabaseOpensearchUser{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/databases/projectdatabasepostgresqluser/zz_controller.go b/internal/controller/databases/projectdatabasepostgresqluser/zz_controller.go new file mode 100755 index 0000000..0bce9fc --- /dev/null +++ b/internal/controller/databases/projectdatabasepostgresqluser/zz_controller.go @@ -0,0 +1,67 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. 
+ +package projectdatabasepostgresqluser + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/terraform" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/edixos/provider-ovh/apis/databases/v1alpha1" + features "github.com/edixos/provider-ovh/internal/features" +) + +// Setup adds a controller that reconciles ProjectDatabasePostgresqlUser managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.ProjectDatabasePostgresqlUser_GroupVersionKind.String()) + var initializers managed.InitializerChain + initializers = append(initializers, managed.NewNameAsExternalName(mgr.GetClient())) + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.ProjectDatabasePostgresqlUser_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.ProjectDatabasePostgresqlUser_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["ovh_cloud_project_database_postgresql_user"], tjcontroller.WithLogger(o.Logger), tjcontroller.WithConnectorEventHandler(eventHandler), 
+ tjcontroller.WithCallbackProvider(ac), + )), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.ProjectDatabasePostgresqlUser_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.ProjectDatabasePostgresqlUser{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/databases/projectdatabaseredisuser/zz_controller.go b/internal/controller/databases/projectdatabaseredisuser/zz_controller.go new file mode 100755 index 0000000..2ac0ddf --- /dev/null +++ b/internal/controller/databases/projectdatabaseredisuser/zz_controller.go @@ -0,0 +1,67 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. 
+ +package projectdatabaseredisuser + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/terraform" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/edixos/provider-ovh/apis/databases/v1alpha1" + features "github.com/edixos/provider-ovh/internal/features" +) + +// Setup adds a controller that reconciles ProjectDatabaseRedisUser managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.ProjectDatabaseRedisUser_GroupVersionKind.String()) + var initializers managed.InitializerChain + initializers = append(initializers, managed.NewNameAsExternalName(mgr.GetClient())) + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.ProjectDatabaseRedisUser_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.ProjectDatabaseRedisUser_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["ovh_cloud_project_database_redis_user"], tjcontroller.WithLogger(o.Logger), tjcontroller.WithConnectorEventHandler(eventHandler), + 
tjcontroller.WithCallbackProvider(ac), + )), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.ProjectDatabaseRedisUser_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.ProjectDatabaseRedisUser{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/databases/projectdatabaseuser/zz_controller.go b/internal/controller/databases/projectdatabaseuser/zz_controller.go new file mode 100755 index 0000000..58bc6bd --- /dev/null +++ b/internal/controller/databases/projectdatabaseuser/zz_controller.go @@ -0,0 +1,67 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. 
+ +package projectdatabaseuser + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/terraform" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/edixos/provider-ovh/apis/databases/v1alpha1" + features "github.com/edixos/provider-ovh/internal/features" +) + +// Setup adds a controller that reconciles ProjectDatabaseUser managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.ProjectDatabaseUser_GroupVersionKind.String()) + var initializers managed.InitializerChain + initializers = append(initializers, managed.NewNameAsExternalName(mgr.GetClient())) + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.ProjectDatabaseUser_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.ProjectDatabaseUser_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["ovh_cloud_project_database_user"], tjcontroller.WithLogger(o.Logger), tjcontroller.WithConnectorEventHandler(eventHandler), + tjcontroller.WithCallbackProvider(ac), + )), + 
managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.ProjectDatabaseUser_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.ProjectDatabaseUser{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/kube/projectkube/zz_controller.go b/internal/controller/kube/projectkube/zz_controller.go new file mode 100755 index 0000000..b1fd1b6 --- /dev/null +++ b/internal/controller/kube/projectkube/zz_controller.go @@ -0,0 +1,67 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. 
+ +package projectkube + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/terraform" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/edixos/provider-ovh/apis/kube/v1alpha1" + features "github.com/edixos/provider-ovh/internal/features" +) + +// Setup adds a controller that reconciles ProjectKube managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.ProjectKube_GroupVersionKind.String()) + var initializers managed.InitializerChain + initializers = append(initializers, managed.NewNameAsExternalName(mgr.GetClient())) + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.ProjectKube_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.ProjectKube_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["ovh_cloud_project_kube"], tjcontroller.WithLogger(o.Logger), tjcontroller.WithConnectorEventHandler(eventHandler), + tjcontroller.WithCallbackProvider(ac), + )), + managed.WithLogger(o.Logger.WithValues("controller", name)), + 
managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.ProjectKube_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.ProjectKube{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/kube/projectkubeiprestrictions/zz_controller.go b/internal/controller/kube/projectkubeiprestrictions/zz_controller.go new file mode 100755 index 0000000..8d08031 --- /dev/null +++ b/internal/controller/kube/projectkubeiprestrictions/zz_controller.go @@ -0,0 +1,67 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. 
+ +package projectkubeiprestrictions + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/terraform" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/edixos/provider-ovh/apis/kube/v1alpha1" + features "github.com/edixos/provider-ovh/internal/features" +) + +// Setup adds a controller that reconciles ProjectKubeIprestrictions managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.ProjectKubeIprestrictions_GroupVersionKind.String()) + var initializers managed.InitializerChain + initializers = append(initializers, managed.NewNameAsExternalName(mgr.GetClient())) + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.ProjectKubeIprestrictions_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.ProjectKubeIprestrictions_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["ovh_cloud_project_kube_iprestrictions"], tjcontroller.WithLogger(o.Logger), tjcontroller.WithConnectorEventHandler(eventHandler), + 
tjcontroller.WithCallbackProvider(ac), + )), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.ProjectKubeIprestrictions_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.ProjectKubeIprestrictions{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/kube/projectkubenodepool/zz_controller.go b/internal/controller/kube/projectkubenodepool/zz_controller.go new file mode 100755 index 0000000..97af6a4 --- /dev/null +++ b/internal/controller/kube/projectkubenodepool/zz_controller.go @@ -0,0 +1,67 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. 
+ +package projectkubenodepool + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/terraform" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/edixos/provider-ovh/apis/kube/v1alpha1" + features "github.com/edixos/provider-ovh/internal/features" +) + +// Setup adds a controller that reconciles ProjectKubeNodepool managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.ProjectKubeNodepool_GroupVersionKind.String()) + var initializers managed.InitializerChain + initializers = append(initializers, managed.NewNameAsExternalName(mgr.GetClient())) + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.ProjectKubeNodepool_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.ProjectKubeNodepool_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["ovh_cloud_project_kube_nodepool"], tjcontroller.WithLogger(o.Logger), tjcontroller.WithConnectorEventHandler(eventHandler), + tjcontroller.WithCallbackProvider(ac), + )), + 
managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.ProjectKubeNodepool_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.ProjectKubeNodepool{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/kube/projectkubeoidc/zz_controller.go b/internal/controller/kube/projectkubeoidc/zz_controller.go new file mode 100755 index 0000000..2979a20 --- /dev/null +++ b/internal/controller/kube/projectkubeoidc/zz_controller.go @@ -0,0 +1,67 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. 
+ +package projectkubeoidc + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/terraform" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/edixos/provider-ovh/apis/kube/v1alpha1" + features "github.com/edixos/provider-ovh/internal/features" +) + +// Setup adds a controller that reconciles ProjectKubeOidc managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.ProjectKubeOidc_GroupVersionKind.String()) + var initializers managed.InitializerChain + initializers = append(initializers, managed.NewNameAsExternalName(mgr.GetClient())) + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.ProjectKubeOidc_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.ProjectKubeOidc_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["ovh_cloud_project_kube_oidc"], tjcontroller.WithLogger(o.Logger), tjcontroller.WithConnectorEventHandler(eventHandler), + tjcontroller.WithCallbackProvider(ac), + )), + 
managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.ProjectKubeOidc_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.ProjectKubeOidc{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/lb/httpfarm/zz_controller.go b/internal/controller/lb/httpfarm/zz_controller.go new file mode 100755 index 0000000..d2f9d03 --- /dev/null +++ b/internal/controller/lb/httpfarm/zz_controller.go @@ -0,0 +1,67 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. 
package httpfarm

import (
	"time"

	"github.com/crossplane/crossplane-runtime/pkg/connection"
	"github.com/crossplane/crossplane-runtime/pkg/event"
	"github.com/crossplane/crossplane-runtime/pkg/ratelimiter"
	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
	tjcontroller "github.com/crossplane/upjet/pkg/controller"
	"github.com/crossplane/upjet/pkg/controller/handler"
	"github.com/crossplane/upjet/pkg/terraform"
	ctrl "sigs.k8s.io/controller-runtime"

	v1alpha1 "github.com/edixos/provider-ovh/apis/lb/v1alpha1"
	features "github.com/edixos/provider-ovh/internal/features"
)

// Setup adds a controller that reconciles HTTPFarm managed resources.
func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
	name := managed.ControllerName(v1alpha1.HTTPFarm_GroupVersionKind.String())
	var initializers managed.InitializerChain
	// Default the Terraform external name to the Kubernetes object name.
	initializers = append(initializers, managed.NewNameAsExternalName(mgr.GetClient()))
	// Connection details are published to Kubernetes Secrets by default.
	cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())}
	if o.SecretStoreConfigGVK != nil {
		// Also publish to an external secret store when one is configured.
		cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig)))
	}
	eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.HTTPFarm_GroupVersionKind)))
	// API callbacks requeue the object when asynchronous Terraform operations finish.
	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.HTTPFarm_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler))
	opts := []managed.ReconcilerOption{
		// The external connecter drives the "ovh_iploadbalancing_http_farm" Terraform resource.
		managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["ovh_iploadbalancing_http_farm"], tjcontroller.WithLogger(o.Logger), tjcontroller.WithConnectorEventHandler(eventHandler),
			tjcontroller.WithCallbackProvider(ac),
		)),
		managed.WithLogger(o.Logger.WithValues("controller", name)),
		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
		// Chain Terraform workspace cleanup onto the standard API finalizer.
		managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
		managed.WithTimeout(3 * time.Minute),
		managed.WithInitializers(initializers),
		managed.WithConnectionPublishers(cps...),
		managed.WithPollInterval(o.PollInterval),
	}
	if o.PollJitter != 0 {
		// Add jitter so poll requests are spread out over time.
		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
	}
	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
		// Enable management policies only behind the beta feature flag.
		opts = append(opts, managed.WithManagementPolicies())
	}
	r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.HTTPFarm_GroupVersionKind), opts...)

	return ctrl.NewControllerManagedBy(mgr).
		Named(name).
		WithOptions(o.ForControllerRuntime()).
		WithEventFilter(xpresource.DesiredStateChanged()).
		Watches(&v1alpha1.HTTPFarm{}, eventHandler).
		Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter))
}
package httpfarmserver

import (
	"time"

	"github.com/crossplane/crossplane-runtime/pkg/connection"
	"github.com/crossplane/crossplane-runtime/pkg/event"
	"github.com/crossplane/crossplane-runtime/pkg/ratelimiter"
	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
	tjcontroller "github.com/crossplane/upjet/pkg/controller"
	"github.com/crossplane/upjet/pkg/controller/handler"
	"github.com/crossplane/upjet/pkg/terraform"
	ctrl "sigs.k8s.io/controller-runtime"

	v1alpha1 "github.com/edixos/provider-ovh/apis/lb/v1alpha1"
	features "github.com/edixos/provider-ovh/internal/features"
)

// Setup adds a controller that reconciles HTTPFarmServer managed resources.
func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
	name := managed.ControllerName(v1alpha1.HTTPFarmServer_GroupVersionKind.String())
	var initializers managed.InitializerChain
	// Default the Terraform external name to the Kubernetes object name.
	initializers = append(initializers, managed.NewNameAsExternalName(mgr.GetClient()))
	// Connection details are published to Kubernetes Secrets by default.
	cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())}
	if o.SecretStoreConfigGVK != nil {
		// Also publish to an external secret store when one is configured.
		cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig)))
	}
	eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.HTTPFarmServer_GroupVersionKind)))
	// API callbacks requeue the object when asynchronous Terraform operations finish.
	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.HTTPFarmServer_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler))
	opts := []managed.ReconcilerOption{
		// The external connecter drives the "ovh_iploadbalancing_http_farm_server" Terraform resource.
		managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["ovh_iploadbalancing_http_farm_server"], tjcontroller.WithLogger(o.Logger), tjcontroller.WithConnectorEventHandler(eventHandler),
			tjcontroller.WithCallbackProvider(ac),
		)),
		managed.WithLogger(o.Logger.WithValues("controller", name)),
		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
		// Chain Terraform workspace cleanup onto the standard API finalizer.
		managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
		managed.WithTimeout(3 * time.Minute),
		managed.WithInitializers(initializers),
		managed.WithConnectionPublishers(cps...),
		managed.WithPollInterval(o.PollInterval),
	}
	if o.PollJitter != 0 {
		// Add jitter so poll requests are spread out over time.
		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
	}
	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
		// Enable management policies only behind the beta feature flag.
		opts = append(opts, managed.WithManagementPolicies())
	}
	r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.HTTPFarmServer_GroupVersionKind), opts...)

	return ctrl.NewControllerManagedBy(mgr).
		Named(name).
		WithOptions(o.ForControllerRuntime()).
		WithEventFilter(xpresource.DesiredStateChanged()).
		Watches(&v1alpha1.HTTPFarmServer{}, eventHandler).
		Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter))
}
package httpfrontend

import (
	"time"

	"github.com/crossplane/crossplane-runtime/pkg/connection"
	"github.com/crossplane/crossplane-runtime/pkg/event"
	"github.com/crossplane/crossplane-runtime/pkg/ratelimiter"
	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
	tjcontroller "github.com/crossplane/upjet/pkg/controller"
	"github.com/crossplane/upjet/pkg/controller/handler"
	"github.com/crossplane/upjet/pkg/terraform"
	ctrl "sigs.k8s.io/controller-runtime"

	v1alpha1 "github.com/edixos/provider-ovh/apis/lb/v1alpha1"
	features "github.com/edixos/provider-ovh/internal/features"
)

// Setup adds a controller that reconciles HTTPFrontend managed resources.
func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
	name := managed.ControllerName(v1alpha1.HTTPFrontend_GroupVersionKind.String())
	var initializers managed.InitializerChain
	// Default the Terraform external name to the Kubernetes object name.
	initializers = append(initializers, managed.NewNameAsExternalName(mgr.GetClient()))
	// Connection details are published to Kubernetes Secrets by default.
	cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())}
	if o.SecretStoreConfigGVK != nil {
		// Also publish to an external secret store when one is configured.
		cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig)))
	}
	eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.HTTPFrontend_GroupVersionKind)))
	// API callbacks requeue the object when asynchronous Terraform operations finish.
	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.HTTPFrontend_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler))
	opts := []managed.ReconcilerOption{
		// The external connecter drives the "ovh_iploadbalancing_http_frontend" Terraform resource.
		managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["ovh_iploadbalancing_http_frontend"], tjcontroller.WithLogger(o.Logger), tjcontroller.WithConnectorEventHandler(eventHandler),
			tjcontroller.WithCallbackProvider(ac),
		)),
		managed.WithLogger(o.Logger.WithValues("controller", name)),
		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
		// Chain Terraform workspace cleanup onto the standard API finalizer.
		managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
		managed.WithTimeout(3 * time.Minute),
		managed.WithInitializers(initializers),
		managed.WithConnectionPublishers(cps...),
		managed.WithPollInterval(o.PollInterval),
	}
	if o.PollJitter != 0 {
		// Add jitter so poll requests are spread out over time.
		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
	}
	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
		// Enable management policies only behind the beta feature flag.
		opts = append(opts, managed.WithManagementPolicies())
	}
	r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.HTTPFrontend_GroupVersionKind), opts...)

	return ctrl.NewControllerManagedBy(mgr).
		Named(name).
		WithOptions(o.ForControllerRuntime()).
		WithEventFilter(xpresource.DesiredStateChanged()).
		Watches(&v1alpha1.HTTPFrontend{}, eventHandler).
		Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter))
}
package httproute

import (
	"time"

	"github.com/crossplane/crossplane-runtime/pkg/connection"
	"github.com/crossplane/crossplane-runtime/pkg/event"
	"github.com/crossplane/crossplane-runtime/pkg/ratelimiter"
	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
	tjcontroller "github.com/crossplane/upjet/pkg/controller"
	"github.com/crossplane/upjet/pkg/controller/handler"
	"github.com/crossplane/upjet/pkg/terraform"
	ctrl "sigs.k8s.io/controller-runtime"

	v1alpha1 "github.com/edixos/provider-ovh/apis/lb/v1alpha1"
	features "github.com/edixos/provider-ovh/internal/features"
)

// Setup adds a controller that reconciles HTTPRoute managed resources.
func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
	name := managed.ControllerName(v1alpha1.HTTPRoute_GroupVersionKind.String())
	var initializers managed.InitializerChain
	// Default the Terraform external name to the Kubernetes object name.
	initializers = append(initializers, managed.NewNameAsExternalName(mgr.GetClient()))
	// Connection details are published to Kubernetes Secrets by default.
	cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())}
	if o.SecretStoreConfigGVK != nil {
		// Also publish to an external secret store when one is configured.
		cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig)))
	}
	eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.HTTPRoute_GroupVersionKind)))
	// API callbacks requeue the object when asynchronous Terraform operations finish.
	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.HTTPRoute_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler))
	opts := []managed.ReconcilerOption{
		// The external connecter drives the "ovh_iploadbalancing_http_route" Terraform resource.
		managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["ovh_iploadbalancing_http_route"], tjcontroller.WithLogger(o.Logger), tjcontroller.WithConnectorEventHandler(eventHandler),
			tjcontroller.WithCallbackProvider(ac),
		)),
		managed.WithLogger(o.Logger.WithValues("controller", name)),
		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
		// Chain Terraform workspace cleanup onto the standard API finalizer.
		managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
		managed.WithTimeout(3 * time.Minute),
		managed.WithInitializers(initializers),
		managed.WithConnectionPublishers(cps...),
		managed.WithPollInterval(o.PollInterval),
	}
	if o.PollJitter != 0 {
		// Add jitter so poll requests are spread out over time.
		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
	}
	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
		// Enable management policies only behind the beta feature flag.
		opts = append(opts, managed.WithManagementPolicies())
	}
	r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.HTTPRoute_GroupVersionKind), opts...)

	return ctrl.NewControllerManagedBy(mgr).
		Named(name).
		WithOptions(o.ForControllerRuntime()).
		WithEventFilter(xpresource.DesiredStateChanged()).
		Watches(&v1alpha1.HTTPRoute{}, eventHandler).
		Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter))
}
package httprouterule

import (
	"time"

	"github.com/crossplane/crossplane-runtime/pkg/connection"
	"github.com/crossplane/crossplane-runtime/pkg/event"
	"github.com/crossplane/crossplane-runtime/pkg/ratelimiter"
	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
	tjcontroller "github.com/crossplane/upjet/pkg/controller"
	"github.com/crossplane/upjet/pkg/controller/handler"
	"github.com/crossplane/upjet/pkg/terraform"
	ctrl "sigs.k8s.io/controller-runtime"

	v1alpha1 "github.com/edixos/provider-ovh/apis/lb/v1alpha1"
	features "github.com/edixos/provider-ovh/internal/features"
)

// Setup adds a controller that reconciles HTTPRouteRule managed resources.
func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
	name := managed.ControllerName(v1alpha1.HTTPRouteRule_GroupVersionKind.String())
	var initializers managed.InitializerChain
	// Default the Terraform external name to the Kubernetes object name.
	initializers = append(initializers, managed.NewNameAsExternalName(mgr.GetClient()))
	// Connection details are published to Kubernetes Secrets by default.
	cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())}
	if o.SecretStoreConfigGVK != nil {
		// Also publish to an external secret store when one is configured.
		cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig)))
	}
	eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.HTTPRouteRule_GroupVersionKind)))
	// API callbacks requeue the object when asynchronous Terraform operations finish.
	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.HTTPRouteRule_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler))
	opts := []managed.ReconcilerOption{
		// The external connecter drives the "ovh_iploadbalancing_http_route_rule" Terraform resource.
		managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["ovh_iploadbalancing_http_route_rule"], tjcontroller.WithLogger(o.Logger), tjcontroller.WithConnectorEventHandler(eventHandler),
			tjcontroller.WithCallbackProvider(ac),
		)),
		managed.WithLogger(o.Logger.WithValues("controller", name)),
		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
		// Chain Terraform workspace cleanup onto the standard API finalizer.
		managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
		managed.WithTimeout(3 * time.Minute),
		managed.WithInitializers(initializers),
		managed.WithConnectionPublishers(cps...),
		managed.WithPollInterval(o.PollInterval),
	}
	if o.PollJitter != 0 {
		// Add jitter so poll requests are spread out over time.
		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
	}
	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
		// Enable management policies only behind the beta feature flag.
		opts = append(opts, managed.WithManagementPolicies())
	}
	r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.HTTPRouteRule_GroupVersionKind), opts...)

	return ctrl.NewControllerManagedBy(mgr).
		Named(name).
		WithOptions(o.ForControllerRuntime()).
		WithEventFilter(xpresource.DesiredStateChanged()).
		Watches(&v1alpha1.HTTPRouteRule{}, eventHandler).
		Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter))
}
package iploadbalancing

import (
	"time"

	"github.com/crossplane/crossplane-runtime/pkg/connection"
	"github.com/crossplane/crossplane-runtime/pkg/event"
	"github.com/crossplane/crossplane-runtime/pkg/ratelimiter"
	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
	tjcontroller "github.com/crossplane/upjet/pkg/controller"
	"github.com/crossplane/upjet/pkg/controller/handler"
	"github.com/crossplane/upjet/pkg/terraform"
	ctrl "sigs.k8s.io/controller-runtime"

	v1alpha1 "github.com/edixos/provider-ovh/apis/lb/v1alpha1"
	features "github.com/edixos/provider-ovh/internal/features"
)

// Setup adds a controller that reconciles Iploadbalancing managed resources.
func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
	name := managed.ControllerName(v1alpha1.Iploadbalancing_GroupVersionKind.String())
	var initializers managed.InitializerChain
	// Default the Terraform external name to the Kubernetes object name.
	initializers = append(initializers, managed.NewNameAsExternalName(mgr.GetClient()))
	// Connection details are published to Kubernetes Secrets by default.
	cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())}
	if o.SecretStoreConfigGVK != nil {
		// Also publish to an external secret store when one is configured.
		cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig)))
	}
	eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.Iploadbalancing_GroupVersionKind)))
	// API callbacks requeue the object when asynchronous Terraform operations finish.
	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Iploadbalancing_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler))
	opts := []managed.ReconcilerOption{
		// The external connecter drives the "ovh_iploadbalancing" Terraform resource.
		managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["ovh_iploadbalancing"], tjcontroller.WithLogger(o.Logger), tjcontroller.WithConnectorEventHandler(eventHandler),
			tjcontroller.WithCallbackProvider(ac),
		)),
		managed.WithLogger(o.Logger.WithValues("controller", name)),
		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
		// Chain Terraform workspace cleanup onto the standard API finalizer.
		managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
		managed.WithTimeout(3 * time.Minute),
		managed.WithInitializers(initializers),
		managed.WithConnectionPublishers(cps...),
		managed.WithPollInterval(o.PollInterval),
	}
	if o.PollJitter != 0 {
		// Add jitter so poll requests are spread out over time.
		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
	}
	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
		// Enable management policies only behind the beta feature flag.
		opts = append(opts, managed.WithManagementPolicies())
	}
	r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.Iploadbalancing_GroupVersionKind), opts...)

	return ctrl.NewControllerManagedBy(mgr).
		Named(name).
		WithOptions(o.ForControllerRuntime()).
		WithEventFilter(xpresource.DesiredStateChanged()).
		Watches(&v1alpha1.Iploadbalancing{}, eventHandler).
		Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter))
}
package refresh

import (
	"time"

	"github.com/crossplane/crossplane-runtime/pkg/connection"
	"github.com/crossplane/crossplane-runtime/pkg/event"
	"github.com/crossplane/crossplane-runtime/pkg/ratelimiter"
	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
	tjcontroller "github.com/crossplane/upjet/pkg/controller"
	"github.com/crossplane/upjet/pkg/controller/handler"
	"github.com/crossplane/upjet/pkg/terraform"
	ctrl "sigs.k8s.io/controller-runtime"

	v1alpha1 "github.com/edixos/provider-ovh/apis/lb/v1alpha1"
	features "github.com/edixos/provider-ovh/internal/features"
)

// Setup adds a controller that reconciles Refresh managed resources.
func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
	name := managed.ControllerName(v1alpha1.Refresh_GroupVersionKind.String())
	var initializers managed.InitializerChain
	// Default the Terraform external name to the Kubernetes object name.
	initializers = append(initializers, managed.NewNameAsExternalName(mgr.GetClient()))
	// Connection details are published to Kubernetes Secrets by default.
	cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())}
	if o.SecretStoreConfigGVK != nil {
		// Also publish to an external secret store when one is configured.
		cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig)))
	}
	eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.Refresh_GroupVersionKind)))
	// API callbacks requeue the object when asynchronous Terraform operations finish.
	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Refresh_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler))
	opts := []managed.ReconcilerOption{
		// The external connecter drives the "ovh_iploadbalancing_refresh" Terraform resource.
		managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["ovh_iploadbalancing_refresh"], tjcontroller.WithLogger(o.Logger), tjcontroller.WithConnectorEventHandler(eventHandler),
			tjcontroller.WithCallbackProvider(ac),
		)),
		managed.WithLogger(o.Logger.WithValues("controller", name)),
		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
		// Chain Terraform workspace cleanup onto the standard API finalizer.
		managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
		managed.WithTimeout(3 * time.Minute),
		managed.WithInitializers(initializers),
		managed.WithConnectionPublishers(cps...),
		managed.WithPollInterval(o.PollInterval),
	}
	if o.PollJitter != 0 {
		// Add jitter so poll requests are spread out over time.
		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
	}
	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
		// Enable management policies only behind the beta feature flag.
		opts = append(opts, managed.WithManagementPolicies())
	}
	r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.Refresh_GroupVersionKind), opts...)

	return ctrl.NewControllerManagedBy(mgr).
		Named(name).
		WithOptions(o.ForControllerRuntime()).
		WithEventFilter(xpresource.DesiredStateChanged()).
		Watches(&v1alpha1.Refresh{}, eventHandler).
		Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter))
}
package tcpfarm

import (
	"time"

	"github.com/crossplane/crossplane-runtime/pkg/connection"
	"github.com/crossplane/crossplane-runtime/pkg/event"
	"github.com/crossplane/crossplane-runtime/pkg/ratelimiter"
	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
	tjcontroller "github.com/crossplane/upjet/pkg/controller"
	"github.com/crossplane/upjet/pkg/controller/handler"
	"github.com/crossplane/upjet/pkg/terraform"
	ctrl "sigs.k8s.io/controller-runtime"

	v1alpha1 "github.com/edixos/provider-ovh/apis/lb/v1alpha1"
	features "github.com/edixos/provider-ovh/internal/features"
)

// Setup adds a controller that reconciles TCPFarm managed resources.
func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
	name := managed.ControllerName(v1alpha1.TCPFarm_GroupVersionKind.String())
	var initializers managed.InitializerChain
	// Default the Terraform external name to the Kubernetes object name.
	initializers = append(initializers, managed.NewNameAsExternalName(mgr.GetClient()))
	// Connection details are published to Kubernetes Secrets by default.
	cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())}
	if o.SecretStoreConfigGVK != nil {
		// Also publish to an external secret store when one is configured.
		cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig)))
	}
	eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.TCPFarm_GroupVersionKind)))
	// API callbacks requeue the object when asynchronous Terraform operations finish.
	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.TCPFarm_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler))
	opts := []managed.ReconcilerOption{
		// The external connecter drives the "ovh_iploadbalancing_tcp_farm" Terraform resource.
		managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["ovh_iploadbalancing_tcp_farm"], tjcontroller.WithLogger(o.Logger), tjcontroller.WithConnectorEventHandler(eventHandler),
			tjcontroller.WithCallbackProvider(ac),
		)),
		managed.WithLogger(o.Logger.WithValues("controller", name)),
		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
		// Chain Terraform workspace cleanup onto the standard API finalizer.
		managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
		managed.WithTimeout(3 * time.Minute),
		managed.WithInitializers(initializers),
		managed.WithConnectionPublishers(cps...),
		managed.WithPollInterval(o.PollInterval),
	}
	if o.PollJitter != 0 {
		// Add jitter so poll requests are spread out over time.
		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
	}
	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
		// Enable management policies only behind the beta feature flag.
		opts = append(opts, managed.WithManagementPolicies())
	}
	r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.TCPFarm_GroupVersionKind), opts...)

	return ctrl.NewControllerManagedBy(mgr).
		Named(name).
		WithOptions(o.ForControllerRuntime()).
		WithEventFilter(xpresource.DesiredStateChanged()).
		Watches(&v1alpha1.TCPFarm{}, eventHandler).
		Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter))
}
package tcpfarmserver

import (
	"time"

	"github.com/crossplane/crossplane-runtime/pkg/connection"
	"github.com/crossplane/crossplane-runtime/pkg/event"
	"github.com/crossplane/crossplane-runtime/pkg/ratelimiter"
	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
	tjcontroller "github.com/crossplane/upjet/pkg/controller"
	"github.com/crossplane/upjet/pkg/controller/handler"
	"github.com/crossplane/upjet/pkg/terraform"
	ctrl "sigs.k8s.io/controller-runtime"

	v1alpha1 "github.com/edixos/provider-ovh/apis/lb/v1alpha1"
	features "github.com/edixos/provider-ovh/internal/features"
)

// Setup adds a controller that reconciles TCPFarmServer managed resources.
func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
	name := managed.ControllerName(v1alpha1.TCPFarmServer_GroupVersionKind.String())
	var initializers managed.InitializerChain
	// Default the Terraform external name to the Kubernetes object name.
	initializers = append(initializers, managed.NewNameAsExternalName(mgr.GetClient()))
	// Connection details are published to Kubernetes Secrets by default.
	cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())}
	if o.SecretStoreConfigGVK != nil {
		// Also publish to an external secret store when one is configured.
		cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig)))
	}
	eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.TCPFarmServer_GroupVersionKind)))
	// API callbacks requeue the object when asynchronous Terraform operations finish.
	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.TCPFarmServer_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler))
	opts := []managed.ReconcilerOption{
		// The external connecter drives the "ovh_iploadbalancing_tcp_farm_server" Terraform resource.
		managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["ovh_iploadbalancing_tcp_farm_server"], tjcontroller.WithLogger(o.Logger), tjcontroller.WithConnectorEventHandler(eventHandler),
			tjcontroller.WithCallbackProvider(ac),
		)),
		managed.WithLogger(o.Logger.WithValues("controller", name)),
		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
		// Chain Terraform workspace cleanup onto the standard API finalizer.
		managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
		managed.WithTimeout(3 * time.Minute),
		managed.WithInitializers(initializers),
		managed.WithConnectionPublishers(cps...),
		managed.WithPollInterval(o.PollInterval),
	}
	if o.PollJitter != 0 {
		// Add jitter so poll requests are spread out over time.
		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
	}
	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
		// Enable management policies only behind the beta feature flag.
		opts = append(opts, managed.WithManagementPolicies())
	}
	r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.TCPFarmServer_GroupVersionKind), opts...)

	return ctrl.NewControllerManagedBy(mgr).
		Named(name).
		WithOptions(o.ForControllerRuntime()).
		WithEventFilter(xpresource.DesiredStateChanged()).
		Watches(&v1alpha1.TCPFarmServer{}, eventHandler).
		Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter))
}
package tcpfrontend

import (
	"time"

	"github.com/crossplane/crossplane-runtime/pkg/connection"
	"github.com/crossplane/crossplane-runtime/pkg/event"
	"github.com/crossplane/crossplane-runtime/pkg/ratelimiter"
	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
	tjcontroller "github.com/crossplane/upjet/pkg/controller"
	"github.com/crossplane/upjet/pkg/controller/handler"
	"github.com/crossplane/upjet/pkg/terraform"
	ctrl "sigs.k8s.io/controller-runtime"

	v1alpha1 "github.com/edixos/provider-ovh/apis/lb/v1alpha1"
	features "github.com/edixos/provider-ovh/internal/features"
)

// Setup adds a controller that reconciles TCPFrontend managed resources.
func Setup(mgr ctrl.Manager, o tjcontroller.Options) error {
	name := managed.ControllerName(v1alpha1.TCPFrontend_GroupVersionKind.String())
	var initializers managed.InitializerChain
	// Default the Terraform external name to the Kubernetes object name.
	initializers = append(initializers, managed.NewNameAsExternalName(mgr.GetClient()))
	// Connection details are published to Kubernetes Secrets by default.
	cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())}
	if o.SecretStoreConfigGVK != nil {
		// Also publish to an external secret store when one is configured.
		cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig)))
	}
	eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.TCPFrontend_GroupVersionKind)))
	// API callbacks requeue the object when asynchronous Terraform operations finish.
	ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.TCPFrontend_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler))
	opts := []managed.ReconcilerOption{
		// The external connecter drives the "ovh_iploadbalancing_tcp_frontend" Terraform resource.
		managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["ovh_iploadbalancing_tcp_frontend"], tjcontroller.WithLogger(o.Logger), tjcontroller.WithConnectorEventHandler(eventHandler),
			tjcontroller.WithCallbackProvider(ac),
		)),
		managed.WithLogger(o.Logger.WithValues("controller", name)),
		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
		// Chain Terraform workspace cleanup onto the standard API finalizer.
		managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))),
		managed.WithTimeout(3 * time.Minute),
		managed.WithInitializers(initializers),
		managed.WithConnectionPublishers(cps...),
		managed.WithPollInterval(o.PollInterval),
	}
	if o.PollJitter != 0 {
		// Add jitter so poll requests are spread out over time.
		opts = append(opts, managed.WithPollJitterHook(o.PollJitter))
	}
	if o.Features.Enabled(features.EnableBetaManagementPolicies) {
		// Enable management policies only behind the beta feature flag.
		opts = append(opts, managed.WithManagementPolicies())
	}
	r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.TCPFrontend_GroupVersionKind), opts...)

	return ctrl.NewControllerManagedBy(mgr).
		Named(name).
		WithOptions(o.ForControllerRuntime()).
		WithEventFilter(xpresource.DesiredStateChanged()).
		Watches(&v1alpha1.TCPFrontend{}, eventHandler).
		Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter))
}
+ +package tcproute + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/terraform" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/edixos/provider-ovh/apis/lb/v1alpha1" + features "github.com/edixos/provider-ovh/internal/features" +) + +// Setup adds a controller that reconciles TCPRoute managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.TCPRoute_GroupVersionKind.String()) + var initializers managed.InitializerChain + initializers = append(initializers, managed.NewNameAsExternalName(mgr.GetClient())) + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.TCPRoute_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.TCPRoute_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["ovh_iploadbalancing_tcp_route"], tjcontroller.WithLogger(o.Logger), tjcontroller.WithConnectorEventHandler(eventHandler), + tjcontroller.WithCallbackProvider(ac), + )), + managed.WithLogger(o.Logger.WithValues("controller", name)), + 
managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.TCPRoute_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.TCPRoute{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/lb/tcprouterule/zz_controller.go b/internal/controller/lb/tcprouterule/zz_controller.go new file mode 100755 index 0000000..75f5536 --- /dev/null +++ b/internal/controller/lb/tcprouterule/zz_controller.go @@ -0,0 +1,67 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. 
+ +package tcprouterule + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/terraform" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/edixos/provider-ovh/apis/lb/v1alpha1" + features "github.com/edixos/provider-ovh/internal/features" +) + +// Setup adds a controller that reconciles TCPRouteRule managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.TCPRouteRule_GroupVersionKind.String()) + var initializers managed.InitializerChain + initializers = append(initializers, managed.NewNameAsExternalName(mgr.GetClient())) + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.TCPRouteRule_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.TCPRouteRule_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["ovh_iploadbalancing_tcp_route_rule"], tjcontroller.WithLogger(o.Logger), tjcontroller.WithConnectorEventHandler(eventHandler), + tjcontroller.WithCallbackProvider(ac), + )), + 
managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.TCPRouteRule_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.TCPRouteRule{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/lb/vracknetwork/zz_controller.go b/internal/controller/lb/vracknetwork/zz_controller.go new file mode 100755 index 0000000..55c897c --- /dev/null +++ b/internal/controller/lb/vracknetwork/zz_controller.go @@ -0,0 +1,67 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. 
+ +package vracknetwork + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/terraform" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/edixos/provider-ovh/apis/lb/v1alpha1" + features "github.com/edixos/provider-ovh/internal/features" +) + +// Setup adds a controller that reconciles VrackNetwork managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.VrackNetwork_GroupVersionKind.String()) + var initializers managed.InitializerChain + initializers = append(initializers, managed.NewNameAsExternalName(mgr.GetClient())) + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.VrackNetwork_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.VrackNetwork_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["ovh_iploadbalancing_vrack_network"], tjcontroller.WithLogger(o.Logger), tjcontroller.WithConnectorEventHandler(eventHandler), + tjcontroller.WithCallbackProvider(ac), + )), + 
managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.VrackNetwork_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.VrackNetwork{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/logs/logscluster/zz_controller.go b/internal/controller/logs/logscluster/zz_controller.go new file mode 100755 index 0000000..8887323 --- /dev/null +++ b/internal/controller/logs/logscluster/zz_controller.go @@ -0,0 +1,67 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. 
+ +package logscluster + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/terraform" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/edixos/provider-ovh/apis/logs/v1alpha1" + features "github.com/edixos/provider-ovh/internal/features" +) + +// Setup adds a controller that reconciles LogsCluster managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.LogsCluster_GroupVersionKind.String()) + var initializers managed.InitializerChain + initializers = append(initializers, managed.NewNameAsExternalName(mgr.GetClient())) + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.LogsCluster_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.LogsCluster_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["ovh_dbaas_logs_cluster"], tjcontroller.WithLogger(o.Logger), tjcontroller.WithConnectorEventHandler(eventHandler), + tjcontroller.WithCallbackProvider(ac), + )), + managed.WithLogger(o.Logger.WithValues("controller", name)), + 
managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.LogsCluster_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.LogsCluster{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/logs/logsinput/zz_controller.go b/internal/controller/logs/logsinput/zz_controller.go new file mode 100755 index 0000000..d23cce8 --- /dev/null +++ b/internal/controller/logs/logsinput/zz_controller.go @@ -0,0 +1,67 @@ +// SPDX-FileCopyrightText: 2023 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Copyright 2022 Upbound Inc. +*/ + +// Code generated by upjet. DO NOT EDIT. 
+ +package logsinput + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/terraform" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/edixos/provider-ovh/apis/logs/v1alpha1" + features "github.com/edixos/provider-ovh/internal/features" +) + +// Setup adds a controller that reconciles LogsInput managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.LogsInput_GroupVersionKind.String()) + var initializers managed.InitializerChain + initializers = append(initializers, managed.NewNameAsExternalName(mgr.GetClient())) + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.LogsInput_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.LogsInput_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["ovh_dbaas_logs_input"], tjcontroller.WithLogger(o.Logger), tjcontroller.WithConnectorEventHandler(eventHandler), + tjcontroller.WithCallbackProvider(ac), + )), + managed.WithLogger(o.Logger.WithValues("controller", name)), + 
managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.LogsInput_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.LogsInput{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/zz_setup.go b/internal/controller/zz_setup.go index 8561921..efa639e 100755 --- a/internal/controller/zz_setup.go +++ b/internal/controller/zz_setup.go @@ -48,6 +48,10 @@ import ( zone "github.com/edixos/provider-ovh/internal/controller/dns/zone" zonerecord "github.com/edixos/provider-ovh/internal/controller/dns/zonerecord" zoneredirection "github.com/edixos/provider-ovh/internal/controller/dns/zoneredirection" + projectkube "github.com/edixos/provider-ovh/internal/controller/kube/projectkube" + projectkubeiprestrictions "github.com/edixos/provider-ovh/internal/controller/kube/projectkubeiprestrictions" + projectkubenodepool "github.com/edixos/provider-ovh/internal/controller/kube/projectkubenodepool" + projectkubeoidc "github.com/edixos/provider-ovh/internal/controller/kube/projectkubeoidc" httpfarm "github.com/edixos/provider-ovh/internal/controller/lb/httpfarm" httpfarmserver "github.com/edixos/provider-ovh/internal/controller/lb/httpfarmserver" httpfrontend 
"github.com/edixos/provider-ovh/internal/controller/lb/httpfrontend" @@ -112,6 +116,10 @@ func Setup(mgr ctrl.Manager, o controller.Options) error { zone.Setup, zonerecord.Setup, zoneredirection.Setup, + projectkube.Setup, + projectkubeiprestrictions.Setup, + projectkubenodepool.Setup, + projectkubeoidc.Setup, httpfarm.Setup, httpfarmserver.Setup, httpfrontend.Setup, diff --git a/package/crds/databases.ovh.edixos.io_projectdatabasedatabases.yaml b/package/crds/databases.ovh.edixos.io_projectdatabasedatabases.yaml new file mode 100644 index 0000000..5ec6668 --- /dev/null +++ b/package/crds/databases.ovh.edixos.io_projectdatabasedatabases.yaml @@ -0,0 +1,337 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.13.0 + name: projectdatabasedatabases.databases.ovh.edixos.io +spec: + group: databases.ovh.edixos.io + names: + categories: + - crossplane + - managed + - ovh + kind: ProjectDatabaseDatabase + listKind: ProjectDatabaseDatabaseList + plural: projectdatabasedatabases + singular: projectdatabasedatabase + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: ProjectDatabaseDatabase is the Schema for the ProjectDatabaseDatabases + API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ProjectDatabaseDatabaseSpec defines the desired state of + ProjectDatabaseDatabase + properties: + deletionPolicy: + default: Delete + description: 'DeletionPolicy specifies what will happen to the underlying + external when this managed resource is deleted - either "Delete" + or "Orphan" the external resource. This field is planned to be deprecated + in favor of the ManagementPolicies field in a future release. Currently, + both could be set independently and non-default values would be + honored if the feature flag is enabled. See the design doc for more + information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223' + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + clusterId: + description: Id of the database cluster + type: string + engine: + description: Name of the engine of the service + type: string + serviceName: + type: string + type: object + initProvider: + description: THIS IS A BETA FIELD. It will be honored unless the Management + Policies feature flag is disabled. InitProvider holds the same fields + as ForProvider, with the exception of Identifier and other resource + reference fields. The fields that are in InitProvider are merged + into ForProvider when the resource is created. The same fields are + also added to the terraform ignore_changes hook, to avoid updating + them after creation. 
This is useful for fields that are required + on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, + like an autoscaler. + properties: + clusterId: + description: Id of the database cluster + type: string + engine: + description: Name of the engine of the service + type: string + serviceName: + type: string + type: object + managementPolicies: + default: + - '*' + description: 'THIS IS A BETA FIELD. It is on by default but can be + opted out through a Crossplane feature flag. ManagementPolicies + specify the array of actions Crossplane is allowed to take on the + managed and external resources. This field is planned to replace + the DeletionPolicy field in a future release. Currently, both could + be set independently and non-default values would be honored if + the feature flag is enabled. If both are custom, the DeletionPolicy + field will be ignored. See the design doc for more information: + https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md' + items: + description: A ManagementAction represents an action that the Crossplane + controllers can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: ProviderConfigReference specifies how the provider that + will be used to create, observe, update, and delete this managed + resource should be configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. 
The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: PublishConnectionDetailsTo specifies the connection secret + config which contains a name, metadata and a reference to secret + store config to which any connection details for this managed resource + should be written. Connection details frequently include the endpoint, + username, and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: SecretStoreConfigRef specifies which secret store + config should be used for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are the annotations to be added to + connection secret. - For Kubernetes secrets, this will be + used as "metadata.annotations". - It is up to Secret Store + implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: Labels are the labels/tags to be added to connection + secret. - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store + types. + type: object + type: + description: Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: WriteConnectionSecretToReference specifies the namespace + and name of a Secret to which any connection details for this managed + resource should be written. Connection details frequently include + the endpoint, username, and password required to connect to the + managed resource. This field is planned to be replaced in a future + release in favor of PublishConnectionDetailsTo. Currently, both + could be set independently and connection details would be published + to both without affecting each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.clusterId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.clusterId) + || (has(self.initProvider) && has(self.initProvider.clusterId))' + - message: spec.forProvider.engine is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.engine) + || (has(self.initProvider) && has(self.initProvider.engine))' + - message: spec.forProvider.serviceName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.serviceName) + || (has(self.initProvider) && has(self.initProvider.serviceName))' + status: + description: ProjectDatabaseDatabaseStatus defines the observed state + of ProjectDatabaseDatabase. + properties: + atProvider: + properties: + clusterId: + description: Id of the database cluster + type: string + default: + description: Defines if the database has been created by default + type: boolean + engine: + description: Name of the engine of the service + type: string + id: + type: string + serviceName: + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. 
+ type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/databases.ovh.edixos.io_projectdatabaseintegrations.yaml b/package/crds/databases.ovh.edixos.io_projectdatabaseintegrations.yaml new file mode 100644 index 0000000..8206cf9 --- /dev/null +++ b/package/crds/databases.ovh.edixos.io_projectdatabaseintegrations.yaml @@ -0,0 +1,387 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.13.0 + name: projectdatabaseintegrations.databases.ovh.edixos.io +spec: + group: databases.ovh.edixos.io + names: + categories: + - crossplane + - managed + - ovh + kind: ProjectDatabaseIntegration + listKind: ProjectDatabaseIntegrationList + plural: projectdatabaseintegrations + singular: projectdatabaseintegration + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: ProjectDatabaseIntegration is the Schema for the ProjectDatabaseIntegrations + API. 
+ properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ProjectDatabaseIntegrationSpec defines the desired state + of ProjectDatabaseIntegration + properties: + deletionPolicy: + default: Delete + description: 'DeletionPolicy specifies what will happen to the underlying + external when this managed resource is deleted - either "Delete" + or "Orphan" the external resource. This field is planned to be deprecated + in favor of the ManagementPolicies field in a future release. Currently, + both could be set independently and non-default values would be + honored if the feature flag is enabled. 
See the design doc for more + information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223' + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + clusterId: + description: Id of the database cluster + type: string + destinationServiceId: + description: ID of the destination service + type: string + engine: + description: Name of the engine of the service + type: string + parameters: + additionalProperties: + type: string + description: Parameters for the integration + type: object + serviceName: + type: string + sourceServiceId: + description: ID of the source service + type: string + type: + description: Type of the integration + type: string + type: object + initProvider: + description: THIS IS A BETA FIELD. It will be honored unless the Management + Policies feature flag is disabled. InitProvider holds the same fields + as ForProvider, with the exception of Identifier and other resource + reference fields. The fields that are in InitProvider are merged + into ForProvider when the resource is created. The same fields are + also added to the terraform ignore_changes hook, to avoid updating + them after creation. This is useful for fields that are required + on creation, but we do not desire to update them after creation, + for example because an external controller is managing them, + like an autoscaler. 
+ properties: + clusterId: + description: Id of the database cluster + type: string + destinationServiceId: + description: ID of the destination service + type: string + engine: + description: Name of the engine of the service + type: string + parameters: + additionalProperties: + type: string + description: Parameters for the integration + type: object + serviceName: + type: string + sourceServiceId: + description: ID of the source service + type: string + type: + description: Type of the integration + type: string + type: object + managementPolicies: + default: + - '*' + description: 'THIS IS A BETA FIELD. It is on by default but can be + opted out through a Crossplane feature flag. ManagementPolicies + specify the array of actions Crossplane is allowed to take on the + managed and external resources. This field is planned to replace + the DeletionPolicy field in a future release. Currently, both could + be set independently and non-default values would be honored if + the feature flag is enabled. If both are custom, the DeletionPolicy + field will be ignored. See the design doc for more information: + https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md' + items: + description: A ManagementAction represents an action that the Crossplane + controllers can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: ProviderConfigReference specifies how the provider that + will be used to create, observe, update, and delete this managed + resource should be configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: PublishConnectionDetailsTo specifies the connection secret + config which contains a name, metadata and a reference to secret + store config to which any connection details for this managed resource + should be written. Connection details frequently include the endpoint, + username, and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: SecretStoreConfigRef specifies which secret store + config should be used for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. 
The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are the annotations to be added to + connection secret. - For Kubernetes secrets, this will be + used as "metadata.annotations". - It is up to Secret Store + implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: Labels are the labels/tags to be added to connection + secret. - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store + types. + type: object + type: + description: Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: WriteConnectionSecretToReference specifies the namespace + and name of a Secret to which any connection details for this managed + resource should be written. Connection details frequently include + the endpoint, username, and password required to connect to the + managed resource. This field is planned to be replaced in a future + release in favor of PublishConnectionDetailsTo. Currently, both + could be set independently and connection details would be published + to both without affecting each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.clusterId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.clusterId) + || (has(self.initProvider) && has(self.initProvider.clusterId))' + - message: spec.forProvider.destinationServiceId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.destinationServiceId) + || (has(self.initProvider) && has(self.initProvider.destinationServiceId))' + - message: spec.forProvider.engine is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.engine) + || (has(self.initProvider) && has(self.initProvider.engine))' + - message: spec.forProvider.serviceName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.serviceName) + || (has(self.initProvider) && has(self.initProvider.serviceName))' + - message: spec.forProvider.sourceServiceId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.sourceServiceId) + || (has(self.initProvider) && has(self.initProvider.sourceServiceId))' + status: + description: ProjectDatabaseIntegrationStatus defines the observed state + of ProjectDatabaseIntegration. 
+ properties: + atProvider: + properties: + clusterId: + description: Id of the database cluster + type: string + destinationServiceId: + description: ID of the destination service + type: string + engine: + description: Name of the engine of the service + type: string + id: + type: string + parameters: + additionalProperties: + type: string + description: Parameters for the integration + type: object + serviceName: + type: string + sourceServiceId: + description: ID of the source service + type: string + status: + description: Current status of the integration + type: string + type: + description: Type of the integration + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/databases.ovh.edixos.io_projectdatabaseiprestrictions.yaml b/package/crds/databases.ovh.edixos.io_projectdatabaseiprestrictions.yaml new file mode 100644 index 0000000..76adff2 --- /dev/null +++ b/package/crds/databases.ovh.edixos.io_projectdatabaseiprestrictions.yaml @@ -0,0 +1,359 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.13.0 + name: projectdatabaseiprestrictions.databases.ovh.edixos.io +spec: + group: databases.ovh.edixos.io + names: + categories: + - crossplane + - managed + - ovh + kind: ProjectDatabaseIPRestriction + listKind: ProjectDatabaseIPRestrictionList + plural: projectdatabaseiprestrictions + singular: projectdatabaseiprestriction + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: ProjectDatabaseIPRestriction is the Schema for the ProjectDatabaseIPRestrictions + API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ProjectDatabaseIPRestrictionSpec defines the desired state + of ProjectDatabaseIPRestriction + properties: + deletionPolicy: + default: Delete + description: 'DeletionPolicy specifies what will happen to the underlying + external when this managed resource is deleted - either "Delete" + or "Orphan" the external resource. This field is planned to be deprecated + in favor of the ManagementPolicies field in a future release. Currently, + both could be set independently and non-default values would be + honored if the feature flag is enabled. See the design doc for more + information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223' + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + clusterId: + description: Id of the database cluster + type: string + description: + description: Description of the IP restriction + type: string + engine: + description: Name of the engine of the service + type: string + ip: + description: Authorized IP + type: string + serviceName: + type: string + type: object + initProvider: + description: THIS IS A BETA FIELD. It will be honored unless the Management + Policies feature flag is disabled. InitProvider holds the same fields + as ForProvider, with the exception of Identifier and other resource + reference fields. The fields that are in InitProvider are merged + into ForProvider when the resource is created. 
The same fields are + also added to the terraform ignore_changes hook, to avoid updating + them after creation. This is useful for fields that are required + on creation, but we do not desire to update them after creation, + for example because an external controller is managing them, + like an autoscaler. + properties: + clusterId: + description: Id of the database cluster + type: string + description: + description: Description of the IP restriction + type: string + engine: + description: Name of the engine of the service + type: string + ip: + description: Authorized IP + type: string + serviceName: + type: string + type: object + managementPolicies: + default: + - '*' + description: 'THIS IS A BETA FIELD. It is on by default but can be + opted out through a Crossplane feature flag. ManagementPolicies + specify the array of actions Crossplane is allowed to take on the + managed and external resources. This field is planned to replace + the DeletionPolicy field in a future release. Currently, both could + be set independently and non-default values would be honored if + the feature flag is enabled. If both are custom, the DeletionPolicy + field will be ignored. See the design doc for more information: + https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md' + items: + description: A ManagementAction represents an action that the Crossplane + controllers can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: ProviderConfigReference specifies how the provider that + will be used to create, observe, update, and delete this managed + resource should be configured. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: PublishConnectionDetailsTo specifies the connection secret + config which contains a name, metadata and a reference to secret + store config to which any connection details for this managed resource + should be written. Connection details frequently include the endpoint, + username, and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: SecretStoreConfigRef specifies which secret store + config should be used for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are the annotations to be added to + connection secret. - For Kubernetes secrets, this will be + used as "metadata.annotations". - It is up to Secret Store + implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: Labels are the labels/tags to be added to connection + secret. - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store + types. + type: object + type: + description: Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: WriteConnectionSecretToReference specifies the namespace + and name of a Secret to which any connection details for this managed + resource should be written. Connection details frequently include + the endpoint, username, and password required to connect to the + managed resource. This field is planned to be replaced in a future + release in favor of PublishConnectionDetailsTo. Currently, both + could be set independently and connection details would be published + to both without affecting each other. + properties: + name: + description: Name of the secret. 
+ type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.clusterId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.clusterId) + || (has(self.initProvider) && has(self.initProvider.clusterId))' + - message: spec.forProvider.engine is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.engine) + || (has(self.initProvider) && has(self.initProvider.engine))' + - message: spec.forProvider.ip is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.ip) + || (has(self.initProvider) && has(self.initProvider.ip))' + - message: spec.forProvider.serviceName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.serviceName) + || (has(self.initProvider) && has(self.initProvider.serviceName))' + status: + description: ProjectDatabaseIPRestrictionStatus defines the observed state + of ProjectDatabaseIPRestriction. + properties: + atProvider: + properties: + clusterId: + description: Id of the database cluster + type: string + description: + description: Description of the IP restriction + type: string + engine: + description: Name of the engine of the service + type: string + id: + type: string + ip: + description: Authorized IP + type: string + serviceName: + type: string + status: + description: Current status of the IP restriction + type: string + type: object + conditions: + description: Conditions of the resource. 
+ items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/databases.ovh.edixos.io_projectdatabasekafkaacls.yaml b/package/crds/databases.ovh.edixos.io_projectdatabasekafkaacls.yaml new file mode 100644 index 0000000..04864a9 --- /dev/null +++ b/package/crds/databases.ovh.edixos.io_projectdatabasekafkaacls.yaml @@ -0,0 +1,360 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.13.0 + name: projectdatabasekafkaacls.databases.ovh.edixos.io +spec: + group: databases.ovh.edixos.io + names: + categories: + - crossplane + - managed + - ovh + kind: ProjectDatabaseKafkaACL + listKind: ProjectDatabaseKafkaACLList + plural: projectdatabasekafkaacls + singular: projectdatabasekafkaacl + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: 
SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: ProjectDatabaseKafkaACL is the Schema for the ProjectDatabaseKafkaACLs + API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ProjectDatabaseKafkaACLSpec defines the desired state of + ProjectDatabaseKafkaACL + properties: + deletionPolicy: + default: Delete + description: 'DeletionPolicy specifies what will happen to the underlying + external when this managed resource is deleted - either "Delete" + or "Orphan" the external resource. This field is planned to be deprecated + in favor of the ManagementPolicies field in a future release. Currently, + both could be set independently and non-default values would be + honored if the feature flag is enabled. 
+ See the design doc for more + information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223' + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + clusterId: + description: Id of the database cluster + type: string + permission: + description: Permission to give to this username on this topic + type: string + serviceName: + type: string + topic: + description: Topic affected by this acl + type: string + username: + description: Username affected by this acl + type: string + type: object + initProvider: + description: THIS IS A BETA FIELD. It will be honored unless the Management + Policies feature flag is disabled. InitProvider holds the same fields + as ForProvider, with the exception of Identifier and other resource + reference fields. The fields that are in InitProvider are merged + into ForProvider when the resource is created. The same fields are + also added to the terraform ignore_changes hook, to avoid updating + them after creation. This is useful for fields that are required + on creation, but we do not desire to update them after creation, + for example because an external controller is managing them, + like an autoscaler. + properties: + clusterId: + description: Id of the database cluster + type: string + permission: + description: Permission to give to this username on this topic + type: string + serviceName: + type: string + topic: + description: Topic affected by this acl + type: string + username: + description: Username affected by this acl + type: string + type: object + managementPolicies: + default: + - '*' + description: 'THIS IS A BETA FIELD. It is on by default but can be + opted out through a Crossplane feature flag. ManagementPolicies + specify the array of actions Crossplane is allowed to take on the + managed and external resources. This field is planned to replace + the DeletionPolicy field in a future release. 
Currently, both could + be set independently and non-default values would be honored if + the feature flag is enabled. If both are custom, the DeletionPolicy + field will be ignored. See the design doc for more information: + https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md' + items: + description: A ManagementAction represents an action that the Crossplane + controllers can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: ProviderConfigReference specifies how the provider that + will be used to create, observe, update, and delete this managed + resource should be configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: PublishConnectionDetailsTo specifies the connection secret + config which contains a name, metadata and a reference to secret + store config to which any connection details for this managed resource + should be written. Connection details frequently include the endpoint, + username, and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: SecretStoreConfigRef specifies which secret store + config should be used for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are the annotations to be added to + connection secret. - For Kubernetes secrets, this will be + used as "metadata.annotations". - It is up to Secret Store + implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: Labels are the labels/tags to be added to connection + secret. - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store + types. + type: object + type: + description: Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: WriteConnectionSecretToReference specifies the namespace + and name of a Secret to which any connection details for this managed + resource should be written. Connection details frequently include + the endpoint, username, and password required to connect to the + managed resource. This field is planned to be replaced in a future + release in favor of PublishConnectionDetailsTo. Currently, both + could be set independently and connection details would be published + to both without affecting each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.clusterId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.clusterId) + || (has(self.initProvider) && has(self.initProvider.clusterId))' + - message: spec.forProvider.permission is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.permission) + || (has(self.initProvider) && has(self.initProvider.permission))' + - message: spec.forProvider.serviceName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.serviceName) + || (has(self.initProvider) && has(self.initProvider.serviceName))' + - message: spec.forProvider.topic is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.topic) + || (has(self.initProvider) && has(self.initProvider.topic))' + - message: spec.forProvider.username is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.username) + || (has(self.initProvider) && has(self.initProvider.username))' + status: + description: ProjectDatabaseKafkaACLStatus defines the observed state + of ProjectDatabaseKafkaACL. 
+ properties: + atProvider: + properties: + clusterId: + description: Id of the database cluster + type: string + id: + type: string + permission: + description: Permission to give to this username on this topic + type: string + serviceName: + type: string + topic: + description: Topic affected by this acl + type: string + username: + description: Username affected by this acl + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/databases.ovh.edixos.io_projectdatabasekafkaschemaregistryacls.yaml b/package/crds/databases.ovh.edixos.io_projectdatabasekafkaschemaregistryacls.yaml new file mode 100644 index 0000000..05c5d38 --- /dev/null +++ b/package/crds/databases.ovh.edixos.io_projectdatabasekafkaschemaregistryacls.yaml @@ -0,0 +1,360 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.13.0 + name: projectdatabasekafkaschemaregistryacls.databases.ovh.edixos.io +spec: + group: databases.ovh.edixos.io + names: + categories: + - crossplane + - managed + - ovh + kind: ProjectDatabaseKafkaSchemaregistryacl + listKind: ProjectDatabaseKafkaSchemaregistryaclList + plural: projectdatabasekafkaschemaregistryacls + singular: projectdatabasekafkaschemaregistryacl + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: ProjectDatabaseKafkaSchemaregistryacl is the Schema for the ProjectDatabaseKafkaSchemaregistryacls + API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ProjectDatabaseKafkaSchemaregistryaclSpec defines the desired + state of ProjectDatabaseKafkaSchemaregistryacl + properties: + deletionPolicy: + default: Delete + description: 'DeletionPolicy specifies what will happen to the underlying + external when this managed resource is deleted - either "Delete" + or "Orphan" the external resource. This field is planned to be deprecated + in favor of the ManagementPolicies field in a future release. Currently, + both could be set independently and non-default values would be + honored if the feature flag is enabled. See the design doc for more + information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223' + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + clusterId: + description: Id of the database cluster + type: string + permission: + description: Permission to give to this username on this resource + type: string + resource: + description: Resource affected by this acl + type: string + serviceName: + type: string + username: + description: Username affected by this acl + type: string + type: object + initProvider: + description: THIS IS A BETA FIELD. It will be honored unless the Management + Policies feature flag is disabled. InitProvider holds the same fields + as ForProvider, with the exception of Identifier and other resource + reference fields. 
 The fields that are in InitProvider are merged + into ForProvider when the resource is created. The same fields are + also added to the terraform ignore_changes hook, to avoid updating + them after creation. This is useful for fields that are required + on creation, but we do not desire to update them after creation, + for example because an external controller is managing them, + like an autoscaler. + properties: + clusterId: + description: Id of the database cluster + type: string + permission: + description: Permission to give to this username on this resource + type: string + resource: + description: Resource affected by this acl + type: string + serviceName: + type: string + username: + description: Username affected by this acl + type: string + type: object + managementPolicies: + default: + - '*' + description: 'THIS IS A BETA FIELD. It is on by default but can be + opted out through a Crossplane feature flag. ManagementPolicies + specify the array of actions Crossplane is allowed to take on the + managed and external resources. This field is planned to replace + the DeletionPolicy field in a future release. Currently, both could + be set independently and non-default values would be honored if + the feature flag is enabled. If both are custom, the DeletionPolicy + field will be ignored. See the design doc for more information: + https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md' + items: + description: A ManagementAction represents an action that the Crossplane + controllers can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: ProviderConfigReference specifies how the provider that + will be used to create, observe, update, and delete this managed + resource should be configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: PublishConnectionDetailsTo specifies the connection secret + config which contains a name, metadata and a reference to secret + store config to which any connection details for this managed resource + should be written. Connection details frequently include the endpoint, + username, and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: SecretStoreConfigRef specifies which secret store + config should be used for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are the annotations to be added to + connection secret. - For Kubernetes secrets, this will be + used as "metadata.annotations". - It is up to Secret Store + implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: Labels are the labels/tags to be added to connection + secret. - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store + types. + type: object + type: + description: Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: WriteConnectionSecretToReference specifies the namespace + and name of a Secret to which any connection details for this managed + resource should be written. Connection details frequently include + the endpoint, username, and password required to connect to the + managed resource. 
This field is planned to be replaced in a future + release in favor of PublishConnectionDetailsTo. Currently, both + could be set independently and connection details would be published + to both without affecting each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.clusterId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.clusterId) + || (has(self.initProvider) && has(self.initProvider.clusterId))' + - message: spec.forProvider.permission is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.permission) + || (has(self.initProvider) && has(self.initProvider.permission))' + - message: spec.forProvider.resource is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.resource) + || (has(self.initProvider) && has(self.initProvider.resource))' + - message: spec.forProvider.serviceName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.serviceName) + || (has(self.initProvider) && has(self.initProvider.serviceName))' + - message: spec.forProvider.username is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.username) + || (has(self.initProvider) && has(self.initProvider.username))' + status: + description: 
ProjectDatabaseKafkaSchemaregistryaclStatus defines the observed + state of ProjectDatabaseKafkaSchemaregistryacl. + properties: + atProvider: + properties: + clusterId: + description: Id of the database cluster + type: string + id: + type: string + permission: + description: Permission to give to this username on this resource + type: string + resource: + description: Resource affected by this acl + type: string + serviceName: + type: string + username: + description: Username affected by this acl + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/databases.ovh.edixos.io_projectdatabasekafkatopics.yaml b/package/crds/databases.ovh.edixos.io_projectdatabasekafkatopics.yaml new file mode 100644 index 0000000..fe501b7 --- /dev/null +++ b/package/crds/databases.ovh.edixos.io_projectdatabasekafkatopics.yaml @@ -0,0 +1,372 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.13.0 + name: projectdatabasekafkatopics.databases.ovh.edixos.io +spec: + group: databases.ovh.edixos.io + names: + categories: + - crossplane + - managed + - ovh + kind: ProjectDatabaseKafkaTopic + listKind: ProjectDatabaseKafkaTopicList + plural: projectdatabasekafkatopics + singular: projectdatabasekafkatopic + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: ProjectDatabaseKafkaTopic is the Schema for the ProjectDatabaseKafkaTopics + API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ProjectDatabaseKafkaTopicSpec defines the desired state of + ProjectDatabaseKafkaTopic + properties: + deletionPolicy: + default: Delete + description: 'DeletionPolicy specifies what will happen to the underlying + external when this managed resource is deleted - either "Delete" + or "Orphan" the external resource. This field is planned to be deprecated + in favor of the ManagementPolicies field in a future release. Currently, + both could be set independently and non-default values would be + honored if the feature flag is enabled. See the design doc for more + information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223' + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + clusterId: + description: Id of the database cluster + type: string + minInsyncReplicas: + description: Minimum insync replica accepted for this topic + type: number + partitions: + description: Number of partitions for this topic + type: number + replication: + description: Number of replication for this topic + type: number + retentionBytes: + description: Number of bytes for the retention of the data for + this topic + type: number + retentionHours: + description: Number of hours for the retention of the data for + this topic + type: number + serviceName: + type: string + type: object + initProvider: + description: THIS IS A BETA FIELD. 
 It will be honored unless the Management + Policies feature flag is disabled. InitProvider holds the same fields + as ForProvider, with the exception of Identifier and other resource + reference fields. The fields that are in InitProvider are merged + into ForProvider when the resource is created. The same fields are + also added to the terraform ignore_changes hook, to avoid updating + them after creation. This is useful for fields that are required + on creation, but we do not desire to update them after creation, + for example because an external controller is managing them, + like an autoscaler. + properties: + clusterId: + description: Id of the database cluster + type: string + minInsyncReplicas: + description: Minimum insync replica accepted for this topic + type: number + partitions: + description: Number of partitions for this topic + type: number + replication: + description: Number of replication for this topic + type: number + retentionBytes: + description: Number of bytes for the retention of the data for + this topic + type: number + retentionHours: + description: Number of hours for the retention of the data for + this topic + type: number + serviceName: + type: string + type: object + managementPolicies: + default: + - '*' + description: 'THIS IS A BETA FIELD. It is on by default but can be + opted out through a Crossplane feature flag. ManagementPolicies + specify the array of actions Crossplane is allowed to take on the + managed and external resources. This field is planned to replace + the DeletionPolicy field in a future release. Currently, both could + be set independently and non-default values would be honored if + the feature flag is enabled. If both are custom, the DeletionPolicy + field will be ignored. 
See the design doc for more information: + https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md' + items: + description: A ManagementAction represents an action that the Crossplane + controllers can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: ProviderConfigReference specifies how the provider that + will be used to create, observe, update, and delete this managed + resource should be configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: PublishConnectionDetailsTo specifies the connection secret + config which contains a name, metadata and a reference to secret + store config to which any connection details for this managed resource + should be written. 
Connection details frequently include the endpoint, + username, and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: SecretStoreConfigRef specifies which secret store + config should be used for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are the annotations to be added to + connection secret. - For Kubernetes secrets, this will be + used as "metadata.annotations". - It is up to Secret Store + implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: Labels are the labels/tags to be added to connection + secret. - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store + types. + type: object + type: + description: Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: WriteConnectionSecretToReference specifies the namespace + and name of a Secret to which any connection details for this managed + resource should be written. Connection details frequently include + the endpoint, username, and password required to connect to the + managed resource. This field is planned to be replaced in a future + release in favor of PublishConnectionDetailsTo. Currently, both + could be set independently and connection details would be published + to both without affecting each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.clusterId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.clusterId) + || (has(self.initProvider) && has(self.initProvider.clusterId))' + - message: spec.forProvider.serviceName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.serviceName) + || (has(self.initProvider) && has(self.initProvider.serviceName))' + status: + description: ProjectDatabaseKafkaTopicStatus defines the observed state + of ProjectDatabaseKafkaTopic. 
+ properties: + atProvider: + properties: + clusterId: + description: Id of the database cluster + type: string + id: + type: string + minInsyncReplicas: + description: Minimum insync replica accepted for this topic + type: number + partitions: + description: Number of partitions for this topic + type: number + replication: + description: Number of replication for this topic + type: number + retentionBytes: + description: Number of bytes for the retention of the data for + this topic + type: number + retentionHours: + description: Number of hours for the retention of the data for + this topic + type: number + serviceName: + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/databases.ovh.edixos.io_projectdatabasem3dbnamespaces.yaml b/package/crds/databases.ovh.edixos.io_projectdatabasem3dbnamespaces.yaml new file mode 100644 index 0000000..0aa4d49 --- /dev/null +++ b/package/crds/databases.ovh.edixos.io_projectdatabasem3dbnamespaces.yaml @@ -0,0 +1,422 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.13.0 + name: projectdatabasem3dbnamespaces.databases.ovh.edixos.io +spec: + group: databases.ovh.edixos.io + names: + categories: + - crossplane + - managed + - ovh + kind: ProjectDatabaseM3DbNamespace + listKind: ProjectDatabaseM3DbNamespaceList + plural: projectdatabasem3dbnamespaces + singular: projectdatabasem3dbnamespace + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: ProjectDatabaseM3DbNamespace is the Schema for the ProjectDatabaseM3DbNamespaces + API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ProjectDatabaseM3DbNamespaceSpec defines the desired state + of ProjectDatabaseM3DbNamespace + properties: + deletionPolicy: + default: Delete + description: 'DeletionPolicy specifies what will happen to the underlying + external when this managed resource is deleted - either "Delete" + or "Orphan" the external resource. This field is planned to be deprecated + in favor of the ManagementPolicies field in a future release. Currently, + both could be set independently and non-default values would be + honored if the feature flag is enabled. 
See the design doc for more + information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223' + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + clusterId: + description: Id of the database cluster + type: string + resolution: + description: Resolution for an aggregated namespace + type: string + retentionBlockDataExpirationDuration: + description: Controls how long we wait before expiring stale data + type: string + retentionBlockSizeDuration: + description: Controls how long to keep a block in memory before + flushing to a fileset on disk + type: string + retentionBufferFutureDuration: + description: Controls how far into the future writes to the namespace + will be accepted + type: string + retentionBufferPastDuration: + description: Controls how far into the past writes to the namespace + will be accepted + type: string + retentionPeriodDuration: + description: Controls the duration of time that M3DB will retain + data for the namespace + type: string + serviceName: + type: string + snapshotEnabled: + description: Defines whether M3db will create snapshot files for + this namespace + type: boolean + writesToCommitLogEnabled: + description: Defines whether M3db will include writes to this + namespace in the commit log + type: boolean + type: object + initProvider: + description: THIS IS A BETA FIELD. It will be honored unless the Management + Policies feature flag is disabled. InitProvider holds the same fields + as ForProvider, with the exception of Identifier and other resource + reference fields. The fields that are in InitProvider are merged + into ForProvider when the resource is created. The same fields are + also added to the terraform ignore_changes hook, to avoid updating + them after creation. 
This is useful for fields that are required + on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, + like an autoscaler. + properties: + clusterId: + description: Id of the database cluster + type: string + resolution: + description: Resolution for an aggregated namespace + type: string + retentionBlockDataExpirationDuration: + description: Controls how long we wait before expiring stale data + type: string + retentionBlockSizeDuration: + description: Controls how long to keep a block in memory before + flushing to a fileset on disk + type: string + retentionBufferFutureDuration: + description: Controls how far into the future writes to the namespace + will be accepted + type: string + retentionBufferPastDuration: + description: Controls how far into the past writes to the namespace + will be accepted + type: string + retentionPeriodDuration: + description: Controls the duration of time that M3DB will retain + data for the namespace + type: string + serviceName: + type: string + snapshotEnabled: + description: Defines whether M3db will create snapshot files for + this namespace + type: boolean + writesToCommitLogEnabled: + description: Defines whether M3db will include writes to this + namespace in the commit log + type: boolean + type: object + managementPolicies: + default: + - '*' + description: 'THIS IS A BETA FIELD. It is on by default but can be + opted out through a Crossplane feature flag. ManagementPolicies + specify the array of actions Crossplane is allowed to take on the + managed and external resources. This field is planned to replace + the DeletionPolicy field in a future release. Currently, both could + be set independently and non-default values would be honored if + the feature flag is enabled. If both are custom, the DeletionPolicy + field will be ignored. 
See the design doc for more information: + https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md' + items: + description: A ManagementAction represents an action that the Crossplane + controllers can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: ProviderConfigReference specifies how the provider that + will be used to create, observe, update, and delete this managed + resource should be configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: PublishConnectionDetailsTo specifies the connection secret + config which contains a name, metadata and a reference to secret + store config to which any connection details for this managed resource + should be written. 
Connection details frequently include the endpoint, + username, and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: SecretStoreConfigRef specifies which secret store + config should be used for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are the annotations to be added to + connection secret. - For Kubernetes secrets, this will be + used as "metadata.annotations". - It is up to Secret Store + implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: Labels are the labels/tags to be added to connection + secret. - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store + types. + type: object + type: + description: Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: WriteConnectionSecretToReference specifies the namespace + and name of a Secret to which any connection details for this managed + resource should be written. Connection details frequently include + the endpoint, username, and password required to connect to the + managed resource. This field is planned to be replaced in a future + release in favor of PublishConnectionDetailsTo. Currently, both + could be set independently and connection details would be published + to both without affecting each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.clusterId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.clusterId) + || (has(self.initProvider) && has(self.initProvider.clusterId))' + - message: spec.forProvider.resolution is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.resolution) + || (has(self.initProvider) && has(self.initProvider.resolution))' + - message: spec.forProvider.retentionPeriodDuration is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.retentionPeriodDuration) + || (has(self.initProvider) && has(self.initProvider.retentionPeriodDuration))' + - message: spec.forProvider.serviceName is a required parameter + rule: '!(''*'' in 
self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.serviceName) + || (has(self.initProvider) && has(self.initProvider.serviceName))' + status: + description: ProjectDatabaseM3DbNamespaceStatus defines the observed state + of ProjectDatabaseM3DbNamespace. + properties: + atProvider: + properties: + clusterId: + description: Id of the database cluster + type: string + id: + type: string + resolution: + description: Resolution for an aggregated namespace + type: string + retentionBlockDataExpirationDuration: + description: Controls how long we wait before expiring stale data + type: string + retentionBlockSizeDuration: + description: Controls how long to keep a block in memory before + flushing to a fileset on disk + type: string + retentionBufferFutureDuration: + description: Controls how far into the future writes to the namespace + will be accepted + type: string + retentionBufferPastDuration: + description: Controls how far into the past writes to the namespace + will be accepted + type: string + retentionPeriodDuration: + description: Controls the duration of time that M3DB will retain + data for the namespace + type: string + serviceName: + type: string + snapshotEnabled: + description: Defines whether M3db will create snapshot files for + this namespace + type: boolean + type: + description: Type of namespace + type: string + writesToCommitLogEnabled: + description: Defines whether M3db will include writes to this + namespace in the commit log + type: boolean + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. 
+ format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/databases.ovh.edixos.io_projectdatabasem3dbusers.yaml b/package/crds/databases.ovh.edixos.io_projectdatabasem3dbusers.yaml new file mode 100644 index 0000000..134cd6a --- /dev/null +++ b/package/crds/databases.ovh.edixos.io_projectdatabasem3dbusers.yaml @@ -0,0 +1,348 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.13.0 + name: projectdatabasem3dbusers.databases.ovh.edixos.io +spec: + group: databases.ovh.edixos.io + names: + categories: + - crossplane + - managed + - ovh + kind: ProjectDatabaseM3DbUser + listKind: ProjectDatabaseM3DbUserList + plural: projectdatabasem3dbusers + singular: projectdatabasem3dbuser + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + 
schema: + openAPIV3Schema: + description: ProjectDatabaseM3DbUser is the Schema for the ProjectDatabaseM3DbUsers + API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ProjectDatabaseM3DbUserSpec defines the desired state of + ProjectDatabaseM3DbUser + properties: + deletionPolicy: + default: Delete + description: 'DeletionPolicy specifies what will happen to the underlying + external when this managed resource is deleted - either "Delete" + or "Orphan" the external resource. This field is planned to be deprecated + in favor of the ManagementPolicies field in a future release. Currently, + both could be set independently and non-default values would be + honored if the feature flag is enabled. See the design doc for more + information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223' + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + clusterId: + description: Id of the database cluster + type: string + group: + description: Group of the user + type: string + passwordReset: + description: Arbitrary string to change to trigger a password + update + type: string + serviceName: + type: string + type: object + initProvider: + description: THIS IS A BETA FIELD. 
It will be honored unless the Management + Policies feature flag is disabled. InitProvider holds the same fields + as ForProvider, with the exception of Identifier and other resource + reference fields. The fields that are in InitProvider are merged + into ForProvider when the resource is created. The same fields are + also added to the terraform ignore_changes hook, to avoid updating + them after creation. This is useful for fields that are required + on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, + like an autoscaler. + properties: + clusterId: + description: Id of the database cluster + type: string + group: + description: Group of the user + type: string + passwordReset: + description: Arbitrary string to change to trigger a password + update + type: string + serviceName: + type: string + type: object + managementPolicies: + default: + - '*' + description: 'THIS IS A BETA FIELD. It is on by default but can be + opted out through a Crossplane feature flag. ManagementPolicies + specify the array of actions Crossplane is allowed to take on the + managed and external resources. This field is planned to replace + the DeletionPolicy field in a future release. Currently, both could + be set independently and non-default values would be honored if + the feature flag is enabled. If both are custom, the DeletionPolicy + field will be ignored. See the design doc for more information: + https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md' + items: + description: A ManagementAction represents an action that the Crossplane + controllers can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: ProviderConfigReference specifies how the provider that + will be used to create, observe, update, and delete this managed + resource should be configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: PublishConnectionDetailsTo specifies the connection secret + config which contains a name, metadata and a reference to secret + store config to which any connection details for this managed resource + should be written. Connection details frequently include the endpoint, + username, and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: SecretStoreConfigRef specifies which secret store + config should be used for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are the annotations to be added to + connection secret. - For Kubernetes secrets, this will be + used as "metadata.annotations". - It is up to Secret Store + implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: Labels are the labels/tags to be added to connection + secret. - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store + types. + type: object + type: + description: Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: WriteConnectionSecretToReference specifies the namespace + and name of a Secret to which any connection details for this managed + resource should be written. Connection details frequently include + the endpoint, username, and password required to connect to the + managed resource. 
This field is planned to be replaced in a future + release in favor of PublishConnectionDetailsTo. Currently, both + could be set independently and connection details would be published + to both without affecting each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.clusterId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.clusterId) + || (has(self.initProvider) && has(self.initProvider.clusterId))' + - message: spec.forProvider.serviceName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.serviceName) + || (has(self.initProvider) && has(self.initProvider.serviceName))' + status: + description: ProjectDatabaseM3DbUserStatus defines the observed state + of ProjectDatabaseM3DbUser. + properties: + atProvider: + properties: + clusterId: + description: Id of the database cluster + type: string + createdAt: + description: Date of the creation of the user + type: string + group: + description: Group of the user + type: string + id: + type: string + passwordReset: + description: Arbitrary string to change to trigger a password + update + type: string + serviceName: + type: string + status: + description: Current status of the user + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. 
+ format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/databases.ovh.edixos.io_projectdatabasemongodbusers.yaml b/package/crds/databases.ovh.edixos.io_projectdatabasemongodbusers.yaml new file mode 100644 index 0000000..49a7337 --- /dev/null +++ b/package/crds/databases.ovh.edixos.io_projectdatabasemongodbusers.yaml @@ -0,0 +1,357 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.13.0 + name: projectdatabasemongodbusers.databases.ovh.edixos.io +spec: + group: databases.ovh.edixos.io + names: + categories: + - crossplane + - managed + - ovh + kind: ProjectDatabaseMongodbUser + listKind: ProjectDatabaseMongodbUserList + plural: projectdatabasemongodbusers + singular: projectdatabasemongodbuser + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: 
date + name: v1alpha1 + schema: + openAPIV3Schema: + description: ProjectDatabaseMongodbUser is the Schema for the ProjectDatabaseMongodbUsers + API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ProjectDatabaseMongodbUserSpec defines the desired state + of ProjectDatabaseMongodbUser + properties: + deletionPolicy: + default: Delete + description: 'DeletionPolicy specifies what will happen to the underlying + external when this managed resource is deleted - either "Delete" + or "Orphan" the external resource. This field is planned to be deprecated + in favor of the ManagementPolicies field in a future release. Currently, + both could be set independently and non-default values would be + honored if the feature flag is enabled. 
See the design doc for more + information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223' + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + clusterId: + description: Id of the database cluster + type: string + passwordReset: + description: Arbitrary string to change to trigger a password + update + type: string + roles: + description: Roles the user belongs to (without authentication + database) + items: + type: string + type: array + serviceName: + type: string + type: object + initProvider: + description: THIS IS A BETA FIELD. It will be honored unless the Management + Policies feature flag is disabled. InitProvider holds the same fields + as ForProvider, with the exception of Identifier and other resource + reference fields. The fields that are in InitProvider are merged + into ForProvider when the resource is created. The same fields are + also added to the terraform ignore_changes hook, to avoid updating + them after creation. This is useful for fields that are required + on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, + like an autoscaler. + properties: + clusterId: + description: Id of the database cluster + type: string + passwordReset: + description: Arbitrary string to change to trigger a password + update + type: string + roles: + description: Roles the user belongs to (without authentication + database) + items: + type: string + type: array + serviceName: + type: string + type: object + managementPolicies: + default: + - '*' + description: 'THIS IS A BETA FIELD. It is on by default but can be + opted out through a Crossplane feature flag. ManagementPolicies + specify the array of actions Crossplane is allowed to take on the + managed and external resources. This field is planned to replace + the DeletionPolicy field in a future release. 
Currently, both could + be set independently and non-default values would be honored if + the feature flag is enabled. If both are custom, the DeletionPolicy + field will be ignored. See the design doc for more information: + https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md' + items: + description: A ManagementAction represents an action that the Crossplane + controllers can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: ProviderConfigReference specifies how the provider that + will be used to create, observe, update, and delete this managed + resource should be configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: PublishConnectionDetailsTo specifies the connection secret + config which contains a name, metadata and a reference to secret + store config to which any connection details for this managed resource + should be written. Connection details frequently include the endpoint, + username, and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: SecretStoreConfigRef specifies which secret store + config should be used for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are the annotations to be added to + connection secret. - For Kubernetes secrets, this will be + used as "metadata.annotations". - It is up to Secret Store + implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: Labels are the labels/tags to be added to connection + secret. - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store + types. + type: object + type: + description: Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: WriteConnectionSecretToReference specifies the namespace + and name of a Secret to which any connection details for this managed + resource should be written. Connection details frequently include + the endpoint, username, and password required to connect to the + managed resource. This field is planned to be replaced in a future + release in favor of PublishConnectionDetailsTo. Currently, both + could be set independently and connection details would be published + to both without affecting each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.clusterId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.clusterId) + || (has(self.initProvider) && has(self.initProvider.clusterId))' + - message: spec.forProvider.serviceName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.serviceName) + || (has(self.initProvider) && has(self.initProvider.serviceName))' + status: + description: ProjectDatabaseMongodbUserStatus defines the observed state + of ProjectDatabaseMongodbUser. + properties: + atProvider: + properties: + clusterId: + description: Id of the database cluster + type: string + createdAt: + description: Date of the creation of the user + type: string + id: + type: string + passwordReset: + description: Arbitrary string to change to trigger a password + update + type: string + roles: + description: Roles the user belongs to (without authentication + database) + items: + type: string + type: array + serviceName: + type: string + status: + description: Current status of the user + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. 
+ type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/databases.ovh.edixos.io_projectdatabaseopensearchpatterns.yaml b/package/crds/databases.ovh.edixos.io_projectdatabaseopensearchpatterns.yaml new file mode 100644 index 0000000..ea0ed16 --- /dev/null +++ b/package/crds/databases.ovh.edixos.io_projectdatabaseopensearchpatterns.yaml @@ -0,0 +1,343 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.13.0 + name: projectdatabaseopensearchpatterns.databases.ovh.edixos.io +spec: + group: databases.ovh.edixos.io + names: + categories: + - crossplane + - managed + - ovh + kind: ProjectDatabaseOpensearchPattern + listKind: ProjectDatabaseOpensearchPatternList + plural: projectdatabaseopensearchpatterns + singular: projectdatabaseopensearchpattern + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: ProjectDatabaseOpensearchPattern is the Schema for the ProjectDatabaseOpensearchPatterns + API. 
+ properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ProjectDatabaseOpensearchPatternSpec defines the desired + state of ProjectDatabaseOpensearchPattern + properties: + deletionPolicy: + default: Delete + description: 'DeletionPolicy specifies what will happen to the underlying + external when this managed resource is deleted - either "Delete" + or "Orphan" the external resource. This field is planned to be deprecated + in favor of the ManagementPolicies field in a future release. Currently, + both could be set independently and non-default values would be + honored if the feature flag is enabled. See the design doc for more + information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223' + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + clusterId: + description: Id of the database cluster + type: string + maxIndexCount: + description: Maximum number of index for this pattern + type: number + pattern: + description: Pattern format + type: string + serviceName: + type: string + type: object + initProvider: + description: THIS IS A BETA FIELD. It will be honored unless the Management + Policies feature flag is disabled. 
InitProvider holds the same fields + as ForProvider, with the exception of Identifier and other resource + reference fields. The fields that are in InitProvider are merged + into ForProvider when the resource is created. The same fields are + also added to the terraform ignore_changes hook, to avoid updating + them after creation. This is useful for fields that are required + on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, + like an autoscaler. + properties: + clusterId: + description: Id of the database cluster + type: string + maxIndexCount: + description: Maximum number of index for this pattern + type: number + pattern: + description: Pattern format + type: string + serviceName: + type: string + type: object + managementPolicies: + default: + - '*' + description: 'THIS IS A BETA FIELD. It is on by default but can be + opted out through a Crossplane feature flag. ManagementPolicies + specify the array of actions Crossplane is allowed to take on the + managed and external resources. This field is planned to replace + the DeletionPolicy field in a future release. Currently, both could + be set independently and non-default values would be honored if + the feature flag is enabled. If both are custom, the DeletionPolicy + field will be ignored. See the design doc for more information: + https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md' + items: + description: A ManagementAction represents an action that the Crossplane + controllers can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: ProviderConfigReference specifies how the provider that + will be used to create, observe, update, and delete this managed + resource should be configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: PublishConnectionDetailsTo specifies the connection secret + config which contains a name, metadata and a reference to secret + store config to which any connection details for this managed resource + should be written. Connection details frequently include the endpoint, + username, and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: SecretStoreConfigRef specifies which secret store + config should be used for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are the annotations to be added to + connection secret. - For Kubernetes secrets, this will be + used as "metadata.annotations". - It is up to Secret Store + implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: Labels are the labels/tags to be added to connection + secret. - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store + types. + type: object + type: + description: Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: WriteConnectionSecretToReference specifies the namespace + and name of a Secret to which any connection details for this managed + resource should be written. Connection details frequently include + the endpoint, username, and password required to connect to the + managed resource. 
This field is planned to be replaced in a future + release in favor of PublishConnectionDetailsTo. Currently, both + could be set independently and connection details would be published + to both without affecting each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.clusterId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.clusterId) + || (has(self.initProvider) && has(self.initProvider.clusterId))' + - message: spec.forProvider.pattern is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.pattern) + || (has(self.initProvider) && has(self.initProvider.pattern))' + - message: spec.forProvider.serviceName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.serviceName) + || (has(self.initProvider) && has(self.initProvider.serviceName))' + status: + description: ProjectDatabaseOpensearchPatternStatus defines the observed + state of ProjectDatabaseOpensearchPattern. + properties: + atProvider: + properties: + clusterId: + description: Id of the database cluster + type: string + id: + type: string + maxIndexCount: + description: Maximum number of index for this pattern + type: number + pattern: + description: Pattern format + type: string + serviceName: + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. 
+ properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/databases.ovh.edixos.io_projectdatabaseopensearchusers.yaml b/package/crds/databases.ovh.edixos.io_projectdatabaseopensearchusers.yaml new file mode 100644 index 0000000..55093da --- /dev/null +++ b/package/crds/databases.ovh.edixos.io_projectdatabaseopensearchusers.yaml @@ -0,0 +1,375 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.13.0 + name: projectdatabaseopensearchusers.databases.ovh.edixos.io +spec: + group: databases.ovh.edixos.io + names: + categories: + - crossplane + - managed + - ovh + kind: ProjectDatabaseOpensearchUser + listKind: ProjectDatabaseOpensearchUserList + plural: projectdatabaseopensearchusers + singular: projectdatabaseopensearchuser + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: 
string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: ProjectDatabaseOpensearchUser is the Schema for the ProjectDatabaseOpensearchUsers + API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ProjectDatabaseOpensearchUserSpec defines the desired state + of ProjectDatabaseOpensearchUser + properties: + deletionPolicy: + default: Delete + description: 'DeletionPolicy specifies what will happen to the underlying + external when this managed resource is deleted - either "Delete" + or "Orphan" the external resource. This field is planned to be deprecated + in favor of the ManagementPolicies field in a future release. Currently, + both could be set independently and non-default values would be + honored if the feature flag is enabled. 
See the design doc for more + information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223' + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + acls: + description: Acls of the user + items: + properties: + pattern: + description: Pattern of the ACL + type: string + permission: + description: Permission of the ACL + type: string + type: object + type: array + clusterId: + description: Id of the database cluster + type: string + passwordReset: + description: Arbitrary string to change to trigger a password + update + type: string + serviceName: + type: string + type: object + initProvider: + description: THIS IS A BETA FIELD. It will be honored unless the Management + Policies feature flag is disabled. InitProvider holds the same fields + as ForProvider, with the exception of Identifier and other resource + reference fields. The fields that are in InitProvider are merged + into ForProvider when the resource is created. The same fields are + also added to the terraform ignore_changes hook, to avoid updating + them after creation. This is useful for fields that are required + on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, + like an autoscaler. + properties: + acls: + description: Acls of the user + items: + properties: + pattern: + description: Pattern of the ACL + type: string + permission: + description: Permission of the ACL + type: string + type: object + type: array + clusterId: + description: Id of the database cluster + type: string + passwordReset: + description: Arbitrary string to change to trigger a password + update + type: string + serviceName: + type: string + type: object + managementPolicies: + default: + - '*' + description: 'THIS IS A BETA FIELD. It is on by default but can be + opted out through a Crossplane feature flag. 
ManagementPolicies + specify the array of actions Crossplane is allowed to take on the + managed and external resources. This field is planned to replace + the DeletionPolicy field in a future release. Currently, both could + be set independently and non-default values would be honored if + the feature flag is enabled. If both are custom, the DeletionPolicy + field will be ignored. See the design doc for more information: + https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md' + items: + description: A ManagementAction represents an action that the Crossplane + controllers can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: ProviderConfigReference specifies how the provider that + will be used to create, observe, update, and delete this managed + resource should be configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: PublishConnectionDetailsTo specifies the connection secret + config which contains a name, metadata and a reference to secret + store config to which any connection details for this managed resource + should be written. Connection details frequently include the endpoint, + username, and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: SecretStoreConfigRef specifies which secret store + config should be used for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are the annotations to be added to + connection secret. - For Kubernetes secrets, this will be + used as "metadata.annotations". - It is up to Secret Store + implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: Labels are the labels/tags to be added to connection + secret. - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store + types. + type: object + type: + description: Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: WriteConnectionSecretToReference specifies the namespace + and name of a Secret to which any connection details for this managed + resource should be written. Connection details frequently include + the endpoint, username, and password required to connect to the + managed resource. This field is planned to be replaced in a future + release in favor of PublishConnectionDetailsTo. Currently, both + could be set independently and connection details would be published + to both without affecting each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.clusterId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.clusterId) + || (has(self.initProvider) && has(self.initProvider.clusterId))' + - message: spec.forProvider.serviceName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.serviceName) + || (has(self.initProvider) && has(self.initProvider.serviceName))' + status: + description: ProjectDatabaseOpensearchUserStatus defines the observed + state of ProjectDatabaseOpensearchUser. + properties: + atProvider: + properties: + acls: + description: Acls of the user + items: + properties: + pattern: + description: Pattern of the ACL + type: string + permission: + description: Permission of the ACL + type: string + type: object + type: array + clusterId: + description: Id of the database cluster + type: string + createdAt: + description: Date of the creation of the user + type: string + id: + type: string + passwordReset: + description: Arbitrary string to change to trigger a password + update + type: string + serviceName: + type: string + status: + description: Current status of the user + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. 
+ type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/databases.ovh.edixos.io_projectdatabasepostgresqlusers.yaml b/package/crds/databases.ovh.edixos.io_projectdatabasepostgresqlusers.yaml new file mode 100644 index 0000000..d93a5d5 --- /dev/null +++ b/package/crds/databases.ovh.edixos.io_projectdatabasepostgresqlusers.yaml @@ -0,0 +1,354 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.13.0 + name: projectdatabasepostgresqlusers.databases.ovh.edixos.io +spec: + group: databases.ovh.edixos.io + names: + categories: + - crossplane + - managed + - ovh + kind: ProjectDatabasePostgresqlUser + listKind: ProjectDatabasePostgresqlUserList + plural: projectdatabasepostgresqlusers + singular: projectdatabasepostgresqluser + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: ProjectDatabasePostgresqlUser is the Schema for the 
ProjectDatabasePostgresqlUsers + API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ProjectDatabasePostgresqlUserSpec defines the desired state + of ProjectDatabasePostgresqlUser + properties: + deletionPolicy: + default: Delete + description: 'DeletionPolicy specifies what will happen to the underlying + external when this managed resource is deleted - either "Delete" + or "Orphan" the external resource. This field is planned to be deprecated + in favor of the ManagementPolicies field in a future release. Currently, + both could be set independently and non-default values would be + honored if the feature flag is enabled. See the design doc for more + information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223' + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + clusterId: + description: Id of the database cluster + type: string + passwordReset: + description: Arbitrary string to change to trigger a password + update + type: string + roles: + description: Roles the user belongs to + items: + type: string + type: array + serviceName: + type: string + type: object + initProvider: + description: THIS IS A BETA FIELD. 
It will be honored unless the Management + Policies feature flag is disabled. InitProvider holds the same fields + as ForProvider, with the exception of Identifier and other resource + reference fields. The fields that are in InitProvider are merged + into ForProvider when the resource is created. The same fields are + also added to the terraform ignore_changes hook, to avoid updating + them after creation. This is useful for fields that are required + on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, + like an autoscaler. + properties: + clusterId: + description: Id of the database cluster + type: string + passwordReset: + description: Arbitrary string to change to trigger a password + update + type: string + roles: + description: Roles the user belongs to + items: + type: string + type: array + serviceName: + type: string + type: object + managementPolicies: + default: + - '*' + description: 'THIS IS A BETA FIELD. It is on by default but can be + opted out through a Crossplane feature flag. ManagementPolicies + specify the array of actions Crossplane is allowed to take on the + managed and external resources. This field is planned to replace + the DeletionPolicy field in a future release. Currently, both could + be set independently and non-default values would be honored if + the feature flag is enabled. If both are custom, the DeletionPolicy + field will be ignored. See the design doc for more information: + https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md' + items: + description: A ManagementAction represents an action that the Crossplane + controllers can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: ProviderConfigReference specifies how the provider that + will be used to create, observe, update, and delete this managed + resource should be configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: PublishConnectionDetailsTo specifies the connection secret + config which contains a name, metadata and a reference to secret + store config to which any connection details for this managed resource + should be written. Connection details frequently include the endpoint, + username, and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: SecretStoreConfigRef specifies which secret store + config should be used for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are the annotations to be added to + connection secret. - For Kubernetes secrets, this will be + used as "metadata.annotations". - It is up to Secret Store + implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: Labels are the labels/tags to be added to connection + secret. - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store + types. + type: object + type: + description: Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: WriteConnectionSecretToReference specifies the namespace + and name of a Secret to which any connection details for this managed + resource should be written. Connection details frequently include + the endpoint, username, and password required to connect to the + managed resource. 
This field is planned to be replaced in a future + release in favor of PublishConnectionDetailsTo. Currently, both + could be set independently and connection details would be published + to both without affecting each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.clusterId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.clusterId) + || (has(self.initProvider) && has(self.initProvider.clusterId))' + - message: spec.forProvider.serviceName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.serviceName) + || (has(self.initProvider) && has(self.initProvider.serviceName))' + status: + description: ProjectDatabasePostgresqlUserStatus defines the observed + state of ProjectDatabasePostgresqlUser. + properties: + atProvider: + properties: + clusterId: + description: Id of the database cluster + type: string + createdAt: + description: Date of the creation of the user + type: string + id: + type: string + passwordReset: + description: Arbitrary string to change to trigger a password + update + type: string + roles: + description: Roles the user belongs to + items: + type: string + type: array + serviceName: + type: string + status: + description: Current status of the user + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. 
+ format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/databases.ovh.edixos.io_projectdatabaseredisusers.yaml b/package/crds/databases.ovh.edixos.io_projectdatabaseredisusers.yaml new file mode 100644 index 0000000..fece4a5 --- /dev/null +++ b/package/crds/databases.ovh.edixos.io_projectdatabaseredisusers.yaml @@ -0,0 +1,399 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.13.0 + name: projectdatabaseredisusers.databases.ovh.edixos.io +spec: + group: databases.ovh.edixos.io + names: + categories: + - crossplane + - managed + - ovh + kind: ProjectDatabaseRedisUser + listKind: ProjectDatabaseRedisUserList + plural: projectdatabaseredisusers + singular: projectdatabaseredisuser + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: 
v1alpha1 + schema: + openAPIV3Schema: + description: ProjectDatabaseRedisUser is the Schema for the ProjectDatabaseRedisUsers + API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ProjectDatabaseRedisUserSpec defines the desired state of + ProjectDatabaseRedisUser + properties: + deletionPolicy: + default: Delete + description: 'DeletionPolicy specifies what will happen to the underlying + external when this managed resource is deleted - either "Delete" + or "Orphan" the external resource. This field is planned to be deprecated + in favor of the ManagementPolicies field in a future release. Currently, + both could be set independently and non-default values would be + honored if the feature flag is enabled. 
See the design doc for more + information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223' + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + categories: + description: Categories of the user + items: + type: string + type: array + channels: + description: Channels of the user + items: + type: string + type: array + clusterId: + description: Id of the database cluster + type: string + commands: + description: Commands of the user + items: + type: string + type: array + keys: + description: Keys of the user + items: + type: string + type: array + passwordReset: + description: Arbitrary string to change to trigger a password + update + type: string + serviceName: + type: string + type: object + initProvider: + description: THIS IS A BETA FIELD. It will be honored unless the Management + Policies feature flag is disabled. InitProvider holds the same fields + as ForProvider, with the exception of Identifier and other resource + reference fields. The fields that are in InitProvider are merged + into ForProvider when the resource is created. The same fields are + also added to the terraform ignore_changes hook, to avoid updating + them after creation. This is useful for fields that are required + on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, + like an autoscaler. 
+ properties: + categories: + description: Categories of the user + items: + type: string + type: array + channels: + description: Channels of the user + items: + type: string + type: array + clusterId: + description: Id of the database cluster + type: string + commands: + description: Commands of the user + items: + type: string + type: array + keys: + description: Keys of the user + items: + type: string + type: array + passwordReset: + description: Arbitrary string to change to trigger a password + update + type: string + serviceName: + type: string + type: object + managementPolicies: + default: + - '*' + description: 'THIS IS A BETA FIELD. It is on by default but can be + opted out through a Crossplane feature flag. ManagementPolicies + specify the array of actions Crossplane is allowed to take on the + managed and external resources. This field is planned to replace + the DeletionPolicy field in a future release. Currently, both could + be set independently and non-default values would be honored if + the feature flag is enabled. If both are custom, the DeletionPolicy + field will be ignored. See the design doc for more information: + https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md' + items: + description: A ManagementAction represents an action that the Crossplane + controllers can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: ProviderConfigReference specifies how the provider that + will be used to create, observe, update, and delete this managed + resource should be configured. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: PublishConnectionDetailsTo specifies the connection secret + config which contains a name, metadata and a reference to secret + store config to which any connection details for this managed resource + should be written. Connection details frequently include the endpoint, + username, and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: SecretStoreConfigRef specifies which secret store + config should be used for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. 
The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are the annotations to be added to + connection secret. - For Kubernetes secrets, this will be + used as "metadata.annotations". - It is up to Secret Store + implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: Labels are the labels/tags to be added to connection + secret. - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store + types. + type: object + type: + description: Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: WriteConnectionSecretToReference specifies the namespace + and name of a Secret to which any connection details for this managed + resource should be written. Connection details frequently include + the endpoint, username, and password required to connect to the + managed resource. This field is planned to be replaced in a future + release in favor of PublishConnectionDetailsTo. Currently, both + could be set independently and connection details would be published + to both without affecting each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.clusterId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.clusterId) + || (has(self.initProvider) && has(self.initProvider.clusterId))' + - message: spec.forProvider.serviceName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.serviceName) + || (has(self.initProvider) && has(self.initProvider.serviceName))' + status: + description: ProjectDatabaseRedisUserStatus defines the observed state + of ProjectDatabaseRedisUser. + properties: + atProvider: + properties: + categories: + description: Categories of the user + items: + type: string + type: array + channels: + description: Channels of the user + items: + type: string + type: array + clusterId: + description: Id of the database cluster + type: string + commands: + description: Commands of the user + items: + type: string + type: array + createdAt: + description: Date of the creation of the user + type: string + id: + type: string + keys: + description: Keys of the user + items: + type: string + type: array + passwordReset: + description: Arbitrary string to change to trigger a password + update + type: string + serviceName: + type: string + status: + description: Current status of the user + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. 
+ format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/databases.ovh.edixos.io_projectdatabases.yaml b/package/crds/databases.ovh.edixos.io_projectdatabases.yaml new file mode 100644 index 0000000..8fdf315 --- /dev/null +++ b/package/crds/databases.ovh.edixos.io_projectdatabases.yaml @@ -0,0 +1,507 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.13.0 + name: projectdatabases.databases.ovh.edixos.io +spec: + group: databases.ovh.edixos.io + names: + categories: + - crossplane + - managed + - ovh + kind: ProjectDatabase + listKind: ProjectDatabaseList + plural: projectdatabases + singular: projectdatabase + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: ProjectDatabase is the 
Schema for the ProjectDatabases API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ProjectDatabaseSpec defines the desired state of ProjectDatabase + properties: + deletionPolicy: + default: Delete + description: 'DeletionPolicy specifies what will happen to the underlying + external when this managed resource is deleted - either "Delete" + or "Orphan" the external resource. This field is planned to be deprecated + in favor of the ManagementPolicies field in a future release. Currently, + both could be set independently and non-default values would be + honored if the feature flag is enabled. 
See the design doc for more + information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223' + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + advancedConfiguration: + additionalProperties: + type: string + description: Advanced configuration key / value + type: object + description: + description: Description of the cluster + type: string + diskSize: + description: Disk size attributes of the cluster + type: number + engine: + description: Name of the engine of the service + type: string + flavor: + description: The node flavor used for this cluster + type: string + kafkaRestApi: + description: Defines whether the REST API is enabled on a Kafka + cluster + type: boolean + nodes: + description: List of nodes composing the service + items: + properties: + networkId: + description: Private network ID in which the node is. It's + the regional openstackId of the private network. + type: string + region: + description: Region of the node + type: string + subnetId: + description: Private subnet ID in which the node is + type: string + type: object + type: array + opensearchAclsEnabled: + description: Defines whether the ACLs are enabled on an Opensearch + cluster + type: boolean + plan: + description: Plan of the cluster + type: string + serviceName: + type: string + version: + description: Version of the engine deployed on the cluster + type: string + type: object + initProvider: + description: THIS IS A BETA FIELD. It will be honored unless the Management + Policies feature flag is disabled. InitProvider holds the same fields + as ForProvider, with the exception of Identifier and other resource + reference fields. The fields that are in InitProvider are merged + into ForProvider when the resource is created. The same fields are + also added to the terraform ignore_changes hook, to avoid updating + them after creation. 
This is useful for fields that are required + on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, + like an autoscaler. + properties: + advancedConfiguration: + additionalProperties: + type: string + description: Advanced configuration key / value + type: object + description: + description: Description of the cluster + type: string + diskSize: + description: Disk size attributes of the cluster + type: number + engine: + description: Name of the engine of the service + type: string + flavor: + description: The node flavor used for this cluster + type: string + kafkaRestApi: + description: Defines whether the REST API is enabled on a Kafka + cluster + type: boolean + nodes: + description: List of nodes composing the service + items: + properties: + networkId: + description: Private network ID in which the node is. It's + the regional openstackId of the private network. + type: string + region: + description: Region of the node + type: string + subnetId: + description: Private subnet ID in which the node is + type: string + type: object + type: array + opensearchAclsEnabled: + description: Defines whether the ACLs are enabled on an Opensearch + cluster + type: boolean + plan: + description: Plan of the cluster + type: string + serviceName: + type: string + version: + description: Version of the engine deployed on the cluster + type: string + type: object + managementPolicies: + default: + - '*' + description: 'THIS IS A BETA FIELD. It is on by default but can be + opted out through a Crossplane feature flag. ManagementPolicies + specify the array of actions Crossplane is allowed to take on the + managed and external resources. This field is planned to replace + the DeletionPolicy field in a future release. Currently, both could + be set independently and non-default values would be honored if + the feature flag is enabled. If both are custom, the DeletionPolicy + field will be ignored. 
See the design doc for more information: + https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md' + items: + description: A ManagementAction represents an action that the Crossplane + controllers can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: ProviderConfigReference specifies how the provider that + will be used to create, observe, update, and delete this managed + resource should be configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: PublishConnectionDetailsTo specifies the connection secret + config which contains a name, metadata and a reference to secret + store config to which any connection details for this managed resource + should be written. 
Connection details frequently include the endpoint, + username, and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: SecretStoreConfigRef specifies which secret store + config should be used for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are the annotations to be added to + connection secret. - For Kubernetes secrets, this will be + used as "metadata.annotations". - It is up to Secret Store + implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: Labels are the labels/tags to be added to connection + secret. - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store + types. + type: object + type: + description: Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: WriteConnectionSecretToReference specifies the namespace + and name of a Secret to which any connection details for this managed + resource should be written. Connection details frequently include + the endpoint, username, and password required to connect to the + managed resource. This field is planned to be replaced in a future + release in favor of PublishConnectionDetailsTo. Currently, both + could be set independently and connection details would be published + to both without affecting each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.engine is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.engine) + || (has(self.initProvider) && has(self.initProvider.engine))' + - message: spec.forProvider.flavor is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.flavor) + || (has(self.initProvider) && has(self.initProvider.flavor))' + - message: spec.forProvider.nodes is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.nodes) + || (has(self.initProvider) && has(self.initProvider.nodes))' + - message: spec.forProvider.plan is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in 
self.managementPolicies) || has(self.forProvider.plan) + || (has(self.initProvider) && has(self.initProvider.plan))' + - message: spec.forProvider.serviceName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.serviceName) + || (has(self.initProvider) && has(self.initProvider.serviceName))' + - message: spec.forProvider.version is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.version) + || (has(self.initProvider) && has(self.initProvider.version))' + status: + description: ProjectDatabaseStatus defines the observed state of ProjectDatabase. + properties: + atProvider: + properties: + advancedConfiguration: + additionalProperties: + type: string + description: Advanced configuration key / value + type: object + backupTime: + description: Time on which backups start every day + type: string + createdAt: + description: Date of the creation of the cluster + type: string + description: + description: Description of the cluster + type: string + diskSize: + description: Disk size attributes of the cluster + type: number + diskType: + description: Disk type attributes of the cluster + type: string + endpoints: + description: List of all endpoints of the service + items: + properties: + component: + type: string + domain: + type: string + path: + type: string + port: + type: number + scheme: + type: string + ssl: + type: boolean + sslMode: + type: string + uri: + type: string + type: object + type: array + engine: + description: Name of the engine of the service + type: string + flavor: + description: The node flavor used for this cluster + type: string + id: + type: string + kafkaRestApi: + description: Defines whether the REST API is enabled on a Kafka + cluster + type: boolean + maintenanceTime: + description: Time on 
which maintenances can start every day + type: string + networkType: + description: Type of network of the cluster + type: string + nodes: + description: List of nodes composing the service + items: + properties: + networkId: + description: Private network ID in which the node is. It's + the regional openstackId of the private network. + type: string + region: + description: Region of the node + type: string + subnetId: + description: Private subnet ID in which the node is + type: string + type: object + type: array + opensearchAclsEnabled: + description: Defines whether the ACLs are enabled on an Opensearch + cluster + type: boolean + plan: + description: Plan of the cluster + type: string + serviceName: + type: string + status: + description: Current status of the cluster + type: string + version: + description: Version of the engine deployed on the cluster + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/databases.ovh.edixos.io_projectdatabaseusers.yaml b/package/crds/databases.ovh.edixos.io_projectdatabaseusers.yaml new file mode 100644 index 0000000..d906ec4 --- /dev/null +++ b/package/crds/databases.ovh.edixos.io_projectdatabaseusers.yaml @@ -0,0 +1,350 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.13.0 + name: projectdatabaseusers.databases.ovh.edixos.io +spec: + group: databases.ovh.edixos.io + names: + categories: + - crossplane + - managed + - ovh + kind: ProjectDatabaseUser + listKind: ProjectDatabaseUserList + plural: projectdatabaseusers + singular: projectdatabaseuser + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: ProjectDatabaseUser is the Schema for the ProjectDatabaseUsers + API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. 
Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ProjectDatabaseUserSpec defines the desired state of ProjectDatabaseUser + properties: + deletionPolicy: + default: Delete + description: 'DeletionPolicy specifies what will happen to the underlying + external when this managed resource is deleted - either "Delete" + or "Orphan" the external resource. This field is planned to be deprecated + in favor of the ManagementPolicies field in a future release. Currently, + both could be set independently and non-default values would be + honored if the feature flag is enabled. See the design doc for more + information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223' + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + clusterId: + description: Id of the database cluster + type: string + engine: + description: Name of the engine of the service + type: string + passwordReset: + description: Arbitrary string to change to trigger a password + update + type: string + serviceName: + type: string + type: object + initProvider: + description: THIS IS A BETA FIELD. It will be honored unless the Management + Policies feature flag is disabled. InitProvider holds the same fields + as ForProvider, with the exception of Identifier and other resource + reference fields. The fields that are in InitProvider are merged + into ForProvider when the resource is created. The same fields are + also added to the terraform ignore_changes hook, to avoid updating + them after creation. 
This is useful for fields that are required + on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, + like an autoscaler. + properties: + clusterId: + description: Id of the database cluster + type: string + engine: + description: Name of the engine of the service + type: string + passwordReset: + description: Arbitrary string to change to trigger a password + update + type: string + serviceName: + type: string + type: object + managementPolicies: + default: + - '*' + description: 'THIS IS A BETA FIELD. It is on by default but can be + opted out through a Crossplane feature flag. ManagementPolicies + specify the array of actions Crossplane is allowed to take on the + managed and external resources. This field is planned to replace + the DeletionPolicy field in a future release. Currently, both could + be set independently and non-default values would be honored if + the feature flag is enabled. If both are custom, the DeletionPolicy + field will be ignored. See the design doc for more information: + https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md' + items: + description: A ManagementAction represents an action that the Crossplane + controllers can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: ProviderConfigReference specifies how the provider that + will be used to create, observe, update, and delete this managed + resource should be configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: PublishConnectionDetailsTo specifies the connection secret + config which contains a name, metadata and a reference to secret + store config to which any connection details for this managed resource + should be written. Connection details frequently include the endpoint, + username, and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: SecretStoreConfigRef specifies which secret store + config should be used for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. 
The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are the annotations to be added to + connection secret. - For Kubernetes secrets, this will be + used as "metadata.annotations". - It is up to Secret Store + implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: Labels are the labels/tags to be added to connection + secret. - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store + types. + type: object + type: + description: Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: WriteConnectionSecretToReference specifies the namespace + and name of a Secret to which any connection details for this managed + resource should be written. Connection details frequently include + the endpoint, username, and password required to connect to the + managed resource. This field is planned to be replaced in a future + release in favor of PublishConnectionDetailsTo. Currently, both + could be set independently and connection details would be published + to both without affecting each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.clusterId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.clusterId) + || (has(self.initProvider) && has(self.initProvider.clusterId))' + - message: spec.forProvider.engine is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.engine) + || (has(self.initProvider) && has(self.initProvider.engine))' + - message: spec.forProvider.serviceName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.serviceName) + || (has(self.initProvider) && has(self.initProvider.serviceName))' + status: + description: ProjectDatabaseUserStatus defines the observed state of ProjectDatabaseUser. + properties: + atProvider: + properties: + clusterId: + description: Id of the database cluster + type: string + createdAt: + description: Date of the creation of the user + type: string + engine: + description: Name of the engine of the service + type: string + id: + type: string + passwordReset: + description: Arbitrary string to change to trigger a password + update + type: string + serviceName: + type: string + status: + description: Current status of the user + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. 
+ format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/kube.ovh.edixos.io_projectkubeiprestrictions.yaml b/package/crds/kube.ovh.edixos.io_projectkubeiprestrictions.yaml new file mode 100644 index 0000000..39a5402 --- /dev/null +++ b/package/crds/kube.ovh.edixos.io_projectkubeiprestrictions.yaml @@ -0,0 +1,343 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.13.0 + name: projectkubeiprestrictions.kube.ovh.edixos.io +spec: + group: kube.ovh.edixos.io + names: + categories: + - crossplane + - managed + - ovh + kind: ProjectKubeIprestrictions + listKind: ProjectKubeIprestrictionsList + plural: projectkubeiprestrictions + singular: projectkubeiprestrictions + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + 
openAPIV3Schema: + description: ProjectKubeIprestrictions is the Schema for the ProjectKubeIprestrictionss + API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ProjectKubeIprestrictionsSpec defines the desired state of + ProjectKubeIprestrictions + properties: + deletionPolicy: + default: Delete + description: 'DeletionPolicy specifies what will happen to the underlying + external when this managed resource is deleted - either "Delete" + or "Orphan" the external resource. This field is planned to be deprecated + in favor of the ManagementPolicies field in a future release. Currently, + both could be set independently and non-default values would be + honored if the feature flag is enabled. See the design doc for more + information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223' + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + ips: + description: List of IP restrictions for the cluster + items: + type: string + type: array + kubeId: + description: Kube ID + type: string + serviceName: + description: Service name + type: string + type: object + initProvider: + description: THIS IS A BETA FIELD. 
It will be honored unless the Management + Policies feature flag is disabled. InitProvider holds the same fields + as ForProvider, with the exception of Identifier and other resource + reference fields. The fields that are in InitProvider are merged + into ForProvider when the resource is created. The same fields are + also added to the terraform ignore_changes hook, to avoid updating + them after creation. This is useful for fields that are required + on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, + like an autoscaler. + properties: + ips: + description: List of IP restrictions for the cluster + items: + type: string + type: array + kubeId: + description: Kube ID + type: string + serviceName: + description: Service name + type: string + type: object + managementPolicies: + default: + - '*' + description: 'THIS IS A BETA FIELD. It is on by default but can be + opted out through a Crossplane feature flag. ManagementPolicies + specify the array of actions Crossplane is allowed to take on the + managed and external resources. This field is planned to replace + the DeletionPolicy field in a future release. Currently, both could + be set independently and non-default values would be honored if + the feature flag is enabled. If both are custom, the DeletionPolicy + field will be ignored. See the design doc for more information: + https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md' + items: + description: A ManagementAction represents an action that the Crossplane + controllers can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: ProviderConfigReference specifies how the provider that + will be used to create, observe, update, and delete this managed + resource should be configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: PublishConnectionDetailsTo specifies the connection secret + config which contains a name, metadata and a reference to secret + store config to which any connection details for this managed resource + should be written. Connection details frequently include the endpoint, + username, and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: SecretStoreConfigRef specifies which secret store + config should be used for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are the annotations to be added to + connection secret. - For Kubernetes secrets, this will be + used as "metadata.annotations". - It is up to Secret Store + implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: Labels are the labels/tags to be added to connection + secret. - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store + types. + type: object + type: + description: Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: WriteConnectionSecretToReference specifies the namespace + and name of a Secret to which any connection details for this managed + resource should be written. Connection details frequently include + the endpoint, username, and password required to connect to the + managed resource. 
This field is planned to be replaced in a future + release in favor of PublishConnectionDetailsTo. Currently, both + could be set independently and connection details would be published + to both without affecting each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.ips is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.ips) + || (has(self.initProvider) && has(self.initProvider.ips))' + - message: spec.forProvider.kubeId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.kubeId) + || (has(self.initProvider) && has(self.initProvider.kubeId))' + - message: spec.forProvider.serviceName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.serviceName) + || (has(self.initProvider) && has(self.initProvider.serviceName))' + status: + description: ProjectKubeIprestrictionsStatus defines the observed state + of ProjectKubeIprestrictions. + properties: + atProvider: + properties: + id: + type: string + ips: + description: List of IP restrictions for the cluster + items: + type: string + type: array + kubeId: + description: Kube ID + type: string + serviceName: + description: Service name + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. 
+ properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/kube.ovh.edixos.io_projectkubenodepools.yaml b/package/crds/kube.ovh.edixos.io_projectkubenodepools.yaml new file mode 100644 index 0000000..7aeada9 --- /dev/null +++ b/package/crds/kube.ovh.edixos.io_projectkubenodepools.yaml @@ -0,0 +1,547 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.13.0 + name: projectkubenodepools.kube.ovh.edixos.io +spec: + group: kube.ovh.edixos.io + names: + categories: + - crossplane + - managed + - ovh + kind: ProjectKubeNodepool + listKind: ProjectKubeNodepoolList + plural: projectkubenodepools + singular: projectkubenodepool + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: 
string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: ProjectKubeNodepool is the Schema for the ProjectKubeNodepools + API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ProjectKubeNodepoolSpec defines the desired state of ProjectKubeNodepool + properties: + deletionPolicy: + default: Delete + description: 'DeletionPolicy specifies what will happen to the underlying + external when this managed resource is deleted - either "Delete" + or "Orphan" the external resource. This field is planned to be deprecated + in favor of the ManagementPolicies field in a future release. Currently, + both could be set independently and non-default values would be + honored if the feature flag is enabled. 
See the design doc for more + information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223' + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + antiAffinity: + description: Enable anti affinity groups for nodes in the pool + type: boolean + autoscale: + description: Enable auto-scaling for the pool + type: boolean + desiredNodes: + description: Number of nodes you desire in the pool + type: number + flavorName: + description: Flavor name + type: string + kubeId: + description: Kube ID + type: string + maxNodes: + description: Number of nodes you desire in the pool + type: number + minNodes: + description: Number of nodes you desire in the pool + type: number + monthlyBilled: + description: Enable monthly billing on all nodes in the pool + type: boolean + serviceName: + description: Service name + type: string + template: + description: Node pool template + items: + properties: + metadata: + description: metadata + items: + properties: + annotations: + additionalProperties: + type: string + description: annotations + type: object + finalizers: + description: finalizers + items: + type: string + type: array + labels: + additionalProperties: + type: string + description: labels + type: object + type: object + type: array + spec: + description: spec + items: + properties: + taints: + description: taints + items: + additionalProperties: + type: string + type: object + type: array + unschedulable: + description: unschedulable + type: boolean + type: object + type: array + type: object + type: array + type: object + initProvider: + description: THIS IS A BETA FIELD. It will be honored unless the Management + Policies feature flag is disabled. InitProvider holds the same fields + as ForProvider, with the exception of Identifier and other resource + reference fields. 
The fields that are in InitProvider are merged + into ForProvider when the resource is created. The same fields are + also added to the terraform ignore_changes hook, to avoid updating + them after creation. This is useful for fields that are required + on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, + like an autoscaler. + properties: + antiAffinity: + description: Enable anti affinity groups for nodes in the pool + type: boolean + autoscale: + description: Enable auto-scaling for the pool + type: boolean + desiredNodes: + description: Number of nodes you desire in the pool + type: number + flavorName: + description: Flavor name + type: string + kubeId: + description: Kube ID + type: string + maxNodes: + description: Number of nodes you desire in the pool + type: number + minNodes: + description: Number of nodes you desire in the pool + type: number + monthlyBilled: + description: Enable monthly billing on all nodes in the pool + type: boolean + serviceName: + description: Service name + type: string + template: + description: Node pool template + items: + properties: + metadata: + description: metadata + items: + properties: + annotations: + additionalProperties: + type: string + description: annotations + type: object + finalizers: + description: finalizers + items: + type: string + type: array + labels: + additionalProperties: + type: string + description: labels + type: object + type: object + type: array + spec: + description: spec + items: + properties: + taints: + description: taints + items: + additionalProperties: + type: string + type: object + type: array + unschedulable: + description: unschedulable + type: boolean + type: object + type: array + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: 'THIS IS A BETA FIELD. It is on by default but can be + opted out through a Crossplane feature flag. 
ManagementPolicies + specify the array of actions Crossplane is allowed to take on the + managed and external resources. This field is planned to replace + the DeletionPolicy field in a future release. Currently, both could + be set independently and non-default values would be honored if + the feature flag is enabled. If both are custom, the DeletionPolicy + field will be ignored. See the design doc for more information: + https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md' + items: + description: A ManagementAction represents an action that the Crossplane + controllers can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: ProviderConfigReference specifies how the provider that + will be used to create, observe, update, and delete this managed + resource should be configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: PublishConnectionDetailsTo specifies the connection secret + config which contains a name, metadata and a reference to secret + store config to which any connection details for this managed resource + should be written. Connection details frequently include the endpoint, + username, and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: SecretStoreConfigRef specifies which secret store + config should be used for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are the annotations to be added to + connection secret. - For Kubernetes secrets, this will be + used as "metadata.annotations". - It is up to Secret Store + implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: Labels are the labels/tags to be added to connection + secret. - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store + types. + type: object + type: + description: Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: WriteConnectionSecretToReference specifies the namespace + and name of a Secret to which any connection details for this managed + resource should be written. Connection details frequently include + the endpoint, username, and password required to connect to the + managed resource. This field is planned to be replaced in a future + release in favor of PublishConnectionDetailsTo. Currently, both + could be set independently and connection details would be published + to both without affecting each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.flavorName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.flavorName) + || (has(self.initProvider) && has(self.initProvider.flavorName))' + - message: spec.forProvider.kubeId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.kubeId) + || (has(self.initProvider) && has(self.initProvider.kubeId))' + - message: spec.forProvider.serviceName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.serviceName) + || (has(self.initProvider) && has(self.initProvider.serviceName))' + status: + description: ProjectKubeNodepoolStatus defines the observed state of ProjectKubeNodepool. 
+ properties: + atProvider: + properties: + antiAffinity: + description: Enable anti affinity groups for nodes in the pool + type: boolean + autoscale: + description: Enable auto-scaling for the pool + type: boolean + availableNodes: + description: Number of nodes which are actually ready in the pool + type: number + createdAt: + description: Creation date + type: string + currentNodes: + description: Number of nodes present in the pool + type: number + desiredNodes: + description: Number of nodes you desire in the pool + type: number + flavor: + description: Flavor name + type: string + flavorName: + description: Flavor name + type: string + id: + type: string + kubeId: + description: Kube ID + type: string + maxNodes: + description: Number of nodes you desire in the pool + type: number + minNodes: + description: Number of nodes you desire in the pool + type: number + monthlyBilled: + description: Enable monthly billing on all nodes in the pool + type: boolean + projectId: + description: Project id + type: string + serviceName: + description: Service name + type: string + sizeStatus: + description: Status describing the state between number of nodes + wanted and available ones + type: string + status: + description: Current status + type: string + template: + description: Node pool template + items: + properties: + metadata: + description: metadata + items: + properties: + annotations: + additionalProperties: + type: string + description: annotations + type: object + finalizers: + description: finalizers + items: + type: string + type: array + labels: + additionalProperties: + type: string + description: labels + type: object + type: object + type: array + spec: + description: spec + items: + properties: + taints: + description: taints + items: + additionalProperties: + type: string + type: object + type: array + unschedulable: + description: unschedulable + type: boolean + type: object + type: array + type: object + type: array + upToDateNodes: + description: 
Number of nodes with latest version installed in + the pool + type: number + updatedAt: + description: Last update date + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/kube.ovh.edixos.io_projectkubeoidcs.yaml b/package/crds/kube.ovh.edixos.io_projectkubeoidcs.yaml new file mode 100644 index 0000000..b6a0ae3 --- /dev/null +++ b/package/crds/kube.ovh.edixos.io_projectkubeoidcs.yaml @@ -0,0 +1,396 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.13.0 + name: projectkubeoidcs.kube.ovh.edixos.io +spec: + group: kube.ovh.edixos.io + names: + categories: + - crossplane + - managed + - ovh + kind: ProjectKubeOidc + listKind: ProjectKubeOidcList + plural: projectkubeoidcs + singular: projectkubeoidc + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: 
.status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: ProjectKubeOidc is the Schema for the ProjectKubeOidcs API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ProjectKubeOidcSpec defines the desired state of ProjectKubeOidc + properties: + deletionPolicy: + default: Delete + description: 'DeletionPolicy specifies what will happen to the underlying + external when this managed resource is deleted - either "Delete" + or "Orphan" the external resource. This field is planned to be deprecated + in favor of the ManagementPolicies field in a future release. Currently, + both could be set independently and non-default values would be + honored if the feature flag is enabled. 
See the design doc for more + information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223' + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + clientId: + type: string + issuerUrl: + type: string + kubeId: + type: string + oidcCaContent: + type: string + oidcGroupsClaim: + items: + type: string + type: array + oidcGroupsPrefix: + type: string + oidcRequiredClaim: + items: + type: string + type: array + oidcSigningAlgs: + items: + type: string + type: array + oidcUsernameClaim: + type: string + oidcUsernamePrefix: + type: string + serviceName: + type: string + type: object + initProvider: + description: THIS IS A BETA FIELD. It will be honored unless the Management + Policies feature flag is disabled. InitProvider holds the same fields + as ForProvider, with the exception of Identifier and other resource + reference fields. The fields that are in InitProvider are merged + into ForProvider when the resource is created. The same fields are + also added to the terraform ignore_changes hook, to avoid updating + them after creation. This is useful for fields that are required + on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, + like an autoscaler. + properties: + clientId: + type: string + issuerUrl: + type: string + kubeId: + type: string + oidcCaContent: + type: string + oidcGroupsClaim: + items: + type: string + type: array + oidcGroupsPrefix: + type: string + oidcRequiredClaim: + items: + type: string + type: array + oidcSigningAlgs: + items: + type: string + type: array + oidcUsernameClaim: + type: string + oidcUsernamePrefix: + type: string + serviceName: + type: string + type: object + managementPolicies: + default: + - '*' + description: 'THIS IS A BETA FIELD. It is on by default but can be + opted out through a Crossplane feature flag. 
ManagementPolicies + specify the array of actions Crossplane is allowed to take on the + managed and external resources. This field is planned to replace + the DeletionPolicy field in a future release. Currently, both could + be set independently and non-default values would be honored if + the feature flag is enabled. If both are custom, the DeletionPolicy + field will be ignored. See the design doc for more information: + https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md' + items: + description: A ManagementAction represents an action that the Crossplane + controllers can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: ProviderConfigReference specifies how the provider that + will be used to create, observe, update, and delete this managed + resource should be configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: PublishConnectionDetailsTo specifies the connection secret + config which contains a name, metadata and a reference to secret + store config to which any connection details for this managed resource + should be written. Connection details frequently include the endpoint, + username, and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: SecretStoreConfigRef specifies which secret store + config should be used for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are the annotations to be added to + connection secret. - For Kubernetes secrets, this will be + used as "metadata.annotations". - It is up to Secret Store + implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: Labels are the labels/tags to be added to connection + secret. - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store + types. + type: object + type: + description: Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: WriteConnectionSecretToReference specifies the namespace + and name of a Secret to which any connection details for this managed + resource should be written. Connection details frequently include + the endpoint, username, and password required to connect to the + managed resource. This field is planned to be replaced in a future + release in favor of PublishConnectionDetailsTo. Currently, both + could be set independently and connection details would be published + to both without affecting each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.clientId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.clientId) + || (has(self.initProvider) && has(self.initProvider.clientId))' + - message: spec.forProvider.issuerUrl is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.issuerUrl) + || (has(self.initProvider) && has(self.initProvider.issuerUrl))' + - message: spec.forProvider.kubeId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.kubeId) + || (has(self.initProvider) && has(self.initProvider.kubeId))' + - message: spec.forProvider.serviceName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.serviceName) + || (has(self.initProvider) && has(self.initProvider.serviceName))' + status: + description: ProjectKubeOidcStatus defines the observed state of ProjectKubeOidc. + properties: + atProvider: + properties: + clientId: + type: string + id: + type: string + issuerUrl: + type: string + kubeId: + type: string + oidcCaContent: + type: string + oidcGroupsClaim: + items: + type: string + type: array + oidcGroupsPrefix: + type: string + oidcRequiredClaim: + items: + type: string + type: array + oidcSigningAlgs: + items: + type: string + type: array + oidcUsernameClaim: + type: string + oidcUsernamePrefix: + type: string + serviceName: + type: string + type: object + conditions: + description: Conditions of the resource. 
+ items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/kube.ovh.edixos.io_projectkubes.yaml b/package/crds/kube.ovh.edixos.io_projectkubes.yaml new file mode 100644 index 0000000..6d95db7 --- /dev/null +++ b/package/crds/kube.ovh.edixos.io_projectkubes.yaml @@ -0,0 +1,614 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.13.0 + name: projectkubes.kube.ovh.edixos.io +spec: + group: kube.ovh.edixos.io + names: + categories: + - crossplane + - managed + - ovh + kind: ProjectKube + listKind: ProjectKubeList + plural: projectkubes + singular: projectkube + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: 
string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: ProjectKube is the Schema for the ProjectKubes API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ProjectKubeSpec defines the desired state of ProjectKube + properties: + deletionPolicy: + default: Delete + description: 'DeletionPolicy specifies what will happen to the underlying + external when this managed resource is deleted - either "Delete" + or "Orphan" the external resource. This field is planned to be deprecated + in favor of the ManagementPolicies field in a future release. Currently, + both could be set independently and non-default values would be + honored if the feature flag is enabled. 
See the design doc for more + information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223' + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + customization: + items: + properties: + apiserver: + items: + properties: + admissionplugins: + items: + properties: + disabled: + items: + type: string + type: array + enabled: + items: + type: string + type: array + type: object + type: array + type: object + type: array + type: object + type: array + customizationApiserver: + items: + properties: + admissionplugins: + items: + properties: + disabled: + items: + type: string + type: array + enabled: + items: + type: string + type: array + type: object + type: array + type: object + type: array + customizationKubeProxy: + items: + properties: + iptables: + items: + properties: + minSyncPeriod: + type: string + syncPeriod: + type: string + type: object + type: array + ipvs: + items: + properties: + minSyncPeriod: + type: string + scheduler: + type: string + syncPeriod: + type: string + tcpFinTimeout: + type: string + tcpTimeout: + type: string + udpTimeout: + type: string + type: object + type: array + type: object + type: array + kubeProxyMode: + type: string + privateNetworkConfiguration: + items: + properties: + defaultVrackGateway: + description: If defined, all egress traffic will be routed + towards this IP address, which should belong to the private + network. Empty string means disabled. + type: string + privateNetworkRoutingAsDefault: + description: Defines whether routing should default to using + the nodes' private interface, instead of their public + interface. Default is false. + type: boolean + type: object + type: array + privateNetworkId: + type: string + region: + type: string + serviceName: + type: string + updatePolicy: + type: string + version: + type: string + type: object + initProvider: + description: THIS IS A BETA FIELD. 
It will be honored unless the Management + Policies feature flag is disabled. InitProvider holds the same fields + as ForProvider, with the exception of Identifier and other resource + reference fields. The fields that are in InitProvider are merged + into ForProvider when the resource is created. The same fields are + also added to the terraform ignore_changes hook, to avoid updating + them after creation. This is useful for fields that are required + on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, + like an autoscaler. + properties: + customization: + items: + properties: + apiserver: + items: + properties: + admissionplugins: + items: + properties: + disabled: + items: + type: string + type: array + enabled: + items: + type: string + type: array + type: object + type: array + type: object + type: array + type: object + type: array + customizationApiserver: + items: + properties: + admissionplugins: + items: + properties: + disabled: + items: + type: string + type: array + enabled: + items: + type: string + type: array + type: object + type: array + type: object + type: array + customizationKubeProxy: + items: + properties: + iptables: + items: + properties: + minSyncPeriod: + type: string + syncPeriod: + type: string + type: object + type: array + ipvs: + items: + properties: + minSyncPeriod: + type: string + scheduler: + type: string + syncPeriod: + type: string + tcpFinTimeout: + type: string + tcpTimeout: + type: string + udpTimeout: + type: string + type: object + type: array + type: object + type: array + kubeProxyMode: + type: string + privateNetworkConfiguration: + items: + properties: + defaultVrackGateway: + description: If defined, all egress traffic will be routed + towards this IP address, which should belong to the private + network. Empty string means disabled. 
+ type: string + privateNetworkRoutingAsDefault: + description: Defines whether routing should default to using + the nodes' private interface, instead of their public + interface. Default is false. + type: boolean + type: object + type: array + privateNetworkId: + type: string + region: + type: string + serviceName: + type: string + updatePolicy: + type: string + version: + type: string + type: object + managementPolicies: + default: + - '*' + description: 'THIS IS A BETA FIELD. It is on by default but can be + opted out through a Crossplane feature flag. ManagementPolicies + specify the array of actions Crossplane is allowed to take on the + managed and external resources. This field is planned to replace + the DeletionPolicy field in a future release. Currently, both could + be set independently and non-default values would be honored if + the feature flag is enabled. If both are custom, the DeletionPolicy + field will be ignored. See the design doc for more information: + https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md' + items: + description: A ManagementAction represents an action that the Crossplane + controllers can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: ProviderConfigReference specifies how the provider that + will be used to create, observe, update, and delete this managed + resource should be configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: PublishConnectionDetailsTo specifies the connection secret + config which contains a name, metadata and a reference to secret + store config to which any connection details for this managed resource + should be written. Connection details frequently include the endpoint, + username, and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: SecretStoreConfigRef specifies which secret store + config should be used for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. 
The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are the annotations to be added to + connection secret. - For Kubernetes secrets, this will be + used as "metadata.annotations". - It is up to Secret Store + implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: Labels are the labels/tags to be added to connection + secret. - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store + types. + type: object + type: + description: Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: WriteConnectionSecretToReference specifies the namespace + and name of a Secret to which any connection details for this managed + resource should be written. Connection details frequently include + the endpoint, username, and password required to connect to the + managed resource. This field is planned to be replaced in a future + release in favor of PublishConnectionDetailsTo. Currently, both + could be set independently and connection details would be published + to both without affecting each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.region is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.region) + || (has(self.initProvider) && has(self.initProvider.region))' + - message: spec.forProvider.serviceName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.serviceName) + || (has(self.initProvider) && has(self.initProvider.serviceName))' + status: + description: ProjectKubeStatus defines the observed state of ProjectKube. + properties: + atProvider: + properties: + controlPlaneIsUpToDate: + type: boolean + customization: + items: + properties: + apiserver: + items: + properties: + admissionplugins: + items: + properties: + disabled: + items: + type: string + type: array + enabled: + items: + type: string + type: array + type: object + type: array + type: object + type: array + type: object + type: array + customizationApiserver: + items: + properties: + admissionplugins: + items: + properties: + disabled: + items: + type: string + type: array + enabled: + items: + type: string + type: array + type: object + type: array + type: object + type: array + customizationKubeProxy: + items: + properties: + iptables: + items: + properties: + minSyncPeriod: + type: string + syncPeriod: + type: string + type: object + type: array + ipvs: + items: + properties: + minSyncPeriod: + type: string + scheduler: + type: string + syncPeriod: + type: string + tcpFinTimeout: + type: string + tcpTimeout: + type: string + udpTimeout: + type: string + type: object + type: array + type: object + type: array + id: + type: string + isUpToDate: + type: boolean + kubeProxyMode: + type: string + nextUpgradeVersions: + 
items: + type: string + type: array + nodesUrl: + type: string + privateNetworkConfiguration: + items: + properties: + defaultVrackGateway: + description: If defined, all egress traffic will be routed + towards this IP address, which should belong to the private + network. Empty string means disabled. + type: string + privateNetworkRoutingAsDefault: + description: Defines whether routing should default to using + the nodes' private interface, instead of their public + interface. Default is false. + type: boolean + type: object + type: array + privateNetworkId: + type: string + region: + type: string + serviceName: + type: string + status: + type: string + updatePolicy: + type: string + url: + type: string + version: + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/lb.ovh.edixos.io_httpfarms.yaml b/package/crds/lb.ovh.edixos.io_httpfarms.yaml new file mode 100644 index 0000000..dea445b --- /dev/null +++ b/package/crds/lb.ovh.edixos.io_httpfarms.yaml @@ -0,0 +1,414 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.13.0 + name: httpfarms.lb.ovh.edixos.io +spec: + group: lb.ovh.edixos.io + names: + categories: + - crossplane + - managed + - ovh + kind: HTTPFarm + listKind: HTTPFarmList + plural: httpfarms + singular: httpfarm + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: HTTPFarm is the Schema for the HTTPFarms API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: HTTPFarmSpec defines the desired state of HTTPFarm + properties: + deletionPolicy: + default: Delete + description: 'DeletionPolicy specifies what will happen to the underlying + external when this managed resource is deleted - either "Delete" + or "Orphan" the external resource. This field is planned to be deprecated + in favor of the ManagementPolicies field in a future release. Currently, + both could be set independently and non-default values would be + honored if the feature flag is enabled. See the design doc for more + information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223' + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + balance: + type: string + displayName: + type: string + port: + type: number + probe: + items: + properties: + forceSsl: + type: boolean + interval: + type: number + match: + type: string + method: + type: string + negate: + type: boolean + pattern: + type: string + port: + type: number + type: + type: string + url: + type: string + type: object + type: array + serviceName: + type: string + stickiness: + type: string + vrackNetworkId: + type: number + zone: + type: string + type: object + initProvider: + description: THIS IS A BETA FIELD. It will be honored unless the Management + Policies feature flag is disabled. InitProvider holds the same fields + as ForProvider, with the exception of Identifier and other resource + reference fields. The fields that are in InitProvider are merged + into ForProvider when the resource is created. The same fields are + also added to the terraform ignore_changes hook, to avoid updating + them after creation. 
This is useful for fields that are required + on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, + like an autoscaler. + properties: + balance: + type: string + displayName: + type: string + port: + type: number + probe: + items: + properties: + forceSsl: + type: boolean + interval: + type: number + match: + type: string + method: + type: string + negate: + type: boolean + pattern: + type: string + port: + type: number + type: + type: string + url: + type: string + type: object + type: array + serviceName: + type: string + stickiness: + type: string + vrackNetworkId: + type: number + zone: + type: string + type: object + managementPolicies: + default: + - '*' + description: 'THIS IS A BETA FIELD. It is on by default but can be + opted out through a Crossplane feature flag. ManagementPolicies + specify the array of actions Crossplane is allowed to take on the + managed and external resources. This field is planned to replace + the DeletionPolicy field in a future release. Currently, both could + be set independently and non-default values would be honored if + the feature flag is enabled. If both are custom, the DeletionPolicy + field will be ignored. See the design doc for more information: + https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md' + items: + description: A ManagementAction represents an action that the Crossplane + controllers can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: ProviderConfigReference specifies how the provider that + will be used to create, observe, update, and delete this managed + resource should be configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: PublishConnectionDetailsTo specifies the connection secret + config which contains a name, metadata and a reference to secret + store config to which any connection details for this managed resource + should be written. Connection details frequently include the endpoint, + username, and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: SecretStoreConfigRef specifies which secret store + config should be used for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are the annotations to be added to + connection secret. - For Kubernetes secrets, this will be + used as "metadata.annotations". - It is up to Secret Store + implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: Labels are the labels/tags to be added to connection + secret. - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store + types. + type: object + type: + description: Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: WriteConnectionSecretToReference specifies the namespace + and name of a Secret to which any connection details for this managed + resource should be written. Connection details frequently include + the endpoint, username, and password required to connect to the + managed resource. 
This field is planned to be replaced in a future + release in favor of PublishConnectionDetailsTo. Currently, both + could be set independently and connection details would be published + to both without affecting each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.serviceName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.serviceName) + || (has(self.initProvider) && has(self.initProvider.serviceName))' + - message: spec.forProvider.zone is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.zone) + || (has(self.initProvider) && has(self.initProvider.zone))' + status: + description: HTTPFarmStatus defines the observed state of HTTPFarm. + properties: + atProvider: + properties: + balance: + type: string + displayName: + type: string + id: + type: string + port: + type: number + probe: + items: + properties: + forceSsl: + type: boolean + interval: + type: number + match: + type: string + method: + type: string + negate: + type: boolean + pattern: + type: string + port: + type: number + type: + type: string + url: + type: string + type: object + type: array + serviceName: + type: string + stickiness: + type: string + vrackNetworkId: + type: number + zone: + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. 
+ format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/lb.ovh.edixos.io_httpfarmservers.yaml b/package/crds/lb.ovh.edixos.io_httpfarmservers.yaml new file mode 100644 index 0000000..ad55bb3 --- /dev/null +++ b/package/crds/lb.ovh.edixos.io_httpfarmservers.yaml @@ -0,0 +1,392 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.13.0 + name: httpfarmservers.lb.ovh.edixos.io +spec: + group: lb.ovh.edixos.io + names: + categories: + - crossplane + - managed + - ovh + kind: HTTPFarmServer + listKind: HTTPFarmServerList + plural: httpfarmservers + singular: httpfarmserver + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: HTTPFarmServer is the Schema for the HTTPFarmServers API. 
+ properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: HTTPFarmServerSpec defines the desired state of HTTPFarmServer + properties: + deletionPolicy: + default: Delete + description: 'DeletionPolicy specifies what will happen to the underlying + external when this managed resource is deleted - either "Delete" + or "Orphan" the external resource. This field is planned to be deprecated + in favor of the ManagementPolicies field in a future release. Currently, + both could be set independently and non-default values would be + honored if the feature flag is enabled. See the design doc for more + information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223' + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + address: + type: string + backup: + type: boolean + chain: + type: string + displayName: + type: string + farmId: + type: number + onMarkedDown: + type: string + port: + type: number + probe: + type: boolean + proxyProtocolVersion: + type: string + serviceName: + type: string + ssl: + type: boolean + status: + type: string + weight: + type: number + type: object + initProvider: + description: THIS IS A BETA FIELD. 
It will be honored unless the Management + Policies feature flag is disabled. InitProvider holds the same fields + as ForProvider, with the exception of Identifier and other resource + reference fields. The fields that are in InitProvider are merged + into ForProvider when the resource is created. The same fields are + also added to the terraform ignore_changes hook, to avoid updating + them after creation. This is useful for fields that are required + on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, + like an autoscaler. + properties: + address: + type: string + backup: + type: boolean + chain: + type: string + displayName: + type: string + farmId: + type: number + onMarkedDown: + type: string + port: + type: number + probe: + type: boolean + proxyProtocolVersion: + type: string + serviceName: + type: string + ssl: + type: boolean + status: + type: string + weight: + type: number + type: object + managementPolicies: + default: + - '*' + description: 'THIS IS A BETA FIELD. It is on by default but can be + opted out through a Crossplane feature flag. ManagementPolicies + specify the array of actions Crossplane is allowed to take on the + managed and external resources. This field is planned to replace + the DeletionPolicy field in a future release. Currently, both could + be set independently and non-default values would be honored if + the feature flag is enabled. If both are custom, the DeletionPolicy + field will be ignored. See the design doc for more information: + https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md' + items: + description: A ManagementAction represents an action that the Crossplane + controllers can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: ProviderConfigReference specifies how the provider that + will be used to create, observe, update, and delete this managed + resource should be configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: PublishConnectionDetailsTo specifies the connection secret + config which contains a name, metadata and a reference to secret + store config to which any connection details for this managed resource + should be written. Connection details frequently include the endpoint, + username, and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: SecretStoreConfigRef specifies which secret store + config should be used for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are the annotations to be added to + connection secret. - For Kubernetes secrets, this will be + used as "metadata.annotations". - It is up to Secret Store + implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: Labels are the labels/tags to be added to connection + secret. - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store + types. + type: object + type: + description: Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: WriteConnectionSecretToReference specifies the namespace + and name of a Secret to which any connection details for this managed + resource should be written. Connection details frequently include + the endpoint, username, and password required to connect to the + managed resource. 
This field is planned to be replaced in a future + release in favor of PublishConnectionDetailsTo. Currently, both + could be set independently and connection details would be published + to both without affecting each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.address is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.address) + || (has(self.initProvider) && has(self.initProvider.address))' + - message: spec.forProvider.farmId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.farmId) + || (has(self.initProvider) && has(self.initProvider.farmId))' + - message: spec.forProvider.serviceName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.serviceName) + || (has(self.initProvider) && has(self.initProvider.serviceName))' + - message: spec.forProvider.status is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.status) + || (has(self.initProvider) && has(self.initProvider.status))' + status: + description: HTTPFarmServerStatus defines the observed state of HTTPFarmServer. 
+ properties: + atProvider: + properties: + address: + type: string + backup: + type: boolean + chain: + type: string + cookie: + type: string + displayName: + type: string + farmId: + type: number + id: + type: string + onMarkedDown: + type: string + port: + type: number + probe: + type: boolean + proxyProtocolVersion: + type: string + serviceName: + type: string + ssl: + type: boolean + status: + type: string + weight: + type: number + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/lb.ovh.edixos.io_httpfrontends.yaml b/package/crds/lb.ovh.edixos.io_httpfrontends.yaml new file mode 100644 index 0000000..6da4cb5 --- /dev/null +++ b/package/crds/lb.ovh.edixos.io_httpfrontends.yaml @@ -0,0 +1,403 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.13.0 + name: httpfrontends.lb.ovh.edixos.io +spec: + group: lb.ovh.edixos.io + names: + categories: + - crossplane + - managed + - ovh + kind: HTTPFrontend + listKind: HTTPFrontendList + plural: httpfrontends + singular: httpfrontend + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: HTTPFrontend is the Schema for the HTTPFrontends API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. 
In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: HTTPFrontendSpec defines the desired state of HTTPFrontend + properties: + deletionPolicy: + default: Delete + description: 'DeletionPolicy specifies what will happen to the underlying + external when this managed resource is deleted - either "Delete" + or "Orphan" the external resource. This field is planned to be deprecated + in favor of the ManagementPolicies field in a future release. Currently, + both could be set independently and non-default values would be + honored if the feature flag is enabled. See the design doc for more + information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223' + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + allowedSource: + items: + type: string + type: array + dedicatedIpfo: + items: + type: string + type: array + defaultFarmId: + type: number + defaultSslId: + type: number + disabled: + type: boolean + displayName: + type: string + hsts: + type: boolean + httpHeader: + items: + type: string + type: array + port: + type: string + redirectLocation: + type: string + serviceName: + type: string + ssl: + type: boolean + zone: + type: string + type: object + initProvider: + description: THIS IS A BETA FIELD. It will be honored unless the Management + Policies feature flag is disabled. InitProvider holds the same fields + as ForProvider, with the exception of Identifier and other resource + reference fields. The fields that are in InitProvider are merged + into ForProvider when the resource is created. The same fields are + also added to the terraform ignore_changes hook, to avoid updating + them after creation. 
This is useful for fields that are required + on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, + like an autoscaler. + properties: + allowedSource: + items: + type: string + type: array + dedicatedIpfo: + items: + type: string + type: array + defaultFarmId: + type: number + defaultSslId: + type: number + disabled: + type: boolean + displayName: + type: string + hsts: + type: boolean + httpHeader: + items: + type: string + type: array + port: + type: string + redirectLocation: + type: string + serviceName: + type: string + ssl: + type: boolean + zone: + type: string + type: object + managementPolicies: + default: + - '*' + description: 'THIS IS A BETA FIELD. It is on by default but can be + opted out through a Crossplane feature flag. ManagementPolicies + specify the array of actions Crossplane is allowed to take on the + managed and external resources. This field is planned to replace + the DeletionPolicy field in a future release. Currently, both could + be set independently and non-default values would be honored if + the feature flag is enabled. If both are custom, the DeletionPolicy + field will be ignored. See the design doc for more information: + https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md' + items: + description: A ManagementAction represents an action that the Crossplane + controllers can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: ProviderConfigReference specifies how the provider that + will be used to create, observe, update, and delete this managed + resource should be configured. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: PublishConnectionDetailsTo specifies the connection secret + config which contains a name, metadata and a reference to secret + store config to which any connection details for this managed resource + should be written. Connection details frequently include the endpoint, + username, and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: SecretStoreConfigRef specifies which secret store + config should be used for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are the annotations to be added to + connection secret. - For Kubernetes secrets, this will be + used as "metadata.annotations". - It is up to Secret Store + implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: Labels are the labels/tags to be added to connection + secret. - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store + types. + type: object + type: + description: Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: WriteConnectionSecretToReference specifies the namespace + and name of a Secret to which any connection details for this managed + resource should be written. Connection details frequently include + the endpoint, username, and password required to connect to the + managed resource. This field is planned to be replaced in a future + release in favor of PublishConnectionDetailsTo. Currently, both + could be set independently and connection details would be published + to both without affecting each other. + properties: + name: + description: Name of the secret. 
+ type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.port is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.port) + || (has(self.initProvider) && has(self.initProvider.port))' + - message: spec.forProvider.serviceName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.serviceName) + || (has(self.initProvider) && has(self.initProvider.serviceName))' + - message: spec.forProvider.zone is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.zone) + || (has(self.initProvider) && has(self.initProvider.zone))' + status: + description: HTTPFrontendStatus defines the observed state of HTTPFrontend. + properties: + atProvider: + properties: + allowedSource: + items: + type: string + type: array + dedicatedIpfo: + items: + type: string + type: array + defaultFarmId: + type: number + defaultSslId: + type: number + disabled: + type: boolean + displayName: + type: string + hsts: + type: boolean + httpHeader: + items: + type: string + type: array + id: + type: string + port: + type: string + redirectLocation: + type: string + serviceName: + type: string + ssl: + type: boolean + zone: + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. 
+ format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/lb.ovh.edixos.io_httprouterules.yaml b/package/crds/lb.ovh.edixos.io_httprouterules.yaml new file mode 100644 index 0000000..53e130c --- /dev/null +++ b/package/crds/lb.ovh.edixos.io_httprouterules.yaml @@ -0,0 +1,359 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.13.0 + name: httprouterules.lb.ovh.edixos.io +spec: + group: lb.ovh.edixos.io + names: + categories: + - crossplane + - managed + - ovh + kind: HTTPRouteRule + listKind: HTTPRouteRuleList + plural: httprouterules + singular: httprouterule + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: HTTPRouteRule is the Schema for the HTTPRouteRules API. 
+ properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: HTTPRouteRuleSpec defines the desired state of HTTPRouteRule + properties: + deletionPolicy: + default: Delete + description: 'DeletionPolicy specifies what will happen to the underlying + external when this managed resource is deleted - either "Delete" + or "Orphan" the external resource. This field is planned to be deprecated + in favor of the ManagementPolicies field in a future release. Currently, + both could be set independently and non-default values would be + honored if the feature flag is enabled. See the design doc for more + information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223' + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + displayName: + type: string + field: + type: string + match: + type: string + negate: + type: boolean + pattern: + type: string + routeId: + type: string + serviceName: + type: string + subField: + type: string + type: object + initProvider: + description: THIS IS A BETA FIELD. It will be honored unless the Management + Policies feature flag is disabled. InitProvider holds the same fields + as ForProvider, with the exception of Identifier and other resource + reference fields. 
The fields that are in InitProvider are merged + into ForProvider when the resource is created. The same fields are + also added to the terraform ignore_changes hook, to avoid updating + them after creation. This is useful for fields that are required + on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, + like an autoscaler. + properties: + displayName: + type: string + field: + type: string + match: + type: string + negate: + type: boolean + pattern: + type: string + routeId: + type: string + serviceName: + type: string + subField: + type: string + type: object + managementPolicies: + default: + - '*' + description: 'THIS IS A BETA FIELD. It is on by default but can be + opted out through a Crossplane feature flag. ManagementPolicies + specify the array of actions Crossplane is allowed to take on the + managed and external resources. This field is planned to replace + the DeletionPolicy field in a future release. Currently, both could + be set independently and non-default values would be honored if + the feature flag is enabled. If both are custom, the DeletionPolicy + field will be ignored. See the design doc for more information: + https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md' + items: + description: A ManagementAction represents an action that the Crossplane + controllers can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: ProviderConfigReference specifies how the provider that + will be used to create, observe, update, and delete this managed + resource should be configured. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: PublishConnectionDetailsTo specifies the connection secret + config which contains a name, metadata and a reference to secret + store config to which any connection details for this managed resource + should be written. Connection details frequently include the endpoint, + username, and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: SecretStoreConfigRef specifies which secret store + config should be used for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are the annotations to be added to + connection secret. - For Kubernetes secrets, this will be + used as "metadata.annotations". - It is up to Secret Store + implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: Labels are the labels/tags to be added to connection + secret. - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store + types. + type: object + type: + description: Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: WriteConnectionSecretToReference specifies the namespace + and name of a Secret to which any connection details for this managed + resource should be written. Connection details frequently include + the endpoint, username, and password required to connect to the + managed resource. This field is planned to be replaced in a future + release in favor of PublishConnectionDetailsTo. Currently, both + could be set independently and connection details would be published + to both without affecting each other. + properties: + name: + description: Name of the secret. 
+ type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.field is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.field) + || (has(self.initProvider) && has(self.initProvider.field))' + - message: spec.forProvider.match is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.match) + || (has(self.initProvider) && has(self.initProvider.match))' + - message: spec.forProvider.routeId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.routeId) + || (has(self.initProvider) && has(self.initProvider.routeId))' + - message: spec.forProvider.serviceName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.serviceName) + || (has(self.initProvider) && has(self.initProvider.serviceName))' + status: + description: HTTPRouteRuleStatus defines the observed state of HTTPRouteRule. + properties: + atProvider: + properties: + displayName: + type: string + field: + type: string + id: + type: string + match: + type: string + negate: + type: boolean + pattern: + type: string + routeId: + type: string + serviceName: + type: string + subField: + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. 
+ properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/lb.ovh.edixos.io_httproutes.yaml b/package/crds/lb.ovh.edixos.io_httproutes.yaml new file mode 100644 index 0000000..2f83e1a --- /dev/null +++ b/package/crds/lb.ovh.edixos.io_httproutes.yaml @@ -0,0 +1,426 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.13.0 + name: httproutes.lb.ovh.edixos.io +spec: + group: lb.ovh.edixos.io + names: + categories: + - crossplane + - managed + - ovh + kind: HTTPRoute + listKind: HTTPRouteList + plural: httproutes + singular: httproute + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + 
schema: + openAPIV3Schema: + description: HTTPRoute is the Schema for the HTTPRoutes API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: HTTPRouteSpec defines the desired state of HTTPRoute + properties: + deletionPolicy: + default: Delete + description: 'DeletionPolicy specifies what will happen to the underlying + external when this managed resource is deleted - either "Delete" + or "Orphan" the external resource. This field is planned to be deprecated + in favor of the ManagementPolicies field in a future release. Currently, + both could be set independently and non-default values would be + honored if the feature flag is enabled. See the design doc for more + information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223' + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + action: + description: Action triggered when all rules match + items: + properties: + status: + description: HTTP status code for "redirect" and "reject" + actions + type: number + target: + description: Farm ID for "farm" action type or URL template + for "redirect" action. 
You may use ${uri}, ${protocol}, + ${host}, ${port} and ${path} variables in redirect target + type: string + type: + description: Action to trigger if all the rules of this + route matches + type: string + type: object + type: array + displayName: + description: Human readable name for your route, this field is + for you + type: string + frontendId: + description: Route traffic for this frontend + type: number + serviceName: + description: The internal name of your IP load balancing + type: string + weight: + description: Route priority ([0..255]). 0 if null. Highest priority + routes are evaluated last. Only the first matching route will + trigger an action + type: number + type: object + initProvider: + description: THIS IS A BETA FIELD. It will be honored unless the Management + Policies feature flag is disabled. InitProvider holds the same fields + as ForProvider, with the exception of Identifier and other resource + reference fields. The fields that are in InitProvider are merged + into ForProvider when the resource is created. The same fields are + also added to the terraform ignore_changes hook, to avoid updating + them after creation. This is useful for fields that are required + on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, + like an autoscaler. + properties: + action: + description: Action triggered when all rules match + items: + properties: + status: + description: HTTP status code for "redirect" and "reject" + actions + type: number + target: + description: Farm ID for "farm" action type or URL template + for "redirect" action. 
You may use ${uri}, ${protocol}, + ${host}, ${port} and ${path} variables in redirect target + type: string + type: + description: Action to trigger if all the rules of this + route matches + type: string + type: object + type: array + displayName: + description: Human readable name for your route, this field is + for you + type: string + frontendId: + description: Route traffic for this frontend + type: number + serviceName: + description: The internal name of your IP load balancing + type: string + weight: + description: Route priority ([0..255]). 0 if null. Highest priority + routes are evaluated last. Only the first matching route will + trigger an action + type: number + type: object + managementPolicies: + default: + - '*' + description: 'THIS IS A BETA FIELD. It is on by default but can be + opted out through a Crossplane feature flag. ManagementPolicies + specify the array of actions Crossplane is allowed to take on the + managed and external resources. This field is planned to replace + the DeletionPolicy field in a future release. Currently, both could + be set independently and non-default values would be honored if + the feature flag is enabled. If both are custom, the DeletionPolicy + field will be ignored. See the design doc for more information: + https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md' + items: + description: A ManagementAction represents an action that the Crossplane + controllers can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: ProviderConfigReference specifies how the provider that + will be used to create, observe, update, and delete this managed + resource should be configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: PublishConnectionDetailsTo specifies the connection secret + config which contains a name, metadata and a reference to secret + store config to which any connection details for this managed resource + should be written. Connection details frequently include the endpoint, + username, and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: SecretStoreConfigRef specifies which secret store + config should be used for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are the annotations to be added to + connection secret. - For Kubernetes secrets, this will be + used as "metadata.annotations". - It is up to Secret Store + implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: Labels are the labels/tags to be added to connection + secret. - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store + types. + type: object + type: + description: Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: WriteConnectionSecretToReference specifies the namespace + and name of a Secret to which any connection details for this managed + resource should be written. Connection details frequently include + the endpoint, username, and password required to connect to the + managed resource. 
This field is planned to be replaced in a future + release in favor of PublishConnectionDetailsTo. Currently, both + could be set independently and connection details would be published + to both without affecting each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.action is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.action) + || (has(self.initProvider) && has(self.initProvider.action))' + - message: spec.forProvider.serviceName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.serviceName) + || (has(self.initProvider) && has(self.initProvider.serviceName))' + status: + description: HTTPRouteStatus defines the observed state of HTTPRoute. + properties: + atProvider: + properties: + action: + description: Action triggered when all rules match + items: + properties: + status: + description: HTTP status code for "redirect" and "reject" + actions + type: number + target: + description: Farm ID for "farm" action type or URL template + for "redirect" action. 
You may use ${uri}, ${protocol}, + ${host}, ${port} and ${path} variables in redirect target + type: string + type: + description: Action to trigger if all the rules of this + route matches + type: string + type: object + type: array + displayName: + description: Human readable name for your route, this field is + for you + type: string + frontendId: + description: Route traffic for this frontend + type: number + id: + type: string + rules: + description: List of rules to match to trigger action + items: + properties: + field: + type: string + match: + type: string + negate: + type: boolean + pattern: + type: string + ruleId: + type: number + subField: + type: string + type: object + type: array + serviceName: + description: The internal name of your IP load balancing + type: string + status: + description: Route status. Routes in "ok" state are ready to operate + type: string + weight: + description: Route priority ([0..255]). 0 if null. Highest priority + routes are evaluated last. Only the first matching route will + trigger an action + type: number + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/lb.ovh.edixos.io_iploadbalancings.yaml b/package/crds/lb.ovh.edixos.io_iploadbalancings.yaml new file mode 100644 index 0000000..600d617 --- /dev/null +++ b/package/crds/lb.ovh.edixos.io_iploadbalancings.yaml @@ -0,0 +1,608 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.13.0 + name: iploadbalancings.lb.ovh.edixos.io +spec: + group: lb.ovh.edixos.io + names: + categories: + - crossplane + - managed + - ovh + kind: Iploadbalancing + listKind: IploadbalancingList + plural: iploadbalancings + singular: iploadbalancing + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Iploadbalancing is the Schema for the Iploadbalancings API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. 
Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IploadbalancingSpec defines the desired state of Iploadbalancing + properties: + deletionPolicy: + default: Delete + description: 'DeletionPolicy specifies what will happen to the underlying + external when this managed resource is deleted - either "Delete" + or "Orphan" the external resource. This field is planned to be deprecated + in favor of the ManagementPolicies field in a future release. Currently, + both could be set independently and non-default values would be + honored if the feature flag is enabled. See the design doc for more + information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223' + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + displayName: + description: Set the name displayed in ManagerV6 for your iplb + (max 50 chars) + type: string + ovhSubsidiary: + description: Ovh Subsidiary + type: string + paymentMean: + description: Ovh payment mode + type: string + plan: + description: Product Plan to order + items: + properties: + catalogName: + description: Catalog name + type: string + configuration: + description: Representation of a configuration item for + personalizing product + items: + properties: + label: + description: Identifier of the resource + type: string + value: + description: Path to the resource in API.OVH.COM + type: string + type: object + type: array + duration: + description: duration + type: string + planCode: + description: Plan code + type: string + pricingMode: + description: Pricing model identifier + type: string + type: object + type: array + planOption: + description: Product Plan to order + items: + properties: + catalogName: + description: Catalog name + type: string + configuration: + 
description: Representation of a configuration item for + personalizing product + items: + properties: + label: + description: Identifier of the resource + type: string + value: + description: Path to the resource in API.OVH.COM + type: string + type: object + type: array + duration: + description: duration + type: string + planCode: + description: Plan code + type: string + pricingMode: + description: Pricing model identifier + type: string + type: object + type: array + sslConfiguration: + description: 'Modern oldest compatible clients : Firefox 27, Chrome + 30, IE 11 on Windows 7, Edge, Opera 17, Safari 9, Android 5.0, + and Java 8. Intermediate oldest compatible clients : Firefox + 1, Chrome 1, IE 7, Opera 5, Safari 1, Windows XP IE8, Android + 2.3, Java 7. Intermediate if null.' + type: string + type: object + initProvider: + description: THIS IS A BETA FIELD. It will be honored unless the Management + Policies feature flag is disabled. InitProvider holds the same fields + as ForProvider, with the exception of Identifier and other resource + reference fields. The fields that are in InitProvider are merged + into ForProvider when the resource is created. The same fields are + also added to the terraform ignore_changes hook, to avoid updating + them after creation. This is useful for fields that are required + on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, + like an autoscaler. 
+ properties: + displayName: + description: Set the name displayed in ManagerV6 for your iplb + (max 50 chars) + type: string + ovhSubsidiary: + description: Ovh Subsidiary + type: string + paymentMean: + description: Ovh payment mode + type: string + plan: + description: Product Plan to order + items: + properties: + catalogName: + description: Catalog name + type: string + configuration: + description: Representation of a configuration item for + personalizing product + items: + properties: + label: + description: Identifier of the resource + type: string + value: + description: Path to the resource in API.OVH.COM + type: string + type: object + type: array + duration: + description: duration + type: string + planCode: + description: Plan code + type: string + pricingMode: + description: Pricing model identifier + type: string + type: object + type: array + planOption: + description: Product Plan to order + items: + properties: + catalogName: + description: Catalog name + type: string + configuration: + description: Representation of a configuration item for + personalizing product + items: + properties: + label: + description: Identifier of the resource + type: string + value: + description: Path to the resource in API.OVH.COM + type: string + type: object + type: array + duration: + description: duration + type: string + planCode: + description: Plan code + type: string + pricingMode: + description: Pricing model identifier + type: string + type: object + type: array + sslConfiguration: + description: 'Modern oldest compatible clients : Firefox 27, Chrome + 30, IE 11 on Windows 7, Edge, Opera 17, Safari 9, Android 5.0, + and Java 8. Intermediate oldest compatible clients : Firefox + 1, Chrome 1, IE 7, Opera 5, Safari 1, Windows XP IE8, Android + 2.3, Java 7. Intermediate if null.' + type: string + type: object + managementPolicies: + default: + - '*' + description: 'THIS IS A BETA FIELD. 
It is on by default but can be + opted out through a Crossplane feature flag. ManagementPolicies + specify the array of actions Crossplane is allowed to take on the + managed and external resources. This field is planned to replace + the DeletionPolicy field in a future release. Currently, both could + be set independently and non-default values would be honored if + the feature flag is enabled. If both are custom, the DeletionPolicy + field will be ignored. See the design doc for more information: + https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md' + items: + description: A ManagementAction represents an action that the Crossplane + controllers can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: ProviderConfigReference specifies how the provider that + will be used to create, observe, update, and delete this managed + resource should be configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. 
Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: PublishConnectionDetailsTo specifies the connection secret + config which contains a name, metadata and a reference to secret + store config to which any connection details for this managed resource + should be written. Connection details frequently include the endpoint, + username, and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: SecretStoreConfigRef specifies which secret store + config should be used for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are the annotations to be added to + connection secret. - For Kubernetes secrets, this will be + used as "metadata.annotations". - It is up to Secret Store + implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: Labels are the labels/tags to be added to connection + secret. - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store + types. + type: object + type: + description: Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: WriteConnectionSecretToReference specifies the namespace + and name of a Secret to which any connection details for this managed + resource should be written. Connection details frequently include + the endpoint, username, and password required to connect to the + managed resource. This field is planned to be replaced in a future + release in favor of PublishConnectionDetailsTo. Currently, both + could be set independently and connection details would be published + to both without affecting each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.ovhSubsidiary is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.ovhSubsidiary) + || (has(self.initProvider) && has(self.initProvider.ovhSubsidiary))' + - message: spec.forProvider.plan is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.plan) + || (has(self.initProvider) && has(self.initProvider.plan))' + status: + description: IploadbalancingStatus defines the observed state of Iploadbalancing. + properties: + atProvider: + properties: + displayName: + description: Set the name displayed in ManagerV6 for your iplb + (max 50 chars) + type: string + id: + type: string + ipLoadbalancing: + description: Your IP load balancing + type: string + ipv4: + description: The IPV4 associated to your IP load balancing + type: string + ipv6: + description: The IPV6 associated to your IP load balancing. DEPRECATED. 
+ type: string + offer: + description: The offer of your IP load balancing + type: string + order: + description: Details about an Order + items: + properties: + date: + type: string + details: + items: + properties: + description: + type: string + domain: + type: string + orderDetailId: + type: number + quantity: + type: string + type: object + type: array + expirationDate: + type: string + orderId: + type: number + type: object + type: array + orderableZone: + description: Available additional zone for your Load Balancer + items: + properties: + name: + type: string + planCode: + type: string + type: object + type: array + ovhSubsidiary: + description: Ovh Subsidiary + type: string + paymentMean: + description: Ovh payment mode + type: string + plan: + description: Product Plan to order + items: + properties: + catalogName: + description: Catalog name + type: string + configuration: + description: Representation of a configuration item for + personalizing product + items: + properties: + label: + description: Identifier of the resource + type: string + value: + description: Path to the resource in API.OVH.COM + type: string + type: object + type: array + duration: + description: duration + type: string + planCode: + description: Plan code + type: string + pricingMode: + description: Pricing model identifier + type: string + type: object + type: array + planOption: + description: Product Plan to order + items: + properties: + catalogName: + description: Catalog name + type: string + configuration: + description: Representation of a configuration item for + personalizing product + items: + properties: + label: + description: Identifier of the resource + type: string + value: + description: Path to the resource in API.OVH.COM + type: string + type: object + type: array + duration: + description: duration + type: string + planCode: + description: Plan code + type: string + pricingMode: + description: Pricing model identifier + type: string + type: object + type: 
array + serviceName: + description: The internal name of your IP load balancing + type: string + sslConfiguration: + description: 'Modern oldest compatible clients : Firefox 27, Chrome + 30, IE 11 on Windows 7, Edge, Opera 17, Safari 9, Android 5.0, + and Java 8. Intermediate oldest compatible clients : Firefox + 1, Chrome 1, IE 7, Opera 5, Safari 1, Windows XP IE8, Android + 2.3, Java 7. Intermediate if null.' + type: string + state: + description: Current state of your IP + type: string + urn: + type: string + vrackEligibility: + description: Vrack eligibility + type: boolean + vrackName: + description: Name of the vRack on which the current Load Balancer + is attached to, as it is named on vRack product + type: string + zone: + description: Location where your service is + items: + type: string + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/lb.ovh.edixos.io_refreshes.yaml b/package/crds/lb.ovh.edixos.io_refreshes.yaml new file mode 100644 index 0000000..a626002 --- /dev/null +++ b/package/crds/lb.ovh.edixos.io_refreshes.yaml @@ -0,0 +1,321 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.13.0 + name: refreshes.lb.ovh.edixos.io +spec: + group: lb.ovh.edixos.io + names: + categories: + - crossplane + - managed + - ovh + kind: Refresh + listKind: RefreshList + plural: refreshes + singular: refresh + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Refresh is the Schema for the Refreshs API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: RefreshSpec defines the desired state of Refresh + properties: + deletionPolicy: + default: Delete + description: 'DeletionPolicy specifies what will happen to the underlying + external when this managed resource is deleted - either "Delete" + or "Orphan" the external resource. This field is planned to be deprecated + in favor of the ManagementPolicies field in a future release. Currently, + both could be set independently and non-default values would be + honored if the feature flag is enabled. See the design doc for more + information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223' + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + keepers: + items: + type: string + type: array + serviceName: + type: string + type: object + initProvider: + description: THIS IS A BETA FIELD. It will be honored unless the Management + Policies feature flag is disabled. InitProvider holds the same fields + as ForProvider, with the exception of Identifier and other resource + reference fields. The fields that are in InitProvider are merged + into ForProvider when the resource is created. The same fields are + also added to the terraform ignore_changes hook, to avoid updating + them after creation. This is useful for fields that are required + on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, + like an autoscaler. + properties: + keepers: + items: + type: string + type: array + serviceName: + type: string + type: object + managementPolicies: + default: + - '*' + description: 'THIS IS A BETA FIELD. It is on by default but can be + opted out through a Crossplane feature flag. 
ManagementPolicies + specify the array of actions Crossplane is allowed to take on the + managed and external resources. This field is planned to replace + the DeletionPolicy field in a future release. Currently, both could + be set independently and non-default values would be honored if + the feature flag is enabled. If both are custom, the DeletionPolicy + field will be ignored. See the design doc for more information: + https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md' + items: + description: A ManagementAction represents an action that the Crossplane + controllers can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: ProviderConfigReference specifies how the provider that + will be used to create, observe, update, and delete this managed + resource should be configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: PublishConnectionDetailsTo specifies the connection secret + config which contains a name, metadata and a reference to secret + store config to which any connection details for this managed resource + should be written. Connection details frequently include the endpoint, + username, and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: SecretStoreConfigRef specifies which secret store + config should be used for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are the annotations to be added to + connection secret. - For Kubernetes secrets, this will be + used as "metadata.annotations". - It is up to Secret Store + implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: Labels are the labels/tags to be added to connection + secret. - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store + types. + type: object + type: + description: Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: WriteConnectionSecretToReference specifies the namespace + and name of a Secret to which any connection details for this managed + resource should be written. Connection details frequently include + the endpoint, username, and password required to connect to the + managed resource. This field is planned to be replaced in a future + release in favor of PublishConnectionDetailsTo. Currently, both + could be set independently and connection details would be published + to both without affecting each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.keepers is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.keepers) + || (has(self.initProvider) && has(self.initProvider.keepers))' + - message: spec.forProvider.serviceName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.serviceName) + || (has(self.initProvider) && has(self.initProvider.serviceName))' + status: + description: RefreshStatus defines the observed state of Refresh. + properties: + atProvider: + properties: + id: + type: string + keepers: + items: + type: string + type: array + serviceName: + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/lb.ovh.edixos.io_tcpfarms.yaml b/package/crds/lb.ovh.edixos.io_tcpfarms.yaml new file mode 100644 index 0000000..a3b146e --- /dev/null +++ b/package/crds/lb.ovh.edixos.io_tcpfarms.yaml @@ -0,0 +1,414 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.13.0 + name: tcpfarms.lb.ovh.edixos.io +spec: + group: lb.ovh.edixos.io + names: + categories: + - crossplane + - managed + - ovh + kind: TCPFarm + listKind: TCPFarmList + plural: tcpfarms + singular: tcpfarm + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: TCPFarm is the Schema for the TCPFarms API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: TCPFarmSpec defines the desired state of TCPFarm + properties: + deletionPolicy: + default: Delete + description: 'DeletionPolicy specifies what will happen to the underlying + external when this managed resource is deleted - either "Delete" + or "Orphan" the external resource. This field is planned to be deprecated + in favor of the ManagementPolicies field in a future release. Currently, + both could be set independently and non-default values would be + honored if the feature flag is enabled. See the design doc for more + information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223' + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + balance: + type: string + displayName: + type: string + port: + type: number + probe: + items: + properties: + forceSsl: + type: boolean + interval: + type: number + match: + type: string + method: + type: string + negate: + type: boolean + pattern: + type: string + port: + type: number + type: + type: string + url: + type: string + type: object + type: array + serviceName: + type: string + stickiness: + type: string + vrackNetworkId: + type: number + zone: + type: string + type: object + initProvider: + description: THIS IS A BETA FIELD. It will be honored unless the Management + Policies feature flag is disabled. InitProvider holds the same fields + as ForProvider, with the exception of Identifier and other resource + reference fields. The fields that are in InitProvider are merged + into ForProvider when the resource is created. The same fields are + also added to the terraform ignore_changes hook, to avoid updating + them after creation. 
This is useful for fields that are required + on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, + like an autoscaler. + properties: + balance: + type: string + displayName: + type: string + port: + type: number + probe: + items: + properties: + forceSsl: + type: boolean + interval: + type: number + match: + type: string + method: + type: string + negate: + type: boolean + pattern: + type: string + port: + type: number + type: + type: string + url: + type: string + type: object + type: array + serviceName: + type: string + stickiness: + type: string + vrackNetworkId: + type: number + zone: + type: string + type: object + managementPolicies: + default: + - '*' + description: 'THIS IS A BETA FIELD. It is on by default but can be + opted out through a Crossplane feature flag. ManagementPolicies + specify the array of actions Crossplane is allowed to take on the + managed and external resources. This field is planned to replace + the DeletionPolicy field in a future release. Currently, both could + be set independently and non-default values would be honored if + the feature flag is enabled. If both are custom, the DeletionPolicy + field will be ignored. See the design doc for more information: + https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md' + items: + description: A ManagementAction represents an action that the Crossplane + controllers can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: ProviderConfigReference specifies how the provider that + will be used to create, observe, update, and delete this managed + resource should be configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: PublishConnectionDetailsTo specifies the connection secret + config which contains a name, metadata and a reference to secret + store config to which any connection details for this managed resource + should be written. Connection details frequently include the endpoint, + username, and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: SecretStoreConfigRef specifies which secret store + config should be used for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are the annotations to be added to + connection secret. - For Kubernetes secrets, this will be + used as "metadata.annotations". - It is up to Secret Store + implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: Labels are the labels/tags to be added to connection + secret. - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store + types. + type: object + type: + description: Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: WriteConnectionSecretToReference specifies the namespace + and name of a Secret to which any connection details for this managed + resource should be written. Connection details frequently include + the endpoint, username, and password required to connect to the + managed resource. 
This field is planned to be replaced in a future + release in favor of PublishConnectionDetailsTo. Currently, both + could be set independently and connection details would be published + to both without affecting each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.serviceName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.serviceName) + || (has(self.initProvider) && has(self.initProvider.serviceName))' + - message: spec.forProvider.zone is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.zone) + || (has(self.initProvider) && has(self.initProvider.zone))' + status: + description: TCPFarmStatus defines the observed state of TCPFarm. + properties: + atProvider: + properties: + balance: + type: string + displayName: + type: string + id: + type: string + port: + type: number + probe: + items: + properties: + forceSsl: + type: boolean + interval: + type: number + match: + type: string + method: + type: string + negate: + type: boolean + pattern: + type: string + port: + type: number + type: + type: string + url: + type: string + type: object + type: array + serviceName: + type: string + stickiness: + type: string + vrackNetworkId: + type: number + zone: + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. 
+ format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/lb.ovh.edixos.io_tcpfarmservers.yaml b/package/crds/lb.ovh.edixos.io_tcpfarmservers.yaml new file mode 100644 index 0000000..492061a --- /dev/null +++ b/package/crds/lb.ovh.edixos.io_tcpfarmservers.yaml @@ -0,0 +1,389 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.13.0 + name: tcpfarmservers.lb.ovh.edixos.io +spec: + group: lb.ovh.edixos.io + names: + categories: + - crossplane + - managed + - ovh + kind: TCPFarmServer + listKind: TCPFarmServerList + plural: tcpfarmservers + singular: tcpfarmserver + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: TCPFarmServer is the Schema for the TCPFarmServers API. 
+ properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: TCPFarmServerSpec defines the desired state of TCPFarmServer + properties: + deletionPolicy: + default: Delete + description: 'DeletionPolicy specifies what will happen to the underlying + external when this managed resource is deleted - either "Delete" + or "Orphan" the external resource. This field is planned to be deprecated + in favor of the ManagementPolicies field in a future release. Currently, + both could be set independently and non-default values would be + honored if the feature flag is enabled. See the design doc for more + information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223' + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + address: + type: string + backup: + type: boolean + chain: + type: string + displayName: + type: string + farmId: + type: number + onMarkedDown: + type: string + port: + type: number + probe: + type: boolean + proxyProtocolVersion: + type: string + serviceName: + type: string + ssl: + type: boolean + status: + type: string + weight: + type: number + type: object + initProvider: + description: THIS IS A BETA FIELD. 
It will be honored unless the Management + Policies feature flag is disabled. InitProvider holds the same fields + as ForProvider, with the exception of Identifier and other resource + reference fields. The fields that are in InitProvider are merged + into ForProvider when the resource is created. The same fields are + also added to the terraform ignore_changes hook, to avoid updating + them after creation. This is useful for fields that are required + on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, + like an autoscaler. + properties: + address: + type: string + backup: + type: boolean + chain: + type: string + displayName: + type: string + farmId: + type: number + onMarkedDown: + type: string + port: + type: number + probe: + type: boolean + proxyProtocolVersion: + type: string + serviceName: + type: string + ssl: + type: boolean + status: + type: string + weight: + type: number + type: object + managementPolicies: + default: + - '*' + description: 'THIS IS A BETA FIELD. It is on by default but can be + opted out through a Crossplane feature flag. ManagementPolicies + specify the array of actions Crossplane is allowed to take on the + managed and external resources. This field is planned to replace + the DeletionPolicy field in a future release. Currently, both could + be set independently and non-default values would be honored if + the feature flag is enabled. If both are custom, the DeletionPolicy + field will be ignored. See the design doc for more information: + https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md' + items: + description: A ManagementAction represents an action that the Crossplane + controllers can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: ProviderConfigReference specifies how the provider that + will be used to create, observe, update, and delete this managed + resource should be configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: PublishConnectionDetailsTo specifies the connection secret + config which contains a name, metadata and a reference to secret + store config to which any connection details for this managed resource + should be written. Connection details frequently include the endpoint, + username, and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: SecretStoreConfigRef specifies which secret store + config should be used for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are the annotations to be added to + connection secret. - For Kubernetes secrets, this will be + used as "metadata.annotations". - It is up to Secret Store + implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: Labels are the labels/tags to be added to connection + secret. - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store + types. + type: object + type: + description: Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: WriteConnectionSecretToReference specifies the namespace + and name of a Secret to which any connection details for this managed + resource should be written. Connection details frequently include + the endpoint, username, and password required to connect to the + managed resource. 
This field is planned to be replaced in a future + release in favor of PublishConnectionDetailsTo. Currently, both + could be set independently and connection details would be published + to both without affecting each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.address is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.address) + || (has(self.initProvider) && has(self.initProvider.address))' + - message: spec.forProvider.farmId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.farmId) + || (has(self.initProvider) && has(self.initProvider.farmId))' + - message: spec.forProvider.serviceName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.serviceName) + || (has(self.initProvider) && has(self.initProvider.serviceName))' + - message: spec.forProvider.status is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.status) + || (has(self.initProvider) && has(self.initProvider.status))' + status: + description: TCPFarmServerStatus defines the observed state of TCPFarmServer. 
+ properties: + atProvider: + properties: + address: + type: string + backup: + type: boolean + chain: + type: string + displayName: + type: string + farmId: + type: number + id: + type: string + onMarkedDown: + type: string + port: + type: number + probe: + type: boolean + proxyProtocolVersion: + type: string + serviceName: + type: string + ssl: + type: boolean + status: + type: string + weight: + type: number + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/lb.ovh.edixos.io_tcpfrontends.yaml b/package/crds/lb.ovh.edixos.io_tcpfrontends.yaml new file mode 100644 index 0000000..789c0b8 --- /dev/null +++ b/package/crds/lb.ovh.edixos.io_tcpfrontends.yaml @@ -0,0 +1,379 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.13.0 + name: tcpfrontends.lb.ovh.edixos.io +spec: + group: lb.ovh.edixos.io + names: + categories: + - crossplane + - managed + - ovh + kind: TCPFrontend + listKind: TCPFrontendList + plural: tcpfrontends + singular: tcpfrontend + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: TCPFrontend is the Schema for the TCPFrontends API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: TCPFrontendSpec defines the desired state of TCPFrontend + properties: + deletionPolicy: + default: Delete + description: 'DeletionPolicy specifies what will happen to the underlying + external when this managed resource is deleted - either "Delete" + or "Orphan" the external resource. This field is planned to be deprecated + in favor of the ManagementPolicies field in a future release. Currently, + both could be set independently and non-default values would be + honored if the feature flag is enabled. See the design doc for more + information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223' + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + allowedSource: + items: + type: string + type: array + dedicatedIpfo: + items: + type: string + type: array + defaultFarmId: + type: number + defaultSslId: + type: number + disabled: + type: boolean + displayName: + type: string + port: + type: string + serviceName: + type: string + ssl: + type: boolean + zone: + type: string + type: object + initProvider: + description: THIS IS A BETA FIELD. It will be honored unless the Management + Policies feature flag is disabled. InitProvider holds the same fields + as ForProvider, with the exception of Identifier and other resource + reference fields. The fields that are in InitProvider are merged + into ForProvider when the resource is created. The same fields are + also added to the terraform ignore_changes hook, to avoid updating + them after creation. This is useful for fields that are required + on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, + like an autoscaler. 
+ properties: + allowedSource: + items: + type: string + type: array + dedicatedIpfo: + items: + type: string + type: array + defaultFarmId: + type: number + defaultSslId: + type: number + disabled: + type: boolean + displayName: + type: string + port: + type: string + serviceName: + type: string + ssl: + type: boolean + zone: + type: string + type: object + managementPolicies: + default: + - '*' + description: 'THIS IS A BETA FIELD. It is on by default but can be + opted out through a Crossplane feature flag. ManagementPolicies + specify the array of actions Crossplane is allowed to take on the + managed and external resources. This field is planned to replace + the DeletionPolicy field in a future release. Currently, both could + be set independently and non-default values would be honored if + the feature flag is enabled. If both are custom, the DeletionPolicy + field will be ignored. See the design doc for more information: + https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md' + items: + description: A ManagementAction represents an action that the Crossplane + controllers can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: ProviderConfigReference specifies how the provider that + will be used to create, observe, update, and delete this managed + resource should be configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. 
The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: PublishConnectionDetailsTo specifies the connection secret + config which contains a name, metadata and a reference to secret + store config to which any connection details for this managed resource + should be written. Connection details frequently include the endpoint, + username, and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: SecretStoreConfigRef specifies which secret store + config should be used for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are the annotations to be added to + connection secret. - For Kubernetes secrets, this will be + used as "metadata.annotations". - It is up to Secret Store + implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: Labels are the labels/tags to be added to connection + secret. - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store + types. + type: object + type: + description: Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: WriteConnectionSecretToReference specifies the namespace + and name of a Secret to which any connection details for this managed + resource should be written. Connection details frequently include + the endpoint, username, and password required to connect to the + managed resource. This field is planned to be replaced in a future + release in favor of PublishConnectionDetailsTo. Currently, both + could be set independently and connection details would be published + to both without affecting each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.port is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.port) + || (has(self.initProvider) && has(self.initProvider.port))' + - message: spec.forProvider.serviceName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.serviceName) + || (has(self.initProvider) && has(self.initProvider.serviceName))' + - message: spec.forProvider.zone is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.zone) + || (has(self.initProvider) && has(self.initProvider.zone))' + status: + description: TCPFrontendStatus defines the observed state of TCPFrontend. + properties: + atProvider: + properties: + allowedSource: + items: + type: string + type: array + dedicatedIpfo: + items: + type: string + type: array + defaultFarmId: + type: number + defaultSslId: + type: number + disabled: + type: boolean + displayName: + type: string + id: + type: string + port: + type: string + serviceName: + type: string + ssl: + type: boolean + zone: + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. 
+ type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/lb.ovh.edixos.io_tcprouterules.yaml b/package/crds/lb.ovh.edixos.io_tcprouterules.yaml new file mode 100644 index 0000000..62ce2e8 --- /dev/null +++ b/package/crds/lb.ovh.edixos.io_tcprouterules.yaml @@ -0,0 +1,359 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.13.0 + name: tcprouterules.lb.ovh.edixos.io +spec: + group: lb.ovh.edixos.io + names: + categories: + - crossplane + - managed + - ovh + kind: TCPRouteRule + listKind: TCPRouteRuleList + plural: tcprouterules + singular: tcprouterule + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: TCPRouteRule is the Schema for the TCPRouteRules API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. 
Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: TCPRouteRuleSpec defines the desired state of TCPRouteRule + properties: + deletionPolicy: + default: Delete + description: 'DeletionPolicy specifies what will happen to the underlying + external when this managed resource is deleted - either "Delete" + or "Orphan" the external resource. This field is planned to be deprecated + in favor of the ManagementPolicies field in a future release. Currently, + both could be set independently and non-default values would be + honored if the feature flag is enabled. See the design doc for more + information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223' + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + displayName: + type: string + field: + type: string + match: + type: string + negate: + type: boolean + pattern: + type: string + routeId: + type: string + serviceName: + type: string + subField: + type: string + type: object + initProvider: + description: THIS IS A BETA FIELD. It will be honored unless the Management + Policies feature flag is disabled. InitProvider holds the same fields + as ForProvider, with the exception of Identifier and other resource + reference fields. The fields that are in InitProvider are merged + into ForProvider when the resource is created. 
The same fields are + also added to the terraform ignore_changes hook, to avoid updating + them after creation. This is useful for fields that are required + on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, + like an autoscaler. + properties: + displayName: + type: string + field: + type: string + match: + type: string + negate: + type: boolean + pattern: + type: string + routeId: + type: string + serviceName: + type: string + subField: + type: string + type: object + managementPolicies: + default: + - '*' + description: 'THIS IS A BETA FIELD. It is on by default but can be + opted out through a Crossplane feature flag. ManagementPolicies + specify the array of actions Crossplane is allowed to take on the + managed and external resources. This field is planned to replace + the DeletionPolicy field in a future release. Currently, both could + be set independently and non-default values would be honored if + the feature flag is enabled. If both are custom, the DeletionPolicy + field will be ignored. See the design doc for more information: + https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md' + items: + description: A ManagementAction represents an action that the Crossplane + controllers can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: ProviderConfigReference specifies how the provider that + will be used to create, observe, update, and delete this managed + resource should be configured. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: PublishConnectionDetailsTo specifies the connection secret + config which contains a name, metadata and a reference to secret + store config to which any connection details for this managed resource + should be written. Connection details frequently include the endpoint, + username, and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: SecretStoreConfigRef specifies which secret store + config should be used for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. 
The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are the annotations to be added to + connection secret. - For Kubernetes secrets, this will be + used as "metadata.annotations". - It is up to Secret Store + implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: Labels are the labels/tags to be added to connection + secret. - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store + types. + type: object + type: + description: Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: WriteConnectionSecretToReference specifies the namespace + and name of a Secret to which any connection details for this managed + resource should be written. Connection details frequently include + the endpoint, username, and password required to connect to the + managed resource. This field is planned to be replaced in a future + release in favor of PublishConnectionDetailsTo. Currently, both + could be set independently and connection details would be published + to both without affecting each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.field is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.field) + || (has(self.initProvider) && has(self.initProvider.field))' + - message: spec.forProvider.match is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.match) + || (has(self.initProvider) && has(self.initProvider.match))' + - message: spec.forProvider.routeId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.routeId) + || (has(self.initProvider) && has(self.initProvider.routeId))' + - message: spec.forProvider.serviceName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.serviceName) + || (has(self.initProvider) && has(self.initProvider.serviceName))' + status: + description: TCPRouteRuleStatus defines the observed state of TCPRouteRule. + properties: + atProvider: + properties: + displayName: + type: string + field: + type: string + id: + type: string + match: + type: string + negate: + type: boolean + pattern: + type: string + routeId: + type: string + serviceName: + type: string + subField: + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. 
+ format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/lb.ovh.edixos.io_tcproutes.yaml b/package/crds/lb.ovh.edixos.io_tcproutes.yaml new file mode 100644 index 0000000..fef3a17 --- /dev/null +++ b/package/crds/lb.ovh.edixos.io_tcproutes.yaml @@ -0,0 +1,408 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.13.0 + name: tcproutes.lb.ovh.edixos.io +spec: + group: lb.ovh.edixos.io + names: + categories: + - crossplane + - managed + - ovh + kind: TCPRoute + listKind: TCPRouteList + plural: tcproutes + singular: tcproute + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: TCPRoute is the Schema for the TCPRoutes API. 
+ properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: TCPRouteSpec defines the desired state of TCPRoute + properties: + deletionPolicy: + default: Delete + description: 'DeletionPolicy specifies what will happen to the underlying + external when this managed resource is deleted - either "Delete" + or "Orphan" the external resource. This field is planned to be deprecated + in favor of the ManagementPolicies field in a future release. Currently, + both could be set independently and non-default values would be + honored if the feature flag is enabled. 
See the design doc for more + information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223' + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + action: + description: Action triggered when all rules match + items: + properties: + target: + description: Farm ID for "farm" action type, empty for others + type: string + type: + description: Action to trigger if all the rules of this + route matches + type: string + type: object + type: array + displayName: + description: Human readable name for your route, this field is + for you + type: string + frontendId: + description: Route traffic for this frontend + type: number + serviceName: + description: The internal name of your IP load balancing + type: string + weight: + description: Route priority ([0..255]). 0 if null. Highest priority + routes are evaluated last. Only the first matching route will + trigger an action + type: number + type: object + initProvider: + description: THIS IS A BETA FIELD. It will be honored unless the Management + Policies feature flag is disabled. InitProvider holds the same fields + as ForProvider, with the exception of Identifier and other resource + reference fields. The fields that are in InitProvider are merged + into ForProvider when the resource is created. The same fields are + also added to the terraform ignore_changes hook, to avoid updating + them after creation. This is useful for fields that are required + on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, + like an autoscaler. 
+ properties: + action: + description: Action triggered when all rules match + items: + properties: + target: + description: Farm ID for "farm" action type, empty for others + type: string + type: + description: Action to trigger if all the rules of this + route matches + type: string + type: object + type: array + displayName: + description: Human readable name for your route, this field is + for you + type: string + frontendId: + description: Route traffic for this frontend + type: number + serviceName: + description: The internal name of your IP load balancing + type: string + weight: + description: Route priority ([0..255]). 0 if null. Highest priority + routes are evaluated last. Only the first matching route will + trigger an action + type: number + type: object + managementPolicies: + default: + - '*' + description: 'THIS IS A BETA FIELD. It is on by default but can be + opted out through a Crossplane feature flag. ManagementPolicies + specify the array of actions Crossplane is allowed to take on the + managed and external resources. This field is planned to replace + the DeletionPolicy field in a future release. Currently, both could + be set independently and non-default values would be honored if + the feature flag is enabled. If both are custom, the DeletionPolicy + field will be ignored. See the design doc for more information: + https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md' + items: + description: A ManagementAction represents an action that the Crossplane + controllers can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: ProviderConfigReference specifies how the provider that + will be used to create, observe, update, and delete this managed + resource should be configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: PublishConnectionDetailsTo specifies the connection secret + config which contains a name, metadata and a reference to secret + store config to which any connection details for this managed resource + should be written. Connection details frequently include the endpoint, + username, and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: SecretStoreConfigRef specifies which secret store + config should be used for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are the annotations to be added to + connection secret. - For Kubernetes secrets, this will be + used as "metadata.annotations". - It is up to Secret Store + implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: Labels are the labels/tags to be added to connection + secret. - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store + types. + type: object + type: + description: Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: WriteConnectionSecretToReference specifies the namespace + and name of a Secret to which any connection details for this managed + resource should be written. Connection details frequently include + the endpoint, username, and password required to connect to the + managed resource. 
This field is planned to be replaced in a future + release in favor of PublishConnectionDetailsTo. Currently, both + could be set independently and connection details would be published + to both without affecting each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.action is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.action) + || (has(self.initProvider) && has(self.initProvider.action))' + - message: spec.forProvider.serviceName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.serviceName) + || (has(self.initProvider) && has(self.initProvider.serviceName))' + status: + description: TCPRouteStatus defines the observed state of TCPRoute. 
+ properties: + atProvider: + properties: + action: + description: Action triggered when all rules match + items: + properties: + target: + description: Farm ID for "farm" action type, empty for others + type: string + type: + description: Action to trigger if all the rules of this + route matches + type: string + type: object + type: array + displayName: + description: Human readable name for your route, this field is + for you + type: string + frontendId: + description: Route traffic for this frontend + type: number + id: + type: string + rules: + description: List of rules to match to trigger action + items: + properties: + field: + type: string + match: + type: string + negate: + type: boolean + pattern: + type: string + ruleId: + type: number + subField: + type: string + type: object + type: array + serviceName: + description: The internal name of your IP load balancing + type: string + status: + description: Route status. Routes in "ok" state are ready to operate + type: string + weight: + description: Route priority ([0..255]). 0 if null. Highest priority + routes are evaluated last. Only the first matching route will + trigger an action + type: number + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/lb.ovh.edixos.io_vracknetworks.yaml b/package/crds/lb.ovh.edixos.io_vracknetworks.yaml new file mode 100644 index 0000000..a5be745 --- /dev/null +++ b/package/crds/lb.ovh.edixos.io_vracknetworks.yaml @@ -0,0 +1,386 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.13.0 + name: vracknetworks.lb.ovh.edixos.io +spec: + group: lb.ovh.edixos.io + names: + categories: + - crossplane + - managed + - ovh + kind: VrackNetwork + listKind: VrackNetworkList + plural: vracknetworks + singular: vracknetwork + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: VrackNetwork is the Schema for the VrackNetworks API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. 
In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: VrackNetworkSpec defines the desired state of VrackNetwork + properties: + deletionPolicy: + default: Delete + description: 'DeletionPolicy specifies what will happen to the underlying + external when this managed resource is deleted - either "Delete" + or "Orphan" the external resource. This field is planned to be deprecated + in favor of the ManagementPolicies field in a future release. Currently, + both could be set independently and non-default values would be + honored if the feature flag is enabled. See the design doc for more + information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223' + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + displayName: + description: Human readable name for your vrack network + type: string + farmId: + description: This attribute is there for documentation purposes + only and isn't passed to the OVH API as it may conflict with + http/tcp farms `vrack_network_id` attribute + items: + type: number + type: array + natIp: + description: An IP block used as a pool of IPs by this Load Balancer + to connect to the servers in this private network. The block + must be in the private network and reserved for the Load Balancer + type: string + serviceName: + description: The internal name of your IP load balancer + type: string + subnet: + description: IP block of the private network in the vRack + type: string + vlan: + description: VLAN of the private network in the vRack. 0 if the + private network is not in a VLAN + type: number + type: object + initProvider: + description: THIS IS A BETA FIELD. It will be honored unless the Management + Policies feature flag is disabled. 
InitProvider holds the same fields + as ForProvider, with the exception of Identifier and other resource + reference fields. The fields that are in InitProvider are merged + into ForProvider when the resource is created. The same fields are + also added to the terraform ignore_changes hook, to avoid updating + them after creation. This is useful for fields that are required + on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, + like an autoscaler. + properties: + displayName: + description: Human readable name for your vrack network + type: string + farmId: + description: This attribute is there for documentation purposes + only and isn't passed to the OVH API as it may conflict with + http/tcp farms `vrack_network_id` attribute + items: + type: number + type: array + natIp: + description: An IP block used as a pool of IPs by this Load Balancer + to connect to the servers in this private network. The block + must be in the private network and reserved for the Load Balancer + type: string + serviceName: + description: The internal name of your IP load balancer + type: string + subnet: + description: IP block of the private network in the vRack + type: string + vlan: + description: VLAN of the private network in the vRack. 0 if the + private network is not in a VLAN + type: number + type: object + managementPolicies: + default: + - '*' + description: 'THIS IS A BETA FIELD. It is on by default but can be + opted out through a Crossplane feature flag. ManagementPolicies + specify the array of actions Crossplane is allowed to take on the + managed and external resources. This field is planned to replace + the DeletionPolicy field in a future release. Currently, both could + be set independently and non-default values would be honored if + the feature flag is enabled. If both are custom, the DeletionPolicy + field will be ignored. 
See the design doc for more information: + https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md' + items: + description: A ManagementAction represents an action that the Crossplane + controllers can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: ProviderConfigReference specifies how the provider that + will be used to create, observe, update, and delete this managed + resource should be configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: PublishConnectionDetailsTo specifies the connection secret + config which contains a name, metadata and a reference to secret + store config to which any connection details for this managed resource + should be written. 
Connection details frequently include the endpoint, + username, and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: SecretStoreConfigRef specifies which secret store + config should be used for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are the annotations to be added to + connection secret. - For Kubernetes secrets, this will be + used as "metadata.annotations". - It is up to Secret Store + implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: Labels are the labels/tags to be added to connection + secret. - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store + types. + type: object + type: + description: Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: WriteConnectionSecretToReference specifies the namespace + and name of a Secret to which any connection details for this managed + resource should be written. Connection details frequently include + the endpoint, username, and password required to connect to the + managed resource. This field is planned to be replaced in a future + release in favor of PublishConnectionDetailsTo. Currently, both + could be set independently and connection details would be published + to both without affecting each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.natIp is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.natIp) + || (has(self.initProvider) && has(self.initProvider.natIp))' + - message: spec.forProvider.serviceName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.serviceName) + || (has(self.initProvider) && has(self.initProvider.serviceName))' + - message: spec.forProvider.subnet is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.subnet) + || (has(self.initProvider) && has(self.initProvider.subnet))' + status: + description: VrackNetworkStatus defines the observed state of VrackNetwork. 
+ properties: + atProvider: + properties: + displayName: + description: Human readable name for your vrack network + type: string + farmId: + description: This attribute is there for documentation purpose + only and isn't passed to the OVH API as it may conflict with + http/tcp farms `vrack_network_id` attribute + items: + type: number + type: array + id: + type: string + natIp: + description: An IP block used as a pool of IPs by this Load Balancer + to connect to the servers in this private network. The block + must be in the private network and reserved for the Load Balancer + type: string + serviceName: + description: The internal name of your IPloadbalancer + type: string + subnet: + description: IP block of the private network in the vRack + type: string + vlan: + description: VLAN of the private network in the vRack. 0 if the + private network is not in a VLAN + type: number + vrackNetworkId: + description: Internal Load Balancer identifier of the vRack private + network + type: number + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/logs.ovh.edixos.io_logsclusters.yaml b/package/crds/logs.ovh.edixos.io_logsclusters.yaml new file mode 100644 index 0000000..94d43bf --- /dev/null +++ b/package/crds/logs.ovh.edixos.io_logsclusters.yaml @@ -0,0 +1,367 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.13.0 + name: logsclusters.logs.ovh.edixos.io +spec: + group: logs.ovh.edixos.io + names: + categories: + - crossplane + - managed + - ovh + kind: LogsCluster + listKind: LogsClusterList + plural: logsclusters + singular: logscluster + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: LogsCluster is the Schema for the LogsClusters API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. 
In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: LogsClusterSpec defines the desired state of LogsCluster + properties: + deletionPolicy: + default: Delete + description: 'DeletionPolicy specifies what will happen to the underlying + external when this managed resource is deleted - either "Delete" + or "Orphan" the external resource. This field is planned to be deprecated + in favor of the ManagementPolicies field in a future release. Currently, + both could be set independently and non-default values would be + honored if the feature flag is enabled. See the design doc for more + information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223' + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + archiveAllowedNetworks: + description: Allowed networks for ARCHIVE flow type + items: + type: string + type: array + directInputAllowedNetworks: + description: Allowed networks for DIRECT_INPUT flow type + items: + type: string + type: array + queryAllowedNetworks: + description: Allowed networks for QUERY flow type + items: + type: string + type: array + serviceName: + type: string + type: object + initProvider: + description: THIS IS A BETA FIELD. It will be honored unless the Management + Policies feature flag is disabled. InitProvider holds the same fields + as ForProvider, with the exception of Identifier and other resource + reference fields. The fields that are in InitProvider are merged + into ForProvider when the resource is created. The same fields are + also added to the terraform ignore_changes hook, to avoid updating + them after creation. 
This is useful for fields that are required + on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, + like an autoscaler. + properties: + archiveAllowedNetworks: + description: Allowed networks for ARCHIVE flow type + items: + type: string + type: array + directInputAllowedNetworks: + description: Allowed networks for DIRECT_INPUT flow type + items: + type: string + type: array + queryAllowedNetworks: + description: Allowed networks for QUERY flow type + items: + type: string + type: array + serviceName: + type: string + type: object + managementPolicies: + default: + - '*' + description: 'THIS IS A BETA FIELD. It is on by default but can be + opted out through a Crossplane feature flag. ManagementPolicies + specify the array of actions Crossplane is allowed to take on the + managed and external resources. This field is planned to replace + the DeletionPolicy field in a future release. Currently, both could + be set independently and non-default values would be honored if + the feature flag is enabled. If both are custom, the DeletionPolicy + field will be ignored. See the design doc for more information: + https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md' + items: + description: A ManagementAction represents an action that the Crossplane + controllers can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: ProviderConfigReference specifies how the provider that + will be used to create, observe, update, and delete this managed + resource should be configured. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: PublishConnectionDetailsTo specifies the connection secret + config which contains a name, metadata and a reference to secret + store config to which any connection details for this managed resource + should be written. Connection details frequently include the endpoint, + username, and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: SecretStoreConfigRef specifies which secret store + config should be used for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are the annotations to be added to + connection secret. - For Kubernetes secrets, this will be + used as "metadata.annotations". - It is up to Secret Store + implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: Labels are the labels/tags to be added to connection + secret. - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store + types. + type: object + type: + description: Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: WriteConnectionSecretToReference specifies the namespace + and name of a Secret to which any connection details for this managed + resource should be written. Connection details frequently include + the endpoint, username, and password required to connect to the + managed resource. This field is planned to be replaced in a future + release in favor of PublishConnectionDetailsTo. Currently, both + could be set independently and connection details would be published + to both without affecting each other. + properties: + name: + description: Name of the secret. 
+ type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.serviceName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.serviceName) + || (has(self.initProvider) && has(self.initProvider.serviceName))' + status: + description: LogsClusterStatus defines the observed state of LogsCluster. + properties: + atProvider: + properties: + archiveAllowedNetworks: + description: Allowed networks for ARCHIVE flow type + items: + type: string + type: array + clusterType: + description: Cluster type + type: string + directInputAllowedNetworks: + description: Allowed networks for DIRECT_INPUT flow type + items: + type: string + type: array + hostname: + description: hostname + type: string + id: + type: string + isDefault: + description: All content generated by given service will be placed + on this cluster + type: boolean + isUnlocked: + description: Allow given service to perform advanced operations + on cluster + type: boolean + queryAllowedNetworks: + description: Allowed networks for QUERY flow type + items: + type: string + type: array + region: + description: Data center localization + type: string + serviceName: + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. 
+ type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/logs.ovh.edixos.io_logsinputs.yaml b/package/crds/logs.ovh.edixos.io_logsinputs.yaml new file mode 100644 index 0000000..65c7d7e --- /dev/null +++ b/package/crds/logs.ovh.edixos.io_logsinputs.yaml @@ -0,0 +1,515 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.13.0 + name: logsinputs.logs.ovh.edixos.io +spec: + group: logs.ovh.edixos.io + names: + categories: + - crossplane + - managed + - ovh + kind: LogsInput + listKind: LogsInputList + plural: logsinputs + singular: logsinput + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: LogsInput is the Schema for the LogsInputs API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. 
Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: LogsInputSpec defines the desired state of LogsInput + properties: + deletionPolicy: + default: Delete + description: 'DeletionPolicy specifies what will happen to the underlying + external when this managed resource is deleted - either "Delete" + or "Orphan" the external resource. This field is planned to be deprecated + in favor of the ManagementPolicies field in a future release. Currently, + both could be set independently and non-default values would be + honored if the feature flag is enabled. 
See the design doc for more + information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223' + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + allowedNetworks: + description: IP blocks + items: + type: string + type: array + configuration: + description: Input configuration + items: + properties: + flowgger: + description: Flowgger configuration + items: + properties: + logFormat: + description: Type of format to decode + type: string + logFraming: + description: Indicates how messages are delimited + type: string + type: object + type: array + logstash: + description: Logstash configuration + items: + properties: + filterSection: + description: The filter section of logstash.conf + type: string + inputSection: + description: The input section of logstash.conf + type: string + patternSection: + description: The list of custom Grok patterns + type: string + type: object + type: array + type: object + type: array + description: + description: Input description + type: string + engineId: + description: Input engine ID + type: string + exposedPort: + description: Port + type: string + nbInstance: + description: Number of instances running + type: number + serviceName: + type: string + streamId: + description: Associated Graylog stream + type: string + title: + description: Input title + type: string + type: object + initProvider: + description: THIS IS A BETA FIELD. It will be honored unless the Management + Policies feature flag is disabled. InitProvider holds the same fields + as ForProvider, with the exception of Identifier and other resource + reference fields. The fields that are in InitProvider are merged + into ForProvider when the resource is created. The same fields are + also added to the terraform ignore_changes hook, to avoid updating + them after creation. 
This is useful for fields that are required + on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, + like an autoscaler. + properties: + allowedNetworks: + description: IP blocks + items: + type: string + type: array + configuration: + description: Input configuration + items: + properties: + flowgger: + description: Flowgger configuration + items: + properties: + logFormat: + description: Type of format to decode + type: string + logFraming: + description: Indicates how messages are delimited + type: string + type: object + type: array + logstash: + description: Logstash configuration + items: + properties: + filterSection: + description: The filter section of logstash.conf + type: string + inputSection: + description: The input section of logstash.conf + type: string + patternSection: + description: The list of custom Grok patterns + type: string + type: object + type: array + type: object + type: array + description: + description: Input description + type: string + engineId: + description: Input engine ID + type: string + exposedPort: + description: Port + type: string + nbInstance: + description: Number of instances running + type: number + serviceName: + type: string + streamId: + description: Associated Graylog stream + type: string + title: + description: Input title + type: string + type: object + managementPolicies: + default: + - '*' + description: 'THIS IS A BETA FIELD. It is on by default but can be + opted out through a Crossplane feature flag. ManagementPolicies + specify the array of actions Crossplane is allowed to take on the + managed and external resources. This field is planned to replace + the DeletionPolicy field in a future release. Currently, both could + be set independently and non-default values would be honored if + the feature flag is enabled. If both are custom, the DeletionPolicy + field will be ignored. 
See the design doc for more information: + https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md' + items: + description: A ManagementAction represents an action that the Crossplane + controllers can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: ProviderConfigReference specifies how the provider that + will be used to create, observe, update, and delete this managed + resource should be configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: PublishConnectionDetailsTo specifies the connection secret + config which contains a name, metadata and a reference to secret + store config to which any connection details for this managed resource + should be written. 
Connection details frequently include the endpoint, + username, and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: SecretStoreConfigRef specifies which secret store + config should be used for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are the annotations to be added to + connection secret. - For Kubernetes secrets, this will be + used as "metadata.annotations". - It is up to Secret Store + implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: Labels are the labels/tags to be added to connection + secret. - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store + types. + type: object + type: + description: Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: WriteConnectionSecretToReference specifies the namespace + and name of a Secret to which any connection details for this managed + resource should be written. Connection details frequently include + the endpoint, username, and password required to connect to the + managed resource. This field is planned to be replaced in a future + release in favor of PublishConnectionDetailsTo. Currently, both + could be set independently and connection details would be published + to both without affecting each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.configuration is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.configuration) + || (has(self.initProvider) && has(self.initProvider.configuration))' + - message: spec.forProvider.description is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.description) + || (has(self.initProvider) && has(self.initProvider.description))' + - message: spec.forProvider.engineId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.engineId) + || (has(self.initProvider) && has(self.initProvider.engineId))' + - message: spec.forProvider.serviceName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in 
self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.serviceName) + || (has(self.initProvider) && has(self.initProvider.serviceName))' + - message: spec.forProvider.streamId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.streamId) + || (has(self.initProvider) && has(self.initProvider.streamId))' + - message: spec.forProvider.title is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.title) + || (has(self.initProvider) && has(self.initProvider.title))' + status: + description: LogsInputStatus defines the observed state of LogsInput. + properties: + atProvider: + properties: + allowedNetworks: + description: IP blocks + items: + type: string + type: array + configuration: + description: Input configuration + items: + properties: + flowgger: + description: Flowgger configuration + items: + properties: + logFormat: + description: Type of format to decode + type: string + logFraming: + description: Indicates how messages are delimited + type: string + type: object + type: array + logstash: + description: Logstash configuration + items: + properties: + filterSection: + description: The filter section of logstash.conf + type: string + inputSection: + description: The input section of logstash.conf + type: string + patternSection: + description: The list of custom Grok patterns + type: string + type: object + type: array + type: object + type: array + createdAt: + description: Input creation + type: string + description: + description: Input description + type: string + engineId: + description: Input engine ID + type: string + exposedPort: + description: Port + type: string + hostname: + description: Hostname + type: string + id: + type: string + inputId: + description: Input ID + 
type: string + isRestartRequired: + description: Indicates if input needs to be restarted + type: boolean + nbInstance: + description: Number of instances running + type: number + publicAddress: + description: Input IP address + type: string + serviceName: + type: string + status: + description: 'init: configuration required, pending: ready to + start, running: available' + type: string + streamId: + description: Associated Graylog stream + type: string + title: + description: Input title + type: string + updatedAt: + description: Input last update + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {}