diff --git a/ovh/data_cloud_project_database.go b/ovh/data_cloud_project_database.go index 932adc998..86170648c 100644 --- a/ovh/data_cloud_project_database.go +++ b/ovh/data_cloud_project_database.go @@ -135,6 +135,11 @@ func dataSourceCloudProjectDatabase() *schema.Resource { Description: "Defines whether the REST API is enabled on a Kafka cluster", Computed: true, }, + "kafka_schema_registry": { + Type: schema.TypeBool, + Description: "Defines whether the schema registry is enabled on a Kafka cluster", + Computed: true, + }, "maintenance_time": { Type: schema.TypeString, Description: "Time on which maintenances can start every day", @@ -232,16 +237,16 @@ func dataSourceCloudProjectDatabaseRead(ctx context.Context, d *schema.ResourceD nodesEndpoint := fmt.Sprintf("%s/node", serviceEndpoint) nodeList := &[]string{} if err := config.OVHClient.GetWithContext(ctx, nodesEndpoint, nodeList); err != nil { - return diag.Errorf("unable to get database %s nodes: %v", res.Id, err) + return diag.Errorf("unable to get database %s nodes: %v", res.ID, err) } if len(*nodeList) == 0 { - return diag.Errorf("no node found for database %s", res.Id) + return diag.Errorf("no node found for database %s", res.ID) } nodeEndpoint := fmt.Sprintf("%s/%s", nodesEndpoint, url.PathEscape((*nodeList)[0])) node := &CloudProjectDatabaseNodes{} if err := config.OVHClient.GetWithContext(ctx, nodeEndpoint, node); err != nil { - return diag.Errorf("unable to get database %s node %s: %v", res.Id, (*nodeList)[0], err) + return diag.Errorf("unable to get database %s node %s: %v", res.ID, (*nodeList)[0], err) } res.Region = node.Region @@ -250,12 +255,12 @@ func dataSourceCloudProjectDatabaseRead(ctx context.Context, d *schema.ResourceD advancedConfigEndpoint := fmt.Sprintf("%s/advancedConfiguration", serviceEndpoint) advancedConfigMap := &map[string]string{} if err := config.OVHClient.GetWithContext(ctx, advancedConfigEndpoint, advancedConfigMap); err != nil { - return diag.Errorf("unable to get 
database %s advanced configuration: %v", res.Id, err) + return diag.Errorf("unable to get database %s advanced configuration: %v", res.ID, err) } res.AdvancedConfiguration = *advancedConfigMap } - for k, v := range res.ToMap() { + for k, v := range res.toMap() { if k != "id" { d.Set(k, v) } else { diff --git a/ovh/resource_cloud_project_database.go b/ovh/resource_cloud_project_database.go index 4a5c7a84c..906981e27 100644 --- a/ovh/resource_cloud_project_database.go +++ b/ovh/resource_cloud_project_database.go @@ -85,6 +85,14 @@ func resourceCloudProjectDatabase() *schema.Resource { return d.Get("engine").(string) != "kafka" || new == old }, }, + "kafka_schema_registry": { + Type: schema.TypeBool, + Description: "Defines whether the schema registry is enabled on a Kafka cluster", + Optional: true, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + return d.Get("engine").(string) != "kafka" || new == old + }, + }, "nodes": { Type: schema.TypeList, Description: "List of nodes composing the service", @@ -245,15 +253,15 @@ func resourceCloudProjectDatabase() *schema.Resource { } func resourceCloudProjectDatabaseImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - givenId := d.Id() + givenID := d.Id() n := 3 - splitId := strings.SplitN(givenId, "/", n) - if len(splitId) != n { + splitID := strings.SplitN(givenID, "/", n) + if len(splitID) != n { return nil, fmt.Errorf("import Id is not service_name/engine/databaseId formatted") } - serviceName := splitId[0] - engine := splitId[1] - id := splitId[2] + serviceName := splitID[0] + engine := splitID[1] + id := splitID[2] d.SetId(id) d.Set("engine", engine) d.Set("service_name", serviceName) @@ -272,7 +280,7 @@ func resourceCloudProjectDatabaseCreate(ctx context.Context, d *schema.ResourceD url.PathEscape(serviceName), url.PathEscape(engine), ) - params, err := (&CloudProjectDatabaseCreateOpts{}).FromResource(d) + params, err := 
(&CloudProjectDatabaseCreateOpts{}).fromResource(d) if err != nil { return diag.Errorf("service creation failed : %q", err) } @@ -284,17 +292,18 @@ func resourceCloudProjectDatabaseCreate(ctx context.Context, d *schema.ResourceD return diag.Errorf("calling Post %s with params %+v:\n\t %q", endpoint, params, err) } - log.Printf("[DEBUG] Waiting for database %s to be READY", res.Id) - err = waitForCloudProjectDatabaseReady(ctx, config.OVHClient, serviceName, engine, res.Id, d.Timeout(schema.TimeoutCreate)) + log.Printf("[DEBUG] Waiting for database %s to be READY", res.ID) + err = waitForCloudProjectDatabaseReady(ctx, config.OVHClient, serviceName, engine, res.ID, d.Timeout(schema.TimeoutCreate)) if err != nil { - return diag.Errorf("timeout while waiting database %s to be READY: %s", res.Id, err.Error()) + return diag.Errorf("timeout while waiting database %s to be READY: %s", res.ID, err.Error()) } - log.Printf("[DEBUG] database %s is READY", res.Id) + log.Printf("[DEBUG] database %s is READY", res.ID) - d.SetId(res.Id) + d.SetId(res.ID) if (engine != "mongodb" && len(d.Get("advanced_configuration").(map[string]interface{})) > 0) || (engine == "kafka" && d.Get("kafka_rest_api").(bool)) || + (engine == "kafka" && d.Get("kafka_schema_registry").(bool)) || (engine == "opensearch" && d.Get("opensearch_acls_enabled").(bool)) { return resourceCloudProjectDatabaseUpdate(ctx, d, meta) } @@ -322,16 +331,16 @@ func resourceCloudProjectDatabaseRead(ctx context.Context, d *schema.ResourceDat nodesEndpoint := fmt.Sprintf("%s/node", serviceEndpoint) nodeList := &[]string{} if err := config.OVHClient.GetWithContext(ctx, nodesEndpoint, nodeList); err != nil { - return diag.Errorf("unable to get database %s nodes: %v", res.Id, err) + return diag.Errorf("unable to get database %s nodes: %v", res.ID, err) } if len(*nodeList) == 0 { - return diag.Errorf("no node found for database %s", res.Id) + return diag.Errorf("no node found for database %s", res.ID) } nodeEndpoint := 
fmt.Sprintf("%s/%s", nodesEndpoint, url.PathEscape((*nodeList)[0])) node := &CloudProjectDatabaseNodes{} if err := config.OVHClient.GetWithContext(ctx, nodeEndpoint, node); err != nil { - return diag.Errorf("unable to get database %s node %s: %v", res.Id, (*nodeList)[0], err) + return diag.Errorf("unable to get database %s node %s: %v", res.ID, (*nodeList)[0], err) } res.Region = node.Region @@ -340,12 +349,12 @@ func resourceCloudProjectDatabaseRead(ctx context.Context, d *schema.ResourceDat advancedConfigEndpoint := fmt.Sprintf("%s/advancedConfiguration", serviceEndpoint) advancedConfigMap := &map[string]string{} if err := config.OVHClient.GetWithContext(ctx, advancedConfigEndpoint, advancedConfigMap); err != nil { - return diag.Errorf("unable to get database %s advanced configuration: %v", res.Id, err) + return diag.Errorf("unable to get database %s advanced configuration: %v", res.ID, err) } res.AdvancedConfiguration = *advancedConfigMap } - for k, v := range res.ToMap() { + for k, v := range res.toMap() { if k != "id" { d.Set(k, v) } else { @@ -366,7 +375,7 @@ func resourceCloudProjectDatabaseUpdate(ctx context.Context, d *schema.ResourceD url.PathEscape(engine), url.PathEscape(d.Id()), ) - params, err := (&CloudProjectDatabaseUpdateOpts{}).FromResource(d) + params, err := (&CloudProjectDatabaseUpdateOpts{}).fromResource(d) if err != nil { return diag.Errorf("service update failed : %q", err) } diff --git a/ovh/types_cloud_project_database.go b/ovh/types_cloud_project_database.go index 55f03d445..7182eee48 100644 --- a/ovh/types_cloud_project_database.go +++ b/ovh/types_cloud_project_database.go @@ -67,69 +67,67 @@ type CloudProjectDatabaseResponse struct { Description string `json:"description"` Endpoints []CloudProjectDatabaseEndpoint `json:"endpoints"` Flavor string `json:"flavor"` - Id string `json:"id"` + ID string `json:"id"` IPRestrictions []CloudProjectDatabaseIPRestrictionResponse `json:"ipRestrictions"` MaintenanceTime string `json:"maintenanceTime"` 
- NetworkId string `json:"networkId"` + NetworkID string `json:"networkId"` NetworkType string `json:"networkType"` Plan string `json:"plan"` NodeNumber int `json:"nodeNumber"` Region string `json:"region"` - RestApi bool `json:"restApi"` + RestAPI bool `json:"restApi"` + SchemaRegistry bool `json:"schemaRegistry"` Status string `json:"status"` - SubnetId string `json:"subnetId"` + SubnetID string `json:"subnetId"` Version string `json:"version"` Disk CloudProjectDatabaseDisk `json:"disk"` AdvancedConfiguration map[string]string `json:"advancedConfiguration"` } -func (s *CloudProjectDatabaseResponse) String() string { - return fmt.Sprintf("%s(%s): %s", s.Description, s.Id, s.Status) -} - -func (v CloudProjectDatabaseResponse) ToMap() map[string]interface{} { +func (r CloudProjectDatabaseResponse) toMap() map[string]interface{} { obj := make(map[string]interface{}) - obj["backup_regions"] = v.Backups.Regions - obj["backup_time"] = v.Backups.Time - obj["created_at"] = v.CreatedAt - obj["description"] = v.Description - obj["id"] = v.Id + obj["backup_regions"] = r.Backups.Regions + obj["backup_time"] = r.Backups.Time + obj["created_at"] = r.CreatedAt + obj["description"] = r.Description + obj["id"] = r.ID var ipRests []map[string]interface{} - for _, ir := range v.IPRestrictions { + for _, ir := range r.IPRestrictions { ipRests = append(ipRests, ir.toMap()) } obj["ip_restrictions"] = ipRests var endpoints []map[string]interface{} - for _, e := range v.Endpoints { + for _, e := range r.Endpoints { endpoints = append(endpoints, e.ToMap()) } obj["endpoints"] = endpoints - obj["flavor"] = v.Flavor - obj["kafka_rest_api"] = v.RestApi - obj["maintenance_time"] = v.MaintenanceTime - obj["network_type"] = v.NetworkType + obj["flavor"] = r.Flavor + obj["kafka_rest_api"] = r.RestAPI + obj["kafka_schema_registry"] = r.SchemaRegistry + obj["maintenance_time"] = r.MaintenanceTime + obj["network_type"] = r.NetworkType var nodes []map[string]interface{} - for i := 0; i < 
v.NodeNumber; i++ { + for i := 0; i < r.NodeNumber; i++ { node := CloudProjectDatabaseNodes{ - Region: v.Region, - NetworkId: v.NetworkId, - SubnetId: v.SubnetId, + Region: r.Region, + NetworkId: r.NetworkID, + SubnetId: r.SubnetID, } nodes = append(nodes, node.ToMap()) } obj["nodes"] = nodes - obj["opensearch_acls_enabled"] = v.AclsEnabled - obj["plan"] = v.Plan - obj["status"] = v.Status - obj["version"] = v.Version - obj["disk_size"] = v.Disk.Size - obj["disk_type"] = v.Disk.Type - obj["advanced_configuration"] = v.AdvancedConfiguration + obj["opensearch_acls_enabled"] = r.AclsEnabled + obj["plan"] = r.Plan + obj["status"] = r.Status + obj["version"] = r.Version + obj["disk_size"] = r.Disk.Size + obj["disk_type"] = r.Disk.Type + obj["advanced_configuration"] = r.AdvancedConfiguration return obj } @@ -185,7 +183,7 @@ func (v CloudProjectDatabaseNodes) ToMap() map[string]interface{} { } type CloudProjectDatabaseCreateOpts struct { - Backups CloudProjectDatabaseBackups `json:"backups,omitempty"` + Backups *CloudProjectDatabaseBackups `json:"backups,omitempty"` Description string `json:"description,omitempty"` Disk CloudProjectDatabaseDisk `json:"disk,omitempty"` IPRestrictions []CloudProjectDatabaseIPRestriction `json:"ipRestrictions,omitempty"` @@ -212,7 +210,7 @@ type CloudProjectDatabaseNodesPattern struct { Region string `json:"region"` } -func (opts *CloudProjectDatabaseCreateOpts) FromResource(d *schema.ResourceData) (*CloudProjectDatabaseCreateOpts, error) { +func (opts *CloudProjectDatabaseCreateOpts) fromResource(d *schema.ResourceData) (*CloudProjectDatabaseCreateOpts, error) { opts.Description = d.Get("description").(string) opts.Plan = d.Get("plan").(string) @@ -251,33 +249,39 @@ func (opts *CloudProjectDatabaseCreateOpts) FromResource(d *schema.ResourceData) if err != nil { return nil, err } + time := d.Get("backup_time").(string) - opts.Backups = CloudProjectDatabaseBackups{ - Regions: regions, - Time: d.Get("backup_time").(string), + if len(regions) 
!= 0 || time != "" { + opts.Backups = &CloudProjectDatabaseBackups{ + Regions: regions, + Time: time, + } } + return opts, nil } type CloudProjectDatabaseUpdateOpts struct { AclsEnabled bool `json:"aclsEnabled,omitempty"` - Backups CloudProjectDatabaseBackups `json:"backups,omitempty"` + Backups *CloudProjectDatabaseBackups `json:"backups,omitempty"` Description string `json:"description,omitempty"` Disk CloudProjectDatabaseDisk `json:"disk,omitempty"` Flavor string `json:"flavor,omitempty"` IPRestrictions []CloudProjectDatabaseIPRestriction `json:"ipRestrictions,omitempty"` Plan string `json:"plan,omitempty"` - RestApi bool `json:"restApi,omitempty"` + RestAPI bool `json:"restApi,omitempty"` + SchemaRegistry bool `json:"schemaRegistry,omitempty"` Version string `json:"version,omitempty"` } -func (opts *CloudProjectDatabaseUpdateOpts) FromResource(d *schema.ResourceData) (*CloudProjectDatabaseUpdateOpts, error) { +func (opts *CloudProjectDatabaseUpdateOpts) fromResource(d *schema.ResourceData) (*CloudProjectDatabaseUpdateOpts, error) { engine := d.Get("engine").(string) if engine == "opensearch" { opts.AclsEnabled = d.Get("opensearch_acls_enabled").(bool) } if engine == "kafka" { - opts.RestApi = d.Get("kafka_rest_api").(bool) + opts.RestAPI = d.Get("kafka_rest_api").(bool) + opts.SchemaRegistry = d.Get("kafka_schema_registry").(bool) } opts.Description = d.Get("description").(string) @@ -301,10 +305,13 @@ func (opts *CloudProjectDatabaseUpdateOpts) FromResource(d *schema.ResourceData) if err != nil { return nil, err } + time := d.Get("backup_time").(string) - opts.Backups = CloudProjectDatabaseBackups{ - Regions: regions, - Time: d.Get("backup_time").(string), + if engine != "kafka" && (len(regions) != 0 || time != "") { + opts.Backups = &CloudProjectDatabaseBackups{ + Regions: regions, + Time: time, + } } return opts, nil diff --git a/website/docs/d/cloud_project_database.html.markdown b/website/docs/d/cloud_project_database.html.markdown index 
92284d365..5f791e618 100644 --- a/website/docs/d/cloud_project_database.html.markdown +++ b/website/docs/d/cloud_project_database.html.markdown @@ -61,6 +61,7 @@ The following attributes are exported: * `ip` - Authorized IP * `status` - Current status of the IP restriction. * `kafka_rest_api` - Defines whether the REST API is enabled on a kafka cluster. +* `kafka_schema_registry` - Defines whether the schema registry is enabled on a Kafka cluster. * `maintenance_time` - Time on which maintenances can start every day. * `network_type` - Type of network of the cluster. * `nodes` - List of nodes object. diff --git a/website/docs/r/cloud_project_database.html.markdown b/website/docs/r/cloud_project_database.html.markdown index f546dd6eb..542b07733 100644 --- a/website/docs/r/cloud_project_database.html.markdown +++ b/website/docs/r/cloud_project_database.html.markdown @@ -45,6 +45,7 @@ resource "ovh_cloud_project_database" "kafkadb" { version = "3.4" plan = "business" kafka_rest_api = true + kafka_schema_registry = true nodes { region = "DE" } @@ -209,45 +210,33 @@ The following arguments are supported: * `service_name` - (Required, Forces new resource) The id of the public cloud project. If omitted, the `OVH_CLOUD_PROJECT_SERVICE` environment variable is used. - * `description` - (Optional) Small description of the database service. - * `engine` - (Required, Forces new resource) The database engine you want to deploy. To get a full list of available engine visit. [public documentation](https://docs.ovh.com/gb/en/publiccloud/databases). - * `flavor` - (Required) A valid OVHcloud public cloud database flavor name in which the nodes will be started. Ex: "db1-7". Changing this value upgrade the nodes with the new flavor. You can find the list of flavor names: https://www.ovhcloud.com/fr/public-cloud/prices/ - * `ip_restrictions` - (Optional) IP Blocks authorized to access to the cluster. 
* `description` - (Optional) Description of the IP restriction * `ip` - (Optional) Authorized IP - * `kafka_rest_api` - (Optional) Defines whether the REST API is enabled on a kafka cluster - +* `kafka_schema_registry` - (Optional) Defines whether the schema registry is enabled on a Kafka cluster * `nodes` - (Required, Minimum Items: 1) List of nodes object. Multi region cluster are not yet available, all node should be identical. * `network_id` - (Optional, Forces new resource) Private network id in which the node should be deployed. It's the regional openstackId of the private network * `region` - (Required, Forces new resource) Public cloud region in which the node should be deployed. Ex: "GRA'. * `subnet_id` - (Optional, Forces new resource) Private subnet ID in which the node is. - * `opensearch_acls_enabled` - (Optional) Defines whether the ACLs are enabled on an OpenSearch cluster - * `disk_size` - (Optional) The disk size (in GB) of the database service. - * `advanced_configuration` - (Optional) Advanced configuration key / value. - * `plan` - (Required) Plan of the cluster. * MongoDB: Enum: "discovery", "production", "advanced". * Mysql, PosgreSQL, Cassandra, M3DB, : Enum: "essential", "business", "enterprise". * M3 Aggregator: "business", "enterprise". * Redis: "essential", "business" - * `version` - (Required) The version of the engine in which the service should be deployed - * `backup_regions` - List of region where backups are pushed. Not more than 1 regions for MongoDB. Not more than 2 regions for the other engines with one being the same as the nodes[].region field - * `backup_time` - Time on which backups start every day. ## Attributes Reference @@ -276,6 +265,7 @@ The following attributes are exported: * `ip` - See Argument Reference above. * `status` - Current status of the IP restriction. * `kafka_rest_api` - See Argument Reference above. +* `kafka_schema_registry` - See Argument Reference above. 
* `maintenance_time` - Time on which maintenances can start every day. * `network_type` - Type of network of the cluster. * `nodes` - See Argument Reference above.