Merge branch 'master' into feat/add-psc-support
g-awmalik authored Sep 3, 2023
2 parents 4e5776b + 0e0c196 commit 98b869b
Showing 7 changed files with 64 additions and 12 deletions.
4 changes: 4 additions & 0 deletions modules/backup/README.md
@@ -56,6 +56,8 @@ fetch workflows.googleapis.com/Workflow
| backup\_retention\_time | The number of days backups should be kept | `number` | `30` | no |
| backup\_schedule | The cron schedule to execute the internal backup | `string` | `"45 2 * * *"` | no |
| compress\_export | Whether or not to compress the export when storing in the bucket; Only valid for MySQL and PostgreSQL | `bool` | `true` | no |
| connector\_params\_timeout | The end-to-end duration the connector call is allowed to run for before throwing a timeout exception. The default value is 1800 and this should be the maximum for connector methods that are not long-running operations. Otherwise, for long-running operations, the maximum timeout for a connector call is 31536000 seconds (one year). | `number` | `1800` | no |
| enable\_connector\_params | Whether to enable connector-specific parameters for Google Workflow SQL Export. | `bool` | `false` | no |
| enable\_export\_backup | Whether to create exports to GCS Buckets with this module | `bool` | `true` | no |
| enable\_internal\_backup | Whether to create internal backups with this module | `bool` | `true` | no |
| export\_databases | The list of databases that should be exported - if it is an empty set, all databases will be exported | `set(string)` | `[]` | no |
@@ -67,7 +69,9 @@ fetch workflows.googleapis.com/Workflow
| scheduler\_timezone | The Timezone in which the Scheduler Jobs are triggered | `string` | `"Etc/GMT"` | no |
| service\_account | The service account to use for running the workflow and triggering the workflow via Cloud Scheduler - if empty or null, a service account will be created. If you provide a service account, you need to grant it the Cloud SQL Admin and Workflows Invoker roles | `string` | `null` | no |
| sql\_instance | The name of the SQL instance to backup | `string` | n/a | yes |
| sql\_instance\_replica | The name of the SQL instance replica to export | `string` | `null` | no |
| unique\_suffix | Unique suffix to add to scheduler jobs and workflows names. | `string` | `""` | no |
| use\_sql\_instance\_replica\_in\_exporter | Whether or not to use the replica instance in the exporter workflow. | `bool` | `false` | no |
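
For orientation, a hedged sketch of a module call that exercises the new inputs; the source path, project, bucket, and instance names below are placeholders, not values from this change:

```hcl
# Illustrative only - every name and value here is a placeholder.
module "sql_backup" {
  source     = "../../modules/backup" # hypothetical local path to this submodule
  project_id = "my-project"
  region     = "us-central1"

  sql_instance = "my-primary-instance"
  export_uri   = "gs://my-sql-export-bucket"

  # Export from a read replica instead of the primary.
  use_sql_instance_replica_in_exporter = true
  sql_instance_replica                 = "my-primary-instance-replica"

  # Give the Workflows connector call up to two hours before it times out.
  enable_connector_params  = true
  connector_params_timeout = 7200
}
```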

## Outputs

22 changes: 12 additions & 10 deletions modules/backup/main.tf
@@ -97,26 +97,28 @@ resource "google_cloud_scheduler_job" "sql_backup" {
################################
resource "google_workflows_workflow" "sql_export" {
count = var.enable_export_backup ? 1 : 0
name = "sql-export-${var.sql_instance}${var.unique_suffix}"
name = var.use_sql_instance_replica_in_exporter ? "sql-export-${var.sql_instance_replica}${var.unique_suffix}" : "sql-export-${var.sql_instance}${var.unique_suffix}"
region = var.region
description = "Workflow for backing up the CloudSQL Instance"
project = var.project_id
service_account = local.service_account
source_contents = templatefile("${path.module}/templates/export.yaml.tftpl", {
project = var.project_id
instanceName = var.sql_instance
backupRetentionTime = var.backup_retention_time
databases = jsonencode(var.export_databases)
gcsBucket = var.export_uri
dbType = split("_", data.google_sql_database_instance.backup_instance.database_version)[0]
compressExport = var.compress_export
logDbName = var.log_db_name_to_export
project = var.project_id
instanceName = var.use_sql_instance_replica_in_exporter ? var.sql_instance_replica : var.sql_instance
backupRetentionTime = var.backup_retention_time
databases = jsonencode(var.export_databases)
gcsBucket = var.export_uri
dbType = split("_", data.google_sql_database_instance.backup_instance.database_version)[0]
compressExport = var.compress_export
enableConnectorParams = var.enable_connector_params
connectorParamsTimeout = var.connector_params_timeout
logDbName = var.log_db_name_to_export
})
}
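
The same replica-vs-primary ternary now drives the workflow name, the scheduler job name, and the instanceName template variable. Purely as a readability sketch (not part of this commit), the choice could be captured once in a local:

```hcl
# Sketch only: one local for the instance the export workflow should target.
locals {
  exporter_instance = var.use_sql_instance_replica_in_exporter ? var.sql_instance_replica : var.sql_instance
}
```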

resource "google_cloud_scheduler_job" "sql_export" {
count = var.enable_export_backup ? 1 : 0
name = "sql-export-${var.sql_instance}${var.unique_suffix}"
name = var.use_sql_instance_replica_in_exporter ? "sql-export-${var.sql_instance_replica}${var.unique_suffix}" : "sql-export-${var.sql_instance}${var.unique_suffix}"
project = var.project_id
region = var.region
description = "Managed by Terraform - Triggers a SQL Export via Workflows"
13 changes: 12 additions & 1 deletion modules/backup/templates/export.yaml.tftpl
@@ -54,6 +54,10 @@ main:
args:
project: ${project}
instance: ${instanceName}
%{ if enableConnectorParams }
connector_params:
timeout: ${connectorParamsTimeout}
%{ endif }
body:
exportContext:
databases: [$${database}]
@@ -81,6 +85,10 @@ main:
args:
project: ${project}
instance: ${instanceName}
%{ if enableConnectorParams }
connector_params:
timeout: ${connectorParamsTimeout}
%{ endif }
body:
exportContext:
databases: [$${database}]
@@ -94,9 +102,12 @@ main:
args:
project: ${project}
instance: ${instanceName}
%{ if enableConnectorParams }
connector_params:
timeout: ${connectorParamsTimeout}
%{ endif }
body:
exportContext:
databases: $${databases}
uri: $${"${gcsBucket}/${instanceName}-" + backupTime + %{ if compressExport == true }".sql.gz"%{ else }".sql"%{ endif }}
%{ endif }

24 changes: 24 additions & 0 deletions modules/backup/variables.tf
@@ -97,6 +97,18 @@ variable "compress_export" {
default = true
}

variable "enable_connector_params" {
description = "Whether to enable connector-specific parameters for Google Workflow SQL Export."
type = bool
default = false
}

variable "connector_params_timeout" {
description = "The end-to-end duration the connector call is allowed to run for before throwing a timeout exception. The default value is 1800 and this should be the maximum for connector methods that are not long-running operations. Otherwise, for long-running operations, the maximum timeout for a connector call is 31536000 seconds (one year)."
type = number
default = 1800
}

variable "unique_suffix" {
description = "Unique suffix to add to scheduler jobs and workflows names."
type = string
@@ -108,3 +120,15 @@ variable "log_db_name_to_export" {
type = bool
default = false
}

variable "use_sql_instance_replica_in_exporter" {
description = "Whether or not to use replica instance on exporter workflow."
type = bool
default = false
}

variable "sql_instance_replica" {
description = "The name of the SQL instance replica to export"
type = string
default = null
}
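
These two inputs only make sense together: the replica name is consulted solely when the toggle is true. As a hedged illustration (not part of this change), that dependency could be stated explicitly with a precondition on the workflow resource, for example:

```hcl
# Illustrative only: fail the plan if the replica toggle is set without a replica name.
lifecycle {
  precondition {
    condition     = !(var.use_sql_instance_replica_in_exporter && var.sql_instance_replica == null)
    error_message = "sql_instance_replica must be set when use_sql_instance_replica_in_exporter is true."
  }
}
```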
2 changes: 1 addition & 1 deletion modules/mysql/README.md
@@ -49,7 +49,7 @@ Note: CloudSQL provides [disk autoresize](https://cloud.google.com/sql/docs/mysq
| read\_replica\_deletion\_protection | Used to block Terraform from deleting replica SQL Instances. | `bool` | `false` | no |
| read\_replica\_deletion\_protection\_enabled | Enables protection of a read replica from accidental deletion across all surfaces (API, gcloud, Cloud Console and Terraform). | `bool` | `false` | no |
| read\_replica\_name\_suffix | The optional suffix to add to the read instance name | `string` | `""` | no |
| read\_replicas | List of read replicas to create. Encryption key is required for replica in different region. For replica in same region as master set encryption\_key\_name = null | <pre>list(object({<br> name = string<br> name_override = optional(string)<br> tier = optional(string)<br> edition = optional(string)<br> availability_type = optional(string)<br> zone = optional(string)<br> disk_type = optional(string)<br> disk_autoresize = optional(bool)<br> disk_autoresize_limit = optional(number)<br> disk_size = optional(string)<br> user_labels = map(string)<br> database_flags = list(object({<br> name = string<br> value = string<br> }))<br> insights_config = optional(object({<br> query_plans_per_minute = number<br> query_string_length = number<br> record_application_tags = bool<br> record_client_address = bool<br> }))<br> ip_configuration = object({<br> authorized_networks = optional(list(map(string)), [])<br> ipv4_enabled = optional(bool)<br> private_network = optional(string, )<br> require_ssl = optional(bool)<br> allocated_ip_range = optional(string)<br> enable_private_path_for_google_cloud_services = optional(bool, false)<br> psc_enabled = optional(bool, false)<br> psc_allowed_consumer_projects = optional(list(string), [])<br> })<br> encryption_key_name = optional(string)<br> }))</pre> | `[]` | no |
| read\_replicas | List of read replicas to create. Encryption key is required for replica in different region. For replica in same region as master set encryption\_key\_name = null | <pre>list(object({<br> name = string<br> name_override = optional(string)<br> tier = string<br> edition = optional(string)<br> zone = string<br> availability_type = string<br> disk_type = string<br> disk_autoresize = bool<br> disk_autoresize_limit = number<br> disk_size = string<br> user_labels = map(string)<br> database_flags = list(object({<br> name = string<br> value = string<br> }))<br> backup_configuration = optional(object({<br> binary_log_enabled = bool<br> transaction_log_retention_days = string<br> }))<br> insights_config = optional(object({<br> query_plans_per_minute = number<br> query_string_length = number<br> record_application_tags = bool<br> record_client_address = bool<br> }))<br> ip_configuration = object({<br> authorized_networks = list(map(string))<br> ipv4_enabled = bool<br> private_network = string<br> require_ssl = bool<br> allocated_ip_range = string<br> enable_private_path_for_google_cloud_services = optional(bool)<br> })<br> encryption_key_name = string<br> }))</pre> | `[]` | no |
| region | The region of the Cloud SQL resources | `string` | `"us-central1"` | no |
| replica\_database\_version | The read replica database version to use. This var should only be used during a database update. The update sequence 1. read-replica 2. master, setting this to an updated version will cause the replica to update, then you may update the master with the var database\_version and remove this field after update is complete | `string` | `""` | no |
| root\_password | Mysql password for the root user. If not set, a random one will be generated and available in the root\_password output variable. | `string` | `""` | no |
7 changes: 7 additions & 0 deletions modules/mysql/read_replica.tf
@@ -50,6 +50,13 @@ resource "google_sql_database_instance" "replicas" {
availability_type = lookup(each.value, "availability_type", var.availability_type)
deletion_protection_enabled = var.read_replica_deletion_protection_enabled

dynamic "backup_configuration" {
for_each = each.value["backup_configuration"] != null ? [each.value["backup_configuration"]] : []
content {
binary_log_enabled = lookup(backup_configuration.value, "binary_log_enabled", null)
transaction_log_retention_days = lookup(backup_configuration.value, "transaction_log_retention_days", null)
}
}

dynamic "insights_config" {
for_each = lookup(each.value, "insights_config") != null ? [lookup(each.value, "insights_config")] : []
4 changes: 4 additions & 0 deletions modules/mysql/variables.tf
@@ -253,6 +253,10 @@ variable "read_replicas" {
name = string
value = string
}))
backup_configuration = optional(object({
binary_log_enabled = bool
transaction_log_retention_days = string
}))
insights_config = optional(object({
query_plans_per_minute = number
query_string_length = number
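
Taken together, the mysql changes above let a read_replicas entry opt into binary logging on the replica. An illustrative entry (all names and values hypothetical) that would populate the new dynamic backup_configuration block:

```hcl
# Hypothetical read_replicas entry; only backup_configuration is new in this change.
read_replicas = [
  {
    name                  = "replica-0"
    tier                  = "db-n1-standard-1"
    zone                  = "us-central1-a"
    availability_type     = "ZONAL"
    disk_type             = "PD_SSD"
    disk_autoresize       = true
    disk_autoresize_limit = 0
    disk_size             = "10"
    user_labels           = {}
    database_flags        = []

    # New in this change: enable binary logging on the replica and retain 7 days of transaction logs.
    backup_configuration = {
      binary_log_enabled             = true
      transaction_log_retention_days = "7"
    }

    ip_configuration = {
      authorized_networks = []
      ipv4_enabled        = true
      private_network     = null
      require_ssl         = false
      allocated_ip_range  = null
    }
    encryption_key_name = null
  }
]
```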
