# ---------------------------------------------------------------------------------------------------------------------
# DEPLOY A VAULT SERVER CLUSTER, AN ELB, AND A CONSUL SERVER CLUSTER IN AWS
# This is an example of how to use the vault-cluster and vault-elb modules to deploy a Vault cluster in AWS with an
# Elastic Load Balancer (ELB) in front of it. This cluster uses Consul, running in a separate cluster, as its storage
# backend.
# ---------------------------------------------------------------------------------------------------------------------

# Terraform 0.9.5 suffered from https://github.com/hashicorp/terraform/issues/14399, which causes the conditionals in
# this template to fail.
terraform {
  required_version = ">= 0.9.3, != 0.9.5"
}

# ---------------------------------------------------------------------------------------------------------------------
# AUTOMATICALLY LOOK UP THE LATEST PRE-BUILT AMI
# This repo contains a CircleCI job that automatically builds and publishes the latest AMI by building the Packer
# template at /examples/vault-consul-ami upon every new release. The Terraform data source below automatically looks up
# the latest AMI so that a simple "terraform apply" will just work without the user needing to manually build an AMI and
# fill in the right value.
#
# !! WARNING !! These example AMIs are meant only for convenience when initially testing this repo. Do NOT use these
# example AMIs in a production setting, as the TLS certificate files they contain are publicly available from the
# Module repo containing this code.
#
# NOTE: This Terraform data source must return at least one AMI result or the entire template will fail. See
# /_ci/publish-amis-in-new-account.md for more information.
# ---------------------------------------------------------------------------------------------------------------------

data "aws_ami" "vault_consul" {
  most_recent = true

  # If we change the AWS Account in which tests are run, update this value.
  owners = ["562637147889"]

  filter {
    name   = "virtualization-type"
    values = ["hvm"]
  }

  filter {
    name   = "is-public"
    values = ["true"]
  }

  filter {
    name   = "name"
    values = ["vault-consul-ubuntu-*"]
  }
}
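
# If you have built your own AMI from the Packer template, you can point this example at it instead of the public test
# AMI by setting the ami_id input variable, which takes precedence over the lookup above (the AMI ID below is a
# hypothetical placeholder):
#
#   terraform apply -var ami_id=ami-0123456789abcdef0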

# ---------------------------------------------------------------------------------------------------------------------
# DEPLOY THE VAULT SERVER CLUSTER
# ---------------------------------------------------------------------------------------------------------------------

module "vault_cluster" {
  # When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you
  # to a specific version of the modules, such as the following example:
  # source = "github.com/hashicorp/terraform-aws-vault//modules/vault-cluster?ref=v0.0.1"
  source = "modules/vault-cluster"

  cluster_name  = "${var.vault_cluster_name}"
  cluster_size  = "${var.vault_cluster_size}"
  instance_type = "${var.vault_instance_type}"

  ami_id    = "${var.ami_id == "" ? data.aws_ami.vault_consul.image_id : var.ami_id}"
  user_data = "${data.template_file.user_data_vault_cluster.rendered}"

  vpc_id     = "${data.aws_vpc.default.id}"
  subnet_ids = "${data.aws_subnet_ids.default.ids}"

  # Do NOT use the ELB for the ASG health check, or the ASG will assume all sealed instances are unhealthy and
  # repeatedly try to redeploy them.
  health_check_type = "EC2"

  # To make testing easier, we allow requests from any IP address here but in a production deployment, we *strongly*
  # recommend you limit this to the IP address ranges of known, trusted servers inside your VPC.
  allowed_ssh_cidr_blocks              = ["0.0.0.0/0"]
  allowed_inbound_cidr_blocks          = ["0.0.0.0/0"]
  allowed_inbound_security_group_ids   = []
  allowed_inbound_security_group_count = 0

  ssh_key_name = "${var.ssh_key_name}"
}

# ---------------------------------------------------------------------------------------------------------------------
# ATTACH IAM POLICIES FOR CONSUL
# To allow our Vault servers to automatically discover the Consul servers, we need to give them the IAM permissions from
# the Consul AWS Module's consul-iam-policies module.
# ---------------------------------------------------------------------------------------------------------------------

module "consul_iam_policies_servers" {
  source = "github.com/hashicorp/terraform-aws-consul//modules/consul-iam-policies?ref=v0.3.3"

  iam_role_id = "${module.vault_cluster.iam_role_id}"
}

# ---------------------------------------------------------------------------------------------------------------------
# THE USER DATA SCRIPT THAT WILL RUN ON EACH VAULT SERVER WHEN IT'S BOOTING
# This script will configure and start Vault.
# ---------------------------------------------------------------------------------------------------------------------

data "template_file" "user_data_vault_cluster" {
  template = "${file("${path.module}/examples/root-example/user-data-vault.sh")}"

  vars {
    aws_region               = "${data.aws_region.current.name}"
    consul_cluster_tag_key   = "${var.consul_cluster_tag_key}"
    consul_cluster_tag_value = "${var.consul_cluster_name}"
  }
}
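
# A sketch of how the script consumes these vars (the real script lives at examples/root-example/user-data-vault.sh;
# the flags and install paths below are assumptions based on the run-consul and run-vault helpers that ship with
# these modules):
#
#   /opt/consul/bin/run-consul --client --cluster-tag-key "${consul_cluster_tag_key}" --cluster-tag-value "${consul_cluster_tag_value}"
#   /opt/vault/bin/run-vault --tls-cert-file /opt/vault/tls/vault.crt.pem --tls-key-file /opt/vault/tls/vault.key.pem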

# ---------------------------------------------------------------------------------------------------------------------
# PERMIT CONSUL-SPECIFIC TRAFFIC IN THE VAULT CLUSTER
# To allow the Consul agents on our Vault servers to communicate with the other Consul agents and participate in LAN
# gossip, we open up the Consul-specific protocols and ports for Consul traffic.
# ---------------------------------------------------------------------------------------------------------------------

module "security_group_rules" {
  source = "github.com/hashicorp/terraform-aws-consul.git//modules/consul-client-security-group-rules?ref=v0.3.3"

  security_group_id = "${module.vault_cluster.security_group_id}"

  # To make testing easier, we allow requests from any IP address here but in a production deployment, we *strongly*
  # recommend you limit this to the IP address ranges of known, trusted servers inside your VPC.
  allowed_inbound_cidr_blocks = ["0.0.0.0/0"]
}
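
# Under the hood, that module adds ingress rules for the standard Consul client ports to the Vault cluster's security
# group (for example, Serf LAN gossip on 8301 TCP/UDP); see the consul-client-security-group-rules module for the
# authoritative list.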

# ---------------------------------------------------------------------------------------------------------------------
# DEPLOY THE ELB
# ---------------------------------------------------------------------------------------------------------------------

module "vault_elb" {
  # When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you
  # to a specific version of the modules, such as the following example:
  # source = "github.com/hashicorp/terraform-aws-vault//modules/vault-elb?ref=v0.0.1"
  source = "modules/vault-elb"

  name = "${var.vault_cluster_name}"

  vpc_id     = "${data.aws_vpc.default.id}"
  subnet_ids = "${data.aws_subnet_ids.default.ids}"

  # Associate the ELB with the instances created by the Vault Autoscaling Group
  vault_asg_name = "${module.vault_cluster.asg_name}"

  # To make testing easier, we allow requests from any IP address here but in a production deployment, we *strongly*
  # recommend you limit this to the IP address ranges of known, trusted servers inside your VPC.
  allowed_inbound_cidr_blocks = ["0.0.0.0/0"]

  # In order to access Vault over HTTPS, we need a domain name that matches the TLS cert
  create_dns_entry = "${var.create_dns_entry}"

  # Terraform conditionals are not short-circuiting, so we use join as a workaround to avoid errors when the
  # aws_route53_zone data source isn't actually set: https://github.com/hashicorp/hil/issues/50
  hosted_zone_id = "${var.create_dns_entry ? join("", data.aws_route53_zone.selected.*.zone_id) : ""}"

  domain_name = "${var.vault_domain_name}"
}

# Look up the Route 53 Hosted Zone by domain name
data "aws_route53_zone" "selected" {
  count = "${var.create_dns_entry}"
  name  = "${var.hosted_zone_domain_name}."
}
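
# Note on the pattern above: Terraform 0.x coerces the boolean into count, so this data source is created once when
# create_dns_entry is true and zero times when it is false. In the false case,
# data.aws_route53_zone.selected.*.zone_id is an empty list, join("", ...) collapses it to "", and the hosted_zone_id
# conditional never has to dereference a data source that doesn't exist.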

# ---------------------------------------------------------------------------------------------------------------------
# DEPLOY THE CONSUL SERVER CLUSTER
# ---------------------------------------------------------------------------------------------------------------------

module "consul_cluster" {
  source = "github.com/hashicorp/terraform-aws-consul//modules/consul-cluster?ref=v0.3.3"

  cluster_name  = "${var.consul_cluster_name}"
  cluster_size  = "${var.consul_cluster_size}"
  instance_type = "${var.consul_instance_type}"

  # The EC2 Instances will use these tags to automatically discover each other and form a cluster
  cluster_tag_key   = "${var.consul_cluster_tag_key}"
  cluster_tag_value = "${var.consul_cluster_name}"

  ami_id    = "${var.ami_id == "" ? data.aws_ami.vault_consul.image_id : var.ami_id}"
  user_data = "${data.template_file.user_data_consul.rendered}"

  vpc_id     = "${data.aws_vpc.default.id}"
  subnet_ids = "${data.aws_subnet_ids.default.ids}"

  # To make testing easier, we allow Consul and SSH requests from any IP address here but in a production
  # deployment, we strongly recommend you limit this to the IP address ranges of known, trusted servers inside your VPC.
  allowed_ssh_cidr_blocks     = ["0.0.0.0/0"]
  allowed_inbound_cidr_blocks = ["0.0.0.0/0"]
  ssh_key_name                = "${var.ssh_key_name}"
}
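
# Discovery works via Consul's EC2 cloud auto-join: each server queries the EC2 API for instances carrying the tag
# above and joins them, roughly equivalent to the following Consul agent setting (a sketch; the run-consul script in
# the Consul AWS Module generates the real configuration):
#
#   retry_join = ["provider=aws tag_key=${var.consul_cluster_tag_key} tag_value=${var.consul_cluster_name}"]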

# ---------------------------------------------------------------------------------------------------------------------
# THE USER DATA SCRIPT THAT WILL RUN ON EACH CONSUL SERVER WHEN IT'S BOOTING
# This script will configure and start Consul.
# ---------------------------------------------------------------------------------------------------------------------

data "template_file" "user_data_consul" {
  template = "${file("${path.module}/examples/root-example/user-data-consul.sh")}"

  vars {
    consul_cluster_tag_key   = "${var.consul_cluster_tag_key}"
    consul_cluster_tag_value = "${var.consul_cluster_name}"
  }
}
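
# As with the Vault user data above, a sketch of the expected invocation inside user-data-consul.sh (assumed, based on
# the run-consul helper from the Consul AWS Module), this time in server mode:
#
#   /opt/consul/bin/run-consul --server --cluster-tag-key "${consul_cluster_tag_key}" --cluster-tag-value "${consul_cluster_tag_value}"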

# ---------------------------------------------------------------------------------------------------------------------
# DEPLOY THE CLUSTERS IN THE DEFAULT VPC AND AVAILABILITY ZONES
# Using the default VPC and subnets makes this example easy to run and test, but it means Consul and Vault are
# accessible from the public Internet. In a production deployment, we strongly recommend deploying into a custom VPC
# and private subnets. Only the ELB should run in the public subnets.
# ---------------------------------------------------------------------------------------------------------------------

data "aws_vpc" "default" {
  default = "${var.use_default_vpc}"
  tags    = "${var.vpc_tags}"
}

data "aws_subnet_ids" "default" {
  vpc_id = "${data.aws_vpc.default.id}"
  tags   = "${var.subnet_tags}"
}

data "aws_region" "current" {}