diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml new file mode 100644 index 0000000..8365794 --- /dev/null +++ b/.github/workflows/integration.yml @@ -0,0 +1,113 @@ +name: 'integration' +# This workflow intends to verify that the module provisions +# successfully for all software and infrastructure defined. +# https://learn.hashicorp.com/tutorials/terraform/automate-terraform + +on: + workflow_dispatch: + push: + paths-ignore: + - 'LICENSE' + - '**.md' + +jobs: + integrate: + name: Integration Tests + runs-on: ${{ matrix.os }} + timeout-minutes: 120 + strategy: + matrix: + os: [ubuntu-latest] + tf: [0.14.8] + vsphere: + #- vmware_os: vmware_esxi_6_7 + # vcenter_iso: VMware-VCSA-all-6.7.0-14367737.iso + - vmware_os: vmware_esxi_7_0 + vcenter_iso: VMware-VCSA-all-7.0.0-16189094.iso + env: + SSH_AUTH_SOCK: /tmp/ssh_agent.sock + TF_IN_AUTOMATION: 1 + TF_VERSION: ${{ matrix.tf }} + TF_VAR_control_plane_node_count: 0 + TF_VAR_vcenter_iso_name: ${{ matrix.vsphere.vcenter_iso }} + TF_VAR_vmware_os: ${{ matrix.vsphere.vmware_os }} + TF_VAR_esxi_host_count: 2 + TF_VAR_esxi_size: "c3.medium.x86" + TF_VAR_router_size: "c2.medium.x86" + TF_VAR_facility: "sjc1" + TF_VAR_create_project: false + # TODO only provide this to terraform steps that need it + TF_VAR_auth_token: ${{ secrets.METAL_AUTH_TOKEN }} + TF_VAR_organization_id: ${{ secrets.METAL_ORGANIZATION_ID }} + TF_VAR_s3_url: ${{ secrets.S3_URL }} + TF_VAR_object_store_bucket_name: ${{ secrets.S3_BUCKET_NAME }} + TF_VAR_s3_access_key: ${{ secrets.S3_ACCESS_KEY }} + TF_VAR_s3_secret_key: ${{ secrets.S3_SECRET_KEY }} + TF_VAR_s3_boolean: true + TF_VAR_anthos_version: "1.4.0-gke.13" + TF_VAR_anthos_gcp_project_id: ${{ secrets.ANTHOS_GCP_PROJECT_ID }} + # TODO remove this secret and variable if the "Cluster Name" setting works below + TF_VAR_anthos_user_cluster_name: ${{ secrets.ANTHOS_USER_CLUSTER_NAME }} + steps: + - name: Checkout from Github + uses: actions/checkout@v2 + - name: Add SHORT_SHA env property with commit short sha + run: echo "SHORT_SHA=`echo ${GITHUB_SHA} | cut -c1-8`" >> $GITHUB_ENV + + - name: Install Terraform + uses: hashicorp/setup-terraform@v1 + with: + terraform_version: ${{ env.TF_VERSION }} + - name: GCP Keys + env: + GCP_CONNECT_KEY: ${{ secrets.GCP_CONNECT_KEY }} + GCP_REGISTER_KEY: ${{ secrets.GCP_REGISTER_KEY }} + GCP_STACKDRIVER_KEY: ${{ secrets.GCP_STACKDRIVER_KEY }} + GCP_STORAGE_READER_KEY: ${{ secrets.GCP_STORAGE_READER_KEY }} + GCP_WHITELISTED_KEY: ${{ secrets.GCP_WHITELISTED_KEY }} + run: | + mkdir -p anthos/gcp_keys + echo "${GCP_CONNECT_KEY}" > anthos/gcp_keys/connect-key.json + echo "${GCP_REGISTER_KEY}" > anthos/gcp_keys/register-key.json + echo "${GCP_STACKDRIVER_KEY}" > anthos/gcp_keys/stackdriver-key.json + echo "${GCP_STORAGE_READER_KEY}" > anthos/gcp_keys/storage-reader-key.json + echo "${GCP_WHITELISTED_KEY}" > anthos/gcp_keys/whitelisted-key.json + - name: Initialize Terraform, Modules, and Plugins + id: init + run: terraform init -input=false + - id: project + uses: displague/metal-project-action@v0.10.0 + env: + METAL_AUTH_TOKEN: ${{ secrets.METAL_AUTH_TOKEN }} + # Configure an SSH Agent with a key that can access the project + - name: SSH Agent + run: | + ssh-agent -a $SSH_AUTH_SOCK > /dev/null + ssh-add <(echo $METAL_SSH_PRIVATE_KEY_BASE64 | base64 -d) + - name: Terraform Vars - Cluster Name + # TODO can we use this as the anthos user cluster name?
+ run: | + echo "TF_VAR_vcenter_cluster_name=tfacc-${SHORT_SHA}" >> $GITHUB_ENV + echo "TF_VAR_anthos_user_cluster_name=tfacc-${SHORT_SHA}" >> $GITHUB_ENV + - name: Terraform Vars - Project ID + run: echo "TF_VAR_project_id=${{ steps.project.outputs.projectID }}" >> $GITHUB_ENV + - name: Terraform Plan + id: plan + timeout-minutes: 120 + run: terraform plan -out=tfplan -input=false + - name: Terraform Apply + id: apply + timeout-minutes: 120 + run: terraform apply -input=false tfplan + - name: Terraform Destroy + id: destroy + if: ${{ always() }} + run: terraform destroy -input=false -auto-approve + - name: Project Delete + if: ${{ always() }} + uses: displague/metal-sweeper-action@v0.3.0 + with: + keepProject: 'false' + env: + METAL_PROJECT_ID: ${{ steps.project.outputs.projectID }} + METAL_AUTH_TOKEN: ${{ secrets.METAL_AUTH_TOKEN }} diff --git a/.github/workflows/terraform.yml b/.github/workflows/terraform.yml new file mode 100644 index 0000000..64fc216 --- /dev/null +++ b/.github/workflows/terraform.yml @@ -0,0 +1,39 @@ +name: 'terraform' +# This workflow verifies that the Terraform configs are valid, +# without running Google Anthos or building any Packet infrastructure. +# https://learn.hashicorp.com/tutorials/terraform/automate-terraform + +on: + push: + branches: + - master + pull_request: + +jobs: + test: + name: Test + runs-on: ${{ matrix.os }} + env: + TF_IN_AUTOMATION: 1 + TF_VERSION: ${{ matrix.tf }} + strategy: + matrix: + os: [ubuntu-latest] + tf: [0.14.8] + steps: + - name: Checkout from Github + uses: actions/checkout@v2 + - name: Install Terraform + uses: hashicorp/setup-terraform@v1 + with: + terraform_version: ${{ env.TF_VERSION }} + - name: Check Terraform formatting + id: fmt + run: terraform fmt + continue-on-error: true + - name: Initialize Terraform, Modules, and Plugins + id: init + run: terraform init -input=false + - name: Validate Terraform syntax + id: validate + run: terraform validate -no-color diff --git a/.gitignore b/.gitignore index 520643b..2191637 100644 --- a/.gitignore +++ b/.gitignore @@ -1,7 +1,15 @@ -/.terraform* -/override.tf -/terraform.tfvars +**/.terraform/* +*.tfstate +*.tfstate.* +crash.log +*.tfvars +override.tf +override.tf.json +*_override.tf +*_override.tf.json +terraform.tfvars +.terraformrc +terraform.rc /anthos_ssh_priv_key /anthos/gcp_keys/* -terraform.tfstate* /ksa_token.txt diff --git a/.terraform.lock.hcl b/.terraform.lock.hcl new file mode 100644 index 0000000..e285a49 --- /dev/null +++ b/.terraform.lock.hcl @@ -0,0 +1,128 @@ +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. 
+ +provider "registry.terraform.io/equinix/metal" { + version = "1.1.0" + constraints = "~> 1.1" + hashes = [ + "h1:cWzXeAijh0itev15SULKHreilkBKZcnaG/F98Uw4nl4=", + "zh:3fa3f8d7d7098a70c02f866ea45c718a0a2bec611730595c41b94251eed6f445", + "zh:400c107fa0e9a5e31a7e330295a2e48e70503fd908296aba33d1008ddb7840bd", + "zh:4998994a71aeb88a3d5e909c30e3aa70a56a26a6764d266a1493958352157e73", + "zh:582d0e81455f12759e8e9ca17ce75323b51c661d187367cfed21e9f4277c86a0", + "zh:68bab42ebdefb86da5374e16897be743ccbd4cdd5968cf0c1cf0a16791a99a5e", + "zh:7a61c7cf256b47621b4407d16cce964a50c7b75674952fb812d2e2cb04fdda6e", + "zh:95feee424de8de1f1612e633267bfbcfc0dedc82d841eb8abd0b0cc671418a05", + "zh:a9e2f1596f472693a66cc1b3571e8a1325de233c78e6a548d1adacd5b7c934b8", + "zh:b420f328e78a645a5d362b078a07ff0223b9bc285a301fed5a9e36b625ef0aae", + "zh:f6edceb76ca69359cce2064f70dcf08144de998e559eaceba8dfd0846a761e43", + "zh:fd2746ea1a4a192a9ea8d91eef92351d7feffba982b2645d0c9a9eb5a2cd97ba", + ] +} + +provider "registry.terraform.io/hashicorp/external" { + version = "2.1.0" + hashes = [ + "h1:LTl5CGW8wiIEe16AC4MtXN/95xWWNDbap70zJsBTk0w=", + "zh:0d83ffb72fbd08986378204a7373d8c43b127049096eaf2765bfdd6b00ad9853", + "zh:7577d6edc67b1e8c2cf62fe6501192df1231d74125d90e51d570d586d95269c5", + "zh:9c669ded5d5affa4b2544952c4b6588dfed55260147d24ced02dca3a2829f328", + "zh:a404d46f2831f90633947ab5d57e19dbfe35b3704104ba6ec80bcf50b058acfd", + "zh:ae1caea1c936d459ceadf287bb5c5bd67b5e2a7819df6f5c4114b7305df7f822", + "zh:afb4f805477694a4b9dde86b268d2c0821711c8aab1c6088f5f992228c4c06fb", + "zh:b993b4a1de8a462643e78f4786789e44ce5064b332fee1cb0d6250ed085561b8", + "zh:c84b2c13fa3ea2c0aa7291243006d560ce480a5591294b9001ce3742fc9c5791", + "zh:c8966f69b7eccccb771704fd5335923692eccc9e0e90cb95d14538fe2e92a3b8", + "zh:d5fe68850d449b811e633a300b114d0617df6d450305e8251643b4d143dc855b", + "zh:ddebfd1e674ba336df09b1f27bbaa0e036c25b7a7087dc8081443f6e5954028b", + ] +} + +provider "registry.terraform.io/hashicorp/local" { + version = "2.1.0" + hashes = [ + "h1:KfieWtVyGWwplSoLIB5usKAUnrIkDQBkWaR5TI+4WYg=", + "zh:0f1ec65101fa35050978d483d6e8916664b7556800348456ff3d09454ac1eae2", + "zh:36e42ac19f5d68467aacf07e6adcf83c7486f2e5b5f4339e9671f68525fc87ab", + "zh:6db9db2a1819e77b1642ec3b5e95042b202aee8151a0256d289f2e141bf3ceb3", + "zh:719dfd97bb9ddce99f7d741260b8ece2682b363735c764cac83303f02386075a", + "zh:7598bb86e0378fd97eaa04638c1a4c75f960f62f69d3662e6d80ffa5a89847fe", + "zh:ad0a188b52517fec9eca393f1e2c9daea362b33ae2eb38a857b6b09949a727c1", + "zh:c46846c8df66a13fee6eff7dc5d528a7f868ae0dcf92d79deaac73cc297ed20c", + "zh:dc1a20a2eec12095d04bf6da5321f535351a594a636912361db20eb2a707ccc4", + "zh:e57ab4771a9d999401f6badd8b018558357d3cbdf3d33cc0c4f83e818ca8e94b", + "zh:ebdcde208072b4b0f8d305ebf2bfdc62c926e0717599dcf8ec2fd8c5845031c3", + "zh:ef34c52b68933bedd0868a13ccfd59ff1c820f299760b3c02e008dc95e2ece91", + ] +} + +provider "registry.terraform.io/hashicorp/null" { + version = "3.1.0" + hashes = [ + "h1:xhbHC6in3nQryvTQBWKxebi3inG5OCgHgc4fRxL0ymc=", + "zh:02a1675fd8de126a00460942aaae242e65ca3380b5bb192e8773ef3da9073fd2", + "zh:53e30545ff8926a8e30ad30648991ca8b93b6fa496272cd23b26763c8ee84515", + "zh:5f9200bf708913621d0f6514179d89700e9aa3097c77dac730e8ba6e5901d521", + "zh:9ebf4d9704faba06b3ec7242c773c0fbfe12d62db7d00356d4f55385fc69bfb2", + "zh:a6576c81adc70326e4e1c999c04ad9ca37113a6e925aefab4765e5a5198efa7e", + "zh:a8a42d13346347aff6c63a37cda9b2c6aa5cc384a55b2fe6d6adfa390e609c53", + "zh:c797744d08a5307d50210e0454f91ca4d1c7621c68740441cf4579390452321d", + 
"zh:cecb6a304046df34c11229f20a80b24b1603960b794d68361a67c5efe58e62b8", + "zh:e1371aa1e502000d9974cfaff5be4cfa02f47b17400005a16f14d2ef30dc2a70", + "zh:fc39cc1fe71234a0b0369d5c5c7f876c71b956d23d7d6f518289737a001ba69b", + "zh:fea4227271ebf7d9e2b61b89ce2328c7262acd9fd190e1fd6d15a591abfa848e", + ] +} + +provider "registry.terraform.io/hashicorp/random" { + version = "3.1.0" + hashes = [ + "h1:rKYu5ZUbXwrLG1w81k7H3nce/Ys6yAxXhWcbtk36HjY=", + "zh:2bbb3339f0643b5daa07480ef4397bd23a79963cc364cdfbb4e86354cb7725bc", + "zh:3cd456047805bf639fbf2c761b1848880ea703a054f76db51852008b11008626", + "zh:4f251b0eda5bb5e3dc26ea4400dba200018213654b69b4a5f96abee815b4f5ff", + "zh:7011332745ea061e517fe1319bd6c75054a314155cb2c1199a5b01fe1889a7e2", + "zh:738ed82858317ccc246691c8b85995bc125ac3b4143043219bd0437adc56c992", + "zh:7dbe52fac7bb21227acd7529b487511c91f4107db9cc4414f50d04ffc3cab427", + "zh:a3a9251fb15f93e4cfc1789800fc2d7414bbc18944ad4c5c98f466e6477c42bc", + "zh:a543ec1a3a8c20635cf374110bd2f87c07374cf2c50617eee2c669b3ceeeaa9f", + "zh:d9ab41d556a48bd7059f0810cf020500635bfc696c9fc3adab5ea8915c1d886b", + "zh:d9e13427a7d011dbd654e591b0337e6074eef8c3b9bb11b2e39eaaf257044fd7", + "zh:f7605bd1437752114baf601bdf6931debe6dc6bfe3006eb7e9bb9080931dca8a", + ] +} + +provider "registry.terraform.io/hashicorp/template" { + version = "2.2.0" + hashes = [ + "h1:0wlehNaxBX7GJQnPfQwTNvvAf38Jm0Nv7ssKGMaG6Og=", + "zh:01702196f0a0492ec07917db7aaa595843d8f171dc195f4c988d2ffca2a06386", + "zh:09aae3da826ba3d7df69efeb25d146a1de0d03e951d35019a0f80e4f58c89b53", + "zh:09ba83c0625b6fe0a954da6fbd0c355ac0b7f07f86c91a2a97849140fea49603", + "zh:0e3a6c8e16f17f19010accd0844187d524580d9fdb0731f675ffcf4afba03d16", + "zh:45f2c594b6f2f34ea663704cc72048b212fe7d16fb4cfd959365fa997228a776", + "zh:77ea3e5a0446784d77114b5e851c970a3dde1e08fa6de38210b8385d7605d451", + "zh:8a154388f3708e3df5a69122a23bdfaf760a523788a5081976b3d5616f7d30ae", + "zh:992843002f2db5a11e626b3fc23dc0c87ad3729b3b3cff08e32ffb3df97edbde", + "zh:ad906f4cebd3ec5e43d5cd6dc8f4c5c9cc3b33d2243c89c5fc18f97f7277b51d", + "zh:c979425ddb256511137ecd093e23283234da0154b7fa8b21c2687182d9aea8b2", + ] +} + +provider "registry.terraform.io/hashicorp/tls" { + version = "3.1.0" + hashes = [ + "h1:XTU9f6sGMZHOT8r/+LWCz2BZOPH127FBTPjMMEAAu1U=", + "zh:3d46616b41fea215566f4a957b6d3a1aa43f1f75c26776d72a98bdba79439db6", + "zh:623a203817a6dafa86f1b4141b645159e07ec418c82fe40acd4d2a27543cbaa2", + "zh:668217e78b210a6572e7b0ecb4134a6781cc4d738f4f5d09eb756085b082592e", + "zh:95354df03710691773c8f50a32e31fca25f124b7f3d6078265fdf3c4e1384dca", + "zh:9f97ab190380430d57392303e3f36f4f7835c74ea83276baa98d6b9a997c3698", + "zh:a16f0bab665f8d933e95ca055b9c8d5707f1a0dd8c8ecca6c13091f40dc1e99d", + "zh:be274d5008c24dc0d6540c19e22dbb31ee6bfdd0b2cddd4d97f3cd8a8d657841", + "zh:d5faa9dce0a5fc9d26b2463cea5be35f8586ab75030e7fa4d4920cd73ee26989", + "zh:e9b672210b7fb410780e7b429975adcc76dd557738ecc7c890ea18942eb321a5", + "zh:eb1f8368573d2370605d6dbf60f9aaa5b64e55741d96b5fb026dbfe91de67c0d", + "zh:fc1e12b713837b85daf6c3bb703d7795eaf1c5177aebae1afcf811dd7009f4b0", + ] +} diff --git a/01-create-project.tf b/01-create-project.tf deleted file mode 100644 index 669ae82..0000000 --- a/01-create-project.tf +++ /dev/null @@ -1,40 +0,0 @@ -locals { - timestamp = "${timestamp()}" - timestamp_sanitized = "${replace("${local.timestamp}", "/[-| |T|Z|:]/", "")}" - project_name_sanitized = "${replace("${var.project_name}", "/[ ]/", "_")}" - ssh_key_name = "${local.project_name_sanitized}-${local.timestamp_sanitized}-key" -} - -resource "packet_project" 
"new_project" { - count = var.create_project ? 1 : 0 - name = var.project_name - organization_id = var.organization_id -} - -locals { - depends_on = [packet_project.new_project] - count = var.create_project ? 1 : 0 - project_id = var.create_project ? packet_project.new_project[0].id : var.project_id -} - -resource "tls_private_key" "ssh_key_pair" { - algorithm = "RSA" - rsa_bits = 4096 -} - -resource "packet_project_ssh_key" "ssh_pub_key" { - depends_on = [packet_project.new_project] - project_id = local.project_id - name = local.ssh_key_name - public_key = chomp(tls_private_key.ssh_key_pair.public_key_openssh) -} - -resource "local_file" "project_private_key_pem" { - content = chomp(tls_private_key.ssh_key_pair.private_key_pem) - filename = pathexpand("~/.ssh/${local.ssh_key_name}") - file_permission = "0600" - - provisioner "local-exec" { - command = "cp ~/.ssh/${local.ssh_key_name} ~/.ssh/${local.ssh_key_name}.bak" - } -} diff --git a/01-create-vsphere.tf b/01-create-vsphere.tf new file mode 100644 index 0000000..1a65ee0 --- /dev/null +++ b/01-create-vsphere.tf @@ -0,0 +1,36 @@ +module "vsphere" { + source = "equinix/vsphere/metal" + version = "2.3.0" + + auth_token = var.auth_token + organization_id = var.organization_id + project_name = var.project_name + create_project = var.create_project + project_id = var.project_id + private_subnets = var.private_subnets + public_subnets = var.public_subnets + router_hostname = var.router_hostname + esxi_hostname = var.esxi_hostname + router_size = var.router_size + esxi_size = var.esxi_size + facility = var.facility + router_os = var.router_os + vmware_os = var.vmware_os + billing_cycle = var.billing_cycle + esxi_host_count = var.esxi_host_count + vcenter_portgroup_name = var.vcenter_portgroup_name + domain_name = var.domain_name + vpn_user = var.vpn_user + vcenter_datacenter_name = var.vcenter_datacenter_name + vcenter_cluster_name = var.vcenter_cluster_name + vcenter_domain = var.vcenter_domain + vcenter_user_name = var.vcenter_user_name + s3_url = var.s3_url + s3_access_key = var.s3_access_key + s3_secret_key = var.s3_secret_key + s3_version = var.s3_version + object_store_tool = var.object_store_tool + object_store_bucket_name = var.object_store_bucket_name + relative_path_to_gcs_key = var.relative_path_to_gcs_key + vcenter_iso_name = var.vcenter_iso_name +} diff --git a/02-network-resources.tf b/02-network-resources.tf deleted file mode 100644 index c53ebc6..0000000 --- a/02-network-resources.tf +++ /dev/null @@ -1,27 +0,0 @@ -resource "packet_reserved_ip_block" "ip_blocks" { - count = length(var.public_subnets) - project_id = local.project_id - facility = var.facility - quantity = jsonencode(element(var.public_subnets.*.ip_count, count.index)) -} - -resource "packet_reserved_ip_block" "esx_ip_blocks" { - count = var.esxi_host_count - project_id = local.project_id - facility = var.facility - quantity = 8 -} - -resource "packet_vlan" "private_vlans" { - count = length(var.private_subnets) - facility = var.facility - project_id = local.project_id - description = jsonencode(element(var.private_subnets.*.name, count.index)) -} - -resource "packet_vlan" "public_vlans" { - count = length(var.public_subnets) - facility = var.facility - project_id = local.project_id - description = jsonencode(element(var.public_subnets.*.name, count.index)) -} diff --git a/03-edge-router.tf b/03-edge-router.tf deleted file mode 100644 index c7f63ac..0000000 --- a/03-edge-router.tf +++ /dev/null @@ -1,43 +0,0 @@ -data "template_file" "user_data" { - template = 
file("templates/user_data.py") - vars = { - private_subnets = jsonencode(var.private_subnets) - private_vlans = jsonencode(packet_vlan.private_vlans.*.vxlan) - public_subnets = jsonencode(var.public_subnets) - public_vlans = jsonencode(packet_vlan.public_vlans.*.vxlan) - public_cidrs = jsonencode(packet_reserved_ip_block.ip_blocks.*.cidr_notation) - domain_name = var.domain_name - } -} - -resource "packet_device" "router" { - depends_on = [packet_project_ssh_key.ssh_pub_key] - hostname = var.router_hostname - plan = var.router_size - facilities = [var.facility] - operating_system = var.router_os - billing_cycle = var.billing_cycle - project_id = local.project_id - user_data = data.template_file.user_data.rendered - network_type = "hybrid" -} - -resource "packet_port_vlan_attachment" "router_priv_vlan_attach" { - count = length(packet_vlan.private_vlans) - device_id = packet_device.router.id - port_name = "eth1" - vlan_vnid = jsonencode(element(packet_vlan.private_vlans.*.vxlan, count.index)) -} - -resource "packet_port_vlan_attachment" "router_pub_vlan_attach" { - count = length(packet_vlan.public_vlans) - device_id = packet_device.router.id - port_name = "eth1" - vlan_vnid = jsonencode(element(packet_vlan.public_vlans.*.vxlan, count.index)) -} - -resource "packet_ip_attachment" "block_assignment" { - count = length(packet_reserved_ip_block.ip_blocks) - device_id = packet_device.router.id - cidr_notation = substr(jsonencode(element(packet_reserved_ip_block.ip_blocks.*.cidr_notation, count.index)), 1, length(jsonencode(element(packet_reserved_ip_block.ip_blocks.*.cidr_notation, count.index))) - 2) -} diff --git a/04-esx-hosts.tf b/04-esx-hosts.tf deleted file mode 100644 index 7007d3a..0000000 --- a/04-esx-hosts.tf +++ /dev/null @@ -1,39 +0,0 @@ -resource "packet_device" "esxi_hosts" { - depends_on = [packet_project_ssh_key.ssh_pub_key] - count = var.esxi_host_count - hostname = format("%s%02d", var.esxi_hostname, count.index + 1) - plan = var.esxi_size - facilities = [var.facility] - operating_system = var.vmware_os - billing_cycle = var.billing_cycle - project_id = local.project_id - network_type = "hybrid" - tags = ["vmware", "hypervisor", "anthos"] - ip_address { - type = "public_ipv4" - cidr = 29 - reservation_ids = [element(packet_reserved_ip_block.esx_ip_blocks.*.id, count.index)] - } - ip_address { - type = "private_ipv4" - } - ip_address { - type = "public_ipv6" - } -} - - -resource "packet_port_vlan_attachment" "esxi_priv_vlan_attach" { - count = length(packet_device.esxi_hosts) * length(packet_vlan.private_vlans) - device_id = element(packet_device.esxi_hosts.*.id, ceil(count.index / length(packet_vlan.private_vlans))) - port_name = "eth1" - vlan_vnid = jsonencode(element(packet_vlan.private_vlans.*.vxlan, count.index)) -} - - -resource "packet_port_vlan_attachment" "esxi_pub_vlan_attach" { - count = length(packet_device.esxi_hosts) * length(packet_vlan.public_vlans) - device_id = element(packet_device.esxi_hosts.*.id, ceil(count.index / length(packet_vlan.public_vlans))) - port_name = "eth1" - vlan_vnid = element(packet_vlan.public_vlans.*.vxlan, count.index) -} diff --git a/05-download_vcenter_iso.tf b/05-download_vcenter_iso.tf deleted file mode 100644 index 9ef7aac..0000000 --- a/05-download_vcenter_iso.tf +++ /dev/null @@ -1,45 +0,0 @@ -data "template_file" "download_vcenter" { - template = file("templates/download_vcenter.sh") - vars = { - gcs_bucket_name = var.gcs_bucket_name - storage_reader_key_name = var.storage_reader_key_name - s3_boolean = var.s3_boolean - s3_url = 
var.s3_url - s3_access_key = var.s3_access_key - s3_secret_key = var.s3_secret_key - s3_bucket_name = var.s3_bucket_name - vcenter_iso_name = var.vcenter_iso_name - ssh_private_key = chomp(tls_private_key.ssh_key_pair.private_key_pem) - } -} - -resource "null_resource" "download_vcenter_iso" { - connection { - type = "ssh" - user = "root" - private_key = file("~/.ssh/${local.ssh_key_name}") - host = packet_device.router.access_public_ipv4 - } - - provisioner "file" { - content = data.template_file.download_vcenter.rendered - destination = "/root/download_vcenter.sh" - } - - provisioner "remote-exec" { - inline = ["mkdir -p /root/anthos/gcp_keys"] - } - - provisioner "file" { - source = "anthos/gcp_keys/" - destination = "/root/anthos/gcp_keys" - } - - provisioner "remote-exec" { - inline = [ - "cd /root", - "chmod +x /root/download_vcenter.sh", - "/root/download_vcenter.sh" - ] - } -} diff --git a/06-install-vpn.tf b/06-install-vpn.tf deleted file mode 100644 index e968d31..0000000 --- a/06-install-vpn.tf +++ /dev/null @@ -1,50 +0,0 @@ -resource "random_string" "ipsec_psk" { - length = 20 - min_upper = 2 - min_lower = 2 - min_numeric = 2 - min_special = 2 - override_special = "$!?@*" -} - -resource "random_string" "vpn_pass" { - length = 16 - min_upper = 2 - min_lower = 2 - min_numeric = 2 - min_special = 2 - override_special = "$!?@*" -} - -data "template_file" "vpn_installer" { - template = file("templates/l2tp_vpn.sh") - vars = { - ipsec_psk = random_string.ipsec_psk.result - vpn_user = var.vpn_user - vpn_pass = random_string.vpn_pass.result - } -} - -resource "null_resource" "install_vpn_server" { - depends_on = [null_resource.download_vcenter_iso] - connection { - type = "ssh" - user = "root" - private_key = file("~/.ssh/${local.ssh_key_name}") - host = packet_device.router.access_public_ipv4 - } - - provisioner "file" { - content = data.template_file.vpn_installer.rendered - destination = "/root/vpn_installer.sh" - } - - provisioner "remote-exec" { - inline = [ - "cd /root", - "chmod +x /root/vpn_installer.sh", - "/root/vpn_installer.sh" - ] - } -} - diff --git a/07-vcva-template.tf b/07-vcva-template.tf deleted file mode 100644 index 523af4c..0000000 --- a/07-vcva-template.tf +++ /dev/null @@ -1,41 +0,0 @@ -resource "random_string" "vcenter_password" { - length = 16 - min_upper = 2 - min_lower = 2 - min_numeric = 2 - min_special = 2 - override_special = "$!?@*" -} - -resource "random_string" "sso_password" { - length = 16 - min_upper = 2 - min_lower = 2 - min_numeric = 2 - min_special = 2 - override_special = "$!?@*" -} - -data "template_file" "vcva_template" { - template = file("templates/vcva_template.json") - vars = { - vcenter_password = random_string.vcenter_password.result - sso_password = random_string.sso_password.result - first_esx_pass = packet_device.esxi_hosts.0.root_password - domain_name = var.domain_name - vcenter_network = var.vcenter_portgroup_name - } -} - -resource "null_resource" "copy_vcva_template" { - connection { - type = "ssh" - user = "root" - private_key = file("~/.ssh/${local.ssh_key_name}") - host = packet_device.router.access_public_ipv4 - } - provisioner "file" { - content = data.template_file.vcva_template.rendered - destination = "/root/vcva_template.json" - } -} diff --git a/08-esx-host-networking.tf b/08-esx-host-networking.tf deleted file mode 100644 index 99cb6d9..0000000 --- a/08-esx-host-networking.tf +++ /dev/null @@ -1,63 +0,0 @@ -resource "null_resource" "copy_update_uplinks" { - connection { - type = "ssh" - user = "root" - private_key = 
file("~/.ssh/${local.ssh_key_name}") - host = packet_device.router.access_public_ipv4 - } - - provisioner "file" { - content = file("templates/update_uplinks.py") - destination = "/root/update_uplinks.py" - } -} - -data "template_file" "esx_host_networking" { - template = file("templates/esx_host_networking.py") - vars = { - private_subnets = jsonencode(var.private_subnets) - private_vlans = jsonencode(packet_vlan.private_vlans.*.vxlan) - public_subnets = jsonencode(var.public_subnets) - public_vlans = jsonencode(packet_vlan.public_vlans.*.vxlan) - public_cidrs = jsonencode(packet_reserved_ip_block.ip_blocks.*.cidr_notation) - domain_name = var.domain_name - packet_token = var.auth_token - } -} - -resource "null_resource" "esx_network_prereqs" { - connection { - type = "ssh" - user = "root" - private_key = file("~/.ssh/${local.ssh_key_name}") - host = packet_device.router.access_public_ipv4 - } - - provisioner "file" { - content = data.template_file.esx_host_networking.rendered - destination = "/root/esx_host_networking.py" - } -} - -resource "null_resource" "apply_esx_network_config" { - count = length(packet_device.esxi_hosts) - depends_on = [ - packet_port_vlan_attachment.esxi_priv_vlan_attach, - packet_port_vlan_attachment.esxi_pub_vlan_attach, - null_resource.esx_network_prereqs, - null_resource.copy_update_uplinks, - null_resource.install_vpn_server - ] - - connection { - type = "ssh" - user = "root" - private_key = file("~/.ssh/${local.ssh_key_name}") - host = packet_device.router.access_public_ipv4 - } - - provisioner "remote-exec" { - inline = ["python3 /root/esx_host_networking.py --host '${element(packet_device.esxi_hosts.*.access_public_ipv4, count.index)}' --user root --pass '${element(packet_device.esxi_hosts.*.root_password, count.index)}' --id '${element(packet_device.esxi_hosts.*.id, count.index)}' --index ${count.index} --ipRes ${element(packet_reserved_ip_block.esx_ip_blocks.*.id, count.index)}"] - on_failure = continue - } -} diff --git a/09-deploy-vcva.tf b/09-deploy-vcva.tf deleted file mode 100644 index ee0185f..0000000 --- a/09-deploy-vcva.tf +++ /dev/null @@ -1,74 +0,0 @@ -data "template_file" "deploy_vcva_script" { - template = file("templates/deploy_vcva.py") - vars = { - private_subnets = jsonencode(var.private_subnets) - vcenter_network = var.vcenter_portgroup_name - esx_passwords = jsonencode(packet_device.esxi_hosts.*.root_password) - dc_name = var.vcenter_datacenter_name - sso_password = random_string.sso_password.result - cluster_name = var.vcenter_cluster_name - } -} - -data "template_file" "claim_vsan_disks" { - template = file("templates/vsan_claim.py") - vars = { - vcenter_fqdn = format("vcva.%s", var.domain_name) - vcenter_user = "Administrator@vsphere.local" - vcenter_pass = random_string.sso_password.result - } -} - -resource "null_resource" "deploy_vcva" { - depends_on = [ - null_resource.apply_esx_network_config, - null_resource.download_vcenter_iso - ] - connection { - type = "ssh" - user = "root" - private_key = file("~/.ssh/${local.ssh_key_name}") - host = packet_device.router.access_public_ipv4 - } - - provisioner "file" { - content = data.template_file.claim_vsan_disks.rendered - destination = "/root/vsan_claim.py" - } - - provisioner "file" { - content = data.template_file.deploy_vcva_script.rendered - destination = "/root/deploy_vcva.py" - } - - provisioner "file" { - source = "templates/extend_datastore.sh" - destination = "/root/extend_datastore.sh" - } - - provisioner "remote-exec" { - inline = [ - "python3 /root/deploy_vcva.py", - "sleep 
60", - ] - } -} - - -resource "null_resource" "vsan_claim" { - depends_on = [null_resource.deploy_vcva] - count = var.esxi_host_count == 1 ? 0 : 1 - connection { - type = "ssh" - user = "root" - private_key = file("~/.ssh/${local.ssh_key_name}") - host = packet_device.router.access_public_ipv4 - } - - provisioner "remote-exec" { - inline = [ - "python3 /root/vsan_claim.py", - "sleep 90" - ] - } -} diff --git a/32-anthos-pre-reqs.tf b/32-anthos-pre-reqs.tf index 4c7023e..92ed9be 100644 --- a/32-anthos-pre-reqs.tf +++ b/32-anthos-pre-reqs.tf @@ -9,12 +9,12 @@ data "template_file" "anthos_pre_reqs_script" { resource "null_resource" "anthos_pre_reqs" { count = var.anthos_deploy_workstation_prereqs ? 1 : 0 - depends_on = [null_resource.install_vpn_server] + depends_on = [module.vsphere] connection { type = "ssh" user = "root" - private_key = file("~/.ssh/${local.ssh_key_name}") - host = packet_device.router.access_public_ipv4 + private_key = file(module.vsphere.ssh_key_path) + host = module.vsphere.bastion_host } diff --git a/33-anthos-deploy-admin-workstation.tf b/33-anthos-deploy-admin-workstation.tf index 9003cc2..7aad4b9 100644 --- a/33-anthos-deploy-admin-workstation.tf +++ b/33-anthos-deploy-admin-workstation.tf @@ -2,7 +2,7 @@ data "template_file" "anthos_workstation_tf_vars" { template = file("anthos/static-ip.tfvars") vars = { vcenter_username = "Administrator@vsphere.local" - vcenter_password = random_string.sso_password.result + vcenter_password = module.vsphere.vcenter_password vcenter_fqdn = format("vcva.%s", var.domain_name) vsphere_datastore = var.anthos_datastore @@ -20,7 +20,7 @@ data "template_file" "anthos_upload_ova_template" { anthos_version = var.anthos_version vmware_fqdn = format("vcva.%s", var.domain_name) vmware_username = "Administrator@vsphere.local" - vmware_password = random_string.sso_password.result + vmware_password = module.vsphere.vcenter_password vmware_datastore = var.anthos_datastore vmware_resource_pool = var.anthos_resource_pool_name } @@ -39,7 +39,7 @@ data "template_file" "anthos_workstation_config_yaml" { template = file("anthos/admin-ws-config.yaml") vars = { vcenter_username = "Administrator@vsphere.local" - vcenter_password = random_string.sso_password.result + vcenter_password = module.vsphere.vcenter_password vcenter_fqdn = format("vcva.%s", var.domain_name) vsphere_datastore = var.anthos_datastore vsphere_datacenter = var.vcenter_datacenter_name @@ -60,12 +60,12 @@ data "template_file" "anthos_deploy_admin_ws_sh" { resource "null_resource" "anthos_deploy_workstation" { count = var.anthos_deploy_workstation_prereqs ? 
1 : 0 - depends_on = [null_resource.deploy_vcva, null_resource.vsan_claim] + depends_on = [module.vsphere] connection { type = "ssh" user = "root" - private_key = file("~/.ssh/${local.ssh_key_name}") - host = packet_device.router.access_public_ipv4 + private_key = file(module.vsphere.ssh_key_path) + host = module.vsphere.bastion_host } provisioner "file" { diff --git a/34-anthos-deploy-clusters.tf b/34-anthos-deploy-clusters.tf index b11c9f0..babdc3f 100644 --- a/34-anthos-deploy-clusters.tf +++ b/34-anthos-deploy-clusters.tf @@ -26,7 +26,7 @@ data "template_file" "anthos_cluster_config" { template = file("anthos/cluster/bundled-lb-admin-uc1-config.yaml") vars = { vcenter_user = "Administrator@vsphere.local" - vcenter_pass = random_string.sso_password.result + vcenter_pass = module.vsphere.vcenter_password vcenter_fqdn = format("vcva.%s", var.domain_name) vcenter_datastore = var.anthos_datastore vcenter_datacenter = var.vcenter_datacenter_name @@ -56,7 +56,7 @@ data "template_file" "anthos_admin_cluster_config" { template = file("anthos/cluster/admin-cluster-config.yaml") vars = { vcenter_user = "Administrator@vsphere.local" - vcenter_pass = random_string.sso_password.result + vcenter_pass = module.vsphere.vcenter_password vcenter_fqdn = format("vcva.%s", var.domain_name) vcenter_datastore = var.anthos_datastore vcenter_datacenter = var.vcenter_datacenter_name @@ -99,7 +99,7 @@ data "template_file" "anthos_cluster_creation_script" { template = file("anthos/cluster/bundled-lb-install-script.sh") vars = { vcenter_user = "Administrator@vsphere.local" - vcenter_pass = random_string.sso_password.result + vcenter_pass = module.vsphere.vcenter_password vcenter_fqdn = format("vcva.%s", var.domain_name) vcenter_datastore = var.anthos_datastore vcenter_datacenter = var.vcenter_datacenter_name @@ -115,8 +115,8 @@ resource "null_resource" "anthos_deploy_cluster" { connection { type = "ssh" user = "root" - private_key = file("~/.ssh/${local.ssh_key_name}") - host = packet_device.router.access_public_ipv4 + private_key = file(module.vsphere.ssh_key_path) + host = module.vsphere.bastion_host } provisioner "file" { diff --git a/35-anthos-copy-token.tf b/35-anthos-copy-token.tf index 4e7e6d3..9d1d1ea 100644 --- a/35-anthos-copy-token.tf +++ b/35-anthos-copy-token.tf @@ -3,7 +3,7 @@ resource "null_resource" "anthos_copy_token" { depends_on = [null_resource.anthos_deploy_cluster] provisioner "local-exec" { - command = "scp -i ~/.ssh/${local.ssh_key_name} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null root@${packet_device.router.access_public_ipv4}:/root/anthos/ksa_token.txt ." + command = "scp -i ${module.vsphere.ssh_key_path} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null root@${module.vsphere.bastion_host}:/root/anthos/ksa_token.txt ." } } diff --git a/README.md b/README.md index aeab542..5ec8220 100644 --- a/README.md +++ b/README.md @@ -17,6 +17,12 @@ The build (with default settings) typically takes 70-75 minutes. We use [Slack](https://slack.com/) as our primary communication tool for collaboration. You can join the Equinix Metal Community Slack group by going to [slack.equinixmetal.com](https://slack.equinixmetal.com/) and submitting your email address. You will receive a message with an invite link. Once you enter the Slack group, join the **#google-anthos** channel! Feel free to introduce yourself there, but know it's not mandatory. ## Latest Updates + +Starting with version v0.2.0, this module is published in the Terraform registry at .
+ +For current releases, with Git tags, see . +Historic changes are listed here by date. + ### 9-25-2020 * GKE on-prem 1.5.0-gke.27 has been released and has been successfully tested ### 7-29-2020 @@ -52,7 +58,7 @@ To use these Terraform files, you need to have the following Prerequisites: * OR * E-Mail support@equinixmetal.com * Your message across one of these mediums should be: - * I am working with the Google Anthos Terrafom deployment (github.com/packet-labs/google-anthos). I need an entitlement increase to allow the creation of five or more vLans. Can you please assist? + * I am working with the Google Anthos Terraform deployment (github.com/equinix/terraform-metal-anthos-on-vsphere). I need an entitlement increase to allow the creation of five or more VLANs. Can you please assist? * [VMware vCenter Server 6.7U3](https://my.vmware.com/group/vmware/details?downloadGroup=VC67U3B&productId=742&rPId=40665) - VMware vCenter Server Appliance ISO obtained from VMware * [VMware vSAN Management SDK 6.7U3](https://my.vmware.com/group/vmware/details?downloadGroup=VSAN-MGMT-SDK67U3&productId=734) - Virtual SAN Management SDK for Python, also from VMware @@ -146,12 +152,13 @@ sudo mv terraform /usr/local/bin/ To download this project, run the following command: ```bash -git clone https://github.com/packet-labs/google-anthos.git +git clone https://github.com/equinix/terraform-metal-anthos-on-vsphere.git ``` ## Initialize Terraform -Terraform uses modules to deploy infrastructure. In order to initialize the modules your simply run: `terraform init`. This should download five modules into a hidden directory `.terraform` - + +Terraform uses modules to deploy infrastructure. In order to initialize the modules, you simply run: `terraform init -upgrade`. This should download five modules into a hidden directory `.terraform`. + ## Modify your variables There are many variables which can be set to customize your install within `00-vars.tf` and `30-anthos-vars.tf`. The default variables to bring up a 3 node vSphere cluster and linux router using Equinix Metal's [c2.medium.x86](https://metal.equinix.com/product/servers/). Change each default variable at your own risk. @@ -379,7 +386,7 @@ To create just the vSphere environment and skip all Anthos related steps, add `a > Note that `anthos_deploy_clusters` uses a string of either `"True"` or `"False"` while `anthos_deploy_workstation_prereqs` uses a boolean of `true` or `false`. This is because the `anthos_deploy_clusters` variable is used within a bash script while `anthos_deploy_workstation_prereqs` is used by Terraform which supports booleans. -See [anthos/cluster/bundled-lb-admin-uc1-config.yaml.sample](https://github.com/packet-labs/google-anthos/blob/master/anthos/cluster/bundled-lb-admin-uc1-config.yaml.sample) to see what the Anthos parameters are when the default settings are used to create the environment. +See [anthos/cluster/bundled-lb-admin-uc1-config.yaml.sample](https://github.com/equinix/terraform-metal-anthos-on-vsphere/blob/master/anthos/cluster/bundled-lb-admin-uc1-config.yaml.sample) to see what the Anthos parameters are when the default settings are used to create the environment. ## Use an existing Equinix Metal project If you have an existing Equinix Metal project you can use it assuming the project has at least 5 available vlans, Equinix Metal project has a limit of 12 Vlans and this setup uses 5 of them. @@ -404,9 +411,9 @@ Some common issues and fixes.
### Error: The specified project contains insufficient public IPv4 space to complete the request. Please e-mail help@packet.com. -Should be resolved in https://github.com/packet-labs/google-anthos/commit/f6668b1359683eb5124d6ab66457f3680072651a +Should be resolved in https://github.com/equinix/terraform-metal-anthos-on-vsphere/commit/f6668b1359683eb5124d6ab66457f3680072651a -Due to recent changes to the Equinix Metal API, new organizations may be unable to use the Terraform to build ESXi servers. Equinix Metal is aware of the issue and is planning some fixes. In the meantime, if you hit this issue, email help@equinixmetal.com and request that your organization be white listed to deploy ESXi servers with the API. You should reference this project (https://github.com/packet-labs/google-anthos) in your email. +Due to recent changes to the Equinix Metal API, new organizations may be unable to use Terraform to build ESXi servers. Equinix Metal is aware of the issue and is planning some fixes. In the meantime, if you hit this issue, email help@equinixmetal.com and request that your organization be whitelisted to deploy ESXi servers with the API. You should reference this project (https://github.com/equinix/terraform-metal-anthos-on-vsphere) in your email. ### Error: POST https://api.packet.net/ports/e2385919-fd4c-410d-b71c-568d7a517896/disbond: diff --git a/main.tf b/main.tf index 4bef561..05baabd 100644 --- a/main.tf +++ b/main.tf @@ -1,10 +1,28 @@ terraform { required_providers { - packet = "~> 2.10.1" + metal = { + source = "equinix/metal" + version = "~> 1.1" + } + local = { + source = "hashicorp/local" + } + null = { + source = "hashicorp/null" + } + random = { + source = "hashicorp/random" + } + template = { + source = "hashicorp/template" + } + tls = { + source = "hashicorp/tls" + } } } -provider "packet" { +provider "metal" { auth_token = var.auth_token } diff --git a/outputs.tf b/outputs.tf index 79f21f5..7d0e624 100644 --- a/outputs.tf +++ b/outputs.tf @@ -1,39 +1,53 @@ output "VPN_Endpoint" { - value = packet_device.router.access_public_ipv4 + description = "L2TP VPN Endpoint" + value = module.vsphere.bastion_host } output "VPN_PSK" { - value = random_string.ipsec_psk.result + description = "L2TP VPN Pre-Shared Key" + value = module.vsphere.vpn_psk + sensitive = true } output "VPN_User" { - value = var.vpn_user + description = "L2TP VPN username" + value = module.vsphere.vpn_user } output "VPN_Password" { - value = random_string.vpn_pass.result + description = "L2TP VPN Password" + value = module.vsphere.vpn_password + sensitive = true } output "vCenter_FQDN" { - value = "vcva.packet.local" + description = "The FQDN of vCenter (Private DNS only)" + value = module.vsphere.vcenter_fqdn } output "vCenter_Username" { - value = "Administrator@vsphere.local" + description = "The username to log in to vCenter" + value = module.vsphere.vcenter_username } output "vCenter_Password" { - value = random_string.sso_password.result + description = "The SSO Password to log in to vCenter" + value = module.vsphere.vcenter_password + sensitive = true } output "vCenter_Appliance_Root_Password" { - value = random_string.vcenter_password.result + description = "The root password to SSH in or log in at the console of vCenter."
+ value = module.vsphere.vcenter_root_password + sensitive = true } output "KSA_Token_Location" { - value = "The user cluster KSA Token (for logging in from GCP) is located at ./ksa_token.txt" + description = "The user cluster KSA Token (for logging in from GCP)" + value = "${path.module}/ksa_token.txt" } output "SSH_Key_Location" { - value = "An SSH Key was created for this environment, it is saved at ~/.ssh/${local.ssh_key_name}" + description = "An SSH Key was created for this environment" + value = module.vsphere.ssh_key_path } diff --git a/templates/deploy_vcva.py b/templates/deploy_vcva.py deleted file mode 100644 index 48eba89..0000000 --- a/templates/deploy_vcva.py +++ /dev/null @@ -1,120 +0,0 @@ -import json -import ipaddress -import os -import sys -import subprocess -import socket -from time import sleep -from pyVmomi import vim, vmodl -from pyVim import connect - - -def get_ssl_thumbprint(host_ip): - p1 = subprocess.Popen(('echo', '-n'), stdout=subprocess.PIPE, stderr=subprocess.PIPE) - p2 = subprocess.Popen(('openssl', 's_client', '-connect', '{0}:443'.format(host_ip)), - stdin=p1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - p3 = subprocess.Popen(('openssl', 'x509', '-noout', '-fingerprint', '-sha1'), - stdin=p2.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - out = p3.stdout.read() - ssl_thumbprint = out.split(b'=')[-1].strip() - return ssl_thumbprint.decode("utf-8") - - -# Vars from Terraform -private_subnets = '${private_subnets}' -esx_passwords = '${esx_passwords}' -sso_password = '${sso_password}' -dc_name = '${dc_name}' -cluster_name = '${cluster_name}' -vcenter_network = '${vcenter_network}' - -# Parse TF Vars -subnets = json.loads(private_subnets) -esx_passes = json.loads(esx_passwords) -esx = [] -for pw in esx_passes: - esx.append({"password": pw}) - -for subnet in subnets: - if subnet['name'] == vcenter_network: - vcenter_ip = list(ipaddress.ip_network(subnet['cidr']).hosts())[1].compressed - esx_ip = list(ipaddress.ip_network(subnet['cidr']).hosts())[3].compressed - gateway_ip = list(ipaddress.ip_network(subnet['cidr']).hosts())[0].compressed - prefix_length = int(subnet['cidr'].split('/')[1]) - for i in range(len(esx)): - esx[i]['private_ip'] = list(ipaddress.ip_network(subnet['cidr']).hosts())[i + 3].compressed - break - -# If there's only one host, extend the datastore with all available disks -if len(esx) == 1: - print (esx[0]['private_ip']) - ip = str(esx[0]['private_ip']) - os.system("chmod +x /root/extend_datastore.sh") - command="./extend_datastore.sh "+ip+" "+"/root/.ssh/esxi_key" - os.system(command) - -os.system("sed -i 's/__ESXI_IP__/{}/g' /root/vcva_template.json".format(esx_ip)) -os.system("sed -i 's/__VCENTER_IP__/{}/g' /root/vcva_template.json".format(vcenter_ip)) -os.system("sed -i 's/__MGMT_GATEWAY__/{}/g' /root/vcva_template.json".format(gateway_ip)) -os.system("sed -i 's/__MGMT_PREFIX_LENGTH__/{}/g' /root/vcva_template.json".format(prefix_length)) -os.system("/mnt/vcsa-cli-installer/lin64/vcsa-deploy install --accept-eula --acknowledge-ceip " - "--no-esx-ssl-verify /root/vcva_template.json") - - - -# Connect to vCenter -for i in range(1, 30): - si = None - try: - si = connect.SmartConnectNoSSL(host=vcenter_ip, user="Administrator@vsphere.local", pwd=sso_password, port=443) - break - except Exception: - sleep(10) -if si == None: - print("Couldn't connect to vCenter!!!") - sys.exit(1) - -# Create Datacenter in the root folder -folder = si.content.rootFolder -dc = folder.CreateDatacenter(name=dc_name) - -# Create cluster config 
-cluster_config = vim.cluster.ConfigSpecEx() - -# Create DRS config -drs_config=vim.cluster.DrsConfigInfo() -drs_config.enabled = True -cluster_config.drsConfig=drs_config - -if len(esx) > 2: -# Create vSan config - vsan_config=vim.vsan.cluster.ConfigInfo() - vsan_config.enabled = True - vsan_config.defaultConfig = vim.vsan.cluster.ConfigInfo.HostDefaultInfo( - autoClaimStorage = True - ) - cluster_config.vsanConfig = vsan_config - -# Create HA config -if len(esx) > 1: - ha_config = vim.cluster.DasConfigInfo() - ha_config.enabled = True - ha_config.hostMonitoring = vim.cluster.DasConfigInfo.ServiceState.enabled - ha_config.failoverLevel = 1 - cluster_config.dasConfig = ha_config - -# Create the cluster -host_folder = dc.hostFolder -cluster = host_folder.CreateClusterEx(name=cluster_name, spec=cluster_config) - - -# Join hosts to the cluster -for host in esx: - dns_name = socket.gethostbyaddr(host['private_ip'])[0] - host_connect_spec = vim.host.ConnectSpec() - host_connect_spec.hostName = dns_name - host_connect_spec.userName = 'root' - host_connect_spec.password = host['password'] - host_connect_spec.force = True - host_connect_spec.sslThumbprint = get_ssl_thumbprint(dns_name) - cluster.AddHost(spec=host_connect_spec, asConnected=True) diff --git a/templates/download_vcenter.sh b/templates/download_vcenter.sh deleted file mode 100644 index c0ecb65..0000000 --- a/templates/download_vcenter.sh +++ /dev/null @@ -1,47 +0,0 @@ -#!/bin/bash - -SSH_PRIVATE_KEY='${ssh_private_key}' - -s3_boolean=`echo "${s3_boolean}" | awk '{print tolower($0)}'` -cd /root/anthos - - -cat </root/.ssh/esxi_key -$SSH_PRIVATE_KEY -EOF -chmod 0400 /root/.ssh/esxi_key - -echo "Set SSH config to not do StrictHostKeyChecking" -cat </root/.ssh/config -Host * - StrictHostKeyChecking no -EOF -chmod 0400 /root/.ssh/config - -while [ ! -f /usr/lib/google-cloud-sdk/platform/gsutil/gsutil ] ; -do - echo "Waiting for gsutil to become available" - sleep 10 -done - -sleep 60 - -cd /root/ -if [ $s3_boolean = "false" ]; then - echo "USING GCS" - gcloud auth activate-service-account --key-file=$HOME/anthos/gcp_keys/${storage_reader_key_name} - gsutil cp gs://${gcs_bucket_name}/${vcenter_iso_name} . - gsutil cp gs://${gcs_bucket_name}/vsanapiutils.py . - gsutil cp gs://${gcs_bucket_name}/vsanmgmtObjects.py . -else - echo "USING S3" - curl -LO https://dl.min.io/client/mc/release/linux-amd64/mc - chmod +x mc - mv mc /usr/local/bin/ - mc config host add s3 ${s3_url} ${s3_access_key} ${s3_secret_key} - mc cp s3/${s3_bucket_name}/${vcenter_iso_name} . - mc cp s3/${s3_bucket_name}/vsanapiutils.py . - mc cp s3/${s3_bucket_name}/vsanmgmtObjects.py . -fi -mount /root/${vcenter_iso_name} /mnt/ - diff --git a/templates/esx_host_networking.py b/templates/esx_host_networking.py deleted file mode 100644 index df9e8c9..0000000 --- a/templates/esx_host_networking.py +++ /dev/null @@ -1,323 +0,0 @@ -import json -import ipaddress -import packet -import optparse -import sys -from time import sleep -from pyVmomi import vim, vmodl -from pyVim import connect -from subprocess import Popen - - -# Vars from Terraform -private_subnets = '${private_subnets}' -private_vlans = '${private_vlans}' -public_subnets = '${public_subnets}' -public_vlans = '${public_vlans}' -public_cidrs = '${public_cidrs}' -domain_name = '${domain_name}' -packet_token = '${packet_token}' - -# Constants -vswitch_name = 'vSwitch1' -del_vswitch_name = 'vSwitch0' - -# Build single subnet map with all vlans, cidrs, etc... 
-subnets = json.loads(private_subnets) -private_vlans = json.loads(private_vlans) -public_subnets = json.loads(public_subnets) -public_vlans = json.loads(public_vlans) -public_cidrs = json.loads(public_cidrs) - -for i in range(len(private_vlans)): - subnets[i]['vlan'] = private_vlans[i] - -for i in range(len(public_vlans)): - public_subnets[i]['vlan'] = public_vlans[i] - public_subnets[i]['cidr'] = public_cidrs[i] - subnets.append(public_subnets[i]) - - -def create_vswitch(host_network_system, vss_name, num_ports, nic_name, mtu): - vss_spec = vim.host.VirtualSwitch.Specification() - vss_spec.numPorts = num_ports - vss_spec.mtu = mtu - vss_spec.bridge = vim.host.VirtualSwitch.BondBridge(nicDevice=[nic_name]) - - host_network_system.AddVirtualSwitch(vswitchName=vss_name, spec=vss_spec) - - print("Successfully created vSwitch ", vss_name) - - -def create_port_group(host_network_system, pg_name, vss_name, vlan_id): - port_group_spec = vim.host.PortGroup.Specification() - port_group_spec.name = pg_name - port_group_spec.vlanId = vlan_id - port_group_spec.vswitchName = vss_name - - security_policy = vim.host.NetworkPolicy.SecurityPolicy() - security_policy.allowPromiscuous = True - security_policy.forgedTransmits = True - security_policy.macChanges = True - - port_group_spec.policy = vim.host.NetworkPolicy(security=security_policy) - - host_network_system.AddPortGroup(portgrp=port_group_spec) - - print("Successfully created PortGroup ", pg_name) - - -def add_virtual_nic(host, host_network_system, pg_name, network_type, ip_address, subnet_mask, default_gateway, - dns_servers, domain_name, mtu): - vnic_config = vim.host.VirtualNic.Specification() - ip_spec = vim.host.IpConfig() - if network_type == 'dhcp': - ip_spec.dhcp = True - else: - ip_spec.dhcp = False - ip_spec.ipAddress = ip_address - ip_spec.subnetMask = subnet_mask - if default_gateway: - vnic_config.ipRouteSpec = vim.host.VirtualNic.IpRouteSpec() - vnic_config.ipRouteSpec.ipRouteConfig = vim.host.IpRouteConfig() - vnic_config.ipRouteSpec.ipRouteConfig.defaultGateway = default_gateway - routespec = vim.host.IpRouteConfig() - routespec.defaultGateway = default_gateway - dns_config = host.configManager.networkSystem.dnsConfig - if len(dns_servers) > 0: - dns_config.dhcp = False - dns_config.address = dns_servers - if domain_name: - dns_config.domainName = domain_name - dns_config.searchDomain = domain_name - - else: - vnic_config.ipRouteSpec = vim.host.VirtualNic.IpRouteSpec() - vnic_config.ipRouteSpec.ipRouteConfig = vim.host.IpRouteConfig() - - vnic_config.ip = ip_spec - vnic_config.mtu = mtu - virtual_nic = host_network_system.AddVirtualNic(portgroup=pg_name, nic=vnic_config) - if default_gateway: - host.configManager.networkSystem.UpdateIpRouteConfig(config=routespec) - host.configManager.networkSystem.UpdateDnsConfig(config=dns_config) - - return(virtual_nic) - - -def enable_service_on_virtual_nic(host, virtual_nic, service_type): - if service_type == 'vsan': - vsan_port = vim.vsan.host.ConfigInfo.NetworkInfo.PortConfig(device=virtual_nic) - net_info = vim.vsan.host.ConfigInfo.NetworkInfo(port=[vsan_port]) - vsan_config = vim.vsan.host.ConfigInfo(networkInfo=net_info,) - vsan_system = host.configManager.vsanSystem - try: - vsan_task = vsan_system.UpdateVsan_Task(vsan_config) - except Exception as e: - print("Failed to set service type to vsan: {}".format(str(e))) - else: - host.configManager.virtualNicManager.SelectVnicForNicType(service_type, virtual_nic) - - -def connect_to_host(esx_host, esx_user, esx_pass): - for i in range(1, 
30): - si = None - try: - print("Trying to connect to ESX Host . . .") - si = connect.SmartConnectNoSSL(host=esx_host, user=esx_user, pwd=esx_pass, port=443) - break - except Exception: - print("There was a connection Error to host: {}. Sleeping 10 seconds and trying again.".format(esx_host)) - sleep(10) - if i == 30: - return None, None - print("Connected to ESX Host !") - content = si.RetrieveContent() - host = content.viewManager.CreateContainerView(content.rootFolder, [vim.HostSystem], True).view[0] - return host, si - - -def main(): - parser = optparse.OptionParser(usage="%prog --host --user --pass " - "--id --index --ipRes ") - parser.add_option('--host', dest="host", action="store", help="IP or FQDN of the ESXi host") - parser.add_option('--user', dest="user", action="store", help="Username to authenticate to ESXi host") - parser.add_option('--pass', dest="pw", action="store", help="Password to authenticarte to ESXi host") - parser.add_option('--id', dest="id", action="store", help="Packet Device ID for Server") - parser.add_option('--index', dest="index", action="store", help="Terraform index count, used for IPing") - parser.add_option('--ipRes', dest="ipRes", action="store", help="IP reservation for /29 ip block") - - options, _ = parser.parse_args() - if not (options.host and options.user and options.pw and options.id and options.index and options.ipRes): - print("ERROR: Missing arguments") - parser.print_usage() - sys.exit(1) - print(options) - - host, si = connect_to_host(options.host, options.user, options.pw) - if si is None or host is None: - print("Couldn't connect to host: {} after 5 minutes. Skipping...".format(options.host)) - sys.exit(1) - - host_name = host.name - host_network_system = host.configManager.networkSystem - online_pnics = [] - - for pnic in host.config.network.pnic: - if pnic.linkSpeed: - online_pnics.append(pnic) - - for vswitch in host_network_system.networkInfo.vswitch: - for pnic in vswitch.pnic: - for n in range(len(online_pnics)): - if pnic == online_pnics[n].key: - del online_pnics[n] - break - - uplink = online_pnics[0].device - create_vswitch(host_network_system, vswitch_name, 1024, uplink, 9000) - for subnet in subnets: - create_port_group(host_network_system, subnet['name'], vswitch_name, subnet['vlan']) - if subnet['vsphere_service_type']: - ip_address = list(ipaddress.ip_network(subnet['cidr']).hosts())[int(options.index) + 3].compressed - subnet_mask = ipaddress.ip_network(subnet['cidr']).netmask.compressed - default_gateway = None - mtu = 9000 - if subnet['vsphere_service_type'] == 'management': - create_port_group(host_network_system, "{} Net".format(subnet['name']), vswitch_name, subnet['vlan']) - default_gateway = list(ipaddress.ip_network(subnet['cidr']).hosts())[0].compressed - dns_servers = [] - dns_servers.append(default_gateway) - dns_servers.append('8.8.8.8') - mtu = 1500 - new_ip = ip_address - - # Reserve IP in dnsmasq - dnsmasq_conf = open('/etc/dnsmasq.d/dhcp.conf', 'a+') - dnsmasq_conf.write("dhcp-host=00:00:00:00:00:0{}, {} # {} IP\n".format(int(options.index), - ip_address, - host_name)) - dnsmasq_conf.close() - - # DNS record for ESX_Host - etc_hosts = open('/etc/hosts', 'a+') - etc_hosts.write('{}\t{}\t{}.{}\n'.format(ip_address, host_name, host_name, domain_name)) - etc_hosts.close() - # Restart dnsmasq service - Popen(["systemctl restart dnsmasq"], shell=True, stdin=None, stdout=None, stderr=None, close_fds=True) - virtual_nic = add_virtual_nic(host, host_network_system, subnet['name'], 'static', ip_address, - 
subnet_mask, default_gateway, dns_servers, domain_name, mtu) - enable_service_on_virtual_nic(host, virtual_nic, subnet['vsphere_service_type']) - connect.Disconnect(si) - - host = None - si = None - host, si = connect_to_host(new_ip, options.user, options.pw) - if si is None or host is None: - print("Couldn't connect to host: {}".format(new_ip)) - sys.exit(1) - - host_network_system = host.configManager.networkSystem - active_uplinks = [] - backup_uplinks = [] - for vnic in host_network_system.networkInfo.vnic: - if vnic.spec.ip.ipAddress == options.host or vnic.spec.ip.ipAddress[:3] == '10.': - print("Removing vNic: {}".format(vnic.device)) - host_network_system.RemoveVirtualNic(vnic.device) - for vswitch in host_network_system.networkInfo.vswitch: - if vswitch.name == del_vswitch_name: - for uplink in vswitch.spec.bridge.nicDevice: - active_uplinks.append(uplink) - for pgroup in vswitch.portgroup: - print("Removing Port Group: {}".format(pgroup[23:])) - host_network_system.RemovePortGroup(pgroup[23:]) - if vswitch.name == vswitch_name: - vss_spec = vswitch.spec - for uplink in vss_spec.bridge.nicDevice: - backup_uplinks.append(uplink) - - print("Removing vSwitch: {}".format(del_vswitch_name)) - host_network_system.RemoveVirtualSwitch(del_vswitch_name) - #vss_spec.bridge = vim.host.VirtualSwitch.BondBridge(nicDevice=new_uplinks) - #vss_spec.policy.nicTeaming.nicOrder.activeNic = new_uplinks - - print("Updating vSwitch Uplinks...") - str_active_uplinks = ",".join(map(str, active_uplinks)) - str_backup_uplinks = ",".join(map(str, backup_uplinks)) - cmd_str = "python3 /root/update_uplinks.py --host '{}' --user '{}' --pass '{}' --vswitch '{}' --active-uplinks '{}' --backup-uplinks '{}'".format( - new_ip, options.user, options.pw, vswitch_name, str_active_uplinks, str_backup_uplinks) - Popen([cmd_str], shell=True, stdin=None, stdout=None, stderr=None, close_fds=True) - - # Get Packet Deivce - manager = packet.Manager(auth_token=packet_token) - device = manager.get_device(options.id) - for port in device.network_ports: - if port['type'] == 'NetworkBondPort': - print("Found {} port id".format(port['name'])) - bond_port = port['id'] - elif port['type'] == 'NetworkPort' and not port['data']['bonded']: - print("Found {} port id".format(port['name'])) - unbonded_port = port['id'] - else: - print("Found {} port id, but...".format(port['name'])) - print("This is not the port you're looking for...") - - for subnet in subnets: - print("Removing vLan {} from unbonded port".format(subnet['vlan'])) - attempt = 0 - for attempt in range(1,5): - try: - manager.remove_port(unbonded_port, subnet['vlan']) - break - except Exception: - if attempt == 5: - print("Tried to remove vLan five times and failed. Exiting...") - sys.exit(1) - print("Failed to remove vlan, trying again...") - sleep(5) - print("Rebonding Ports...") - attempt = 0 - for attempt in range(1,5): - try: - manager.bond_ports(bond_port, True) - break - except Exception: - if attempt == 5: - print("Tried to bond ports five times and failed. Exiting...") - sys.exit(1) - print("Failed to bond ports, trying again...") - sleep(5) - for n in range(len(subnets)): - if n == 0: - print("Adding vLan {} to bond".format(subnets[n]['vlan'])) - attempt = 0 - for attempt in range(1,5): - try: - manager.convert_layer_2(bond_port, subnets[n]['vlan']) - break - except Exception: - if attempt == 5: - print("Tried to convert bond to Layer 2 five times and failed. 
Exiting...") - sys.exit(1) - print("Failed to convert bond to Layer 2, trying again...") - sleep(5) - else: - print("Adding vLan {} to bond".format(subnets[n]['vlan'])) - attempt = 0 - for attempt in range(1,5): - try: - manager.assign_port(bond_port, subnets[n]['vlan']) - break - except Exception: - if attempt == 5: - print("Tried to add vLan to bond five times and failed. Exiting...") - sys.exit(1) - print("Failed to add vLan to bond, trying again...") - sleep(5) - # Clean up IP Reservations - manager.delete_ip(options.ipRes) - - -# Start program -if __name__ == "__main__": - main() diff --git a/templates/extend_datastore.sh b/templates/extend_datastore.sh deleted file mode 100644 index 4e73d52..0000000 --- a/templates/extend_datastore.sh +++ /dev/null @@ -1,165 +0,0 @@ -#!/usr/bin/expect -f - -set ESXISERVERIP [lindex $argv 0] - -set PRIVATEKEY [lindex $argv 1] - - -set HEADPARTITION "" -set HEADPARTITIONTRUNC "" -set PARTITIONSTRUNC "" -set PARTITIONCOUNT 0 -set NEWPARTITION "" -set PARTITIONLISTLENGTH 0 -set SEDSTR "" -set MAXSECTOR 0 -set ALLEXTENTS 0 - -set FRMTDPRTTRUNC "" -set FRMTDPRTLIST "" -set FRMTDPRTLISTLTH 0 -set PRTSEDSTR "" - -set MAX_TRIES 120 -set bclineno 160 - -set timeout 20 -set timeout_min 30 -set timeout_avg 600 -set timeout_mid 3600 -set timeout_max 7200 - -################################################################################################## -# Description: -# This block of code format and then add all avaiable extent(s) to the datastore -# On Failure: -# The extent will not be added but continue to the next line of code -# On success: -# Continue add the next available extent -################################################################################################## - -spawn ssh -i $PRIVATEKEY -o StrictHostKeyChecking=no root@$ESXISERVERIP -expect -timeout $timeout_min * - -set expect_out(buffer) {} - -send "vmkfstools -P -h /vmfs/volumes/datastore1 | grep -E '(\\sn.*|\\st.*)' | sed 's/\\t//g' | sed 's/:.$//' | awk '{print}' | xargs\r" -expect -timeout $timeout_min -re "\n(naa.+|t10.+)\r" { - set FRMTDPRTTRUNC $expect_out(1,string) - set FRMTDPRTLIST [split $FRMTDPRTTRUNC " "] - set FRMTDPRTLISTLTH [llength $FRMTDPRTLIST] - set PRTSEDSTR "" - foreach PARTITIONNAME $FRMTDPRTLIST { - if {$FRMTDPRTLISTLTH > 1} { - append PRTSEDSTR "s/$PARTITIONNAME.*//g;" - } else { - append PRTSEDSTR "s/$PARTITIONNAME.*//g" - } - } - if {$FRMTDPRTLISTLTH > 1} { - set PRTSEDSTR [string trimright $PRTSEDSTR ";"] - } -} - -send "PARTITNAME=`vmkfstools -P -h /vmfs/volumes/datastore1 | grep -E '(\\sn.*|\\st.*)' | sed 's/\\t//g' | sed 's/:.$//' | xargs | awk '{print \$1}'` && EXTENTS=`ls -lrt /vmfs/devices/disks/ | awk '{print \$11}' | sed '-e $PRTSEDSTR' | sed '/^$/d' | wc -l` && echo ==\$EXTENTS==\r" -expect { - -re "\n==(\\d+)==\r" {set ALLEXTENTS $expect_out(1,string)} - timeout {set ALLEXTENTS 0} -} - -for {set count $ALLEXTENTS} {$count > 0} {incr count -1} { - -send "vmkfstools -P -h /vmfs/volumes/datastore1 | grep -E '(\\sn.*|\\st.*)' | sed 's/\\t//g' | sed 's/:.$//' | awk '{print}' | xargs\r" -expect -timeout $timeout_min -re "\n(naa.+|t10.+)\r" { - set PARTITIONSTRUNC $expect_out(1,string) - set PARTITIONLIST [split $PARTITIONSTRUNC " "] - set PARTITIONLISTLENGTH [llength $PARTITIONLIST] - set SEDSTR "" - - foreach PARTITIONNAME $PARTITIONLIST { - if {$PARTITIONLISTLENGTH > 1} { - append SEDSTR "s/$PARTITIONNAME.*//g;" - } else { - append SEDSTR "s/$PARTITIONNAME.*//g" - } - } - if {$PARTITIONLISTLENGTH > 1} { - set SEDSTR [string trimright $SEDSTR ";"] - 
} -} - -send "vmkfstools -P -h /vmfs/volumes/datastore1 | grep -E '(\\sn.*|\\st.*)' | sed 's/\\t//g' | awk 'FNR == 1 {print}'\r" -expect -timeout $timeout_min -re "\n(naa.+|t10.+)\r" { -set HEADPARTITION $expect_out(1,string) -} - -send "vmkfstools -P -h /vmfs/volumes/datastore1 | grep -E '(\\sn.*|\\st.*)' | sed 's/\\t//g' | sed 's/:.$//' | awk 'FNR == 1 {print}'\r" -expect -timeout $timeout_min -re "\n(naa.+|t10.+)\r" { -set HEADPARTITIONTRUNC $expect_out(1,string) -} - -set expect_out(buffer) {} - -send "ls -lrt /vmfs/devices/disks/ | awk '{print \$11}' | sed '-e $SEDSTR' | sed '/^$/d' | awk 'FNR == 1 {print}'\r" -expect -timeout $timeout_min -re "\n(naa.+|t10.+)\r" { -set PARTITION $expect_out(1,string) -} - -set expect_out(buffer) {} - -send "MAXSECTOR=\$(eval expr \$(partedUtil getptbl /vmfs/devices/disks/$PARTITION | tail -1 | awk '{print \$1 \" \\\\* \" \$2 \" \\\\* \" \$3}') - 1) && echo ==\$MAXSECTOR==\r" -expect -timeout $timeout_min -re "\n==(\\d+)==\r" { -set MAXSECTOR $expect_out(1,string) -} - -set expect_out(buffer) {} - -send "partedUtil setptbl /vmfs/devices/disks/$PARTITION gpt \"1 2048 $MAXSECTOR AA31E02A400F11DB9590000C2911D1B8 0\"\r" -expect -timeout $timeout_avg * - -set expect_out(buffer) {} - -send "ls -lrt /vmfs/devices/disks/ | awk '{print \$11}' | grep $PARTITION | awk 'FNR == 1 {print}'\r" -expect -timeout $timeout_min -re "\n(naa.+|t10.+)\r" { -set NEWPARTITION $expect_out(1,string) -} - -set expect_out(buffer) {} - -send "vmkfstools -Z /vmfs/devices/disks/$NEWPARTITION /vmfs/devices/disks/$HEADPARTITION\r" -expect -timeout $timeout_avg "Select a number from 0-1: " - -send "0\r" -expect -timeout $timeout_min * - -sleep 10 - -send "vmkfstools -P -h /vmfs/volumes/datastore1 | grep -E '(\\sn.*|\\st.*)' | sed 's/\\t//g' | sed 's/:.$//' | awk '{print}' | xargs\r" -expect -timeout $timeout_min -re "\n(naa.+|t10.+)\r" { - set FRMTDPRTTRUNC $expect_out(1,string) - set FRMTDPRTLIST [split $FRMTDPRTTRUNC " "] - set FRMTDPRTLISTLTH [llength $FRMTDPRTLIST] - set PRTSEDSTR "" - foreach PARTITIONNAME $FRMTDPRTLIST { - if {$FRMTDPRTLISTLTH > 1} { - append PRTSEDSTR "s/$PARTITIONNAME.*//g;" - } else { - append PRTSEDSTR "s/$PARTITIONNAME.*//g" - } - } - if {$FRMTDPRTLISTLTH > 1} { - set PRTSEDSTR [string trimright $PRTSEDSTR ";"] - } -} - -send "PARTITNAME=`vmkfstools -P -h /vmfs/volumes/datastore1 | grep -E '(\\sn.*|\\st.*)' | sed 's/\\t//g' | sed 's/:.$//' | xargs | awk '{print \$1}'` && EXTENTS=`ls -lrt /vmfs/devices/disks/ | awk '{print \$11}' | sed '-e $PRTSEDSTR' | sed '/^$/d' | wc -l` && echo ==\$EXTENTS==\r" -expect { - -re "\n==(\\d+)==\r" {set ALLEXTENTS $expect_out(1,string)} - timeout {set ALLEXTENTS 0} -} -} - -send "exit\r" -expect eof - - diff --git a/templates/l2tp_vpn.sh b/templates/l2tp_vpn.sh deleted file mode 100644 index c7460c9..0000000 --- a/templates/l2tp_vpn.sh +++ /dev/null @@ -1,519 +0,0 @@ -#!/bin/sh -# -# Script for automatic setup of an IPsec VPN server on Ubuntu LTS and Debian. -# Works on any dedicated server or virtual private server (VPS) except OpenVZ. -# -# DO NOT RUN THIS SCRIPT ON YOUR PC OR MAC! 
-# -# The latest version of this script is available at: -# https://github.com/hwdsl2/setup-ipsec-vpn -# -# Copyright (C) 2014-2019 Lin Song -# Based on the work of Thomas Sarlandie (Copyright 2012) -# -# This work is licensed under the Creative Commons Attribution-ShareAlike 3.0 -# Unported License: http://creativecommons.org/licenses/by-sa/3.0/ -# -# Attribution required: please include my name in any derivative and let me -# know how you have improved it! - -# ===================================================== - -# Define your own values for these variables -# - IPsec pre-shared key, VPN username and password -# - All values MUST be placed inside 'single quotes' -# - DO NOT use these special characters within values: \ " ' - -YOUR_IPSEC_PSK='${ipsec_psk}' -YOUR_USERNAME='${vpn_user}' -YOUR_PASSWORD='${vpn_pass}' - -# Important notes: https://git.io/vpnnotes -# Setup VPN clients: https://git.io/vpnclients - -# ===================================================== - -export PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" -SYS_DT=$(date +%F-%T) - -exiterr() { echo "Error: $1" >&2; exit 1; } -exiterr2() { exiterr "'apt-get install' failed."; } -conf_bk() { /bin/cp -f "$1" "$1.old-$SYS_DT" 2>/dev/null; } -bigecho() { echo; echo "## $1"; echo; } - -check_ip() { - IP_REGEX='^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$' - printf '%s' "$1" | tr -d '\n' | grep -Eq "$IP_REGEX" -} - -vpnsetup() { - -os_type=$(lsb_release -si 2>/dev/null) -if [ -z "$os_type" ]; then - [ -f /etc/os-release ] && os_type=$(. /etc/os-release && printf '%s' "$ID") - [ -f /etc/lsb-release ] && os_type=$(. /etc/lsb-release && printf '%s' "$DISTRIB_ID") -fi -if ! printf '%s' "$os_type" | head -n 1 | grep -qiF -e ubuntu -e debian -e raspbian; then - exiterr "This script only supports Ubuntu and Debian." -fi - -if [ "$(sed 's/\..*//' /etc/debian_version)" = "7" ]; then - exiterr "Debian 7 is not supported." -fi - -if [ -f /proc/user_beancounters ]; then - exiterr "OpenVZ VPS is not supported. Try OpenVPN: github.com/Nyr/openvpn-install" -fi - -if [ "$(id -u)" != 0 ]; then - exiterr "Script must be run as root. Try 'sudo sh $0'" -fi - -def_iface=$(route 2>/dev/null | grep -m 1 '^default' | grep -o '[^ ]*$') -[ -z "$def_iface" ] && def_iface=$(ip -4 route list 0/0 2>/dev/null | grep -m 1 -Po '(?<=dev )(\S+)') -def_state=$(cat "/sys/class/net/$def_iface/operstate" 2>/dev/null) -if [ -n "$def_state" ] && [ "$def_state" != "down" ]; then - if ! uname -m | grep -qi '^arm'; then - case "$def_iface" in - wl*) - exiterr "Wireless interface '$def_iface' detected. DO NOT run this script on your PC or Mac!" - ;; - esac - fi - NET_IFACE="$def_iface" -else - eth0_state=$(cat "/sys/class/net/eth0/operstate" 2>/dev/null) - if [ -z "$eth0_state" ] || [ "$eth0_state" = "down" ]; then - exiterr "Could not detect the default network interface." - fi - NET_IFACE=eth0 -fi - -[ -n "$YOUR_IPSEC_PSK" ] && VPN_IPSEC_PSK="$YOUR_IPSEC_PSK" -[ -n "$YOUR_USERNAME" ] && VPN_USER="$YOUR_USERNAME" -[ -n "$YOUR_PASSWORD" ] && VPN_PASSWORD="$YOUR_PASSWORD" - -if [ -z "$VPN_IPSEC_PSK" ] && [ -z "$VPN_USER" ] && [ -z "$VPN_PASSWORD" ]; then - bigecho "VPN credentials not set by user. Generating random PSK and password..." 
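The check_ip() helper defined above is purely syntactic: it accepts a dotted-quad IPv4 address and nothing else, and the script later reuses it to sanity-check the auto-discovered public IP. A rough Python equivalent, shown here only for illustration (it is not part of this module), would be:

import ipaddress

def check_ip(value):
    """Return True only for a syntactically valid dotted-quad IPv4 address."""
    try:
        ipaddress.IPv4Address(value.strip())
        return True
    except ValueError:
        return False

assert check_ip("192.168.42.1")
assert not check_ip("256.0.0.1")
assert not check_ip("not-an-ip")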
- VPN_IPSEC_PSK=$(LC_CTYPE=C tr -dc 'A-HJ-NPR-Za-km-z2-9' < /dev/urandom | head -c 20) - VPN_USER=vpnuser - VPN_PASSWORD=$(LC_CTYPE=C tr -dc 'A-HJ-NPR-Za-km-z2-9' < /dev/urandom | head -c 16) -fi - -if [ -z "$VPN_IPSEC_PSK" ] || [ -z "$VPN_USER" ] || [ -z "$VPN_PASSWORD" ]; then - exiterr "All VPN credentials must be specified. Edit the script and re-enter them." -fi - -if printf '%s' "$VPN_IPSEC_PSK $VPN_USER $VPN_PASSWORD" | LC_ALL=C grep -q '[^ -~]\+'; then - exiterr "VPN credentials must not contain non-ASCII characters." -fi - -case "$VPN_IPSEC_PSK $VPN_USER $VPN_PASSWORD" in - *[\\\"\']*) - exiterr "VPN credentials must not contain these special characters: \\ \" '" - ;; -esac - -bigecho "VPN setup in progress... Please be patient." - -# Create and change to working dir -mkdir -p /opt/src -cd /opt/src || exit 1 - -count=0 -APT_LK=/var/lib/apt/lists/lock -PKG_LK=/var/lib/dpkg/lock -while fuser "$APT_LK" "$PKG_LK" >/dev/null 2>&1 \ - || lsof "$APT_LK" >/dev/null 2>&1 || lsof "$PKG_LK" >/dev/null 2>&1; do - [ "$count" = "0" ] && bigecho "Waiting for apt to be available..." - [ "$count" -ge "60" ] && exiterr "Could not get apt/dpkg lock." - count=$((count+1)) - printf '%s' '.' - sleep 3 -done - -bigecho "Populating apt-get cache..." - -export DEBIAN_FRONTEND=noninteractive -apt-get -yq update || exiterr "'apt-get update' failed." - -bigecho "Installing packages required for setup..." - -apt-get -yq install wget dnsutils openssl \ - iptables iproute2 gawk grep sed net-tools || exiterr2 - -bigecho "Trying to auto discover IP of this server..." - -cat <<'EOF' -In case the script hangs here for more than a few minutes, -press Ctrl-C to abort. Then edit it and manually enter IP. -EOF - -[ -z "$PUBLIC_IP" ] && PUBLIC_IP=$(dig @resolver1.opendns.com -t A -4 myip.opendns.com +short) - -check_ip "$PUBLIC_IP" || PUBLIC_IP=$(wget -t 3 -T 15 -qO- http://ipv4.icanhazip.com) -check_ip "$PUBLIC_IP" || exiterr "Cannot detect this server's public IP. Edit the script and manually enter it." - -bigecho "Installing packages required for the VPN..." - -apt-get -yq install libnss3-dev libnspr4-dev pkg-config \ - libpam0g-dev libcap-ng-dev libcap-ng-utils libselinux1-dev \ - libcurl4-nss-dev flex bison gcc make libnss3-tools \ - libevent-dev ppp xl2tpd || exiterr2 - -bigecho "Installing Fail2Ban to protect SSH..." - -apt-get -yq install fail2ban || exiterr2 - -bigecho "Compiling and installing Libreswan..." - -SWAN_VER=3.29 -swan_file="libreswan-$SWAN_VER.tar.gz" -swan_url1="https://github.com/libreswan/libreswan/archive/v$SWAN_VER.tar.gz" -swan_url2="https://download.libreswan.org/$swan_file" -if ! { wget -t 3 -T 30 -nv -O "$swan_file" "$swan_url1" || wget -t 3 -T 30 -nv -O "$swan_file" "$swan_url2"; }; then - exit 1 -fi -/bin/rm -rf "/opt/src/libreswan-$SWAN_VER" -tar xzf "$swan_file" && /bin/rm -f "$swan_file" -cd "libreswan-$SWAN_VER" || exit 1 -cat > Makefile.inc.local <<'EOF' -WERROR_CFLAGS = -USE_DNSSEC = false -USE_DH31 = false -USE_NSS_AVA_COPY = true -USE_NSS_IPSEC_PROFILE = false -USE_GLIBC_KERN_FLIP_HEADERS = true -EOF -if [ "$(packaging/utils/lswan_detect.sh init)" = "systemd" ]; then - apt-get -yq install libsystemd-dev || exiterr2 -fi -NPROCS=$(grep -c ^processor /proc/cpuinfo) -[ -z "$NPROCS" ] && NPROCS=1 -make "-j$((NPROCS+1))" -s base && make -s install-base - -cd /opt/src || exit 1 -/bin/rm -rf "/opt/src/libreswan-$SWAN_VER" -if ! /usr/local/sbin/ipsec --version 2>/dev/null | grep -qF "$SWAN_VER"; then - exiterr "Libreswan $SWAN_VER failed to build." 
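The wait-for-apt loop above, the ESXi reconnect loop in the host-networking script earlier in this diff, and the Equinix Metal VLAN/bonding calls all follow the same bounded-retry shape; note that the deleted Python scripts' "for attempt in range(1,5)" loops only ever make four attempts, so their "attempt == 5" bail-out branches can never fire. A generic sketch of the intended pattern (illustrative only, not module code):

import time

def retry(operation, attempts=5, delay=5, description="operation"):
    """Call operation() until it succeeds, or raise once the attempt budget is spent."""
    for attempt in range(1, attempts + 1):
        try:
            return operation()
        except Exception as exc:
            if attempt == attempts:
                raise RuntimeError("{} failed after {} attempts".format(description, attempts)) from exc
            print("{} failed ({}); retrying in {}s...".format(description, exc, delay))
            time.sleep(delay)

# For example, the VLAN removal earlier in the diff could be expressed as:
# retry(lambda: manager.remove_port(unbonded_port, subnet['vlan']), description="remove VLAN")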
-fi - -bigecho "Creating VPN configuration..." - -L2TP_NET='192.168.42.0/24' -L2TP_LOCAL='192.168.42.1' -L2TP_POOL='192.168.42.10-192.168.42.250' -XAUTH_NET='192.168.43.0/24' -XAUTH_POOL='192.168.43.10-192.168.43.250' -DNS_SRV1=$L2TP_LOCAL -DNS_SRV2='8.8.4.4' -DNS_SRVS="\"$DNS_SRV1\"" -[ -n "$VPN_DNS_SRV1" ] && [ -z "$VPN_DNS_SRV2" ] && DNS_SRVS="$DNS_SRV1" - -# Create IPsec config -conf_bk "/etc/ipsec.conf" -cat > /etc/ipsec.conf < /etc/ipsec.secrets < /etc/xl2tpd/xl2tpd.conf < /etc/ppp/options.xl2tpd <> /etc/ppp/options.xl2tpd < /etc/ppp/chap-secrets < /etc/ipsec.d/passwd <> /etc/sysctl.conf </dev/null \ - || ! iptables -t nat -C POSTROUTING -s "$XAUTH_NET" -o "$NET_IFACE" -m policy --dir out --pol none -j MASQUERADE 2>/dev/null; then - ipt_flag=1 -fi - -# Add IPTables rules for VPN -if [ "$ipt_flag" = "1" ]; then - service fail2ban stop >/dev/null 2>&1 - iptables-save > "$IPT_FILE.old-$SYS_DT" - iptables -I INPUT 1 -p udp --dport 1701 -m policy --dir in --pol none -j DROP - iptables -I INPUT 2 -m conntrack --ctstate INVALID -j DROP - iptables -I INPUT 3 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT - iptables -I INPUT 4 -p udp -m multiport --dports 500,4500 -j ACCEPT - iptables -I INPUT 5 -p udp --dport 1701 -m policy --dir in --pol ipsec -j ACCEPT - iptables -I INPUT 6 -p udp --dport 1701 -j DROP - iptables -I FORWARD 1 -m conntrack --ctstate INVALID -j DROP - iptables -I FORWARD 2 -i "$NET_IFACE" -o ppp+ -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT - iptables -I FORWARD 3 -i ppp+ -o "$NET_IFACE" -j ACCEPT - iptables -I FORWARD 4 -i ppp+ -o ppp+ -s "$L2TP_NET" -d "$L2TP_NET" -j ACCEPT - iptables -I FORWARD 5 -i "$NET_IFACE" -d "$XAUTH_NET" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT - iptables -I FORWARD 6 -s "$XAUTH_NET" -o "$NET_IFACE" -j ACCEPT - # Uncomment if you wish to disallow traffic between VPN clients themselves - # iptables -I FORWARD 2 -i ppp+ -o ppp+ -s "$L2TP_NET" -d "$L2TP_NET" -j DROP - # iptables -I FORWARD 3 -s "$XAUTH_NET" -d "$XAUTH_NET" -j DROP - #iptables -A FORWARD -j DROP - iptables -t nat -I POSTROUTING -s "$XAUTH_NET" -o "$NET_IFACE" -m policy --dir out --pol none -j MASQUERADE - iptables -t nat -I POSTROUTING -s "$L2TP_NET" -o "$NET_IFACE" -j MASQUERADE - echo "# Modified by hwdsl2 VPN script" > "$IPT_FILE" - iptables-save >> "$IPT_FILE" - - if [ -f "$IPT_FILE2" ]; then - conf_bk "$IPT_FILE2" - /bin/cp -f "$IPT_FILE" "$IPT_FILE2" - fi -fi - -bigecho "Enabling services on boot..." 
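The VPN address plan written out above is fixed: L2TP clients lease from 192.168.42.10-192.168.42.250 inside L2TP_NET, XAUTH clients from 192.168.43.10-192.168.43.250 inside XAUTH_NET, 192.168.42.1 doubles as the server-side address and the first DNS server handed to clients, and both ranges are masqueraded out of $NET_IFACE by the iptables rules. A small Python check of those constants (purely illustrative) confirms each pool sits inside its subnet and leaves the .1 address free:

import ipaddress

L2TP_NET = ipaddress.ip_network("192.168.42.0/24")
L2TP_LOCAL = ipaddress.ip_address("192.168.42.1")
L2TP_POOL = ("192.168.42.10", "192.168.42.250")
XAUTH_NET = ipaddress.ip_network("192.168.43.0/24")
XAUTH_POOL = ("192.168.43.10", "192.168.43.250")

for net, pool in ((L2TP_NET, L2TP_POOL), (XAUTH_NET, XAUTH_POOL)):
    start, end = (ipaddress.ip_address(p) for p in pool)
    assert start in net and end in net       # the pool lies inside the subnet
    assert start > next(net.hosts())         # the .1 address stays free for the server
assert L2TP_LOCAL == next(L2TP_NET.hosts())  # .1 is also used as DNS_SRV1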
- -# Check for iptables-persistent -IPT_PST="/etc/init.d/iptables-persistent" -IPT_PST2="/usr/share/netfilter-persistent/plugins.d/15-ip4tables" -ipt_load=1 -if [ -f "$IPT_FILE2" ] && { [ -f "$IPT_PST" ] || [ -f "$IPT_PST2" ]; }; then - ipt_load=0 -fi - -if [ "$ipt_load" = "1" ]; then - mkdir -p /etc/network/if-pre-up.d -cat > /etc/network/if-pre-up.d/iptablesload <<'EOF' -#!/bin/sh -iptables-restore < /etc/iptables.rules -exit 0 -EOF - chmod +x /etc/network/if-pre-up.d/iptablesload - - if [ -f /usr/sbin/netplan ]; then - mkdir -p /etc/systemd/system -cat > /etc/systemd/system/load-iptables-rules.service <<'EOF' -[Unit] -Description = Load /etc/iptables.rules -DefaultDependencies=no - -Before=network-pre.target -Wants=network-pre.target - -Wants=systemd-modules-load.service local-fs.target -After=systemd-modules-load.service local-fs.target - -[Service] -Type=oneshot -ExecStart=/etc/network/if-pre-up.d/iptablesload - -[Install] -WantedBy=multi-user.target -EOF - systemctl enable load-iptables-rules 2>/dev/null - fi -fi - -for svc in fail2ban ipsec xl2tpd; do - update-rc.d "$svc" enable >/dev/null 2>&1 - systemctl enable "$svc" 2>/dev/null -done - -if ! grep -qs "hwdsl2 VPN script" /etc/rc.local; then - if [ -f /etc/rc.local ]; then - conf_bk "/etc/rc.local" - sed --follow-symlinks -i '/^exit 0/d' /etc/rc.local - else - echo '#!/bin/sh' > /etc/rc.local - fi -cat >> /etc/rc.local <<'EOF' - -# Added by hwdsl2 VPN script -(sleep 15 -service ipsec restart -service xl2tpd restart -echo 1 > /proc/sys/net/ipv4/ip_forward)& -exit 0 -EOF -fi - -bigecho "Starting services..." - -# Reload sysctl.conf -sysctl -e -q -p - -# Update file attributes -chmod +x /etc/rc.local -chmod 600 /etc/ipsec.secrets* /etc/ppp/chap-secrets* /etc/ipsec.d/passwd* - -# Apply new IPTables rules -iptables-restore < "$IPT_FILE" - -# Restart services -mkdir -p /run/pluto -service fail2ban restart 2>/dev/null -service ipsec restart 2>/dev/null -service xl2tpd restart 2>/dev/null - -cat < --user --pass " - "--vswitch --active-uplinks " - "--backup-uplinks ") - parser.add_option('--host', dest="host", action="store", help="IP or FQDN of the ESXi host") - parser.add_option('--user', dest="user", action="store", help="Username to authenticate to ESXi host") - parser.add_option('--pass', dest="pw", action="store", help="Password to authenticarte to ESXi host") - parser.add_option('--vswitch', dest="vswitch", action="store", help="vSwitch name to be modified") - parser.add_option('--active-uplinks', dest="active_uplinks", action="store", help="A comma seperated sting of active " - "uplinks to be added to the vSwitch") - parser.add_option('--backup-uplinks', dest="backup_uplinks", action="store", help="A comma seperated sting of backup " - "uplinks to be added to the vSwitch") - - options, _ = parser.parse_args() - if not (options.host and options.user and options.pw and options.vswitch and options.active_uplinks): - print("ERROR: Missing arguments") - parser.print_usage() - sys.exit(1) - - host = connect_to_host(options.host, options.user, options.pw) - host_network_system = host.configManager.networkSystem - for vswitch in host_network_system.networkInfo.vswitch: - if vswitch.name == options.vswitch: - vss_spec = vswitch.spec - print("Found correct vSwitch.") - break - if vss_spec is None: - print("Couldn't find the correct vSwitch.") - active_uplinks = options.active_uplinks.split(',') - backup_uplinks = options.backup_uplinks.split(',') - all_uplinks = active_uplinks + backup_uplinks - vss_spec.bridge = 
vim.host.VirtualSwitch.BondBridge(nicDevice=all_uplinks) - vss_spec.policy.nicTeaming.nicOrder.activeNic = active_uplinks - vss_spec.policy.nicTeaming.nicOrder.standbyNic = backup_uplinks - host_network_system.UpdateVirtualSwitch(vswitchName=options.vswitch, spec=vss_spec) - - -if __name__ == "__main__": - main() diff --git a/templates/user_data.py b/templates/user_data.py deleted file mode 100644 index e062bc3..0000000 --- a/templates/user_data.py +++ /dev/null @@ -1,180 +0,0 @@ -#!/usr/bin/python3 -import json -import apt -import os -import ipaddress -import urllib.request as urllib2 -import random - -# Vars from Terraform -private_subnets = '${private_subnets}' -private_vlans = '${private_vlans}' -public_subnets = '${public_subnets}' -public_vlans = '${public_vlans}' -public_cidrs = '${public_cidrs}' -domain_name = '${domain_name}' - -def words_list(): - word_site = "https://raw.githubusercontent.com/taikuukaits/SimpleWordlists/master/Wordlist-Nouns-Common-Audited-Len-3-6.txt" - response = urllib2.urlopen(word_site) - word_list = response.read().splitlines() - words = [] - for word in word_list: - if 4 <= len(word) <= 5: - words.append(word.decode().lower()) - return words - - -# Get random word list -words = words_list() - -# Allow -os.system("echo 'iptables-persistent iptables-persistent/autosave_v4 boolean true' | sudo debconf-set-selections") -os.system("echo 'iptables-persistent iptables-persistent/autosave_v6 boolean true' | sudo debconf-set-selections") - -# Disable systemd-resolved -os.system("systemctl stop systemd-resolved") -os.system("systemctl disable systemd-resolved") - -# Install Apt Packages -os.system("echo 'deb [signed-by=/usr/share/keyrings/cloud.google.gpg] http://packages.cloud.google.com/apt cloud-sdk main' > /etc/apt/sources.list.d/google-cloud-sdk.list") -os.system("curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key --keyring /usr/share/keyrings/cloud.google.gpg add -") -os.system('DEBIAN_FRONTEND=noninteractive apt-get update -y') -os.system('DEBIAN_FRONTEND=noninteractive apt-get install -o Dpkg::Options::="--force-confold" --force-yes -y dnsmasq vlan iptables-persistent conntrack python3-pip expect unzip google-cloud-sdk') - -# Build single subnet map with all vlans, cidrs, etc... 
-subnets = json.loads(private_subnets) -private_vlans = json.loads(private_vlans) -public_subnets = json.loads(public_subnets) -public_vlans = json.loads(public_vlans) -public_cidrs = json.loads(public_cidrs) - -for i in range(0, len(private_vlans)): - subnets[i]['vlan'] = private_vlans[i] - -for i in range(0, len(public_vlans)): - public_subnets[i]['vlan'] = public_vlans[i] - public_subnets[i]['cidr'] = public_cidrs[i] - subnets.append(public_subnets[i]) - -# Wipe second Network Interface from config file -readFile = open("/etc/network/interfaces") -lines = readFile.readlines() -readFile.close() -for line in reversed(lines): - if "auto" in line: - split_line = line.split() - interface = split_line[-1] - break -lines = lines[:-5] - -# Ensure 8021q and remove the second interface from the bond -os.system("modprobe 8021q") -os.system("ifdown {}".format(interface)) - -# Make sure 8021q is loaded at startup -modules_file = open("/etc/modules-load.d/modules.conf", "a+") -modules_file.write("\n8021q\n") -modules_file.close() - -# Setup syctl parameters for routing -sysctl_file = open("/etc/sysctl.conf", "a+") -sysctl_file.write("\n\n#Routing parameters\n") -sysctl_file.write("net.ipv4.conf.all.rp_filter=0\n") -sysctl_file.write("net.ipv4.conf.default.rp_filter=0\n") -sysctl_file.write("net.ipv4.ip_forward=1\n") -sysctl_file.write("net.ipv4.tcp_mtu_probing=2\n") -sysctl_file.close() - -# Apply sysctl parameters -os.system("sysctl -p") - -# Remove old conf for second interface -interface_file = open('/etc/network/interfaces', 'w') -for line in lines: - interface_file.write(line) - -# Add new conf for second physical interface -interface_file.write("\nauto {}\n".format(interface)) -interface_file.write("iface {} inet manual\n".format(interface)) -interface_file.write("\tmtu 9000\n") - -# Open dnsmasq config for writing -dnsmasq_conf = open('/etc/dnsmasq.d/dhcp.conf', 'w') - -# Loop though all subnets and setup Interfaces, DNSMasq, & IPTables -for subnet in subnets: - if subnet['routable']: - # Find vCenter IP - if subnet['vsphere_service_type'] == 'management': - vcenter_ip = list(ipaddress.ip_network(subnet['cidr']).hosts())[1].compressed - management_gateway = list(ipaddress.ip_network(subnet['cidr']).hosts())[0].compressed - sed_cmd = "sed -i '1i nameserver " + management_gateway + "' /etc/resolv.conf" - os.system(sed_cmd) - # Gather network facts about this subnet - router_ip = list(ipaddress.ip_network(subnet['cidr']).hosts())[0].compressed - low_ip = list(ipaddress.ip_network(subnet['cidr']).hosts())[1].compressed - if 'reserved_ip_count' in subnet: - high_ip = list(ipaddress.ip_network(subnet['cidr']).hosts())[-subnet['reserved_ip_count']].compressed - else: - high_ip = list(ipaddress.ip_network(subnet['cidr']).hosts())[-1].compressed - - netmask = ipaddress.ip_network(subnet['cidr']).netmask.compressed - - # Setup vLan interface for this subnet - interface_file.write("\nauto {}.{}\n".format(interface, subnet['vlan'])) - interface_file.write("iface {}.{} inet static\n".format(interface, subnet['vlan'])) - interface_file.write("\taddress {}\n".format(router_ip)) - interface_file.write("\tnetmask {}\n".format(netmask)) - interface_file.write("\tvlan-raw-device {}\n".format(interface)) - interface_file.write("\tmtu 9000\n") - - # Generate random name for the network - word = random.choice(words) - words.remove(word) - - # Write dnsmasq dhcp scopes - dnsmasq_conf.write("dhcp-range=set:{},{},{},2h\n".format(word, low_ip, high_ip)) - 
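The addressing convention used here, and in the ESXi host-networking script earlier in this diff, is positional: hosts()[0] is the router/gateway, hosts()[1] is the first DHCP lease (and the vCenter address on the management subnet), the tail of the range is held back by reserved_ip_count, and each ESXi host's address on a vSphere service subnet is hosts()[index + 3]. Worked through with a made-up /25 (the real CIDRs and reserved counts come from the Terraform subnet definitions, so this block is illustrative only):

import ipaddress

cidr = "172.16.0.0/25"     # example value; real subnets arrive via ${private_subnets}
reserved_ip_count = 30     # example value
hosts = list(ipaddress.ip_network(cidr).hosts())

router_ip = hosts[0].compressed                 # 172.16.0.1  -> gateway / dnsmasq DNS
low_ip = hosts[1].compressed                    # 172.16.0.2  -> first DHCP lease (vCenter on Management)
high_ip = hosts[-reserved_ip_count].compressed  # 172.16.0.97 -> last DHCP lease
esxi_0_ip = hosts[0 + 3].compressed             # 172.16.0.4  -> ESXi host with index 0
netmask = ipaddress.ip_network(cidr).netmask.compressed  # 255.255.255.128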
dnsmasq_conf.write("dhcp-option=tag:{},option:router,{}\n".format(word, router_ip)) - - # Create NAT rule for this network if the network is tagged as NAT - if subnet['nat']: - os.system("iptables -t nat -A POSTROUTING -o bond0 -j MASQUERADE -s {}".format(subnet['cidr'])) - -interface_file.close() - -# Reserver the vCenter IP -dnsmasq_conf.write("\ndhcp-host=00:00:00:00:00:99, {} # vCenter IP\n".format(vcenter_ip)) - -dnsmasq_conf.close() - -# DNS record for vCenter -etc_hosts = open('/etc/hosts', 'a+') -etc_hosts.write('\n{}\tvcva\tvcva.{}\n'.format(vcenter_ip, domain_name)) -etc_hosts.close() - -# Add domain to host -resolv_conf = open('/etc/resolv.conf', 'a+') -resolv_conf.write('\ndomain {}\nsearch {}\n'.format(domain_name, domain_name)) -resolv_conf.close() - -# Block DNSMasq out the WAN -os.system("iptables -I INPUT -p udp --dport 67 -i bond0 -j DROP") -os.system("iptables -I INPUT -p udp --dport 53 -i bond0 -j DROP") -os.system("iptables -I INPUT -p tcp --dport 53 -i bond0 -j DROP") - -# Bring up newly configured interfaces -os.system("ifup --all") - -# Remove a saftey measure from dnsmasq that blocks VPN users from using DNS -os.system("sed -i 's/ --local-service//g' /etc/init.d/dnsmasq") - -# Restart dnsmasq service -os.system("systemctl restart dnsmasq") - -# Save iptables rules -os.system("iptables-save > /etc/iptables/rules.v4") - -# Install python modules -os.system("pip3 install --upgrade pip pyvmomi packet-python") - diff --git a/templates/vcva_template.json b/templates/vcva_template.json deleted file mode 100644 index 1a0508e..0000000 --- a/templates/vcva_template.json +++ /dev/null @@ -1,68 +0,0 @@ -{ - "__version": "2.13.0", - "new_vcsa": { - "esxi": { - "hostname": "__ESXI_IP__", - "username": "root", - "password": "${first_esx_pass}", - "deployment_network": "${vcenter_network} Net", - "datastore": "datastore1" - }, - "appliance": { - "thin_disk_mode": true, - "deployment_option": "small", - "name": "vcva" - }, - "network": { - "ip_family": "ipv4", - "mode": "static", - "ip": "__VCENTER_IP__", - "dns_servers": [ - "__MGMT_GATEWAY__" - ], - "prefix": "__MGMT_PREFIX_LENGTH__", - "gateway": "__MGMT_GATEWAY__", - "system_name": "vcva.${domain_name}" - }, - "os": { - "password": "${vcenter_password}", - "ntp_servers": "time.google.com", - "ssh_enable": true - }, - "sso": { - "password": "${sso_password}", - "domain_name": "vsphere.local" - } - }, - "ceip": { - "description": { - "__comments": [ - "++++VMware Customer Experience Improvement Program (CEIP)++++", - "VMware's Customer Experience Improvement Program (CEIP) ", - "provides VMware with information that enables VMware to ", - "improve its products and services, to fix problems, ", - "and to advise you on how best to deploy and use our ", - "products. As part of CEIP, VMware collects technical ", - "information about your organization's use of VMware ", - "products and services on a regular basis in association ", - "with your organization's VMware license key(s). This ", - "information does not personally identify any individual. ", - "", - "Additional information regarding the data collected ", - "through CEIP and the purposes for which it is used by ", - "VMware is set forth in the Trust & Assurance Center at ", - "http://www.vmware.com/trustvmware/ceip.html . If you ", - "prefer not to participate in VMware's CEIP for this ", - "product, you should disable CEIP by setting ", - "'ceip_enabled': false. You may join or leave VMware's ", - "CEIP for this product at any time. 
Please confirm your ", - "acknowledgement by passing in the parameter ", - "--acknowledge-ceip in the command line.", - "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++" - ] - }, - "settings": { - "ceip_enabled": false - } - } -} diff --git a/templates/vsan_claim.py b/templates/vsan_claim.py deleted file mode 100644 index 26f8e21..0000000 --- a/templates/vsan_claim.py +++ /dev/null @@ -1,119 +0,0 @@ -import json -import ipaddress -import os -import sys -import subprocess -import socket -from pyVmomi import vim, vmodl -from pyVim import connect -import vsanapiutils -from operator import itemgetter, attrgetter -import requests -import ssl - - -# A large portion of this code was lifted from: https://github.com/storage-code/vsanDeploy/blob/master/vsanDeploy.py - - -def sizeof_fmt(num, suffix='B'): - for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']: - if abs(num) < 1024.0: - return "%3.1f%s%s" % (num, unit, suffix) - num /= 1024.0 - return "%.1f%s%s" % (num, 'Yi', suffix) - - -def getClusterInstance(clusterName, serviceInstance): - content = serviceInstance.RetrieveContent() - searchIndex = content.searchIndex - datacenters = content.rootFolder.childEntity - for datacenter in datacenters: - cluster = searchIndex.FindChild(datacenter.hostFolder, clusterName) - if cluster is not None: - return cluster - return None - - -def CollectMultiple(content, objects, parameters, handleNotFound=True): - if len(objects) == 0: - return {} - result = None - pc = content.propertyCollector - propSet = [vim.PropertySpec( - type=objects[0].__class__, - pathSet=parameters - )] - - while result == None and len(objects) > 0: - try: - objectSet = [] - for obj in objects: - objectSet.append(vim.ObjectSpec(obj=obj)) - specSet = [vim.PropertyFilterSpec(objectSet=objectSet, propSet=propSet)] - result = pc.RetrieveProperties(specSet=specSet) - except vim.ManagedObjectNotFound as ex: - objects.remove(ex.obj) - result = None - - out = {} - for x in result: - out[x.obj] = {} - for y in x.propSet: - out[x.obj][y.name] = y.val - return out - - -# Terraform Vars -vcenter_fqdn = '${vcenter_fqdn}' -vcenter_user = '${vcenter_user}' -vcenter_pass = '${vcenter_pass}' - - -# Workaround for SSL verification for vSan API -requests.packages.urllib3.disable_warnings() -ssl._create_default_https_context = ssl._create_unverified_context -context = ssl.create_default_context() -context.check_hostname = False -context.verify_mode = ssl.CERT_NONE - - -si = connect.SmartConnectNoSSL(host=vcenter_fqdn, user=vcenter_user, pwd=vcenter_pass, port=443) -cluster = getClusterInstance('Packet-1', si) -vcMos = vsanapiutils.GetVsanVcMos(si._stub, context=context) -vsanClusterSystem = vcMos['vsan-cluster-config-system'] -vsanVcDiskManagementSystem = vcMos['vsan-disk-management-system'] -hostProps = CollectMultiple(si.content, cluster.host, ['name', 'configManager.vsanSystem', 'configManager.storageSystem']) -hosts = hostProps.keys() - -diskmap = {host: {'cache':[],'capacity':[]} for host in hosts} -cacheDisks = [] -capacityDisks = [] - -for host in hosts: - ssds = [result.disk for result in hostProps[host]['configManager.vsanSystem'].QueryDisksForVsan() if - result.state == 'eligible' and result.disk.ssd] - smallerSize = min([disk.capacity.block * disk.capacity.blockSize for disk in ssds]) - for ssd in ssds: - size = ssd.capacity.block * ssd.capacity.blockSize - if size == smallerSize: - diskmap[host]['cache'].append(ssd) - cacheDisks.append((ssd.displayName, sizeof_fmt(size), hostProps[host]['name'])) - else: - 
diskmap[host]['capacity'].append(ssd) - capacityDisks.append((ssd.displayName, sizeof_fmt(size), hostProps[host]['name'])) - -tasks = [] -for host,disks in diskmap.items(): - if len(disks['cache']) > len(disks['capacity']): - disks['cache'] = disks['cache'][:len(disks['capacity'])] - - dm = vim.VimVsanHostDiskMappingCreationSpec( - cacheDisks=disks['cache'], - capacityDisks=disks['capacity'], - creationType='allFlash', - host=host - ) - - task = vsanVcDiskManagementSystem.InitializeDiskMappings(dm) - tasks.append(task) - diff --git a/variables.tf b/variables.tf index 5e49f24..3331210 100644 --- a/variables.tf +++ b/variables.tf @@ -1,25 +1,26 @@ variable "auth_token" { - description = "This is your Packet API Auth token. This can also be specified with the TF_VAR_PACKET_AUTH_TOKEN shell environment variable." + description = "This is your Equinix Metal API Auth token. This can also be specified with the TF_VAR_PACKET_AUTH_TOKEN shell environment variable." type = string + sensitive = true } variable "organization_id" { - description = "your org ID" + description = "Your Equinix Metal Organization ID" type = string } variable "project_name" { - default = "anthos-on-packet-1" + default = "anthos-on-metal-1" } variable "create_project" { - description = "if true create the packet project, if not skip and use the provided project" + description = "If true, create the Equinix Metal project; if false, skip creation and use the provided project_id" default = true } variable "project_id" { - description = "Packet Project ID to use in case create_project is false" + description = "Equinix Metal Project ID to use in case create_project is false" default = "null" } @@ -35,6 +36,7 @@ Valid vsphere_service_types are: */ variable "private_subnets" { + description = "This is the network topology for your vSphere environment" default = [ { "name" = "Management", @@ -69,6 +71,7 @@ variable "private_subnets" { } variable "public_subnets" { + description = "This will dynamically create public subnets in vSphere" default = [ { "name" = "VM Public Net", @@ -81,59 +84,83 @@ } variable "router_hostname" { - default = "edge-gateway01" + description = "This is the hostname for the router." + default = "edge-gateway01" } variable "esxi_hostname" { - default = "esx" + description = "This is the hostname prefix for your ESXi hosts. A number will be added to the end." + default = "esx" } variable "router_size" { - default = "c3.medium.x86" + description = "This is the size/plan/flavor of your router machine" + default = "c3.medium.x86" } variable "esxi_size" { - default = "c3.medium.x86" + description = "This is the size/plan/flavor of your ESXi machine(s)" + default = "c3.medium.x86" } variable "facility" { - default = "ny5" + description = "This is the Region/Location of your deployment." + default = "ny5" } variable "router_os" { - default = "ubuntu_18_04" + description = "This is the operating system for your router machine (only Ubuntu 18.04 has been tested)" + default = "ubuntu_18_04" } variable "vmware_os" { - default = "vmware_esxi_6_7" + description = "This is the version of vSphere that you want to deploy (ESXi 6.5, 6.7, & 7.0 have been tested)" + default = "vmware_esxi_6_7" } variable "billing_cycle" { - default = "hourly" + description = "This is the billing cycle to use. The module hasn't been built to allow reserved instances yet." + default = "hourly" } variable "esxi_host_count" { - default = 3 + description = "This is the number of ESXi hosts you'd like in your cluster."
+ default = 3 } variable "vcenter_portgroup_name" { - default = "Management" + description = "This is the VM Portgroup you would like vCenter to be deployed to. See 'private_subnets' & 'public_subnets' above. By deploying on a public subnet, you will not need to use the VPN to access vCenter." + default = "Management" +} + +variable "vcenter_domain" { + description = "This will be the vSphere SSO domain." + default = "vsphere.local" } variable "domain_name" { - default = "packet.local" + description = "This is the domain to use for internal DNS" + default = "metal.local" } variable "vpn_user" { - default = "vm_admin" + description = "This is the username for the L2TP VPN" + default = "vm_admin" } variable "vcenter_datacenter_name" { - default = "Packet" + description = "This will be the name of the vCenter Datacenter object." + default = "Metal" } variable "vcenter_cluster_name" { - default = "Packet-1" + description = "This will be the name of the vCenter Cluster object." + default = "Metal-1" +} + +variable "vcenter_user_name" { + description = "This will be the admin user for vSphere SSO" + default = "Administrator" } variable "gcs_bucket_name" { @@ -149,22 +176,40 @@ variable "s3_bucket_name" { } variable "s3_access_key" { - default = "S3_ACCESS_KEY" + default = "S3_ACCESS_KEY" + sensitive = true } variable "s3_secret_key" { - default = "S3_SECRET_KEY" + default = "S3_SECRET_KEY" + sensitive = true } variable "s3_boolean" { default = "false" } -variable "vcenter_iso_name" { +variable "s3_version" { + description = "S3 API Version (S3v2, S3v4)" + default = "S3v4" +} + +variable "object_store_tool" { + description = "The tool used to download objects from the object store ('mc' and 'gcs' have been tested)" + default = "mc" +} + +variable "object_store_bucket_name" { + description = "This is the name of the bucket on your object store" + default = "vmware" } -variable "storage_reader_key_name" { - default = "storage-reader-key.json" +variable "relative_path_to_gcs_key" { + description = "If you are using GCS to download your vCenter ISO, this is the path to the GCS key" + default = "storage-reader-key.json" +} + +variable "vcenter_iso_name" { } variable "whitelisted_key_name" { diff --git a/versions.tf b/versions.tf new file mode 100644 index 0000000..5e4ce43 --- /dev/null +++ b/versions.tf @@ -0,0 +1,3 @@ +terraform { + required_version = ">= 0.14" +}
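With the descriptions and sensitive flags above in place, the module is still driven the usual way: every variable can be supplied through a terraform.tfvars file or a TF_VAR_<name> environment variable. A minimal wrapper sketch (illustrative only; the function and argument names are placeholders, not part of the module) that exports a few of the sensitive inputs and runs a plan:

import os
import subprocess

def plan(metal_auth_token, s3_access_key, s3_secret_key):
    """Run terraform init and plan with sensitive inputs passed as TF_VAR_ environment variables."""
    env = dict(os.environ)
    env.update({
        "TF_VAR_auth_token": metal_auth_token,    # variables.tf: auth_token (sensitive)
        "TF_VAR_s3_access_key": s3_access_key,    # variables.tf: s3_access_key (sensitive)
        "TF_VAR_s3_secret_key": s3_secret_key,    # variables.tf: s3_secret_key (sensitive)
    })
    subprocess.run(["terraform", "init", "-input=false"], check=True, env=env)
    subprocess.run(["terraform", "plan", "-out=tfplan", "-input=false"], check=True, env=env)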