diff --git a/README.md b/README.md index b8e69f79..ec0d1f92 100644 --- a/README.md +++ b/README.md @@ -31,6 +31,8 @@ Name | Description [cloud.aws_ops.manage_transit_gateway](https://github.com/ansible-collections/cloud.aws_ops/blob/main/roles/manage_transit_gateway/README.md)|A role to create/delete transit_gateway with vpc and vpn attachments. [cloud.aws_ops.deploy_flask_app](https://github.com/ansible-collections/cloud.aws_ops/blob/main/roles/deploy_flask_app/README.md)|A role to deploy a flask web application on AWS. [cloud.aws_ops.create_rds_global_cluster](https://github.com/ansible-collections/cloud.aws_ops/blob/main/roles/create_rds_global_cluster/README.md)|A role to create, delete aurora global cluster with a primary cluster and a replica cluster in different regions. +[cloud.aws_ops.clone_on_prem_vm](https://github.com/ansible-collections/cloud.aws_ops/blob/main/roles/clone_on_prem_vm/README.md)|A role to clone an existing on prem VM using the KVM hypervisor. +[cloud.aws_ops.import_image_and_run_aws_instance](https://github.com/ansible-collections/cloud.aws_ops/blob/main/roles/import_image_and_run_aws_instance/README.md)|A role that imports a local .raw image into an Amazon Machine Image (AMI) and run an AWS EC2 instance. ### Playbooks Name | Description @@ -38,6 +40,7 @@ Name | Description [cloud.aws_ops.eda](https://github.com/ansible-collections/cloud.aws_ops/blob/main/playbooks/README.md)|A set of playbooks to restore AWS Cloudtrail configurations, created for use with the [cloud.aws_manage_cloudtrail_encryption rulebook](https://github.com/ansible-collections/cloud.aws_ops/blob/main/extensions/eda/rulebooks/AWS_MANAGE_CLOUDTRAIL_ENCRYPTION.md). [cloud.aws_ops.webapp](https://github.com/ansible-collections/cloud.aws_ops/blob/main/playbooks/webapp/README.md)|A set of playbooks to create, delete, or migrate a webapp on AWS. 
[cloud.aws_ops.upload_file_to_s3](https://github.com/ansible-collections/cloud.aws_ops/blob/main/playbooks/UPLOAD_FILE_TO_S3.md)|A playbook to upload a local file to S3. +[cloud.aws_ops.move_vm_from_on_prem_to_aws](https://github.com/ansible-collections/cloud.aws_ops/blob/main/playbooks/move_vm_from_on_prem_to_aws/README.md)|A playbook to migrate an existing on prem VM running on KVM hypervisor to AWS. ### Rulebooks Name | Description diff --git a/galaxy.yml b/galaxy.yml index 509ff62e..6ed7a5ea 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -20,6 +20,7 @@ dependencies: amazon.aws: '>=5.1.0' community.aws: '>=5.0.0' amazon.cloud: '>=0.4.0' + community.libvirt: '>=1.2.0' version: 1.0.3 build_ignore: - .DS_Store diff --git a/playbooks/move_vm_from_on_prem_to_aws/README.md b/playbooks/move_vm_from_on_prem_to_aws/README.md new file mode 100644 index 00000000..57e9947a --- /dev/null +++ b/playbooks/move_vm_from_on_prem_to_aws/README.md @@ -0,0 +1,43 @@ +# cloud.aws_ops.move_vm_from_on_prem_to_aws playbooks + +A playbook to migrate an existing on prem VM running on KVM hypervisor to AWS. + +## Requirements + +This playbook uses the ``cloud.aws_ops.clone_on_prem_vm`` role to clone an existing VM on prem using the KVM hypervisor and the ``cloud.aws_ops.import_image_and_run_aws_instance`` role to import a local .raw image into an Amazon machine image (AMI) and run an AWS EC2 instance. For a complete list of requirements, see [clone_on_prem_vm](../../roles/clone_on_prem_vm/README.md#Requirements) and [import_image_and_run_aws_instance](../../roles/import_image_and_run_aws_instance/README.md#Requirements), respectively. + + +## Playbook Variables + +For a full list of accepted variables see: [clone_on_prem_vm](../../roles/clone_on_prem_vm/README.md#Role-Variables) and [import_image_and_run_aws_instance](../../roles/import_image_and_run_aws_instance/README.md#Role-Variables), respectively. 
+ +## Example Usage + +Create a `credentials.yml` file with the following contents: + +```yaml +aws_access_key: "xxxxxxxxxxxxxxxxxxxx" +aws_secret_key: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" +aws_region: "us-east-1" +``` + +Create an `inventory.yml` file with information about the host running the KVM hypervisor. + +```yaml +--- +all: + hosts: + kvm: + ansible_host: myhost + ansible_user: myuser + ansible_ssh_private_key_file: /path/to/private_key + groups: mygroup +``` + +All the variables defined in section ``Playbook Variables`` can be defined inside the ``vars.yml`` file. + +Run the playbook: + +```shell +ansible-playbook cloud.aws_ops.move_vm_from_on_prem_to_aws -e "@credentials.yml" -e "@vars.yml" -i inventory.yml +``` diff --git a/playbooks/move_vm_from_on_prem_to_aws/move_vm_from_on_prem_to_aws.yml b/playbooks/move_vm_from_on_prem_to_aws/move_vm_from_on_prem_to_aws.yml new file mode 100644 index 00000000..d14b752e --- /dev/null +++ b/playbooks/move_vm_from_on_prem_to_aws/move_vm_from_on_prem_to_aws.yml @@ -0,0 +1,36 @@ +- name: A playbook to migrate an existing on prem VM running on KVM hypervisor to AWS + hosts: localhost + gather_facts: false + + module_defaults: + group/aws: + aws_access_key: "{{ aws_access_key | default(omit) }}" + aws_secret_key: "{{ aws_secret_key | default(omit) }}" + security_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region | default('us-east-1') }}" + + tasks: + - name: Import 'cloud.aws_ops.clone_on_prem_vm' role + ansible.builtin.import_role: + name: cloud.aws_ops.clone_on_prem_vm + vars: + clone_on_prem_vm_source_vm_name: "{{ clone_on_prem_vm_source_vm_name }}" + clone_on_prem_vm_image_name: "{{ clone_on_prem_vm_image_name }}" + clone_on_prem_vm_uri: "{{ clone_on_prem_vm_uri }}" + clone_on_prem_vm_local_image_path: "{{ clone_on_prem_vm_local_image_path }}" + clone_on_prem_vm_overwrite: "{{ clone_on_prem_vm_overwrite }}" + delegate_to: kvm + + - name: Import 
'cloud.aws_ops.import_image_and_run_aws_instance' role + ansible.builtin.import_role: + name: cloud.aws_ops.import_image_and_run_aws_instance + vars: + import_image_and_run_aws_instance_bucket_name: "{{ import_image_and_run_aws_instance_bucket_name }}" + import_image_and_run_aws_instance_image_path: "{{ clone_on_prem_vm_raw_image_path }}" + import_image_and_run_aws_instance_instance_name: "{{ import_image_and_run_aws_instance_instance_name }}" + import_image_and_run_aws_instance_instance_type: "{{ import_image_and_run_aws_instance_instance_type }}" + import_image_and_run_aws_instance_import_image_task_name: "{{ import_image_and_run_aws_instance_import_image_task_name }}" + import_image_and_run_aws_instances_keypair_name: "{{ import_image_and_run_aws_instances_keypair_name }}" + import_image_and_run_aws_instance_security_groups: "{{ import_image_and_run_aws_instance_security_groups }}" + import_image_and_run_aws_instance_vpc_subnet_id: "{{ import_image_and_run_aws_instance_vpc_subnet_id }}" + import_image_and_run_aws_instance_volumes: "{{ import_image_and_run_aws_instance_volumes }}" diff --git a/roles/clone_on_prem_vm/README.md b/roles/clone_on_prem_vm/README.md new file mode 100644 index 00000000..2a1da0ab --- /dev/null +++ b/roles/clone_on_prem_vm/README.md @@ -0,0 +1,77 @@ +clone_on_prem_vm +================ + +A role to clone an existing on prem VM using the KVM hypervisor. The role sets the **clone_on_prem_vm_raw_image_path** variable containing the path where the image was saved on localhost. This role requires privilege escalation because the .qcow2 file created by ``virt-clone`` is owned by root and ``qemu-img convert`` requires access to convert it to .raw. + +Requirements +------------ + +**qemu** and **qemu-img** packages installed. + +Role Variables +-------------- + +* **clone_on_prem_vm_source_vm_name**: (Required) The name of the on-prem VM you want to clone. +* **clone_on_prem_vm_image_name**: (Optional) The name you want to call the cloned image. 
If not set, the **clone_on_prem_vm_source_vm_name** will be used with a _-clone_ suffix. +* **clone_on_prem_vm_overwrite**: (Optional) Whether to overwrite or not an already existing on prem VM clone. Default: true. +* **clone_on_prem_vm_local_image_path**: (Optional) The path where you would like to save the image. If the path does not exist on localhost, the role will create it. If this parameter is not set, the role will save the image in a _~/tmp_ folder. +* **clone_on_prem_vm_uri**: (Optional) Libvirt connection uri. Default: "qemu:///system". + +Dependencies +------------ + +N/A + +Example Playbook +---------------- + +Create an `inventory.yml` file with information about the host running the KVM hypervisor. + +```yaml +--- +all: + hosts: + kvm: + ansible_host: myhost + ansible_user: myuser + ansible_ssh_private_key_file: /path/to/private_key + groups: mygroup +``` + +All the variables defined in section ``Role Variables`` can be defined inside the ``vars.yml`` file. + +Create a ``playbook.yml`` file like this: + +``` +--- +- hosts: kvm + gather_facts: true + + tasks: + - name: Import 'cloud.aws_ops.clone_on_prem_vm' role + ansible.builtin.import_role: + name: cloud.aws_ops.clone_on_prem_vm + vars: + clone_on_prem_vm_source_vm_name: "{{ clone_on_prem_vm_source_vm_name }}" + clone_on_prem_vm_image_name: "{{ clone_on_prem_vm_image_name }}" + clone_on_prem_vm_local_image_path: "{{ clone_on_prem_vm_local_image_path }}" + clone_on_prem_vm_uri: "{{ clone_on_prem_vm_uri }}" +``` + +Run the playbook: + +```shell +ansible-playbook playbook.yml -i inventory.yml -e "@vars.yml" +``` + +License +------- + +GNU General Public License v3.0 or later + +See [LICENCE](https://github.com/ansible-collections/cloud.aws_ops/blob/main/LICENSE) to see the full text. 
+ +Author Information +------------------ + +- Ansible Cloud Content Team diff --git a/roles/clone_on_prem_vm/defaults/main.yml b/roles/clone_on_prem_vm/defaults/main.yml new file mode 100644 index 00000000..4cbefde6 --- /dev/null +++ b/roles/clone_on_prem_vm/defaults/main.yml @@ -0,0 +1,3 @@ +--- +clone_on_prem_vm_uri: "qemu:///system" +clone_on_prem_vm_overwrite: true diff --git a/roles/clone_on_prem_vm/handlers/main.yml b/roles/clone_on_prem_vm/handlers/main.yml new file mode 100644 index 00000000..ab77c652 --- /dev/null +++ b/roles/clone_on_prem_vm/handlers/main.yml @@ -0,0 +1,5 @@ +--- +- name: Delete temporary directory + ansible.builtin.file: + state: absent + path: "{{ clone_on_prem_vm__tmpdir.path }}" diff --git a/roles/clone_on_prem_vm/tasks/main.yml b/roles/clone_on_prem_vm/tasks/main.yml new file mode 100644 index 00000000..8807f73f --- /dev/null +++ b/roles/clone_on_prem_vm/tasks/main.yml @@ -0,0 +1,144 @@ +--- +- name: Fail when 'clone_on_prem_vm_source_vm_name' is undefined + ansible.builtin.fail: + msg: The name of the VM you want to clone must be defined as clone_on_prem_vm_source_vm_name + when: clone_on_prem_vm_source_vm_name is undefined + +- name: Gather package facts + ansible.builtin.package_facts: + manager: auto + register: package_facts + +- name: qemu is not installed + debug: + msg: "qemu is not installed" + when: "'qemu' not in package_facts.ansible_facts.packages" + +- name: qemu-img is not installed + debug: + msg: "qemu-img is not installed" + when: "'qemu-img' not in package_facts.ansible_facts.packages" + +- name: Create temporary directory to create the clone in + ansible.builtin.tempfile: + state: directory + suffix: .storage + register: clone_on_prem_vm__tmpdir + notify: + - "Delete temporary directory" + +- name: Get information about the on prem VM + community.libvirt.virt: + command: info + name: "{{ clone_on_prem_vm_source_vm_name }}" + uri: "{{ clone_on_prem_vm_uri }}" + register: clone_on_prem_vm__vm_info + +- name: Fail 
when on prem VM does not exist + ansible.builtin.fail: + msg: "The on prem VM {{ clone_on_prem_vm_source_vm_name }} does not exist." + when: clone_on_prem_vm_source_vm_name not in clone_on_prem_vm__vm_info + +- name: Fail when on prem VM's state is destroyed + ansible.builtin.fail: + msg: "The VM {{ clone_on_prem_vm_source_vm_name }} has been destroyed." + when: clone_on_prem_vm__vm_info[clone_on_prem_vm_source_vm_name].state == "destroyed" + +- name: Set 'clone_on_prem_vm_image_name' variable + ansible.builtin.set_fact: + clone_on_prem_vm_image_name: "{{ clone_on_prem_vm_source_vm_name }}-clone" + when: clone_on_prem_vm_image_name is undefined + +- name: Check if domain exists + community.libvirt.virt: + name: "{{ clone_on_prem_vm_image_name }}" + command: info + uri: "{{ clone_on_prem_vm_uri }}" + register: clone_on_prem_vm__domain_info + +- name: Fail when a domain already exists + ansible.builtin.fail: + msg: "A domain {{ clone_on_prem_vm_image_name }} already exists. Please undefine it first or set clone_on_prem_vm_overwrite: true." 
+ when: clone_on_prem_vm_image_name in clone_on_prem_vm__domain_info and clone_on_prem_vm_overwrite is false + +- name: Undefine domain + community.libvirt.virt: + name: "{{ clone_on_prem_vm_image_name }}" + command: undefine + when: clone_on_prem_vm_image_name in clone_on_prem_vm__domain_info and clone_on_prem_vm_overwrite is true + +- name: Ensure on prem VM is paused + community.libvirt.virt: + state: paused + name: "{{ clone_on_prem_vm_source_vm_name }}" + uri: "{{ clone_on_prem_vm_uri }}" + when: clone_on_prem_vm__vm_info[clone_on_prem_vm_source_vm_name].state == "running" + +- name: Set 'clone_on_prem_vm__clone_path' and 'clone_on_prem_vm__raw_image_path' + ansible.builtin.set_fact: + clone_on_prem_vm__clone_path: "{{ clone_on_prem_vm__tmpdir.path }}/{{ clone_on_prem_vm_image_name }}.qcow2" + clone_on_prem_vm__raw_image_path: "{{ clone_on_prem_vm__tmpdir.path }}/{{ clone_on_prem_vm_image_name }}.raw" + +- name: Cloning {{ clone_on_prem_vm_source_vm_name }} on prem VM + ansible.builtin.command: | + virt-clone --original {{ clone_on_prem_vm_source_vm_name }} \ + --name {{ clone_on_prem_vm_image_name }} \ + --file {{ clone_on_prem_vm__clone_path }} + environment: + LIBVIRT_DEFAULT_URI: "{{ clone_on_prem_vm_uri }}" + +- name: Get information about the clone + ansible.builtin.stat: + path: "{{ clone_on_prem_vm__clone_path }}" + register: clone_on_prem_vm__clone_info + +# Privilege escalation is needed because the .qcow2 file is owned by root +# when default hypervisor is used +- name: Convert qcow2 to raw using qemu-img with privilege escalation + ansible.builtin.command: | + qemu-img convert -f qcow2 -O raw \ + {{ clone_on_prem_vm__clone_path }} \ + {{ clone_on_prem_vm__raw_image_path }} + become: true + become_method: sudo + environment: + LIBVIRT_DEFAULT_URI: "{{ clone_on_prem_vm_uri }}" + when: clone_on_prem_vm__clone_info.stat.exists and clone_on_prem_vm__clone_info.stat.pw_name == "root" + +- name: Convert qcow2 to raw using qemu-img + 
ansible.builtin.command: | + qemu-img convert -f qcow2 -O raw \ + {{ clone_on_prem_vm__clone_path }} \ + {{ clone_on_prem_vm__raw_image_path }} + environment: + LIBVIRT_DEFAULT_URI: "{{ clone_on_prem_vm_uri }}" + when: clone_on_prem_vm__clone_info.stat.exists and clone_on_prem_vm__clone_info.stat.pw_name != "root" + +- name: Create temporary directory to localhost when clone_on_prem_vm_local_image_path is not set + ansible.builtin.tempfile: + state: directory + suffix: .storage + register: clone_on_prem_vm__dir_localhost + when: clone_on_prem_vm_local_image_path is undefined + delegate_to: localhost + +- name: Create directory if it does not exist + ansible.builtin.file: + path: "{{ clone_on_prem_vm_local_image_path }}" + state: directory + mode: 0775 + recurse: yes + register: clone_on_prem_vm__dir_localhost + when: clone_on_prem_vm_local_image_path is defined + delegate_to: localhost + +- name: Fetch the converted RAW image to localhost + ansible.builtin.fetch: + src: "{{ clone_on_prem_vm__raw_image_path }}" + dest: "{{ clone_on_prem_vm__dir_localhost.path }}" + validate_checksum: true + register: clone_on_prem_vm_fetch_to_localhost + +- name: Set 'clone_on_prem_vm_raw_image_path' + ansible.builtin.set_fact: + clone_on_prem_vm_raw_image_path: "{{ clone_on_prem_vm_fetch_to_localhost.dest }}" diff --git a/roles/import_image_and_run_aws_instance/README.md b/roles/import_image_and_run_aws_instance/README.md new file mode 100644 index 00000000..e4ff6d3f --- /dev/null +++ b/roles/import_image_and_run_aws_instance/README.md @@ -0,0 +1,128 @@ +import_image_and_run_aws_instance +================================= + +A role that imports a local .raw image into an Amazon Machine Image (AMI) and run an AWS EC2 instance. + +Requirements +------------ + +VM Import requires a role to perform certain operations on your behalf. 
You must create a service role named vmimport with a trust relationship policy document that allows VM Import to assume the role, and you must attach an IAM policy to the role. + +AWS User Account with the following permissions: +* s3:GetBucketLocation +* s3:GetObject +* s3:ListBucket +* s3:GetBucketLocation +* s3:GetObject +* s3:ListBucket +* s3:PutObject +* s3:GetBucketAcl +* ec2:ModifySnapshotAttribute +* ec2:CopySnapshot +* ec2:RegisterImage +* ec2:Describe* +* ec2:RunInstances + +(Optional) To import resources encrypted using an AWS KMS key from AWS Key Management Service, add the following permissions: +* kms:CreateGrant +* kms:Decrypt +* kms:DescribeKey +* kms:Encrypt +* kms:GenerateDataKey* +* kms:ReEncrypt* + +Role Variables +-------------- + +* **import_image_and_run_aws_instance_import_image_task_name**: (Required) The name you want to assign to the AWS EC2 import image task. +* **import_image_and_run_aws_instance_bucket_name**: (Required) The name of the S3 bucket name where you want to upload the .raw image. +* **import_image_and_run_aws_instance_image_path**: (Required) The path where the .raw image is stored. +* **import_image_and_run_aws_instance_instance_name**: (Required) The name of the EC2 instance you want to create using the imported AMI. +* **import_image_and_run_aws_instance_instance_type**: The EC2 instance type you want to use. Default: "t2.micro". +* **import_image_and_run_aws_instances_keypair_name**: The name of the SSH access key to assign to the EC2 instance. It must exist in the region the instance is created. If not set, your default AWS account keypair will be used. +* **import_image_and_run_aws_instance_security_groups**: A list of security group IDs or names to associate to the EC2 instance. +* **import_image_and_run_aws_instance_vpc_subnet_id**: The subnet ID in which to launch the EC2 instance (VPC). If none is provided, M(amazon.aws.ec2_instance) will choose the default zone of the default VPC. 
+* **import_image_and_run_aws_instance_volumes** (dict): (Optional) A dictionary of a block device mappings, by default this will always use the AMI root device so the **instance_volumes** option is primarily for adding more storage. A mapping contains the (optional) keys: + * **device_name** (str): The device name (for example, /dev/sdh or xvdh). + * **ebs** (dict): Parameters used to automatically set up EBS volumes when the instance is launched. + * **volume_type** (str): The volume type. Valid Values: standard, io1, io2, gp2, sc1, st1, gp3. + * **volume_size** (int): The size of the volume, in GiBs. + * **kms_key_id** (str): Identifier (key ID, key alias, ID ARN, or alias ARN) for a customer managed CMK under which the EBS volume is encrypted. + * **iops** (str): The number of I/O operations per second (IOPS). For gp3, io1, and io2 volumes, this represents the number of IOPS that are provisioned for the volume. For gp2 volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting. + * **delete_on_termination_** (bool): Indicates whether the EBS volume is deleted on instance termination. + +Dependencies +------------ + +- role: [aws_setup_credentials](../aws_setup_credentials/README.md) + +Example Playbook +---------------- +This role can be used together with the [cloud.aws_ops.clone_on_prem_vm](../clone_on_prem_vm/README.md) role as shown below. + +Create an `inventory.yml` file with information about the host running the KVM hypervisor. + +```yaml +--- +all: + hosts: + kvm: + ansible_host: myhost + ansible_user: myuser + ansible_ssh_private_key_file: /path/to/private_key + groups: mygroup +``` + +All the variables defined in section ``Playbook Variables`` can be defined inside the ``vars.yml`` file. 
+ +Create a ``playbook.yml`` file like this: + +``` +--- +- hosts: localhost + gather_facts: false + + tasks: + - name: Import 'cloud.aws_ops.clone_on_prem_vm' role + ansible.builtin.import_role: + name: cloud.aws_ops.clone_on_prem_vm + vars: + clone_on_prem_vm_source_vm_name: "{{ clone_on_prem_vm_source_vm_name }}" + clone_on_prem_vm_image_name: "{{ clone_on_prem_vm_image_name }}" + clone_on_prem_vm_local_image_path: "{{ clone_on_prem_vm_local_image_path }}" + clone_on_prem_vm_uri: "{{ clone_on_prem_vm_uri }}" + delegate_to: kvm + + - name: Import 'cloud.aws_ops.import_image_and_run_aws_instance' role + ansible.builtin.import_role: + name: cloud.aws_ops.import_image_and_run_aws_instance + vars: + import_image_and_run_aws_instance_bucket_name: "{{ import_image_and_run_aws_instance_bucket_name }}" + import_image_and_run_aws_instance_image_path: "{{ clone_on_prem_vm_raw_image_path }}" + import_image_and_run_aws_instance_instance_name: "{{ import_image_and_run_aws_instance_instance_name }}" + import_image_and_run_aws_instance_instance_type: "{{ import_image_and_run_aws_instance_instance_type }}" + import_image_and_run_aws_instance_import_image_task_name: "{{ import_image_and_run_aws_instance_import_image_task_name }}" + import_image_and_run_aws_instances_keypair_name: "{{ import_image_and_run_aws_instances_keypair_name }}" + import_image_and_run_aws_instance_security_groups: "{{ import_image_and_run_aws_instance_security_groups }}" + import_image_and_run_aws_instance_vpc_subnet_id: "{{ import_image_and_run_aws_instance_vpc_subnet_id }}" + import_image_and_run_aws_instance_volumes: "{{ import_image_and_run_aws_instance_volumes }}" +``` + +Run the playbook: + +```shell +ansible-playbook playbook.yml -i inventory.yml -e "@vars.yml" +``` + + +License +------- + +GNU General Public License v3.0 or later + +See [LICENCE](https://github.com/ansible-collections/cloud.aws_ops/blob/main/LICENSE) to see the full text. 
+ +Author Information +------------------ + +- Ansible Cloud Content Team diff --git a/roles/import_image_and_run_aws_instance/defaults/main.yml b/roles/import_image_and_run_aws_instance/defaults/main.yml new file mode 100644 index 00000000..f28fd776 --- /dev/null +++ b/roles/import_image_and_run_aws_instance/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for import_image_and_run_aws_instance diff --git a/roles/import_image_and_run_aws_instance/meta/main.yml b/roles/import_image_and_run_aws_instance/meta/main.yml new file mode 100644 index 00000000..6f4abcea --- /dev/null +++ b/roles/import_image_and_run_aws_instance/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - role: cloud.aws_ops.aws_setup_credentials diff --git a/roles/import_image_and_run_aws_instance/tasks/main.yml b/roles/import_image_and_run_aws_instance/tasks/main.yml new file mode 100644 index 00000000..e691db52 --- /dev/null +++ b/roles/import_image_and_run_aws_instance/tasks/main.yml @@ -0,0 +1,105 @@ +--- +- name: Run 'cloud.aws_ops.import_image_and_run_aws_instance' role + module_defaults: + group/aws: "{{ aws_setup_credentials__output }}" + + block: + - name: Fail when 'import_image_and_run_aws_instance_bucket_name' is undefined + ansible.builtin.fail: + msg: S3 bucket name to host the .raw image must be defined as import_image_and_run_aws_instance_bucket_name + when: import_image_and_run_aws_instance_bucket_name is undefined + + - name: Fail when 'import_image_and_run_aws_instance_image_path' is undefined + ansible.builtin.fail: + msg: The .raw image path must be defined as import_image_and_run_aws_instance_image_path + when: import_image_and_run_aws_instance_image_path is undefined + + - name: Fail when 'import_image_and_run_aws_instance_instance_name' is undefined + ansible.builtin.fail: + msg: The name to assign to the AWS EC2 instance must be defined as import_image_and_run_aws_instance_instance_name + when: import_image_and_run_aws_instance_instance_name is undefined + + - name: Fail 
when 'import_image_and_run_aws_instance_import_image_task_name' is undefined + ansible.builtin.fail: + msg: The name of the EC2 import image task must be defined as import_image_and_run_aws_instance_import_image_task_name + when: import_image_and_run_aws_instance_import_image_task_name is undefined + + - name: Get information about the S3 bucket + community.aws.s3_bucket_info: + name: "{{ import_image_and_run_aws_instance_bucket_name }}" + register: import_image_and_run_aws_instance__bucket_info + + - name: Fail when S3 bucket does not exist + ansible.builtin.fail: + msg: "The S3 bucket {{ import_image_and_run_aws_instance_bucket_name }} does not exist." + when: import_image_and_run_aws_instance__bucket_info.buckets | length == 0 + + - name: Check if an instance with the specified name already exists + amazon.aws.ec2_instance_info: + filters: + "tag:Name": "{{ import_image_and_run_aws_instance_instance_name }}" + instance-state-name: [ "running", "pending", "shutting-down", "stopping", "stopped"] + register: import_image_and_run_aws_instance__ec2_instance_info + + - name: Fail when an instance with the specified name already exists + ansible.builtin.fail: + msg: "An EC2 instance with name {{ import_image_and_run_aws_instance_instance_name }} already exists" + when: import_image_and_run_aws_instance__ec2_instance_info.instances | length == 1 + + - name: Set 'import_image_and_run_aws_instance__s3_object_key' variable + ansible.builtin.set_fact: + import_image_and_run_aws_instance__s3_object_key: "{{ import_image_and_run_aws_instance_bucket_name }}/{{ import_image_and_run_aws_instance_instance_name }}.raw" + + - name: Upload raw image to S3 bucket + amazon.aws.s3_object: + bucket: "{{ import_image_and_run_aws_instance_bucket_name }}" + object: "{{ import_image_and_run_aws_instance__s3_object_key }}" + src: "{{ import_image_and_run_aws_instance_image_path }}" + mode: put + + - name: Import image + amazon.aws.ec2_import_image: + state: present + task_name: "{{ 
import_image_and_run_aws_instance_import_image_task_name }}" + disk_containers: + - format: raw + user_bucket: + s3_bucket: "{{ import_image_and_run_aws_instance_bucket_name }}" + s3_key: "{{ import_image_and_run_aws_instance__s3_object_key }}" + register: import_image_and_run_aws_instance__import_result + + # image_id and snapshot_id are not available until the import image task is completed + - name: Check status of the import image task + amazon.aws.ec2_import_image_info: + filters: + - Name: "tag:Name" + Values: ["{{ import_image_and_run_aws_instance_import_image_task_name }}"] + - Name: "task-state" + Values: ["completed", "active"] + register: import_image_and_run_aws_instance__import_image_info + until: import_image_and_run_aws_instance__import_image_info.import_image[0].status == "completed" + delay: 10 + retries: 300 + + - name: Set 'import_image_and_run_aws_instance__ami_id' and 'import_image_and_run_aws_instance__snapshot_id' + ansible.builtin.set_fact: + import_image_and_run_aws_instance__ami_id: "{{ import_image_and_run_aws_instance__import_image_info.import_image[0].image_id }}" + import_image_and_run_aws_instance__snapshot_id: "{{ import_image_and_run_aws_instance__import_image_info.import_image[0].snapshot_details[0].snapshot_id }}" + + - name: Start EC2 instance + amazon.aws.ec2_instance: + name: "{{ import_image_and_run_aws_instance_instance_name }}" + instance_type: "{{ import_image_and_run_aws_instance_instance_type | default('t2.micro') }}" + key_name: "{{ import_image_and_run_aws_instances_keypair_name | default(omit) }}" + security_groups: "{{ import_image_and_run_aws_instance_security_groups | default(omit) }}" + vpc_subnet_id: "{{ import_image_and_run_aws_instance_vpc_subnet_id | default(omit) }}" + image_id: "{{ import_image_and_run_aws_instance__ami_id }}" + volumes: + - device_name: "{{ import_image_and_run_aws_instance_volumes.device_name | default('/dev/sda1') }}" + ebs: + volume_size: "{{ 
import_image_and_run_aws_instance_volumes.ebs.volume_size | default(omit) }}" + volume_type: "{{ import_image_and_run_aws_instance_volumes.ebs.volume_type | default(omit) }}" + iops: "{{ import_image_and_run_aws_instance_volumes.ebs.iops | default(omit) }}" + kms_key_id: "{{ import_image_and_run_aws_instance_volumes.ebs.kms_key_id | default(omit) }}" + delete_on_termination: "{{ import_image_and_run_aws_instance_volumes.ebs.delete_on_termination | default(true) }}" + snapshot_id: "{{ import_image_and_run_aws_instance__snapshot_id }}"