From e928a050d3cabda40e42052a8cf2ffa35eb8e230 Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Tue, 12 Sep 2023 17:22:14 +0200 Subject: [PATCH 01/26] Add move_vm_from_on_prem_to_aws role Signed-off-by: Alina Buzachis --- roles/move_vm_from_on_prem_to_aws/README.md | 53 ++++++++++++ .../defaults/main.yml | 3 + .../handlers/main.yml | 6 ++ .../move_vm_from_on_prem_to_aws/meta/main.yml | 3 + .../tasks/clone_on_prem_vm.yml | 80 +++++++++++++++++++ .../import_image_and_start_ec2_instance.yml | 61 ++++++++++++++ .../tasks/main.yml | 52 ++++++++++++ .../move_vm_from_on_prem_to_aws/vars/main.yml | 2 + 8 files changed, 260 insertions(+) create mode 100644 roles/move_vm_from_on_prem_to_aws/README.md create mode 100644 roles/move_vm_from_on_prem_to_aws/defaults/main.yml create mode 100644 roles/move_vm_from_on_prem_to_aws/handlers/main.yml create mode 100644 roles/move_vm_from_on_prem_to_aws/meta/main.yml create mode 100644 roles/move_vm_from_on_prem_to_aws/tasks/clone_on_prem_vm.yml create mode 100644 roles/move_vm_from_on_prem_to_aws/tasks/import_image_and_start_ec2_instance.yml create mode 100644 roles/move_vm_from_on_prem_to_aws/tasks/main.yml create mode 100644 roles/move_vm_from_on_prem_to_aws/vars/main.yml diff --git a/roles/move_vm_from_on_prem_to_aws/README.md b/roles/move_vm_from_on_prem_to_aws/README.md new file mode 100644 index 00000000..5da32bdd --- /dev/null +++ b/roles/move_vm_from_on_prem_to_aws/README.md @@ -0,0 +1,53 @@ +Role Name +========= + +A role to migrate an existing on prem VM to AWS. + +Requirements +------------ + +AWS User Account with the following permissions: + + +Role Variables +-------------- + +* **move_vm_from_on_prem_to_aws_on_prem_vm_name**: (Required) The name of the on-prem VM you want to clone. +* **move_vm_from_on_prem_to_aws_bucket_name**: (Required) The name of the S3 bucket name where you want to upload the .raw image. +* **move_vm_from_on_prem_to_aws_on_prem_instance_name**: (Required) The name of the EC2 instance you want to create using the imported AMI. +* **move_vm_from_on_prem_to_aws_instance_type**: The EC2 instance type you want to use. Default: "t2.micro". +* **move_vm_from_on_prem_to_aws_keypair_name**: The name of the SSH access key to assign to the EC2 instance. It must exist in the region the instance is created. If not set, your default AWS account keypair will be used. +* **move_vm_from_on_prem_to_aws_security_group**: A list of security group IDs or names to assiciate to the EC2 instance. +* **move_vm_from_on_prem_to_aws_vpc_subnet_id**: The subnet ID in which to launch the EC2 instance instance (VPC). If none is provided, M(amazon.aws.ec2_instance) will chose the default zone of the default VPC. +* **move_vm_from_on_prem_to_aws_uri**: (Required) # Libvirt connection uri.Default: "qemu:///system". +* **move_vm_from_on_prem_to_aws_volumes**: A dictionary of a block device mappings, by default this will always use the AMI root device so the **move_vm_from_on_prem_to_aws_volumes** option is primarily for adding more storage. A mapping contains the (optional) keys _device_name_, _ebs.volume_type_, _ebs.volume_size_, _ebs.kms_key_id_, _ebs.iops_, and _ebs.delete_on_termination_. 
+ + +Dependencies +------------ + +- role: cloud.aws_ops.aws_setup_credentials + +Example Playbook +---------------- + + - hosts: localhost + + - ansible.builtin.import_role: + name: cloud.aws_ops.move_vm_from_on_prem_to_aws + vars: + move_vm_from_on_prem_to_aws_on_prem_vm_name: "test-vm" + move_vm_from_on_prem_to_aws_on_prem_bucket_name: "test-s3-bucket" + move_vm_from_on_prem_to_aws_on_prem_instance_name: "test-instance-name" + +License +------- + +GNU General Public License v3.0 or later + +See [LICENCE](https://github.com/ansible-collections/cloud.azure_roles/blob/main/LICENSE) to see the full text. + +Author Information +------------------ + +- Ansible Cloud Content Team diff --git a/roles/move_vm_from_on_prem_to_aws/defaults/main.yml b/roles/move_vm_from_on_prem_to_aws/defaults/main.yml new file mode 100644 index 00000000..0a84d886 --- /dev/null +++ b/roles/move_vm_from_on_prem_to_aws/defaults/main.yml @@ -0,0 +1,3 @@ +--- +# defaults file for move_vm_from_on_prem_to_aws +move_vm_from_on_prem_to_aws_uri: "qemu:///system" diff --git a/roles/move_vm_from_on_prem_to_aws/handlers/main.yml b/roles/move_vm_from_on_prem_to_aws/handlers/main.yml new file mode 100644 index 00000000..ccf1521f --- /dev/null +++ b/roles/move_vm_from_on_prem_to_aws/handlers/main.yml @@ -0,0 +1,6 @@ +--- +# handlers file for move_vm_from_on_prem_to_aws +- name: Delete temporary directory + ansible.builtin.file: + state: absent + path: "{{ move_vm_from_on_prem_to_aws__tmpdir.path }}" diff --git a/roles/move_vm_from_on_prem_to_aws/meta/main.yml b/roles/move_vm_from_on_prem_to_aws/meta/main.yml new file mode 100644 index 00000000..e8b3ab42 --- /dev/null +++ b/roles/move_vm_from_on_prem_to_aws/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - role: cloud.aws_ops.aws_setup_credentials diff --git a/roles/move_vm_from_on_prem_to_aws/tasks/clone_on_prem_vm.yml b/roles/move_vm_from_on_prem_to_aws/tasks/clone_on_prem_vm.yml new file mode 100644 index 00000000..4dd170c3 --- /dev/null +++ b/roles/move_vm_from_on_prem_to_aws/tasks/clone_on_prem_vm.yml @@ -0,0 +1,80 @@ +--- +- name: Get information about the on prem VM + community.libvirt.virt: + command: info + name: "{{ move_vm_from_on_prem_to_aws_on_prem_vm_name }}" + uri: "{{ move_vm_from_on_prem_to_aws_uri }}" + register: move_vm_from_on_prem_to_aw__vm_info + +- name: Fail when on prem VM does not exist + ansible.builtin.fail: + msg: "The on prem VM {{ move_vm_from_on_prem_to_aws_on_prem_vm_name }} does not exist." + when: move_vm_from_on_prem_to_aws_on_prem_vm_name not in move_vm_from_on_prem_to_aw__vm_info + +- name: Fail when on prem VM's state is destroyed + ansible.builtin.fail: + msg: "The VM {{ source_vm_name }} has been destroyed." 
+ when: _result_vm_info[source_vm_name].state == "destroyed" + +- name: Ensure on prem VM is paused + community.libvirt.virt: + state: paused + name: "{{ move_vm_from_on_prem_to_aws_on_prem_vm_name }}" + uri: "{{ move_vm_from_on_prem_to_aws_uri }}" + when: move_vm_from_on_prem_to_aw__vm_info[move_vm_from_on_prem_to_aws_on_prem_vm_name].state == "running" + +- set_fact: + move_vm_from_on_prem_to_aws__clone_path: "{{ move_vm_from_on_prem_to_aws__tmpdir.path }}/{{ move_vm_from_on_prem_to_aws_aws_vm_name }}.qcow2" + move_vm_from_on_prem_to_aws__raw_image_path: "{{ move_vm_from_on_prem_to_aws__tmpdir.path }}/{{ move_vm_from_on_prem_to_aws_aws_vm_name }}.raw" + +- name: Cloning {{ move_vm_from_on_prem_to_aws_on_prem_vm_name }} on prem VM + command: | + virt-clone --original {{ move_vm_from_on_prem_to_aws_on_prem_vm_name }} \ + --name {{ move_vm_from_on_prem_to_aws_aws_vm_name }} \ + --file {{ move_vm_from_on_prem_to_aws__clone_path }} + environment: + LIBVIRT_DEFAULT_URI: "{{ move_vm_from_on_prem_to_aws_uri }}" + +- name: Get information about the clone + stat: + path: "{{ move_vm_from_on_prem_to_aws__clone_path }}" + register: move_vm_from_on_prem_to_aws__clone_info + +# Priviledge escalation is needed because the .qcow2 file is owned by root +# when default hypervisor is used +- name: Convert qcow2 to raw using qemu-img with priviledge escalation + command: | + qemu-img convert -f qcow2 -O raw \ + {{ move_vm_from_on_prem_to_aws__clone_path }} \ + {{ move_vm_from_on_prem_to_aws__raw_image_path }} + become: true + become_method: sudo + environment: + LIBVIRT_DEFAULT_URI: "{{ move_vm_from_on_prem_to_aws_uri }}" + when: move_vm_from_on_prem_to_aws__clone_info.stat.exists and move_vm_from_on_prem_to_aws__clone_info.stat.pw_name == "root" + +- name: Convert qcow2 to raw using qemu-img + command: | + qemu-img convert -f qcow2 -O raw \ + {{ move_vm_from_on_prem_to_aws__clone_path }} \ + {{ move_vm_from_on_prem_to_aws__raw_image_path }} + environment: + LIBVIRT_DEFAULT_URI: "{{ move_vm_from_on_prem_to_aws_uri }}" + when: move_vm_from_on_prem_to_aws__clone_info.stat.exists and move_vm_from_on_prem_to_aws__clone_info.stat.pw_name != "root" + +- name: Create temporary directory to fetch the raw in on localhost + ansible.builtin.tempfile: + state: directory + suffix: .storage + register: move_vm_from_on_prem_to_aws__tmpdir_localhost + notify: + - 'Delete temporary directory' + delegate_to: localhost + +- name: Fetch the converted RAW image to localhost + ansible.builtin.fetch: + src: "{{ move_vm_from_on_prem_to_aws__raw_image_path }}" + dest: "{{ move_vm_from_on_prem_to_aws__tmpdir_localhost.path }}" + flat: yes + fail_on_missing: yes + validate_checksum: true diff --git a/roles/move_vm_from_on_prem_to_aws/tasks/import_image_and_start_ec2_instance.yml b/roles/move_vm_from_on_prem_to_aws/tasks/import_image_and_start_ec2_instance.yml new file mode 100644 index 00000000..3930ac78 --- /dev/null +++ b/roles/move_vm_from_on_prem_to_aws/tasks/import_image_and_start_ec2_instance.yml @@ -0,0 +1,61 @@ +--- +- name: Get information about the S3 bucket + community.aws.s3_bucket_info: + name: "{{ move_vm_from_on_prem_to_aws_bucket_name }}" + register: move_vm_from_on_prem_to_aws__bucket_info + +- set_fact: + s3_object_key: "{{ move_vm_from_on_prem_to_aws_bucket_name }}/{{ move_vm_from_on_prem_to_aws_aws_vm_name }}.qcow2" + +- name: Fail when S3 bucket name does not exist + ansible.builtin.fail: + msg: "The on prem VM {{ move_vm_from_on_prem_to_aws_on_prem_bucket_name }} does not exist." 
+ when: move_vm_from_on_prem_to_aws__bucket_info.bucket_list | length == 0 + +- name: Upload raw image to S3 bucket + amazon.aws.s3_object: + bucket: "{{ move_vm_from_on_prem_to_aws_bucket_name }}" + object: "{{ s3_object_key }}" + src: "{{ move_vm_from_on_prem_to_aws__raw_image_path }}" + mode: put + +- name: Import image + amazon.aws.ec2_import_image: + state: present + task_name: "{{ move_vm_from_on_prem_to_aws_import_image_task_name }}" + disk_containers: + - format: raw + user_bucket: + s3_bucket: "{{ move_vm_from_on_prem_to_aws_bucket_name }}" + s3_key: "{{ s3_object_key }}" + +- name: Check status of the import image task + amazon.aws.ec2_import_image_info: + filters: + - Name: "tag:Name" + Values: ["{{ move_vm_from_on_prem_to_aws_import_image_task_name }}"] + - Name: "task-state" + Values: ["completed", "active"] + register: move_vm_from_on_prem_to_aws__import_image_info + +- set_fact: + ami_id: "{{ move_vm_from_on_prem_to_aws__import_image_info.import_image[0].image_id }}" + snapshot_id: "{{ move_vm_from_on_prem_to_aws__import_image_info.import_image[0].snapshot_details[0].snapshot_id }}" + +- name: Start EC2 instance + amazon.aws.ec2_instance: + name: "{{ move_vm_from_on_prem_to_aws_instance_name | default('t2.micro')}}" + instance_type: "{{ move_vm_from_on_prem_to_aws_instance_type }}" + key_name: "{{ move_vm_from_on_prem_to_aws_keypair_name | default(omit) }}" + security_group_ids: ["{{ move_vm_from_on_prem_to_aws_security_group | default(omit) }}"] + vpc_subnet_id: "{{ move_vm_from_on_prem_to_aws_vpc_subnet_id | default(omit) }}" + image_id: "{{ ami_id }}" + volumes: + - device_name: "{{ move_vm_from_on_prem_to_aws_volumes.device_name | default('/dev/sda1') }}" + ebs: + volume_size: "{{move_vm_from_on_prem_to_aws_volumes.ebs.volume_size | default(omit) }}" + volume_type: "{{ move_vm_from_on_prem_to_aws_volumes.ebs.volume_type | default(omit) }}" + iops: "{{ move_vm_from_on_prem_to_aws_volumes.ebs.iops | default(omit) }}" + kms_key_id: "{{ move_vm_from_on_prem_to_aws_volumes.ebs.kms_key_id | default(omit) }}" + delete_on_termination: "{{ move_vm_from_on_prem_to_aws_volumes.ebs.delete_on_termination | default(true) }}" + snapshot_id: "{{ snapshot_id }}" diff --git a/roles/move_vm_from_on_prem_to_aws/tasks/main.yml b/roles/move_vm_from_on_prem_to_aws/tasks/main.yml new file mode 100644 index 00000000..bf3ebdc4 --- /dev/null +++ b/roles/move_vm_from_on_prem_to_aws/tasks/main.yml @@ -0,0 +1,52 @@ +--- +# tasks file for move_vm_from_on_prem_to_aws + +- name: Fail when 'move_vm_from_on_prem_to_aws_on_prem_vm_name' is undefined + ansible.builtin.fail: + msg: Source on prem VM name should be defined as move_vm_from_on_prem_to_aws_on_prem_vm_name + when: move_vm_from_on_prem_to_aws_on_prem_vm_name is undefined + +- name: Fail when 'move_vm_from_on_prem_to_aws_bucket_name' is undefined + ansible.builtin.fail: + msg: Source on prem VM name should be defined as move_vm_from_on_prem_to_aws_bucket_name + when: move_vm_from_on_prem_to_aws_bucket_name is undefined + +- name: Fail when 'move_vm_from_on_prem_to_aws_on_prem_instance_name' is undefined + ansible.builtin.fail: + msg: "The EC2 instance name {{ move_vm_from_on_prem_to_aws_on_prem_instance_name }} should be defined" + when: move_vm_from_on_prem_to_aws_on_prem_instance_name is undefined + +- set_fact: + move_vm_from_on_prem_to_aws_aws_vm_name: "{{move_vm_from_on_prem_to_aws_aws_vm_name | default(move_vm_from_on_prem_to_aws_on_prem_vm_name)}}" + +- name: Ensure qemu is installed + ansible.builtin.package: + name: "qemu" + 
update_homebrew: True + when: ansible_facts.os_family == "Darwin" + +- name: Ensure qemu-img is installed + ansible.builtin.package: + name: "{{ 'qemu-img' if ansible_facts.os_family == 'RedHat' else 'qemu'}}" + update_cache: "{{ True if ansible_facts.pkg_mgr == 'apt' else omit }}" + when: ansible_facts.os_family != "Darwin" + become: true + +- name: Create temporary directory to create the clone in + ansible.builtin.tempfile: + state: directory + suffix: .storage + register: move_vm_from_on_prem_to_aws__tmpdir + notify: + - "Delete temporary directory" + +- name: Move on prem VM to an AWS EC2 instamce + module_defaults: + group/gcp: "{{ aws_setup_credentials__output }}" + block: + + - name: Include tasks 'clone_on_prem_vm.yml' + ansible.builtin.include_tasks: clone_one_prem_vm.yml + + - name: Include tasks 'import_image_and_start_ec2_instance.yml.yml' + ansible.builtin.include_tasks: import_image_and_start_ec2_instance.yml.yml diff --git a/roles/move_vm_from_on_prem_to_aws/vars/main.yml b/roles/move_vm_from_on_prem_to_aws/vars/main.yml new file mode 100644 index 00000000..6c37e80d --- /dev/null +++ b/roles/move_vm_from_on_prem_to_aws/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for move_vm_from_on_prem_to_aws From 42abee20e97e1b227f2fb0bca2645437216bae76 Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Tue, 19 Sep 2023 16:10:17 +0200 Subject: [PATCH 02/26] Rebase Signed-off-by: Alina Buzachis --- README.md | 3 + .../move_vm_from_on_prem_to_aws/README.md | 78 ++++++++++ .../move_vm_from_on_prem_to_aws.yml | 42 ++++++ .../move_vm_from_on_prem_to_aws/vars/main.yml | 14 ++ roles/clone_on_prem_vm/README.md | 69 +++++++++ roles/clone_on_prem_vm/defaults/main.yml | 3 + roles/clone_on_prem_vm/handlers/main.yml | 5 + roles/clone_on_prem_vm/tasks/main.yml | 141 ++++++++++++++++++ .../README.md | 111 ++++++++++++++ .../defaults/main.yml | 2 + .../meta/main.yml | 1 - .../tasks/main.yml | 100 +++++++++++++ roles/move_vm_from_on_prem_to_aws/README.md | 53 ------- .../defaults/main.yml | 3 - .../handlers/main.yml | 6 - .../tasks/clone_on_prem_vm.yml | 80 ---------- .../import_image_and_start_ec2_instance.yml | 61 -------- .../tasks/main.yml | 52 ------- .../move_vm_from_on_prem_to_aws/vars/main.yml | 2 - 19 files changed, 568 insertions(+), 258 deletions(-) create mode 100644 playbooks/move_vm_from_on_prem_to_aws/README.md create mode 100644 playbooks/move_vm_from_on_prem_to_aws/move_vm_from_on_prem_to_aws.yml create mode 100644 playbooks/move_vm_from_on_prem_to_aws/vars/main.yml create mode 100644 roles/clone_on_prem_vm/README.md create mode 100644 roles/clone_on_prem_vm/defaults/main.yml create mode 100644 roles/clone_on_prem_vm/handlers/main.yml create mode 100644 roles/clone_on_prem_vm/tasks/main.yml create mode 100644 roles/import_image_and_run_aws_instance/README.md create mode 100644 roles/import_image_and_run_aws_instance/defaults/main.yml rename roles/{move_vm_from_on_prem_to_aws => import_image_and_run_aws_instance}/meta/main.yml (93%) create mode 100644 roles/import_image_and_run_aws_instance/tasks/main.yml delete mode 100644 roles/move_vm_from_on_prem_to_aws/README.md delete mode 100644 roles/move_vm_from_on_prem_to_aws/defaults/main.yml delete mode 100644 roles/move_vm_from_on_prem_to_aws/handlers/main.yml delete mode 100644 roles/move_vm_from_on_prem_to_aws/tasks/clone_on_prem_vm.yml delete mode 100644 roles/move_vm_from_on_prem_to_aws/tasks/import_image_and_start_ec2_instance.yml delete mode 100644 roles/move_vm_from_on_prem_to_aws/tasks/main.yml delete mode 100644 
roles/move_vm_from_on_prem_to_aws/vars/main.yml diff --git a/README.md b/README.md index b8e69f79..ec0d1f92 100644 --- a/README.md +++ b/README.md @@ -31,6 +31,8 @@ Name | Description [cloud.aws_ops.manage_transit_gateway](https://github.com/ansible-collections/cloud.aws_ops/blob/main/roles/manage_transit_gateway/README.md)|A role to create/delete transit_gateway with vpc and vpn attachments. [cloud.aws_ops.deploy_flask_app](https://github.com/ansible-collections/cloud.aws_ops/blob/main/roles/deploy_flask_app/README.md)|A role to deploy a flask web application on AWS. [cloud.aws_ops.create_rds_global_cluster](https://github.com/ansible-collections/cloud.aws_ops/blob/main/roles/create_rds_global_cluster/README.md)|A role to create, delete aurora global cluster with a primary cluster and a replica cluster in different regions. +[cloud.aws_ops.clone_on_prem_vm](https://github.com/ansible-collections/cloud.aws_ops/blob/main/roles/clone_on_prem_vm/README.md)|A role to clone an existing on prem VM using the KVM hypervisor. +[cloud.aws_ops.import_image_and_run_aws_instance](https://github.com/ansible-collections/cloud.aws_ops/blob/main/roles/import_image_and_run_aws_instance/README.md)|A role that imports a local .raw image into an Amazon Machine Image (AMI) and run an AWS EC2 instance. ### Playbooks Name | Description @@ -38,6 +40,7 @@ Name | Description [cloud.aws_ops.eda](https://github.com/ansible-collections/cloud.aws_ops/blob/main/playbooks/README.md)|A set of playbooks to restore AWS Cloudtrail configurations, created for use with the [cloud.aws_manage_cloudtrail_encryption rulebook](https://github.com/ansible-collections/cloud.aws_ops/blob/main/extensions/eda/rulebooks/AWS_MANAGE_CLOUDTRAIL_ENCRYPTION.md). [cloud.aws_ops.webapp](https://github.com/ansible-collections/cloud.aws_ops/blob/main/playbooks/webapp/README.md)|A set of playbooks to create, delete, or migrate a webapp on AWS. [cloud.aws_ops.upload_file_to_s3](https://github.com/ansible-collections/cloud.aws_ops/blob/main/playbooks/UPLOAD_FILE_TO_S3.md)|A playbook to upload a local file to S3. +[cloud.aws_ops.move_vm_from_on_prem_to_aws](https://github.com/ansible-collections/cloud.aws_ops/blob/main/playbooks/move_vm_from_on_prem_to_aws/README.md)|A playbook to migrate an existing on prem VM running on KVM hypervisor to AWS. ### Rulebooks Name | Description diff --git a/playbooks/move_vm_from_on_prem_to_aws/README.md b/playbooks/move_vm_from_on_prem_to_aws/README.md new file mode 100644 index 00000000..163965a9 --- /dev/null +++ b/playbooks/move_vm_from_on_prem_to_aws/README.md @@ -0,0 +1,78 @@ +# cloud.aws_ops.move_vm_from_on_prem_to_aws playbooks + +A playbook to migrate an existing on prem VM running on KVM hypervisor to AWS. + +## Requirements + +VM Import requires a role to perform certain operations on your behalf. You must create a service role named vmimport with a trust relationship policy document that allows VM Import to assume the role, and you must attach an IAM policy to the role. 
+
+AWS User Account with the following permissions:
+* s3:GetBucketLocation
+* s3:GetObject
+* s3:ListBucket
+* s3:PutObject
+* s3:GetBucketAcl
+* ec2:ModifySnapshotAttribute
+* ec2:CopySnapshot
+* ec2:RegisterImage
+* ec2:Describe*
+* ec2:RunInstances
+
+(Optional) To import resources encrypted using an AWS KMS key from AWS Key Management Service, add the following permissions:
+* kms:CreateGrant
+* kms:Decrypt
+* kms:DescribeKey
+* kms:Encrypt
+* kms:GenerateDataKey*
+* kms:ReEncrypt*
+
+## Playbook Variables
+
+### Needed for the cloud.aws_ops.clone_on_prem_vm role
+
+* **on_prem_source_vm_name** (str): (Required) The name of the on-prem VM you want to clone.
+* **on_prem_vm_clone_name** (str): (Optional) The name you want to call the cloned image. If not set, the the **on_prem_vm_clone_name** will be used with a _-clone_ suffix.
+* **uri** (str): (Optional) Libvirt connection uri. Default: "qemu:///system".
+* **overwrite_clone** (bool): (Optional) Weather to overwrite or not an already existing on prem VM clone. Default: true.
+
+### Needed for the cloud.aws_ops.import_image_and_run_aws_instance role
+
+* **aws_access_key** (str): (Required) AWS access key ID for user account with the above permissions
+* **aws_secret_key** (str): (Required) AWS secret access key for user account with the above permissions
+* **aws_region** (str): (Required) AWS region in which to run the EC2 instance
+* **security_token** (str): (Optional) Security token for AWS session authentication
+* **s3_bucket_name** (str): (Required) The name of the S3 bucket where you want to upload the .raw image. It must exist in the region the instance is created.
+* **import_task_name** (str): (Required) The name you want to assign to the AWS EC2 import image task.
+* **image_path** (str): (Required) The path where the .raw image is stored.
+* **instance_name** (str): (Required) The name of the EC2 instance you want to create using the imported AMI.
+* **instance_type** (str): (Optional) The EC2 instance type you want to use. Default: "t2.micro".
+* **keypair_name** (str): (Optional) The name of the SSH access key to assign to the EC2 instance. It must exist in the region the instance is created. If not set, your default AWS account keypair will be used.
+* **security_groups** (list): (Optional) A list of security group IDs or names to associate to the EC2 instance.
+* **vpc_subnet_id** (str): (Optional) The subnet ID in which to launch the EC2 instance (VPC). If none is provided, M(amazon.aws.ec2_instance) will choose the default zone of the default VPC.
+* **instance_volumes** (dict): (Optional) A dictionary of block device mappings, by default this will always use the AMI root device so the **instance_volumes** option is primarily for adding more storage. A mapping contains the (optional) keys _device_name_, _ebs.volume_type_, _ebs.volume_size_, _ebs.kms_key_id_, _ebs.iops_, and _ebs.delete_on_termination_.
+
+* **kvm_host** (dict): Information about the host running the KVM hypervisor that is dynamically added to the inventory.
+  * **name**: This is a user-defined name for the host you are adding to the inventory.
+  * **ansible_host**: This variable specifies the hostname or IP address of the host you are adding to the inventory.
+  * **ansible_user**: This variable specifies the SSH username that Ansible should use when connecting to the host.
+ * **ansible_ssh_private_key_file** This variable specifies the path to the SSH private key file that Ansible should use for authentication when connecting to the host. + * **groups** This variable enabled you to assign the newly added host to one or more groups in the inventory. + +## Example Usage + +Create a `credentials.yaml` file with the folling contents: + +```yaml +aws_access_key: "xxxxxxxxxxxxxxxxxxxx" +aws_secret_key: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" +aws_region: "us-east-1" +``` + +To migrate an existing on prem VM running on KVM hypervisor to AWS, run: + +```bash +ansible-playbook move_vm_from_on_prem_to_aws.yml -e "@credentials.yaml" +``` diff --git a/playbooks/move_vm_from_on_prem_to_aws/move_vm_from_on_prem_to_aws.yml b/playbooks/move_vm_from_on_prem_to_aws/move_vm_from_on_prem_to_aws.yml new file mode 100644 index 00000000..617692d0 --- /dev/null +++ b/playbooks/move_vm_from_on_prem_to_aws/move_vm_from_on_prem_to_aws.yml @@ -0,0 +1,42 @@ +- name: A playbook to migrate an existing on prem VM running on KVM hypervisor to AWS + hosts: localhost + gather_facts: false + + vars_files: + - vars/main.yml + + module_defaults: + group/aws: + aws_access_key: "{{ aws_access_key | default(omit) }}" + aws_secret_key: "{{ aws_secret_key | default(omit) }}" + security_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region | default('us-east-1') }}" + + tasks: + - name: Add host to inventory + ansible.builtin.add_host: + name: "{{ kvm_host.name }}" + ansible_host: "{{ kvm_host.ip }}" + ansible_user: "{{ kvm_host.ansible_user }}" + ansible_ssh_common_args: -o "UserKnownHostsFile=/dev/null" -o StrictHostKeyChecking=no -i {{ kvm_host.ansible_ssh_private_key_file }} + groups: "{{ kvm_host.groups }}" + + - name: Import 'cloud.aws_ops.clone_on_prem_vm' role + ansible.builtin.import_role: + name: cloud.aws_ops.clone_on_prem_vm + vars: + clone_on_prem_vm_source_vm_name: "{{ on_prem_source_vm_name }}" + clone_on_prem_vm_dest_vm_name: "{{ on_prem_vm_clone_name }}" + clone_on_prem_vm_uri: "{{ uri }}" + clone_on_prem_vm_local_image_path: "{{ local_image_path }}" + delegate_to: kvm + + - name: Import 'cloud.aws_ops.import_image_and_run_aws_instance' role + ansible.builtin.import_role: + name: cloud.aws_ops.import_image_and_run_aws_instance + vars: + import_image_and_run_aws_instance_bucket_name: "{{ s3_bucket_name }}" + import_image_and_run_aws_instance_image_path: "{{ clone_on_prem_vm_local_image_path }}" + import_image_and_run_aws_instance_instance_name: "{{ instance_name }}" + import_image_and_run_aws_instance_instance_type: "{{ instance_type }}" + import_image_and_run_aws_instance_import_image_task_name: "{{ import_task_name }}" diff --git a/playbooks/move_vm_from_on_prem_to_aws/vars/main.yml b/playbooks/move_vm_from_on_prem_to_aws/vars/main.yml new file mode 100644 index 00000000..df25925b --- /dev/null +++ b/playbooks/move_vm_from_on_prem_to_aws/vars/main.yml @@ -0,0 +1,14 @@ +on_prem_source_vm_name: "ubuntu-guest" +on_prem_vm_clone_name: "ubuntu-guest-clone" +s3_bucket_name: "clone-vm-s3-bucket" +instance_name: "ubuntu-vm-clone" +local_image_path: "~/images" +import_task_name: "import-clone" +instance_type: "t2.micro" +uri: "qemu:///system" +kvm_host: + name: kvm + ip: 192.168.1.117 + ansible_user: vagrant + ansible_ssh_private_key_file: ~/.ssh/id_rsa.pub + groups: "libvirt" diff --git a/roles/clone_on_prem_vm/README.md b/roles/clone_on_prem_vm/README.md new file mode 100644 index 00000000..edecabb3 --- /dev/null +++ b/roles/clone_on_prem_vm/README.md @@ -0,0 +1,69 @@ +Role 
Name +========= + +A role to clone an existing on prem VM using the KVM hypervisor. The role sets the **clone_on_prem_vm_local_image_path** variable containing the path where the image was saved on localhost. + +Requirements +------------ + +**qemu** and **qemu-img** packages installed. + +Role Variables +-------------- + +* **clone_on_prem_vm_source_vm_name**: (Required) The name of the on-prem VM you want to clone. +* **clone_on_prem_vm_image_name**: The name you want to call the cloned image. If not set, the the **clone_on_prem_vm_source_vm_name** will be used with a _-clone_ suffix. +* **clone_on_prem_vm_overwrite**: Weather to overwrite or not an already existing on prem VM clone. Default: true. +* **clone_on_prem_vm_local_image_path**: The path where you would like to save the image. If the path does not exists on localhost, the role will create it. If this parameter is not set, the role will save the image in a _~/tmp_ folder. +* **clone_on_prem_vm_uri**: Libvirt connection uri. Default: "qemu:///system". + +Dependencies +------------ + +N/A + +Example Playbook +---------------- + + - hosts: localhost + gather_facts: false + + vars: + on_prem_source_vm_name: "ubuntu-guest" + on_prem_vm_image_name: "ubuntu-guest-image" + local_image_path: "~/images/" + kvm_host: + name: kvm + ip: 192.168.1.117 + ansible_user: vagrant + ansible_ssh_private_key_file: ~/.ssh/id_rsa.pub + + tasks: + - name: Add host to inventory + ansible.builtin.add_host: + name: "{{ kvm_host.name }}" + ansible_host: "{{ kvm_host.ip }}" + ansible_user: "{{ kvm_host.ansible_user }}" + ansible_ssh_common_args: -o "UserKnownHostsFile=/dev/null" -o StrictHostKeyChecking=no -i {{ kvm_host.ansible_ssh_private_key_file }} + groups: "libvirt" + + - name: Import 'cloud.aws_ops.clone_on_prem_vm' role + ansible.builtin.import_role: + name: cloud.aws_ops.clone_on_prem_vm + vars: + clone_on_prem_vm_source_vm_name: "{{ on_prem_source_vm_name }}" + clone_on_prem_vm_dest_image_name: "{{ on_prem_vm_image_name }}" + clone_on_prem_vm_local_image_path: "{{ local_image_path }}" + delegate_to: kvm + +License +------- + +GNU General Public License v3.0 or later + +See [LICENCE](https://github.com/ansible-collections/cloud.azure_roles/blob/main/LICENSE) to see the full text. 
+
+Author Information
+------------------
+
+- Ansible Cloud Content Team
diff --git a/roles/clone_on_prem_vm/defaults/main.yml b/roles/clone_on_prem_vm/defaults/main.yml
new file mode 100644
index 00000000..4cbefde6
--- /dev/null
+++ b/roles/clone_on_prem_vm/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+clone_on_prem_vm_uri: "qemu:///system"
+clone_on_prem_vm_overwrite: true
diff --git a/roles/clone_on_prem_vm/handlers/main.yml b/roles/clone_on_prem_vm/handlers/main.yml
new file mode 100644
index 00000000..ab77c652
--- /dev/null
+++ b/roles/clone_on_prem_vm/handlers/main.yml
@@ -0,0 +1,5 @@
+---
+- name: Delete temporary directory
+  ansible.builtin.file:
+    state: absent
+    path: "{{ clone_on_prem_vm__tmpdir.path }}"
diff --git a/roles/clone_on_prem_vm/tasks/main.yml b/roles/clone_on_prem_vm/tasks/main.yml
new file mode 100644
index 00000000..6b5df70e
--- /dev/null
+++ b/roles/clone_on_prem_vm/tasks/main.yml
@@ -0,0 +1,141 @@
+---
+- name: Gather package facts
+  ansible.builtin.package_facts:
+    manager: auto
+  register: package_facts
+
+- name: qemu is not installed
+  debug:
+    msg: "qemu is not installed"
+  when: "'qemu' not in package_facts.ansible_facts.packages"
+
+- name: qemu-img is not installed
+  debug:
+    msg: "qemu-img is not installed"
+  when: "'qemu-img' not in package_facts.ansible_facts.packages"
+
+- name: Create temporary directory to create the clone in
+  ansible.builtin.tempfile:
+    state: directory
+    suffix: .storage
+  register: clone_on_prem_vm__tmpdir
+  notify:
+    - "Delete temporary directory"
+
+- name: Get information about the on prem VM
+  community.libvirt.virt:
+    command: info
+    name: "{{ clone_on_prem_vm_source_vm_name }}"
+    uri: "{{ clone_on_prem_vm_uri }}"
+  register: clone_on_prem_vm__vm_info
+
+- name: Fail when on prem VM does not exist
+  ansible.builtin.fail:
+    msg: "The on prem VM {{ clone_on_prem_vm_source_vm_name }} does not exist."
+  when: clone_on_prem_vm_source_vm_name not in clone_on_prem_vm__vm_info
+
+- name: Fail when on prem VM's state is destroyed
+  ansible.builtin.fail:
+    msg: "The VM {{ clone_on_prem_vm_source_vm_name }} has been destroyed."
+  when: clone_on_prem_vm__vm_info[clone_on_prem_vm_source_vm_name].state == "destroyed"
+
+- name: Set 'clone_on_prem_vm_image_name' variable
+  ansible.builtin.set_fact:
+    clone_on_prem_vm_image_name: "{{ clone_on_prem_vm_source_vm_name }}-clone"
+  when: clone_on_prem_vm_image_name is undefined
+
+- name: Check if domain exists
+  community.libvirt.virt:
+    name: "{{ clone_on_prem_vm_image_name }}"
+    command: info
+    uri: "{{ clone_on_prem_vm_uri }}"
+  register: clone_on_prem_vm__domain_info
+
+- name: Fail when a domain already exists
+  ansible.builtin.fail:
+    msg: "A domain {{ clone_on_prem_vm_image_name }} already exists. Please undefine it first or set clone_on_prem_vm_overwrite: true."
+ when: clone_on_prem_vm_image_name in clone_on_prem_vm__domain_info and clone_on_prem_vm_overwrite is false + +- name: Undefine domain + community.libvirt.virt: + name: "{{ clone_on_prem_vm_image_name }}" + command: undefine + when: clone_on_prem_vm_image_name in clone_on_prem_vm__domain_info and clone_on_prem_vm_overwrite is true + +- name: Ensure on prem VM is paused + community.libvirt.virt: + state: paused + name: "{{ clone_on_prem_vm_source_vm_name }}" + uri: "{{ clone_on_prem_vm_uri }}" + when: clone_on_prem_vm__vm_info[clone_on_prem_vm_source_vm_name].state == "running" + +- name: Set 'clone_on_prem_vm__clone_path' and 'clone_on_prem_vm__raw_image_path' + ansible.builtin.set_fact: + clone_on_prem_vm__clone_path: "{{ clone_on_prem_vm__tmpdir.path }}/{{ clone_on_prem_vm_dest_vm_name }}.qcow2" + clone_on_prem_vm__raw_image_path: "{{ clone_on_prem_vm__tmpdir.path }}/{{ clone_on_prem_vm_dest_vm_name }}.raw" + +- name: Cloning {{ clone_on_prem_vm_source_vm_name }} on prem VM + ansible.builtin.command: | + virt-clone --original {{ clone_on_prem_vm_source_vm_name }} \ + --name {{ clone_on_prem_vm_image_name }} \ + --file {{ clone_on_prem_vm__clone_path }} + environment: + LIBVIRT_DEFAULT_URI: "{{ clone_on_prem_vm_uri }}" + +- name: Get information about the clone + ansible.builtin.stat: + path: "{{ clone_on_prem_vm__clone_path }}" + register: clone_on_prem_vm__clone_info + +# Priviledge escalation is needed because the .qcow2 file is owned by root +# when default hypervisor is used +- name: Convert qcow2 to raw using qemu-img with priviledge escalation + ansible.builtin.command: | + qemu-img convert -f qcow2 -O raw \ + {{ clone_on_prem_vm__clone_path }} \ + {{ clone_on_prem_vm__raw_image_path }} + become: true + become_method: sudo + environment: + LIBVIRT_DEFAULT_URI: "{{ clone_on_prem_vm_uri }}" + when: clone_on_prem_vm__clone_info.stat.exists and clone_on_prem_vm__clone_info.stat.pw_name == "root" + +- name: Convert qcow2 to raw using qemu-img + ansible.builtin.command: | + qemu-img convert -f qcow2 -O raw \ + {{ clone_on_prem_vm__clone_path }} \ + {{ clone_on_prem_vm__raw_image_path }} + environment: + LIBVIRT_DEFAULT_URI: "{{ clone_on_prem_vm_uri }}" + when: clone_on_prem_vm__clone_info.stat.exists and clone_on_prem_vm__clone_info.stat.pw_name != "root" + +- name: Create temporary directory to localcolhost when clone_on_prem_vm_local_image_path is not set + ansible.builtin.tempfile: + state: directory + suffix: .storage + register: clone_on_prem_vm__dir_localhost + when: clone_on_prem_vm_local_image_path is undefined + delegate_to: localhost + +- name: Create directory if it does not exist + ansible.builtin.file: + path: "{{ clone_on_prem_vm_local_image_path }}" + state: directory + mode: 0775 + recurse: yes + register: clone_on_prem_vm__dir_localhost + when: clone_on_prem_vm_local_image_path is defined + delegate_to: localhost + +- name: Fetch the converted RAW image to localhost + ansible.builtin.fetch: + src: "{{ clone_on_prem_vm__raw_image_path }}" + dest: "{{ clone_on_prem_vm__dir_localhost.path }}" + flat: yes + fail_on_missing: yes + validate_checksum: true + register: clone_on_prem_vm_fetch_to_localhost + +- name: Set 'clone_on_prem_vm_local_image_path' + ansible.builtin.set_fact: + clone_on_prem_vm_local_image_path: "{{ clone_on_prem_vm_fetch_to_localhost.dest }}" diff --git a/roles/import_image_and_run_aws_instance/README.md b/roles/import_image_and_run_aws_instance/README.md new file mode 100644 index 00000000..1cf635d3 --- /dev/null +++ 
b/roles/import_image_and_run_aws_instance/README.md @@ -0,0 +1,111 @@ +Role Name +========= + +A role that imports a local .raw image into an Amazon Machine Image (AMI) and run an AWS EC2 instance. + +Requirements +------------ + +VM Import requires a role to perform certain operations on your behalf. You must create a service role named vmimport with a trust relationship policy document that allows VM Import to assume the role, and you must attach an IAM policy to the role. + +AWS User Account with the following permissions: +* s3:GetBucketLocation +* s3:GetObject +* s3:ListBucket +* s3:GetBucketLocation +* s3:GetObject +* s3:ListBucket +* s3:PutObject +* s3:GetBucketAcl +* ec2:ModifySnapshotAttribute +* ec2:CopySnapshot +* ec2:RegisterImage +* ec2:Describe* +* ec2:RunInstances + +(Optional) To import resources encrypted using an AWS KMS key from AWS Key Management Service, add the following permissions: +* kms:CreateGrant +* kms:Decrypt +* kms:DescribeKey +* kms:Encrypt +* kms:GenerateDataKey* +* kms:ReEncrypt* + +Role Variables +-------------- + +* **import_image_and_run_aws_instance_import_image_task_name**: (Required) The name you want to assign to the AWS EC2 import image task. +* **import_image_and_run_aws_instance_bucket_name**: (Required) The name of the S3 bucket name where you want to upload the .raw image. +**import_image_and_run_aws_instance_image_path**: (Required) The path where the .raw image is stored. +* **import_image_and_run_aws_instance_instance_name**: (Required) The name of the EC2 instance you want to create using the imported AMI. +* **move_vm_from_on_prem_to_aws_instance_type**: The EC2 instance type you want to use. Default: "t2.micro". +* **import_image_and_run_aws_instances_keypair_name**: The name of the SSH access key to assign to the EC2 instance. It must exist in the region the instance is created. If not set, your default AWS account keypair will be used. +* **import_image_and_run_aws_instance_security_groups**: A list of security group IDs or names to assiciate to the EC2 instance. +* **import_image_and_run_aws_instance_vpc_subnet_id**: The subnet ID in which to launch the EC2 instance instance (VPC). If none is provided, M(amazon.aws.ec2_instance) will chose the default zone of the default VPC. +* **import_image_and_run_aws_instance_volumes**: A dictionary of a block device mappings, by default this will always use the AMI root device so the **import_image_and_run_aws_instance_volumes** option is primarily for adding more storage. A mapping contains the (optional) keys _device_name_, _ebs.volume_type_, _ebs.volume_size_, _ebs.kms_key_id_, _ebs.iops_, and _ebs.delete_on_termination_. + +Dependencies +------------ + +- role: [aws_setup_credentials](../aws_setup_credentials/README.md) + +Example Playbook +---------------- +This role can be used together with the [cloud.aws_ops.clone_on_prem_vm](../clone_on_prem_vm/README.md) role as shown below. If you wish to use it separately, just ensure you set the **clone_on_prem_vm_local_image_path** parameter. 
+ + - hosts: localhost + gather_facts: false + + vars: + on_prem_source_vm_name: "ubuntu-guest" + on_prem_vm_image_name: "ubuntu-guest-image" + s3_bucket_name: "vm-s3-bucket" + instance_name: "vm-clone" + local_image_path: "~/images/" + kvm_host: + name: kvm + ip: 192.168.1.117 + ansible_user: vagrant + ansible_ssh_private_key_file: ~/.ssh/id_rsa.pub + instance_type: "t2.micro" + import_task_name: "import-clone" + + tasks: + - name: Add host to inventory + ansible.builtin.add_host: + name: "{{ kvm_host.name }}" + ansible_host: "{{ kvm_host.ip }}" + ansible_user: "{{ kvm_host.ansible_user }}" + ansible_ssh_common_args: -o "UserKnownHostsFile=/dev/null" -o StrictHostKeyChecking=no -i {{ kvm_host.ansible_ssh_private_key_file }} + groups: "libvirt" + + - name: Import 'cloud.aws_ops.clone_on_prem_vm' role + ansible.builtin.import_role: + name: cloud.aws_ops.clone_on_prem_vm + vars: + clone_on_prem_vm_source_vm_name: "{{ on_prem_source_vm_name }}" + clone_on_prem_vm_dest_image_name: "{{ on_prem_vm_image_name }}" + clone_on_prem_vm_local_image_path: "{{ local_image_path }}" + delegate_to: kvm + + - name: Import 'cloud.aws_ops.import_image_and_run_aws_instance' role + ansible.builtin.import_role: + name: cloud.aws_ops.import_image_and_run_aws_instance + vars: + import_image_and_run_aws_instance_bucket_name: "{{ s3_bucket_name }}" + import_image_and_run_aws_instance_image_path: "{{ clone_on_prem_vm_local_image_path }}" + import_image_and_run_aws_instance_instance_name: "{{ instance_name }}" + import_image_and_run_aws_instance_instance_type: "{{ instance_type }}" + import_image_and_run_aws_instance_import_image_task_name: "{{ import_task_name }}" + +License +------- + +GNU General Public License v3.0 or later + +See [LICENCE](https://github.com/ansible-collections/cloud.azure_roles/blob/main/LICENSE) to see the full text. 
+
+Author Information
+------------------
+
+- Ansible Cloud Content Team
diff --git a/roles/import_image_and_run_aws_instance/defaults/main.yml b/roles/import_image_and_run_aws_instance/defaults/main.yml
new file mode 100644
index 00000000..f28fd776
--- /dev/null
+++ b/roles/import_image_and_run_aws_instance/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+# defaults file for import_image_and_run_aws_instance
diff --git a/roles/move_vm_from_on_prem_to_aws/meta/main.yml b/roles/import_image_and_run_aws_instance/meta/main.yml
similarity index 93%
rename from roles/move_vm_from_on_prem_to_aws/meta/main.yml
rename to roles/import_image_and_run_aws_instance/meta/main.yml
index e8b3ab42..6f4abcea 100644
--- a/roles/move_vm_from_on_prem_to_aws/meta/main.yml
+++ b/roles/import_image_and_run_aws_instance/meta/main.yml
@@ -1,3 +1,2 @@
----
 dependencies:
   - role: cloud.aws_ops.aws_setup_credentials
diff --git a/roles/import_image_and_run_aws_instance/tasks/main.yml b/roles/import_image_and_run_aws_instance/tasks/main.yml
new file mode 100644
index 00000000..ae549eee
--- /dev/null
+++ b/roles/import_image_and_run_aws_instance/tasks/main.yml
@@ -0,0 +1,100 @@
+---
+- name: Run 'cloud.aws_ops.import_image_and_run_aws_instance' role
+  module_defaults:
+    group/aws: "{{ aws_setup_credentials__output }}"
+
+  block:
+    - name: Fail when 'import_image_and_run_aws_instance_bucket_name' is undefined
+      ansible.builtin.fail:
+        msg: S3 bucket name to host the .raw image must be defined as import_image_and_run_aws_instance_bucket_name
+      when: import_image_and_run_aws_instance_bucket_name is undefined
+
+    - name: Fail when 'import_image_and_run_aws_instance_image_path' is undefined
+      ansible.builtin.fail:
+        msg: Path to the local .raw image must be defined as import_image_and_run_aws_instance_image_path
+      when: import_image_and_run_aws_instance_image_path is undefined
+
+    - name: Fail when 'import_image_and_run_aws_instance_instance_name' is undefined
+      ansible.builtin.fail:
+        msg: The name to assign to the AWS EC2 instance must be defined as import_image_and_run_aws_instance_instance_name
+      when: import_image_and_run_aws_instance_instance_name is undefined
+
+    - name: Get information about the S3 bucket
+      community.aws.s3_bucket_info:
+        name: "{{ import_image_and_run_aws_instance_bucket_name }}"
+      register: import_image_and_run_aws_instance__bucket_info
+
+    - name: Fail when S3 bucket name does not exist
+      ansible.builtin.fail:
+        msg: "The S3 bucket {{ import_image_and_run_aws_instance_bucket_name }} does not exist."
+ when: import_image_and_run_aws_instance__bucket_info.buckets | length == 0 + + - name: Set 'import_image_and_run_aws_instance__s3_object_key' variable + ansible.builtin.set_fact: + import_image_and_run_aws_instance__s3_object_key: "{{ import_image_and_run_aws_instance_bucket_name }}/{{ import_image_and_run_aws_instance_instance_name }}.raw" + + - name: Upload raw image to S3 bucket + amazon.aws.s3_object: + bucket: "{{ import_image_and_run_aws_instance_bucket_name }}" + object: "{{ import_image_and_run_aws_instance__s3_object_key }}" + src: "{{ import_image_and_run_aws_instance_image_path }}" + mode: put + + - name: Import image + amazon.aws.ec2_import_image: + state: present + task_name: "{{ import_image_and_run_aws_instance_import_image_task_name }}" + disk_containers: + - format: raw + user_bucket: + s3_bucket: "{{ import_image_and_run_aws_instance_bucket_name }}" + s3_key: "{{ import_image_and_run_aws_instance__s3_object_key }}" + register: import_image_and_run_aws_instance__import_result + + # image_id and snapshot_id are not available until the import image task is completed + - name: Check status of the import image task + amazon.aws.ec2_import_image_info: + filters: + - Name: "tag:Name" + Values: ["{{ import_image_and_run_aws_instance_import_image_task_name }}"] + - Name: "task-state" + Values: ["completed", "active"] + register: import_image_and_run_aws_instance__import_image_info + until: import_image_and_run_aws_instance__import_image_info.import_image[0].status == "completed" + delay: 3 + retries: 30 + + - name: Set 'import_image_and_run_aws_instance__ami_id' and 'import_image_and_run_aws_instance__snapshot_id' + ansible.builtin.set_fact: + import_image_and_run_aws_instance__ami_id: "{{ import_image_and_run_aws_instance__import_image_info.import_image[0].image_id }}" + import_image_and_run_aws_instance__snapshot_id: "{{ import_image_and_run_aws_instance__import_image_info.import_image[0].snapshot_details[0].snapshot_id }}" + + - name: Check if an instance with the specified name alrerady exists + amazon.aws.ec2_instance_info: + filters: + "tag:Name": "{{ import_image_and_run_aws_instance_instance_name }}" + instance-state-name: [ "running", "pending", "shutting-down", "stopping", "stopped"] + register: import_image_and_run_aws_instance__ec2_instance_info + + - name: Fail when an instance with the specified name already exists + ansible.builtin.fail: + msg: "An EC2 instance with name {{ import_image_and_run_aws_instance_instance_name }} already exists" + when: import_image_and_run_aws_instance__ec2_instance_info.instances | length == 1 + + - name: Start EC2 instance + amazon.aws.ec2_instance: + name: "{{ import_image_and_run_aws_instance_instance_name }}" + instance_type: "{{ import_image_and_run_aws_instance_instance_type | default('t2.micro') }}" + key_name: "{{ import_image_and_run_aws_instances_keypair_name | default(omit) }}" + security_groups: "{{ import_image_and_run_aws_instance_security_groups | default(omit) }}" + vpc_subnet_id: "{{ import_image_and_run_aws_instance_vpc_subnet_id | default(omit) }}" + image_id: "{{ import_image_and_run_aws_instance__ami_id }}" + volumes: + - device_name: "{{ import_image_and_run_aws_instance_volumes.device_name | default('/dev/sda1') }}" + ebs: + volume_size: "{{ import_image_and_run_aws_instance_volumes.ebs.volume_size | default(omit) }}" + volume_type: "{{ import_image_and_run_aws_instance_volumes.ebs.volume_type | default(omit) }}" + iops: "{{ import_image_and_run_aws_instance_volumes.ebs.iops | default(omit) }}" + kms_key_id: "{{ 
import_image_and_run_aws_instance_volumes.ebs.kms_key_id | default(omit) }}" + delete_on_termination: "{{ import_image_and_run_aws_instance_volumes.ebs.delete_on_termination | default(true) }}" + snapshot_id: "{{ import_image_and_run_aws_instance__snapshot_id }}" diff --git a/roles/move_vm_from_on_prem_to_aws/README.md b/roles/move_vm_from_on_prem_to_aws/README.md deleted file mode 100644 index 5da32bdd..00000000 --- a/roles/move_vm_from_on_prem_to_aws/README.md +++ /dev/null @@ -1,53 +0,0 @@ -Role Name -========= - -A role to migrate an existing on prem VM to AWS. - -Requirements ------------- - -AWS User Account with the following permissions: - - -Role Variables --------------- - -* **move_vm_from_on_prem_to_aws_on_prem_vm_name**: (Required) The name of the on-prem VM you want to clone. -* **move_vm_from_on_prem_to_aws_bucket_name**: (Required) The name of the S3 bucket name where you want to upload the .raw image. -* **move_vm_from_on_prem_to_aws_on_prem_instance_name**: (Required) The name of the EC2 instance you want to create using the imported AMI. -* **move_vm_from_on_prem_to_aws_instance_type**: The EC2 instance type you want to use. Default: "t2.micro". -* **move_vm_from_on_prem_to_aws_keypair_name**: The name of the SSH access key to assign to the EC2 instance. It must exist in the region the instance is created. If not set, your default AWS account keypair will be used. -* **move_vm_from_on_prem_to_aws_security_group**: A list of security group IDs or names to assiciate to the EC2 instance. -* **move_vm_from_on_prem_to_aws_vpc_subnet_id**: The subnet ID in which to launch the EC2 instance instance (VPC). If none is provided, M(amazon.aws.ec2_instance) will chose the default zone of the default VPC. -* **move_vm_from_on_prem_to_aws_uri**: (Required) # Libvirt connection uri.Default: "qemu:///system". -* **move_vm_from_on_prem_to_aws_volumes**: A dictionary of a block device mappings, by default this will always use the AMI root device so the **move_vm_from_on_prem_to_aws_volumes** option is primarily for adding more storage. A mapping contains the (optional) keys _device_name_, _ebs.volume_type_, _ebs.volume_size_, _ebs.kms_key_id_, _ebs.iops_, and _ebs.delete_on_termination_. - - -Dependencies ------------- - -- role: cloud.aws_ops.aws_setup_credentials - -Example Playbook ----------------- - - - hosts: localhost - - - ansible.builtin.import_role: - name: cloud.aws_ops.move_vm_from_on_prem_to_aws - vars: - move_vm_from_on_prem_to_aws_on_prem_vm_name: "test-vm" - move_vm_from_on_prem_to_aws_on_prem_bucket_name: "test-s3-bucket" - move_vm_from_on_prem_to_aws_on_prem_instance_name: "test-instance-name" - -License -------- - -GNU General Public License v3.0 or later - -See [LICENCE](https://github.com/ansible-collections/cloud.azure_roles/blob/main/LICENSE) to see the full text. 
- -Author Information ------------------- - -- Ansible Cloud Content Team diff --git a/roles/move_vm_from_on_prem_to_aws/defaults/main.yml b/roles/move_vm_from_on_prem_to_aws/defaults/main.yml deleted file mode 100644 index 0a84d886..00000000 --- a/roles/move_vm_from_on_prem_to_aws/defaults/main.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# defaults file for move_vm_from_on_prem_to_aws -move_vm_from_on_prem_to_aws_uri: "qemu:///system" diff --git a/roles/move_vm_from_on_prem_to_aws/handlers/main.yml b/roles/move_vm_from_on_prem_to_aws/handlers/main.yml deleted file mode 100644 index ccf1521f..00000000 --- a/roles/move_vm_from_on_prem_to_aws/handlers/main.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -# handlers file for move_vm_from_on_prem_to_aws -- name: Delete temporary directory - ansible.builtin.file: - state: absent - path: "{{ move_vm_from_on_prem_to_aws__tmpdir.path }}" diff --git a/roles/move_vm_from_on_prem_to_aws/tasks/clone_on_prem_vm.yml b/roles/move_vm_from_on_prem_to_aws/tasks/clone_on_prem_vm.yml deleted file mode 100644 index 4dd170c3..00000000 --- a/roles/move_vm_from_on_prem_to_aws/tasks/clone_on_prem_vm.yml +++ /dev/null @@ -1,80 +0,0 @@ ---- -- name: Get information about the on prem VM - community.libvirt.virt: - command: info - name: "{{ move_vm_from_on_prem_to_aws_on_prem_vm_name }}" - uri: "{{ move_vm_from_on_prem_to_aws_uri }}" - register: move_vm_from_on_prem_to_aw__vm_info - -- name: Fail when on prem VM does not exist - ansible.builtin.fail: - msg: "The on prem VM {{ move_vm_from_on_prem_to_aws_on_prem_vm_name }} does not exist." - when: move_vm_from_on_prem_to_aws_on_prem_vm_name not in move_vm_from_on_prem_to_aw__vm_info - -- name: Fail when on prem VM's state is destroyed - ansible.builtin.fail: - msg: "The VM {{ source_vm_name }} has been destroyed." 
- when: _result_vm_info[source_vm_name].state == "destroyed" - -- name: Ensure on prem VM is paused - community.libvirt.virt: - state: paused - name: "{{ move_vm_from_on_prem_to_aws_on_prem_vm_name }}" - uri: "{{ move_vm_from_on_prem_to_aws_uri }}" - when: move_vm_from_on_prem_to_aw__vm_info[move_vm_from_on_prem_to_aws_on_prem_vm_name].state == "running" - -- set_fact: - move_vm_from_on_prem_to_aws__clone_path: "{{ move_vm_from_on_prem_to_aws__tmpdir.path }}/{{ move_vm_from_on_prem_to_aws_aws_vm_name }}.qcow2" - move_vm_from_on_prem_to_aws__raw_image_path: "{{ move_vm_from_on_prem_to_aws__tmpdir.path }}/{{ move_vm_from_on_prem_to_aws_aws_vm_name }}.raw" - -- name: Cloning {{ move_vm_from_on_prem_to_aws_on_prem_vm_name }} on prem VM - command: | - virt-clone --original {{ move_vm_from_on_prem_to_aws_on_prem_vm_name }} \ - --name {{ move_vm_from_on_prem_to_aws_aws_vm_name }} \ - --file {{ move_vm_from_on_prem_to_aws__clone_path }} - environment: - LIBVIRT_DEFAULT_URI: "{{ move_vm_from_on_prem_to_aws_uri }}" - -- name: Get information about the clone - stat: - path: "{{ move_vm_from_on_prem_to_aws__clone_path }}" - register: move_vm_from_on_prem_to_aws__clone_info - -# Priviledge escalation is needed because the .qcow2 file is owned by root -# when default hypervisor is used -- name: Convert qcow2 to raw using qemu-img with priviledge escalation - command: | - qemu-img convert -f qcow2 -O raw \ - {{ move_vm_from_on_prem_to_aws__clone_path }} \ - {{ move_vm_from_on_prem_to_aws__raw_image_path }} - become: true - become_method: sudo - environment: - LIBVIRT_DEFAULT_URI: "{{ move_vm_from_on_prem_to_aws_uri }}" - when: move_vm_from_on_prem_to_aws__clone_info.stat.exists and move_vm_from_on_prem_to_aws__clone_info.stat.pw_name == "root" - -- name: Convert qcow2 to raw using qemu-img - command: | - qemu-img convert -f qcow2 -O raw \ - {{ move_vm_from_on_prem_to_aws__clone_path }} \ - {{ move_vm_from_on_prem_to_aws__raw_image_path }} - environment: - LIBVIRT_DEFAULT_URI: "{{ move_vm_from_on_prem_to_aws_uri }}" - when: move_vm_from_on_prem_to_aws__clone_info.stat.exists and move_vm_from_on_prem_to_aws__clone_info.stat.pw_name != "root" - -- name: Create temporary directory to fetch the raw in on localhost - ansible.builtin.tempfile: - state: directory - suffix: .storage - register: move_vm_from_on_prem_to_aws__tmpdir_localhost - notify: - - 'Delete temporary directory' - delegate_to: localhost - -- name: Fetch the converted RAW image to localhost - ansible.builtin.fetch: - src: "{{ move_vm_from_on_prem_to_aws__raw_image_path }}" - dest: "{{ move_vm_from_on_prem_to_aws__tmpdir_localhost.path }}" - flat: yes - fail_on_missing: yes - validate_checksum: true diff --git a/roles/move_vm_from_on_prem_to_aws/tasks/import_image_and_start_ec2_instance.yml b/roles/move_vm_from_on_prem_to_aws/tasks/import_image_and_start_ec2_instance.yml deleted file mode 100644 index 3930ac78..00000000 --- a/roles/move_vm_from_on_prem_to_aws/tasks/import_image_and_start_ec2_instance.yml +++ /dev/null @@ -1,61 +0,0 @@ ---- -- name: Get information about the S3 bucket - community.aws.s3_bucket_info: - name: "{{ move_vm_from_on_prem_to_aws_bucket_name }}" - register: move_vm_from_on_prem_to_aws__bucket_info - -- set_fact: - s3_object_key: "{{ move_vm_from_on_prem_to_aws_bucket_name }}/{{ move_vm_from_on_prem_to_aws_aws_vm_name }}.qcow2" - -- name: Fail when S3 bucket name does not exist - ansible.builtin.fail: - msg: "The on prem VM {{ move_vm_from_on_prem_to_aws_on_prem_bucket_name }} does not exist." 
- when: move_vm_from_on_prem_to_aws__bucket_info.bucket_list | length == 0 - -- name: Upload raw image to S3 bucket - amazon.aws.s3_object: - bucket: "{{ move_vm_from_on_prem_to_aws_bucket_name }}" - object: "{{ s3_object_key }}" - src: "{{ move_vm_from_on_prem_to_aws__raw_image_path }}" - mode: put - -- name: Import image - amazon.aws.ec2_import_image: - state: present - task_name: "{{ move_vm_from_on_prem_to_aws_import_image_task_name }}" - disk_containers: - - format: raw - user_bucket: - s3_bucket: "{{ move_vm_from_on_prem_to_aws_bucket_name }}" - s3_key: "{{ s3_object_key }}" - -- name: Check status of the import image task - amazon.aws.ec2_import_image_info: - filters: - - Name: "tag:Name" - Values: ["{{ move_vm_from_on_prem_to_aws_import_image_task_name }}"] - - Name: "task-state" - Values: ["completed", "active"] - register: move_vm_from_on_prem_to_aws__import_image_info - -- set_fact: - ami_id: "{{ move_vm_from_on_prem_to_aws__import_image_info.import_image[0].image_id }}" - snapshot_id: "{{ move_vm_from_on_prem_to_aws__import_image_info.import_image[0].snapshot_details[0].snapshot_id }}" - -- name: Start EC2 instance - amazon.aws.ec2_instance: - name: "{{ move_vm_from_on_prem_to_aws_instance_name | default('t2.micro')}}" - instance_type: "{{ move_vm_from_on_prem_to_aws_instance_type }}" - key_name: "{{ move_vm_from_on_prem_to_aws_keypair_name | default(omit) }}" - security_group_ids: ["{{ move_vm_from_on_prem_to_aws_security_group | default(omit) }}"] - vpc_subnet_id: "{{ move_vm_from_on_prem_to_aws_vpc_subnet_id | default(omit) }}" - image_id: "{{ ami_id }}" - volumes: - - device_name: "{{ move_vm_from_on_prem_to_aws_volumes.device_name | default('/dev/sda1') }}" - ebs: - volume_size: "{{move_vm_from_on_prem_to_aws_volumes.ebs.volume_size | default(omit) }}" - volume_type: "{{ move_vm_from_on_prem_to_aws_volumes.ebs.volume_type | default(omit) }}" - iops: "{{ move_vm_from_on_prem_to_aws_volumes.ebs.iops | default(omit) }}" - kms_key_id: "{{ move_vm_from_on_prem_to_aws_volumes.ebs.kms_key_id | default(omit) }}" - delete_on_termination: "{{ move_vm_from_on_prem_to_aws_volumes.ebs.delete_on_termination | default(true) }}" - snapshot_id: "{{ snapshot_id }}" diff --git a/roles/move_vm_from_on_prem_to_aws/tasks/main.yml b/roles/move_vm_from_on_prem_to_aws/tasks/main.yml deleted file mode 100644 index bf3ebdc4..00000000 --- a/roles/move_vm_from_on_prem_to_aws/tasks/main.yml +++ /dev/null @@ -1,52 +0,0 @@ ---- -# tasks file for move_vm_from_on_prem_to_aws - -- name: Fail when 'move_vm_from_on_prem_to_aws_on_prem_vm_name' is undefined - ansible.builtin.fail: - msg: Source on prem VM name should be defined as move_vm_from_on_prem_to_aws_on_prem_vm_name - when: move_vm_from_on_prem_to_aws_on_prem_vm_name is undefined - -- name: Fail when 'move_vm_from_on_prem_to_aws_bucket_name' is undefined - ansible.builtin.fail: - msg: Source on prem VM name should be defined as move_vm_from_on_prem_to_aws_bucket_name - when: move_vm_from_on_prem_to_aws_bucket_name is undefined - -- name: Fail when 'move_vm_from_on_prem_to_aws_on_prem_instance_name' is undefined - ansible.builtin.fail: - msg: "The EC2 instance name {{ move_vm_from_on_prem_to_aws_on_prem_instance_name }} should be defined" - when: move_vm_from_on_prem_to_aws_on_prem_instance_name is undefined - -- set_fact: - move_vm_from_on_prem_to_aws_aws_vm_name: "{{move_vm_from_on_prem_to_aws_aws_vm_name | default(move_vm_from_on_prem_to_aws_on_prem_vm_name)}}" - -- name: Ensure qemu is installed - ansible.builtin.package: - name: "qemu" - 
update_homebrew: True - when: ansible_facts.os_family == "Darwin" - -- name: Ensure qemu-img is installed - ansible.builtin.package: - name: "{{ 'qemu-img' if ansible_facts.os_family == 'RedHat' else 'qemu'}}" - update_cache: "{{ True if ansible_facts.pkg_mgr == 'apt' else omit }}" - when: ansible_facts.os_family != "Darwin" - become: true - -- name: Create temporary directory to create the clone in - ansible.builtin.tempfile: - state: directory - suffix: .storage - register: move_vm_from_on_prem_to_aws__tmpdir - notify: - - "Delete temporary directory" - -- name: Move on prem VM to an AWS EC2 instamce - module_defaults: - group/gcp: "{{ aws_setup_credentials__output }}" - block: - - - name: Include tasks 'clone_on_prem_vm.yml' - ansible.builtin.include_tasks: clone_one_prem_vm.yml - - - name: Include tasks 'import_image_and_start_ec2_instance.yml.yml' - ansible.builtin.include_tasks: import_image_and_start_ec2_instance.yml.yml diff --git a/roles/move_vm_from_on_prem_to_aws/vars/main.yml b/roles/move_vm_from_on_prem_to_aws/vars/main.yml deleted file mode 100644 index 6c37e80d..00000000 --- a/roles/move_vm_from_on_prem_to_aws/vars/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -# vars file for move_vm_from_on_prem_to_aws From df4fd7e8f411f7aafc8eedcf01d7a9a7335f1673 Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Tue, 19 Sep 2023 16:13:43 +0200 Subject: [PATCH 03/26] Linting Signed-off-by: Alina Buzachis --- .../move_vm_from_on_prem_to_aws.yml | 50 +++++++++---------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/playbooks/move_vm_from_on_prem_to_aws/move_vm_from_on_prem_to_aws.yml b/playbooks/move_vm_from_on_prem_to_aws/move_vm_from_on_prem_to_aws.yml index 617692d0..2b65e7d1 100644 --- a/playbooks/move_vm_from_on_prem_to_aws/move_vm_from_on_prem_to_aws.yml +++ b/playbooks/move_vm_from_on_prem_to_aws/move_vm_from_on_prem_to_aws.yml @@ -13,30 +13,30 @@ region: "{{ aws_region | default('us-east-1') }}" tasks: - - name: Add host to inventory - ansible.builtin.add_host: - name: "{{ kvm_host.name }}" - ansible_host: "{{ kvm_host.ip }}" - ansible_user: "{{ kvm_host.ansible_user }}" - ansible_ssh_common_args: -o "UserKnownHostsFile=/dev/null" -o StrictHostKeyChecking=no -i {{ kvm_host.ansible_ssh_private_key_file }} - groups: "{{ kvm_host.groups }}" + - name: Add host to inventory + ansible.builtin.add_host: + name: "{{ kvm_host.name }}" + ansible_host: "{{ kvm_host.ip }}" + ansible_user: "{{ kvm_host.ansible_user }}" + ansible_ssh_common_args: -o "UserKnownHostsFile=/dev/null" -o StrictHostKeyChecking=no -i {{ kvm_host.ansible_ssh_private_key_file }} + groups: "{{ kvm_host.groups }}" - - name: Import 'cloud.aws_ops.clone_on_prem_vm' role - ansible.builtin.import_role: - name: cloud.aws_ops.clone_on_prem_vm - vars: - clone_on_prem_vm_source_vm_name: "{{ on_prem_source_vm_name }}" - clone_on_prem_vm_dest_vm_name: "{{ on_prem_vm_clone_name }}" - clone_on_prem_vm_uri: "{{ uri }}" - clone_on_prem_vm_local_image_path: "{{ local_image_path }}" - delegate_to: kvm + - name: Import 'cloud.aws_ops.clone_on_prem_vm' role + ansible.builtin.import_role: + name: cloud.aws_ops.clone_on_prem_vm + vars: + clone_on_prem_vm_source_vm_name: "{{ on_prem_source_vm_name }}" + clone_on_prem_vm_dest_vm_name: "{{ on_prem_vm_clone_name }}" + clone_on_prem_vm_uri: "{{ uri }}" + clone_on_prem_vm_local_image_path: "{{ local_image_path }}" + delegate_to: kvm - - name: Import 'cloud.aws_ops.import_image_and_run_aws_instance' role - ansible.builtin.import_role: - name: 
cloud.aws_ops.import_image_and_run_aws_instance - vars: - import_image_and_run_aws_instance_bucket_name: "{{ s3_bucket_name }}" - import_image_and_run_aws_instance_image_path: "{{ clone_on_prem_vm_local_image_path }}" - import_image_and_run_aws_instance_instance_name: "{{ instance_name }}" - import_image_and_run_aws_instance_instance_type: "{{ instance_type }}" - import_image_and_run_aws_instance_import_image_task_name: "{{ import_task_name }}" + - name: Import 'cloud.aws_ops.import_image_and_run_aws_instance' role + ansible.builtin.import_role: + name: cloud.aws_ops.import_image_and_run_aws_instance + vars: + import_image_and_run_aws_instance_bucket_name: "{{ s3_bucket_name }}" + import_image_and_run_aws_instance_image_path: "{{ clone_on_prem_vm_local_image_path }}" + import_image_and_run_aws_instance_instance_name: "{{ instance_name }}" + import_image_and_run_aws_instance_instance_type: "{{ instance_type }}" + import_image_and_run_aws_instance_import_image_task_name: "{{ import_task_name }}" From bed1f84093f55c594804603d2935e19e144f8e1a Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Tue, 19 Sep 2023 16:16:04 +0200 Subject: [PATCH 04/26] Update galaxy.yml Signed-off-by: Alina Buzachis --- galaxy.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/galaxy.yml b/galaxy.yml index 509ff62e..6ed7a5ea 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -20,6 +20,7 @@ dependencies: amazon.aws: '>=5.1.0' community.aws: '>=5.0.0' amazon.cloud: '>=0.4.0' + community.libvirt: '>=1.2.0' version: 1.0.3 build_ignore: - .DS_Store From 3e55e8bcdd37a78b9d1c4dff2db996ddb5f447e8 Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Thu, 21 Sep 2023 20:44:33 +0200 Subject: [PATCH 05/26] Apply suggestions Signed-off-by: Alina Buzachis --- .../move_vm_from_on_prem_to_aws/README.md | 20 +++++++++++++------ .../move_vm_from_on_prem_to_aws.yml | 2 +- .../move_vm_from_on_prem_to_aws/vars/main.yml | 2 +- roles/clone_on_prem_vm/README.md | 4 ++-- roles/clone_on_prem_vm/tasks/main.yml | 2 +- .../README.md | 4 ++-- 6 files changed, 21 insertions(+), 13 deletions(-) diff --git a/playbooks/move_vm_from_on_prem_to_aws/README.md b/playbooks/move_vm_from_on_prem_to_aws/README.md index 163965a9..6768b717 100644 --- a/playbooks/move_vm_from_on_prem_to_aws/README.md +++ b/playbooks/move_vm_from_on_prem_to_aws/README.md @@ -1,4 +1,4 @@ -# cloud.aws_ops.move_vm_from_on_prem_to_aws playbooks +# cloud.aws_ops.import_image_and_run_aws_instance playbooks A playbook to migrate an existing on prem VM running on KVM hypervisor to AWS. @@ -34,11 +34,11 @@ AWS User Account with the following permissions: ### Needed for the cloud.aws_ops.clone_on_prem_vm role * **on_prem_source_vm_name** (str): (Required) The name of the on-prem VM you want to clone. -* **on_prem_vm_clone_name** (str): (Optional) The name you want to call the cloned image. If not set, the the **on_prem_vm_clone_name** will be used with a _-clone_ suffix. +* **on_prem_vm_clone_name** (str): (Optional) The name you want to call the cloned image. If not set, the **on_prem_vm_clone_name** will be used with a _-clone_ suffix. * **uri** (str): (Optional) Libvirt connection uri. Default: "qemu:///system". * **overwrite_clone** (bool): (Optional) Weather to overwrite or not an already existing on prem VM clone. Default: true. 
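For illustration, the clone-related playbook variables documented above might be supplied roughly as follows; the values are placeholders only (they mirror the sample vars file used elsewhere in this series):

```yaml
# Illustrative values for the clone-related playbook variables.
on_prem_source_vm_name: "ubuntu-guest"       # existing on-prem VM to clone
on_prem_vm_clone_name: "ubuntu-guest-clone"  # name for the cloned image
uri: "qemu:///system"                        # libvirt connection URI
overwrite_clone: true                        # replace an existing clone with the same name
```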
-### Needed for the cloud.aws_ops.clone_on_prem_vm role +### Needed for the cloud.aws_ops.import_image_and_run_aws_instance role * **aws_access_key** (str): (Required) AWS access key ID for user account with the above permissions * **aws_secret_key** (str): (Required) AWS secret access key for user account with the above permissions @@ -52,9 +52,17 @@ AWS User Account with the following permissions: * **keypair_name** (str): (Optional) The name of the SSH access key to assign to the EC2 instance. It must exist in the region the instance is created. If not set, your default AWS account keypair will be used. * **security_groups** (list): (Optional) A list of security group IDs or names to assiciate to the EC2 instance. * **vpc_subnet_id** (str): (Optional) The subnet ID in which to launch the EC2 instance instance (VPC). If none is provided, M(amazon.aws.ec2_instance) will chose the default zone of the default VPC. -* **instance_volumes** (dict): (Optional) A dictionary of a block device mappings, by default this will always use the AMI root device so the **instance_volumes** option is primarily for adding more storage. A mapping contains the (optional) keys _device_name_, _ebs.volume_type_, _ebs.volume_size_, _ebs.kms_key_id_, _ebs.iops_, and _ebs.delete_on_termination_. - -* **kvm_host** (str): Information about the host running the KVM hypervisr that are dynamically added to the inventory. +* **instance_volumes** (dict): (Optional) A dictionary of a block device mappings, by default this will always use the AMI root device so the **instance_volumes** option is primarily for adding more storage. A mapping contains the (optional) keys: + * **device_name** (str): The device name (for example, /dev/sdh or xvdh). + * **ebs** (dict): Parameters used to automatically set up EBS volumes when the instance is launched. + * **volume_type** (str): The volume type. Valid Values: standard, io1, io2, gp2, sc1, st1, gp3. + * **volume_size** (int): The size of the volume, in GiBs. + * **kms_key_id** (str): Identifier (key ID, key alias, ID ARN, or alias ARN) for a customer managed CMK under which the EBS volume is encrypted. + * **iops** (str): The number of I/O operations per second (IOPS). For gp3, io1, and io2 volumes, this represents the number of IOPS that are provisioned for the volume. For gp2 volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting. + * **delete_on_termination_** (bool): Indicates whether the EBS volume is deleted on instance termination. +* **kvm_host** (dict): Information about the host running the KVM hypervisr that are dynamically added to the inventory. + * **volume_size** This variable enabled you to assign the newly added host to one or more groups in the inventory. +* **kvm_host** (dict): Information about the host running the KVM hypervisr that are dynamically added to the inventory. * **name**: This is a user-defined name for the host you are adding to the inventory. * **ansible_host**: This variable specifies the hostname or IP address of the host you are adding to the inventory. * **ansible_user**: This variable specifies the SSH username that Ansible should use when connecting to the host. 
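As a sketch of the two dictionary-valued inputs described above, the variables might be shaped as follows; every value here is a placeholder, not a recommended setting:

```yaml
# Illustrative shape of the kvm_host and instance_volumes variables.
kvm_host:
  name: kvm
  ansible_host: 192.168.1.117
  ansible_user: vagrant
  ansible_ssh_private_key_file: ~/.ssh/id_rsa
  groups: libvirt

instance_volumes:
  device_name: /dev/sda1
  ebs:
    volume_type: gp2
    volume_size: 20
    delete_on_termination: true
```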
diff --git a/playbooks/move_vm_from_on_prem_to_aws/move_vm_from_on_prem_to_aws.yml b/playbooks/move_vm_from_on_prem_to_aws/move_vm_from_on_prem_to_aws.yml index 2b65e7d1..f2187fd1 100644 --- a/playbooks/move_vm_from_on_prem_to_aws/move_vm_from_on_prem_to_aws.yml +++ b/playbooks/move_vm_from_on_prem_to_aws/move_vm_from_on_prem_to_aws.yml @@ -16,7 +16,7 @@ - name: Add host to inventory ansible.builtin.add_host: name: "{{ kvm_host.name }}" - ansible_host: "{{ kvm_host.ip }}" + ansible_host: "{{ kvm_host.ansible_host }}" ansible_user: "{{ kvm_host.ansible_user }}" ansible_ssh_common_args: -o "UserKnownHostsFile=/dev/null" -o StrictHostKeyChecking=no -i {{ kvm_host.ansible_ssh_private_key_file }} groups: "{{ kvm_host.groups }}" diff --git a/playbooks/move_vm_from_on_prem_to_aws/vars/main.yml b/playbooks/move_vm_from_on_prem_to_aws/vars/main.yml index df25925b..17f3c0ca 100644 --- a/playbooks/move_vm_from_on_prem_to_aws/vars/main.yml +++ b/playbooks/move_vm_from_on_prem_to_aws/vars/main.yml @@ -8,7 +8,7 @@ instance_type: "t2.micro" uri: "qemu:///system" kvm_host: name: kvm - ip: 192.168.1.117 + ansible_host: 192.168.1.117 ansible_user: vagrant ansible_ssh_private_key_file: ~/.ssh/id_rsa.pub groups: "libvirt" diff --git a/roles/clone_on_prem_vm/README.md b/roles/clone_on_prem_vm/README.md index edecabb3..a8d24089 100644 --- a/roles/clone_on_prem_vm/README.md +++ b/roles/clone_on_prem_vm/README.md @@ -1,7 +1,7 @@ Role Name ========= -A role to clone an existing on prem VM using the KVM hypervisor. The role sets the **clone_on_prem_vm_local_image_path** variable containing the path where the image was saved on localhost. +A role to clone an existing on prem VM using the KVM hypervisor. The role sets the **clone_on_prem_vm_local_image_path** variable containing the path where the image was saved on localhost. This role requires privilege escalation because the .qcow2 file created by ``virt-clone`` is owned by root and ``qemu-img convert`` requires access to convert it to .raw. Requirements ------------ @@ -12,7 +12,7 @@ Role Variables -------------- * **clone_on_prem_vm_source_vm_name**: (Required) The name of the on-prem VM you want to clone. -* **clone_on_prem_vm_image_name**: The name you want to call the cloned image. If not set, the the **clone_on_prem_vm_source_vm_name** will be used with a _-clone_ suffix. +* **clone_on_prem_vm_image_name**: The name you want to call the cloned image. If not set, the **clone_on_prem_vm_source_vm_name** will be used with a _-clone_ suffix. * **clone_on_prem_vm_overwrite**: Weather to overwrite or not an already existing on prem VM clone. Default: true. * **clone_on_prem_vm_local_image_path**: The path where you would like to save the image. If the path does not exists on localhost, the role will create it. If this parameter is not set, the role will save the image in a _~/tmp_ folder. * **clone_on_prem_vm_uri**: Libvirt connection uri. Default: "qemu:///system". 
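A minimal invocation of the role with the variables listed above might look like the sketch below; the VM name, image name, and paths are placeholders:

```yaml
# Minimal sketch: clone an on-prem VM and fetch the converted .raw image to localhost.
- name: Clone the on prem VM
  ansible.builtin.import_role:
    name: cloud.aws_ops.clone_on_prem_vm
  vars:
    clone_on_prem_vm_source_vm_name: "ubuntu-guest"
    clone_on_prem_vm_image_name: "ubuntu-guest-clone"
    clone_on_prem_vm_local_image_path: "~/images"
    clone_on_prem_vm_overwrite: true
    clone_on_prem_vm_uri: "qemu:///system"
  delegate_to: kvm  # the inventory host that runs the KVM hypervisor
```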
diff --git a/roles/clone_on_prem_vm/tasks/main.yml b/roles/clone_on_prem_vm/tasks/main.yml index 6b5df70e..6489982c 100644 --- a/roles/clone_on_prem_vm/tasks/main.yml +++ b/roles/clone_on_prem_vm/tasks/main.yml @@ -109,7 +109,7 @@ LIBVIRT_DEFAULT_URI: "{{ clone_on_prem_vm_uri }}" when: clone_on_prem_vm__clone_info.stat.exists and clone_on_prem_vm__clone_info.stat.pw_name != "root" -- name: Create temporary directory to localcolhost when clone_on_prem_vm_local_image_path is not set +- name: Create temporary directory to localhost when clone_on_prem_vm_local_image_path is not set ansible.builtin.tempfile: state: directory suffix: .storage diff --git a/roles/import_image_and_run_aws_instance/README.md b/roles/import_image_and_run_aws_instance/README.md index 1cf635d3..c06422ae 100644 --- a/roles/import_image_and_run_aws_instance/README.md +++ b/roles/import_image_and_run_aws_instance/README.md @@ -40,8 +40,8 @@ Role Variables * **import_image_and_run_aws_instance_instance_name**: (Required) The name of the EC2 instance you want to create using the imported AMI. * **move_vm_from_on_prem_to_aws_instance_type**: The EC2 instance type you want to use. Default: "t2.micro". * **import_image_and_run_aws_instances_keypair_name**: The name of the SSH access key to assign to the EC2 instance. It must exist in the region the instance is created. If not set, your default AWS account keypair will be used. -* **import_image_and_run_aws_instance_security_groups**: A list of security group IDs or names to assiciate to the EC2 instance. -* **import_image_and_run_aws_instance_vpc_subnet_id**: The subnet ID in which to launch the EC2 instance instance (VPC). If none is provided, M(amazon.aws.ec2_instance) will chose the default zone of the default VPC. +* **import_image_and_run_aws_instance_security_groups**: A list of security group IDs or names to associate to the EC2 instance. +* **import_image_and_run_aws_instance_vpc_subnet_id**: The subnet ID in which to launch the EC2 instance (VPC). If none is provided, M(amazon.aws.ec2_instance) will choose the default zone of the default VPC. * **import_image_and_run_aws_instance_volumes**: A dictionary of a block device mappings, by default this will always use the AMI root device so the **import_image_and_run_aws_instance_volumes** option is primarily for adding more storage. A mapping contains the (optional) keys _device_name_, _ebs.volume_type_, _ebs.volume_size_, _ebs.kms_key_id_, _ebs.iops_, and _ebs.delete_on_termination_. Dependencies From 36d22166d612d65c34c448e6251b704e7581376d Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Fri, 22 Sep 2023 12:01:33 +0200 Subject: [PATCH 06/26] Fix typos Signed-off-by: Alina Buzachis --- roles/clone_on_prem_vm/README.md | 10 +++++----- .../import_image_and_run_aws_instance/README.md | 17 ++++++++++++----- 2 files changed, 17 insertions(+), 10 deletions(-) diff --git a/roles/clone_on_prem_vm/README.md b/roles/clone_on_prem_vm/README.md index a8d24089..ed99eedc 100644 --- a/roles/clone_on_prem_vm/README.md +++ b/roles/clone_on_prem_vm/README.md @@ -1,5 +1,5 @@ -Role Name -========= +clone_on_prem_vm +================ A role to clone an existing on prem VM using the KVM hypervisor. The role sets the **clone_on_prem_vm_local_image_path** variable containing the path where the image was saved on localhost. This role requires privilege escalation because the .qcow2 file created by ``virt-clone`` is owned by root and ``qemu-img convert`` requires access to convert it to .raw. 
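The conversion that drives the privilege escalation requirement is, in essence, a task like the following simplified sketch (file paths are illustrative; the role derives them from its own variables):

```yaml
# Simplified sketch of the qcow2 -> raw conversion performed by the role.
- name: Convert qcow2 to raw using qemu-img
  ansible.builtin.command: >
    qemu-img convert -f qcow2 -O raw
    /tmp/storage/ubuntu-guest-clone.qcow2
    /tmp/storage/ubuntu-guest-clone.raw
  become: true  # the .qcow2 created by virt-clone is owned by root
```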
@@ -34,7 +34,7 @@ Example Playbook local_image_path: "~/images/" kvm_host: name: kvm - ip: 192.168.1.117 + ansible_host: 192.168.1.117 ansible_user: vagrant ansible_ssh_private_key_file: ~/.ssh/id_rsa.pub @@ -42,7 +42,7 @@ Example Playbook - name: Add host to inventory ansible.builtin.add_host: name: "{{ kvm_host.name }}" - ansible_host: "{{ kvm_host.ip }}" + ansible_host: "{{ kvm_host.ansible_host }}" ansible_user: "{{ kvm_host.ansible_user }}" ansible_ssh_common_args: -o "UserKnownHostsFile=/dev/null" -o StrictHostKeyChecking=no -i {{ kvm_host.ansible_ssh_private_key_file }} groups: "libvirt" @@ -61,7 +61,7 @@ License GNU General Public License v3.0 or later -See [LICENCE](https://github.com/ansible-collections/cloud.azure_roles/blob/main/LICENSE) to see the full text. +See [LICENCE](https://github.com/ansible-collections/cloud.aws_ops/blob/main/LICENSE) to see the full text. Author Information ------------------ diff --git a/roles/import_image_and_run_aws_instance/README.md b/roles/import_image_and_run_aws_instance/README.md index c06422ae..847eba58 100644 --- a/roles/import_image_and_run_aws_instance/README.md +++ b/roles/import_image_and_run_aws_instance/README.md @@ -1,4 +1,4 @@ -Role Name +import_image_and_run_aws_instance ========= A role that imports a local .raw image into an Amazon Machine Image (AMI) and run an AWS EC2 instance. @@ -42,7 +42,14 @@ Role Variables * **import_image_and_run_aws_instances_keypair_name**: The name of the SSH access key to assign to the EC2 instance. It must exist in the region the instance is created. If not set, your default AWS account keypair will be used. * **import_image_and_run_aws_instance_security_groups**: A list of security group IDs or names to associate to the EC2 instance. * **import_image_and_run_aws_instance_vpc_subnet_id**: The subnet ID in which to launch the EC2 instance (VPC). If none is provided, M(amazon.aws.ec2_instance) will choose the default zone of the default VPC. -* **import_image_and_run_aws_instance_volumes**: A dictionary of a block device mappings, by default this will always use the AMI root device so the **import_image_and_run_aws_instance_volumes** option is primarily for adding more storage. A mapping contains the (optional) keys _device_name_, _ebs.volume_type_, _ebs.volume_size_, _ebs.kms_key_id_, _ebs.iops_, and _ebs.delete_on_termination_. +* **import_image_and_run_aws_instance_volumes**: A dictionary of a block device mappings, by default this will always use the AMI root device so the **import_image_and_run_aws_instance_volumes** (dict): (Optional) A dictionary of a block device mappings, by default this will always use the AMI root device so the **instance_volumes** option is primarily for adding more storage. A mapping contains the (optional) keys: + * **device_name** (str): The device name (for example, /dev/sdh or xvdh). + * **ebs** (dict): Parameters used to automatically set up EBS volumes when the instance is launched. + * **volume_type** (str): The volume type. Valid Values: standard, io1, io2, gp2, sc1, st1, gp3. + * **volume_size** (int): The size of the volume, in GiBs. + * **kms_key_id** (str): Identifier (key ID, key alias, ID ARN, or alias ARN) for a customer managed CMK under which the EBS volume is encrypted. + * **iops** (str): The number of I/O operations per second (IOPS). For gp3, io1, and io2 volumes, this represents the number of IOPS that are provisioned for the volume. 
For gp2 volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting. + * **delete_on_termination_** (bool): Indicates whether the EBS volume is deleted on instance termination. Dependencies ------------ @@ -64,7 +71,7 @@ This role can be used together with the [cloud.aws_ops.clone_on_prem_vm](../clon local_image_path: "~/images/" kvm_host: name: kvm - ip: 192.168.1.117 + ansible_host: 192.168.1.117 ansible_user: vagrant ansible_ssh_private_key_file: ~/.ssh/id_rsa.pub instance_type: "t2.micro" @@ -74,7 +81,7 @@ This role can be used together with the [cloud.aws_ops.clone_on_prem_vm](../clon - name: Add host to inventory ansible.builtin.add_host: name: "{{ kvm_host.name }}" - ansible_host: "{{ kvm_host.ip }}" + ansible_host: "{{ kvm_host.ansible_host }}" ansible_user: "{{ kvm_host.ansible_user }}" ansible_ssh_common_args: -o "UserKnownHostsFile=/dev/null" -o StrictHostKeyChecking=no -i {{ kvm_host.ansible_ssh_private_key_file }} groups: "libvirt" @@ -103,7 +110,7 @@ License GNU General Public License v3.0 or later -See [LICENCE](https://github.com/ansible-collections/cloud.azure_roles/blob/main/LICENSE) to see the full text. +See [LICENCE](https://github.com/ansible-collections/cloud.aws_ops/blob/main/LICENSE) to see the full text. Author Information ------------------ From b419c736d8c0b6574bd9c933a072961b9f816914 Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Fri, 22 Sep 2023 12:53:07 +0200 Subject: [PATCH 07/26] Fix wording Signed-off-by: Alina Buzachis --- roles/import_image_and_run_aws_instance/tasks/main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/import_image_and_run_aws_instance/tasks/main.yml b/roles/import_image_and_run_aws_instance/tasks/main.yml index ae549eee..0557c9b6 100644 --- a/roles/import_image_and_run_aws_instance/tasks/main.yml +++ b/roles/import_image_and_run_aws_instance/tasks/main.yml @@ -11,12 +11,12 @@ - name: Fail when 'import_image_and_run_aws_instance_image_path' is undefined ansible.builtin.fail: - msg: S3 bucket name to host the .raw image must be defined as import_image_and_run_aws_instance_image_path + msg: The .raw image path must be defined as import_image_and_run_aws_instance_image_path when: import_image_and_run_aws_instance_image_path is undefined - name: Fail when 'import_image_and_run_aws_instance_instance_name' is undefined ansible.builtin.fail: - msg: The name to assign the to AWS EC2 instance must be defined as import_image_and_run_aws_instance_instance_name + msg: The name to assign to the AWS EC2 instance must be defined as import_image_and_run_aws_instance_instance_name when: import_image_and_run_aws_instance_instance_name is undefined - name: Get information about the S3 bucket From 06fb898b8a58b46bdf6eff2ad16f611a60e1648d Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Fri, 22 Sep 2023 14:28:39 +0200 Subject: [PATCH 08/26] Update README.md --- roles/import_image_and_run_aws_instance/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/import_image_and_run_aws_instance/README.md b/roles/import_image_and_run_aws_instance/README.md index 847eba58..51d4fdec 100644 --- a/roles/import_image_and_run_aws_instance/README.md +++ b/roles/import_image_and_run_aws_instance/README.md @@ -34,7 +34,7 @@ AWS User Account with the following permissions: Role Variables -------------- -* **import_image_and_run_aws_instance_import_image_task_name**: (Required) The name you want to assign to the AWS EC2 import image task. 
+* **import_image_and_run_aws_instance_import_image_task_name**: (Required) The name you want to assign to the AWS EC2 import image task. * **import_image_and_run_aws_instance_bucket_name**: (Required) The name of the S3 bucket name where you want to upload the .raw image. **import_image_and_run_aws_instance_image_path**: (Required) The path where the .raw image is stored. * **import_image_and_run_aws_instance_instance_name**: (Required) The name of the EC2 instance you want to create using the imported AMI. From 323d0c9b314034e2f230ce0a15fa027c53f26d64 Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Fri, 22 Sep 2023 16:26:03 +0200 Subject: [PATCH 09/26] Update README.md --- roles/import_image_and_run_aws_instance/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/import_image_and_run_aws_instance/README.md b/roles/import_image_and_run_aws_instance/README.md index 51d4fdec..c0a49b48 100644 --- a/roles/import_image_and_run_aws_instance/README.md +++ b/roles/import_image_and_run_aws_instance/README.md @@ -38,7 +38,7 @@ Role Variables * **import_image_and_run_aws_instance_bucket_name**: (Required) The name of the S3 bucket name where you want to upload the .raw image. **import_image_and_run_aws_instance_image_path**: (Required) The path where the .raw image is stored. * **import_image_and_run_aws_instance_instance_name**: (Required) The name of the EC2 instance you want to create using the imported AMI. -* **move_vm_from_on_prem_to_aws_instance_type**: The EC2 instance type you want to use. Default: "t2.micro". +* **import_image_and_run_aws_instance_instance_type**: The EC2 instance type you want to use. Default: "t2.micro". * **import_image_and_run_aws_instances_keypair_name**: The name of the SSH access key to assign to the EC2 instance. It must exist in the region the instance is created. If not set, your default AWS account keypair will be used. * **import_image_and_run_aws_instance_security_groups**: A list of security group IDs or names to associate to the EC2 instance. * **import_image_and_run_aws_instance_vpc_subnet_id**: The subnet ID in which to launch the EC2 instance (VPC). If none is provided, M(amazon.aws.ec2_instance) will choose the default zone of the default VPC. From 0245bb947fbab8038b1f80d933061fa34f02d965 Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Tue, 26 Sep 2023 22:24:59 +0200 Subject: [PATCH 10/26] Doc fixes Signed-off-by: Alina Buzachis --- .../move_vm_from_on_prem_to_aws/README.md | 56 ++++++++++--------- .../move_vm_from_on_prem_to_aws.yml | 23 +++++--- .../move_vm_from_on_prem_to_aws/vars/main.yml | 14 ----- .../README.md | 2 +- 4 files changed, 46 insertions(+), 49 deletions(-) delete mode 100644 playbooks/move_vm_from_on_prem_to_aws/vars/main.yml diff --git a/playbooks/move_vm_from_on_prem_to_aws/README.md b/playbooks/move_vm_from_on_prem_to_aws/README.md index 6768b717..350d506f 100644 --- a/playbooks/move_vm_from_on_prem_to_aws/README.md +++ b/playbooks/move_vm_from_on_prem_to_aws/README.md @@ -1,9 +1,13 @@ -# cloud.aws_ops.import_image_and_run_aws_instance playbooks +# cloud.aws_ops.move_vm_from_on_prem_to_aws playbooks A playbook to migrate an existing on prem VM running on KVM hypervisor to AWS. ## Requirements +**qemu** and **qemu-img** packages installed. + +The ``cloud.gcp_ops.clone_one_prem_vm`` requires privilege escalation because the .qcow2 file created by ``virt-clone`` is owned by root and ``qemu-img convert`` requires access to convert it to .raw. 
+ VM Import requires a role to perform certain operations on your behalf. You must create a service role named vmimport with a trust relationship policy document that allows VM Import to assume the role, and you must attach an IAM policy to the role. AWS User Account with the following permissions: @@ -31,12 +35,20 @@ AWS User Account with the following permissions: ## Playbook Variables +* **kvm_host** (dict): Information about the host running the KVM hypervisr that are dynamically added to the inventory. + * **name**: This is a user-defined name for the host you are adding to the inventory. + * **ansible_host**: This variable specifies the hostname or IP address of the host you are adding to the inventory. + * **ansible_user**: This variable specifies the SSH username that Ansible should use when connecting to the host. + * **ansible_ssh_private_key_file** This variable specifies the path to the SSH private key file that Ansible should use for authentication when connecting to the host. + * **groups** This variable enabled you to assign the newly added host to one or more groups in the inventory. + ### Needed for the cloud.aws_ops.clone_on_prem_vm role -* **on_prem_source_vm_name** (str): (Required) The name of the on-prem VM you want to clone. -* **on_prem_vm_clone_name** (str): (Optional) The name you want to call the cloned image. If not set, the **on_prem_vm_clone_name** will be used with a _-clone_ suffix. -* **uri** (str): (Optional) Libvirt connection uri. Default: "qemu:///system". -* **overwrite_clone** (bool): (Optional) Weather to overwrite or not an already existing on prem VM clone. Default: true. +* **clone_on_prem_vm_source_vm_name** (str): (Required) The name of the on-prem VM you want to clone. +* **clone_on_prem_vm_image_name** (str): (Optional) The name you want to call the cloned image. If not set, the **on_prem_vm_clone_name** will be used with a _-clone_ suffix. +* **clone_on_prem_vm_uri** (str): (Optional) Libvirt connection uri. Default: "qemu:///system". +* **clone_on_prem_vm_overwrite** (bool): (Optional) Weather to overwrite or not an already existing on prem VM clone. Default: true. +* **clone_on_prem_vm_local_image_path** (str): (Optional) The path where you would like to save the image. If the path does not exists on localhost, the role will create it. If this parameter is not set, the role will save the image in a _~/tmp_ folder. ### Needed for the cloud.aws_ops.import_image_and_run_aws_instance role @@ -44,15 +56,15 @@ AWS User Account with the following permissions: * **aws_secret_key** (str): (Required) AWS secret access key for user account with the above permissions * **aws_region** (str): (Required) AWS region in which to run the EC2 instance * **security_token** (str): (Optional) Security token for AWS session authentication -* **s3_bucket_name** (str): (Required) The name of the S3 bucket name where you want to upload the .raw image. It must exist in the region the instance is created. -* **import_task_name** (str): (Required) The name you want to assign to the AWS EC2 import image task. -* **image_path** (str): (Required) The path where the .raw image is stored. -* **instance_name** (str): (Required) The name of the EC2 instance you want to create using the imported AMI. -* **instance_type** (str): (Optional) The EC2 instance type you want to use. Default: "t2.micro". -* **keypair_name** (str): (Optional) The name of the SSH access key to assign to the EC2 instance. It must exist in the region the instance is created. 
If not set, your default AWS account keypair will be used. -* **security_groups** (list): (Optional) A list of security group IDs or names to assiciate to the EC2 instance. -* **vpc_subnet_id** (str): (Optional) The subnet ID in which to launch the EC2 instance instance (VPC). If none is provided, M(amazon.aws.ec2_instance) will chose the default zone of the default VPC. -* **instance_volumes** (dict): (Optional) A dictionary of a block device mappings, by default this will always use the AMI root device so the **instance_volumes** option is primarily for adding more storage. A mapping contains the (optional) keys: +* **import_image_and_run_aws_instance_bucket_name** (str): (Required) The name of the S3 bucket name where you want to upload the .raw image. It must exist in the region the instance is created. +* **import_image_and_run_aws_instance_import_image_task_name** (str): (Required) The name you want to assign to the AWS EC2 import image task. +* **import_image_and_run_aws_instance_image_path** (str): (Required) The path where the .raw image is stored. +* **import_image_and_run_aws_instance_instance_name** (str): (Required) The name of the EC2 instance you want to create using the imported AMI. +* **import_image_and_run_aws_instance_instance_type** (str): (Optional) The EC2 instance type you want to use. Default: "t2.micro". +* **import_image_and_run_aws_instances_keypair_name** (str): (Optional) The name of the SSH access key to assign to the EC2 instance. It must exist in the region the instance is created. If not set, your default AWS account keypair will be used. +* **import_image_and_run_aws_instance_security_groups** (list): (Optional) A list of security group IDs or names to assiciate to the EC2 instance. +* **import_image_and_run_aws_instance_vpc_subnet_id** (str): (Optional) The subnet ID in which to launch the EC2 instance instance (VPC). If none is provided, M(amazon.aws.ec2_instance) will chose the default zone of the default VPC. +* **import_image_and_run_aws_instance_volumes** (dict): (Optional) A dictionary of a block device mappings, by default this will always use the AMI root device so the **import_image_and_run_aws_instance_volumes** option is primarily for adding more storage. A mapping contains the (optional) keys: * **device_name** (str): The device name (for example, /dev/sdh or xvdh). * **ebs** (dict): Parameters used to automatically set up EBS volumes when the instance is launched. * **volume_type** (str): The volume type. Valid Values: standard, io1, io2, gp2, sc1, st1, gp3. @@ -60,14 +72,6 @@ AWS User Account with the following permissions: * **kms_key_id** (str): Identifier (key ID, key alias, ID ARN, or alias ARN) for a customer managed CMK under which the EBS volume is encrypted. * **iops** (str): The number of I/O operations per second (IOPS). For gp3, io1, and io2 volumes, this represents the number of IOPS that are provisioned for the volume. For gp2 volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting. * **delete_on_termination_** (bool): Indicates whether the EBS volume is deleted on instance termination. -* **kvm_host** (dict): Information about the host running the KVM hypervisr that are dynamically added to the inventory. - * **volume_size** This variable enabled you to assign the newly added host to one or more groups in the inventory. -* **kvm_host** (dict): Information about the host running the KVM hypervisr that are dynamically added to the inventory. 
- * **name**: This is a user-defined name for the host you are adding to the inventory. - * **ansible_host**: This variable specifies the hostname or IP address of the host you are adding to the inventory. - * **ansible_user**: This variable specifies the SSH username that Ansible should use when connecting to the host. - * **ansible_ssh_private_key_file** This variable specifies the path to the SSH private key file that Ansible should use for authentication when connecting to the host. - * **groups** This variable enabled you to assign the newly added host to one or more groups in the inventory. ## Example Usage @@ -79,8 +83,10 @@ aws_secret_key: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" aws_region: "us-east-1" ``` -To migrate an existing on prem VM running on KVM hypervisor to AWS, run: +All the variables defined in section ``Playbook Variables`` can be defined inside the ``vars/main.yml`` file. + +Run the playbook: -```bash -ansible-playbook move_vm_from_on_prem_to_aws.yml -e "@credentials.yaml" +```shell +ansible-playbook cloud.gcp_ops.move_vm_from_on_prem_to_aws -e "@credentials.yaml" ``` diff --git a/playbooks/move_vm_from_on_prem_to_aws/move_vm_from_on_prem_to_aws.yml b/playbooks/move_vm_from_on_prem_to_aws/move_vm_from_on_prem_to_aws.yml index f2187fd1..afff7f71 100644 --- a/playbooks/move_vm_from_on_prem_to_aws/move_vm_from_on_prem_to_aws.yml +++ b/playbooks/move_vm_from_on_prem_to_aws/move_vm_from_on_prem_to_aws.yml @@ -25,18 +25,23 @@ ansible.builtin.import_role: name: cloud.aws_ops.clone_on_prem_vm vars: - clone_on_prem_vm_source_vm_name: "{{ on_prem_source_vm_name }}" - clone_on_prem_vm_dest_vm_name: "{{ on_prem_vm_clone_name }}" - clone_on_prem_vm_uri: "{{ uri }}" - clone_on_prem_vm_local_image_path: "{{ local_image_path }}" + clone_on_prem_vm_source_vm_name: "{{ clone_on_prem_vm_source_vm_name }}" + clone_on_prem_vm_dest_vm_name: "{{ clone_on_prem_vm_dest_vm_name }}" + clone_on_prem_vm_uri: "{{ clone_on_prem_vm_uri }}" + clone_on_prem_vm_local_image_path: "{{ clone_on_prem_vm_local_image_path }}" + clone_on_prem_vm_overwrite: "{{ clone_on_prem_vm_overwrite }}" delegate_to: kvm - name: Import 'cloud.aws_ops.import_image_and_run_aws_instance' role ansible.builtin.import_role: name: cloud.aws_ops.import_image_and_run_aws_instance vars: - import_image_and_run_aws_instance_bucket_name: "{{ s3_bucket_name }}" - import_image_and_run_aws_instance_image_path: "{{ clone_on_prem_vm_local_image_path }}" - import_image_and_run_aws_instance_instance_name: "{{ instance_name }}" - import_image_and_run_aws_instance_instance_type: "{{ instance_type }}" - import_image_and_run_aws_instance_import_image_task_name: "{{ import_task_name }}" + import_image_and_run_aws_instance_bucket_name: "{{ import_image_and_run_aws_instance_bucket_name }}" + import_image_and_run_aws_instance_image_path: "{{ import_image_and_run_aws_instance_image_path }}" + import_image_and_run_aws_instance_instance_name: "{{ import_image_and_run_aws_instance_instance_name }}" + import_image_and_run_aws_instance_instance_type: "{{ import_image_and_run_aws_instance_instance_type }}" + import_image_and_run_aws_instance_import_image_task_name: "{{ import_image_and_run_aws_instance_import_image_task_name }}" + import_image_and_run_aws_instances_keypair_name: "{{ import_image_and_run_aws_instances_keypair_name }}" + import_image_and_run_aws_instance_security_groups: "{{ import_image_and_run_aws_instance_security_groups }}" + import_image_and_run_aws_instance_vpc_subnet_id: "{{ import_image_and_run_aws_instance_vpc_subnet_id }}" + 
import_image_and_run_aws_instance_volumes: "{{ import_image_and_run_aws_instance_volumes }}" diff --git a/playbooks/move_vm_from_on_prem_to_aws/vars/main.yml b/playbooks/move_vm_from_on_prem_to_aws/vars/main.yml deleted file mode 100644 index 17f3c0ca..00000000 --- a/playbooks/move_vm_from_on_prem_to_aws/vars/main.yml +++ /dev/null @@ -1,14 +0,0 @@ -on_prem_source_vm_name: "ubuntu-guest" -on_prem_vm_clone_name: "ubuntu-guest-clone" -s3_bucket_name: "clone-vm-s3-bucket" -instance_name: "ubuntu-vm-clone" -local_image_path: "~/images" -import_task_name: "import-clone" -instance_type: "t2.micro" -uri: "qemu:///system" -kvm_host: - name: kvm - ansible_host: 192.168.1.117 - ansible_user: vagrant - ansible_ssh_private_key_file: ~/.ssh/id_rsa.pub - groups: "libvirt" diff --git a/roles/import_image_and_run_aws_instance/README.md b/roles/import_image_and_run_aws_instance/README.md index c0a49b48..0dcf0735 100644 --- a/roles/import_image_and_run_aws_instance/README.md +++ b/roles/import_image_and_run_aws_instance/README.md @@ -42,7 +42,7 @@ Role Variables * **import_image_and_run_aws_instances_keypair_name**: The name of the SSH access key to assign to the EC2 instance. It must exist in the region the instance is created. If not set, your default AWS account keypair will be used. * **import_image_and_run_aws_instance_security_groups**: A list of security group IDs or names to associate to the EC2 instance. * **import_image_and_run_aws_instance_vpc_subnet_id**: The subnet ID in which to launch the EC2 instance (VPC). If none is provided, M(amazon.aws.ec2_instance) will choose the default zone of the default VPC. -* **import_image_and_run_aws_instance_volumes**: A dictionary of a block device mappings, by default this will always use the AMI root device so the **import_image_and_run_aws_instance_volumes** (dict): (Optional) A dictionary of a block device mappings, by default this will always use the AMI root device so the **instance_volumes** option is primarily for adding more storage. A mapping contains the (optional) keys: +**import_image_and_run_aws_instance_volumes** (dict): (Optional) A dictionary of a block device mappings, by default this will always use the AMI root device so the **instance_volumes** option is primarily for adding more storage. A mapping contains the (optional) keys: * **device_name** (str): The device name (for example, /dev/sdh or xvdh). * **ebs** (dict): Parameters used to automatically set up EBS volumes when the instance is launched. * **volume_type** (str): The volume type. Valid Values: standard, io1, io2, gp2, sc1, st1, gp3. 
From 7a5eddd5e3cf01b71595eadc5cd9bf0e10e31875 Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Thu, 5 Oct 2023 16:14:10 +0200 Subject: [PATCH 11/26] Update upon review Signed-off-by: Alina Buzachis --- .../move_vm_from_on_prem_to_aws/README.md | 4 +-- .../move_vm_from_on_prem_to_aws.yml | 2 +- roles/clone_on_prem_vm/README.md | 10 +++--- roles/clone_on_prem_vm/tasks/main.yml | 6 ++-- .../README.md | 4 +-- .../tasks/main.yml | 33 +++++++++++-------- 6 files changed, 32 insertions(+), 27 deletions(-) diff --git a/playbooks/move_vm_from_on_prem_to_aws/README.md b/playbooks/move_vm_from_on_prem_to_aws/README.md index 350d506f..44f7105d 100644 --- a/playbooks/move_vm_from_on_prem_to_aws/README.md +++ b/playbooks/move_vm_from_on_prem_to_aws/README.md @@ -45,9 +45,9 @@ AWS User Account with the following permissions: ### Needed for the cloud.aws_ops.clone_on_prem_vm role * **clone_on_prem_vm_source_vm_name** (str): (Required) The name of the on-prem VM you want to clone. -* **clone_on_prem_vm_image_name** (str): (Optional) The name you want to call the cloned image. If not set, the **on_prem_vm_clone_name** will be used with a _-clone_ suffix. +* **clone_on_prem_vm_image_name** (str): (Optional) The name you want to call the cloned image. If not set, the **clone_on_prem_vm_source_vm_name** will be used with a _-clone_ suffix. * **clone_on_prem_vm_uri** (str): (Optional) Libvirt connection uri. Default: "qemu:///system". -* **clone_on_prem_vm_overwrite** (bool): (Optional) Weather to overwrite or not an already existing on prem VM clone. Default: true. +* **clone_on_prem_vm_overwrite** (bool): (Optional) Whether to overwrite or not an already existing on prem VM clone. Default: true. * **clone_on_prem_vm_local_image_path** (str): (Optional) The path where you would like to save the image. If the path does not exists on localhost, the role will create it. If this parameter is not set, the role will save the image in a _~/tmp_ folder. ### Needed for the cloud.aws_ops.import_image_and_run_aws_instance role diff --git a/playbooks/move_vm_from_on_prem_to_aws/move_vm_from_on_prem_to_aws.yml b/playbooks/move_vm_from_on_prem_to_aws/move_vm_from_on_prem_to_aws.yml index afff7f71..9768840c 100644 --- a/playbooks/move_vm_from_on_prem_to_aws/move_vm_from_on_prem_to_aws.yml +++ b/playbooks/move_vm_from_on_prem_to_aws/move_vm_from_on_prem_to_aws.yml @@ -26,7 +26,7 @@ name: cloud.aws_ops.clone_on_prem_vm vars: clone_on_prem_vm_source_vm_name: "{{ clone_on_prem_vm_source_vm_name }}" - clone_on_prem_vm_dest_vm_name: "{{ clone_on_prem_vm_dest_vm_name }}" + clone_on_prem_vm_image_name: "{{ clone_on_prem_vm_image_name }}" clone_on_prem_vm_uri: "{{ clone_on_prem_vm_uri }}" clone_on_prem_vm_local_image_path: "{{ clone_on_prem_vm_local_image_path }}" clone_on_prem_vm_overwrite: "{{ clone_on_prem_vm_overwrite }}" diff --git a/roles/clone_on_prem_vm/README.md b/roles/clone_on_prem_vm/README.md index ed99eedc..988a5d90 100644 --- a/roles/clone_on_prem_vm/README.md +++ b/roles/clone_on_prem_vm/README.md @@ -12,10 +12,10 @@ Role Variables -------------- * **clone_on_prem_vm_source_vm_name**: (Required) The name of the on-prem VM you want to clone. -* **clone_on_prem_vm_image_name**: The name you want to call the cloned image. If not set, the **clone_on_prem_vm_source_vm_name** will be used with a _-clone_ suffix. -* **clone_on_prem_vm_overwrite**: Weather to overwrite or not an already existing on prem VM clone. Default: true. -* **clone_on_prem_vm_local_image_path**: The path where you would like to save the image. 
If the path does not exists on localhost, the role will create it. If this parameter is not set, the role will save the image in a _~/tmp_ folder. -* **clone_on_prem_vm_uri**: Libvirt connection uri. Default: "qemu:///system". +* **clone_on_prem_vm_image_name**: (Optional) The name you want to call the cloned image. If not set, the **clone_on_prem_vm_source_vm_name** will be used with a _-clone_ suffix. +* **clone_on_prem_vm_overwrite**: (Optional) Whether to overwrite or not an already existing on prem VM clone. Default: true. +* **clone_on_prem_vm_local_image_path**: (Optional) The path where you would like to save the image. If the path does not exists on localhost, the role will create it. If this parameter is not set, the role will save the image in a _~/tmp_ folder. +* **clone_on_prem_vm_uri**: (Optional) Libvirt connection uri. Default: "qemu:///system". Dependencies ------------ @@ -52,7 +52,7 @@ Example Playbook name: cloud.aws_ops.clone_on_prem_vm vars: clone_on_prem_vm_source_vm_name: "{{ on_prem_source_vm_name }}" - clone_on_prem_vm_dest_image_name: "{{ on_prem_vm_image_name }}" + clone_on_prem_vm_image_name: "{{ on_prem_vm_image_name }}" clone_on_prem_vm_local_image_path: "{{ local_image_path }}" delegate_to: kvm diff --git a/roles/clone_on_prem_vm/tasks/main.yml b/roles/clone_on_prem_vm/tasks/main.yml index 6489982c..d5d7ac5e 100644 --- a/roles/clone_on_prem_vm/tasks/main.yml +++ b/roles/clone_on_prem_vm/tasks/main.yml @@ -71,8 +71,8 @@ - name: Set 'clone_on_prem_vm__clone_path' and 'clone_on_prem_vm__raw_image_path' ansible.builtin.set_fact: - clone_on_prem_vm__clone_path: "{{ clone_on_prem_vm__tmpdir.path }}/{{ clone_on_prem_vm_dest_vm_name }}.qcow2" - clone_on_prem_vm__raw_image_path: "{{ clone_on_prem_vm__tmpdir.path }}/{{ clone_on_prem_vm_dest_vm_name }}.raw" + clone_on_prem_vm__clone_path: "{{ clone_on_prem_vm__tmpdir.path }}/{{ clone_on_prem_vm_image_name }}.qcow2" + clone_on_prem_vm__raw_image_path: "{{ clone_on_prem_vm__tmpdir.path }}/{{ clone_on_prem_vm_image_name }}.raw" - name: Cloning {{ clone_on_prem_vm_source_vm_name }} on prem VM ansible.builtin.command: | @@ -87,7 +87,7 @@ path: "{{ clone_on_prem_vm__clone_path }}" register: clone_on_prem_vm__clone_info -# Priviledge escalation is needed because the .qcow2 file is owned by root +# Privilege escalation is needed because the .qcow2 file is owned by root # when default hypervisor is used - name: Convert qcow2 to raw using qemu-img with priviledge escalation ansible.builtin.command: | diff --git a/roles/import_image_and_run_aws_instance/README.md b/roles/import_image_and_run_aws_instance/README.md index 0dcf0735..638e4c83 100644 --- a/roles/import_image_and_run_aws_instance/README.md +++ b/roles/import_image_and_run_aws_instance/README.md @@ -58,7 +58,7 @@ Dependencies Example Playbook ---------------- -This role can be used together with the [cloud.aws_ops.clone_on_prem_vm](../clone_on_prem_vm/README.md) role as shown below. If you wish to use it separately, just ensure you set the **clone_on_prem_vm_local_image_path** parameter. +This role can be used together with the [cloud.aws_ops.clone_on_prem_vm](../clone_on_prem_vm/README.md) role as shown below. 
- hosts: localhost gather_facts: false @@ -91,7 +91,7 @@ This role can be used together with the [cloud.aws_ops.clone_on_prem_vm](../clon name: cloud.aws_ops.clone_on_prem_vm vars: clone_on_prem_vm_source_vm_name: "{{ on_prem_source_vm_name }}" - clone_on_prem_vm_dest_image_name: "{{ on_prem_vm_image_name }}" + clone_on_prem_vm_image_name: "{{ on_prem_vm_image_name }}" clone_on_prem_vm_local_image_path: "{{ local_image_path }}" delegate_to: kvm diff --git a/roles/import_image_and_run_aws_instance/tasks/main.yml b/roles/import_image_and_run_aws_instance/tasks/main.yml index 0557c9b6..8d1f9655 100644 --- a/roles/import_image_and_run_aws_instance/tasks/main.yml +++ b/roles/import_image_and_run_aws_instance/tasks/main.yml @@ -19,16 +19,33 @@ msg: The name to assign to the AWS EC2 instance must be defined as import_image_and_run_aws_instance_instance_name when: import_image_and_run_aws_instance_instance_name is undefined + - name: Fail when 'import_image_and_run_aws_instance_import_image_task_name' is undefined + ansible.builtin.fail: + msg: The name of the EC2 import image task must be defined as import_image_and_run_aws_instance_import_image_task_name + when: import_image_and_run_aws_instance_import_image_task_name is undefined + - name: Get information about the S3 bucket community.aws.s3_bucket_info: name: "{{ import_image_and_run_aws_instance_bucket_name }}" register: import_image_and_run_aws_instance__bucket_info - - name: Fail when S3 bucket name does not exist + - name: Fail when S3 bucket does not exist ansible.builtin.fail: - msg: "The on prem VM {{ import_image_and_run_aws_instance_bucket_name }} does not exist." + msg: "The S3 bucket {{ import_image_and_run_aws_instance_bucket_name }} does not exist." when: import_image_and_run_aws_instance__bucket_info.buckets | length == 0 + - name: Check if an instance with the specified name already exists + amazon.aws.ec2_instance_info: + filters: + "tag:Name": "{{ import_image_and_run_aws_instance_instance_name }}" + instance-state-name: [ "running", "pending", "shutting-down", "stopping", "stopped"] + register: import_image_and_run_aws_instance__ec2_instance_info + + - name: Fail when an instance with the specified name already exists + ansible.builtin.fail: + msg: "An EC2 instance with name {{ import_image_and_run_aws_instance_instance_name }} already exists" + when: import_image_and_run_aws_instance__ec2_instance_info.instances | length == 1 + - name: Set 'import_image_and_run_aws_instance__s3_object_key' variable ansible.builtin.set_fact: import_image_and_run_aws_instance__s3_object_key: "{{ import_image_and_run_aws_instance_bucket_name }}/{{ import_image_and_run_aws_instance_instance_name }}.raw" @@ -69,18 +86,6 @@ import_image_and_run_aws_instance__ami_id: "{{ import_image_and_run_aws_instance__import_image_info.import_image[0].image_id }}" import_image_and_run_aws_instance__snapshot_id: "{{ import_image_and_run_aws_instance__import_image_info.import_image[0].snapshot_details[0].snapshot_id }}" - - name: Check if an instance with the specified name alrerady exists - amazon.aws.ec2_instance_info: - filters: - "tag:Name": "{{ import_image_and_run_aws_instance_instance_name }}" - instance-state-name: [ "running", "pending", "shutting-down", "stopping", "stopped"] - register: import_image_and_run_aws_instance__ec2_instance_info - - - name: Fail when an instance with the specified name already exists - ansible.builtin.fail: - msg: "An EC2 instance with name {{ import_image_and_run_aws_instance_instance_name }} already exists" - when: 
import_image_and_run_aws_instance__ec2_instance_info.instances | length == 1 - - name: Start EC2 instance amazon.aws.ec2_instance: name: "{{ import_image_and_run_aws_instance_instance_name }}" From 6ce9958125421014935aeae9f39fba5ad24af20c Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Mon, 9 Oct 2023 15:40:31 +0200 Subject: [PATCH 12/26] Apply suggestions Signed-off-by: Alina Buzachis --- .../move_vm_from_on_prem_to_aws/README.md | 83 ++++--------------- .../move_vm_from_on_prem_to_aws.yml | 2 +- 2 files changed, 18 insertions(+), 67 deletions(-) diff --git a/playbooks/move_vm_from_on_prem_to_aws/README.md b/playbooks/move_vm_from_on_prem_to_aws/README.md index 44f7105d..4d6b1260 100644 --- a/playbooks/move_vm_from_on_prem_to_aws/README.md +++ b/playbooks/move_vm_from_on_prem_to_aws/README.md @@ -4,78 +4,16 @@ A playbook to migrate an existing on prem VM running on KVM hypervisor to AWS. ## Requirements -**qemu** and **qemu-img** packages installed. +This playbook uses the ``cloud.aws_ops.clone_on_prem_vm`` role to clone an existing VM on prem using the KVM hypervisor and the ``cloud.aws_ops.import_image_and_run_aws_instance`` role to import a local .raw image into an Amazon machine image (AMI) and run an AWS EC2 instance. For a complete list of requirements, see [clone_on_prem_vm](../clone_on_prem_vm/README.md#Requirements) and [import_image_and_run_aws_instance](../roles/import_image_and_run_aws_instance/REAME.md#Requirements), respectively. -The ``cloud.gcp_ops.clone_one_prem_vm`` requires privilege escalation because the .qcow2 file created by ``virt-clone`` is owned by root and ``qemu-img convert`` requires access to convert it to .raw. - -VM Import requires a role to perform certain operations on your behalf. You must create a service role named vmimport with a trust relationship policy document that allows VM Import to assume the role, and you must attach an IAM policy to the role. - -AWS User Account with the following permissions: -* s3:GetBucketLocation -* s3:GetObject -* s3:ListBucket -* s3:GetBucketLocation -* s3:GetObject -* s3:ListBucket -* s3:PutObject -* s3:GetBucketAcl -* ec2:ModifySnapshotAttribute -* ec2:CopySnapshot -* ec2:RegisterImage -* ec2:Describe* -* ec2:RunInstances - -(Optional) To import resources encrypted using an AWS KMS key from AWS Key Management Service, add the following permissions: -* kms:CreateGrant -* kms:Decrypt -* kms:DescribeKey -* kms:Encrypt -* kms:GenerateDataKey* -* kms:ReEncrypt* ## Playbook Variables -* **kvm_host** (dict): Information about the host running the KVM hypervisr that are dynamically added to the inventory. - * **name**: This is a user-defined name for the host you are adding to the inventory. - * **ansible_host**: This variable specifies the hostname or IP address of the host you are adding to the inventory. - * **ansible_user**: This variable specifies the SSH username that Ansible should use when connecting to the host. - * **ansible_ssh_private_key_file** This variable specifies the path to the SSH private key file that Ansible should use for authentication when connecting to the host. - * **groups** This variable enabled you to assign the newly added host to one or more groups in the inventory. - -### Needed for the cloud.aws_ops.clone_on_prem_vm role - -* **clone_on_prem_vm_source_vm_name** (str): (Required) The name of the on-prem VM you want to clone. -* **clone_on_prem_vm_image_name** (str): (Optional) The name you want to call the cloned image. 
If not set, the **clone_on_prem_vm_source_vm_name** will be used with a _-clone_ suffix. -* **clone_on_prem_vm_uri** (str): (Optional) Libvirt connection uri. Default: "qemu:///system". -* **clone_on_prem_vm_overwrite** (bool): (Optional) Whether to overwrite or not an already existing on prem VM clone. Default: true. -* **clone_on_prem_vm_local_image_path** (str): (Optional) The path where you would like to save the image. If the path does not exists on localhost, the role will create it. If this parameter is not set, the role will save the image in a _~/tmp_ folder. - -### Needed for the cloud.aws_ops.import_image_and_run_aws_instance role - -* **aws_access_key** (str): (Required) AWS access key ID for user account with the above permissions -* **aws_secret_key** (str): (Required) AWS secret access key for user account with the above permissions -* **aws_region** (str): (Required) AWS region in which to run the EC2 instance -* **security_token** (str): (Optional) Security token for AWS session authentication -* **import_image_and_run_aws_instance_bucket_name** (str): (Required) The name of the S3 bucket name where you want to upload the .raw image. It must exist in the region the instance is created. -* **import_image_and_run_aws_instance_import_image_task_name** (str): (Required) The name you want to assign to the AWS EC2 import image task. -* **import_image_and_run_aws_instance_image_path** (str): (Required) The path where the .raw image is stored. -* **import_image_and_run_aws_instance_instance_name** (str): (Required) The name of the EC2 instance you want to create using the imported AMI. -* **import_image_and_run_aws_instance_instance_type** (str): (Optional) The EC2 instance type you want to use. Default: "t2.micro". -* **import_image_and_run_aws_instances_keypair_name** (str): (Optional) The name of the SSH access key to assign to the EC2 instance. It must exist in the region the instance is created. If not set, your default AWS account keypair will be used. -* **import_image_and_run_aws_instance_security_groups** (list): (Optional) A list of security group IDs or names to assiciate to the EC2 instance. -* **import_image_and_run_aws_instance_vpc_subnet_id** (str): (Optional) The subnet ID in which to launch the EC2 instance instance (VPC). If none is provided, M(amazon.aws.ec2_instance) will chose the default zone of the default VPC. -* **import_image_and_run_aws_instance_volumes** (dict): (Optional) A dictionary of a block device mappings, by default this will always use the AMI root device so the **import_image_and_run_aws_instance_volumes** option is primarily for adding more storage. A mapping contains the (optional) keys: - * **device_name** (str): The device name (for example, /dev/sdh or xvdh). - * **ebs** (dict): Parameters used to automatically set up EBS volumes when the instance is launched. - * **volume_type** (str): The volume type. Valid Values: standard, io1, io2, gp2, sc1, st1, gp3. - * **volume_size** (int): The size of the volume, in GiBs. - * **kms_key_id** (str): Identifier (key ID, key alias, ID ARN, or alias ARN) for a customer managed CMK under which the EBS volume is encrypted. - * **iops** (str): The number of I/O operations per second (IOPS). For gp3, io1, and io2 volumes, this represents the number of IOPS that are provisioned for the volume. For gp2 volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting. 
-        * **delete_on_termination_** (bool): Indicates whether the EBS volume is deleted on instance termination.
+For a fullo list of accepted variables see: [clone_on_prem_vm](../clone_on_prem_vm/README.md#Role-Variables) and respectively [import_image_and_run_aws_instance](../roles/import_image_and_run_aws_instance/REAME.md#Role-Variables).
 
 ## Example Usage
 
-Create a `credentials.yaml` file with the folling contents:
+Create a `credentials.yml` file with the following contents:
 
 ```yaml
 aws_access_key: "xxxxxxxxxxxxxxxxxxxx"
@@ -83,10 +21,23 @@ aws_secret_key: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
 aws_region: "us-east-1"
 ```
 
+Create an `inventory.yml` file with information about the host running the KVM hypervisor.
+
+```yaml
+---
+all:
+  hosts:
+    kvm:
+      ansible_host: myhost
+      ansible_user: myuser
+      ansible_ssh_private_key_file: /path/to/private_key
+      groups: mygroup
+```
+
 All the variables defined in section ``Playbook Variables`` can be defined inside the ``vars/main.yml`` file.
 
 Run the playbook:
 
 ```shell
-ansible-playbook cloud.gcp_ops.move_vm_from_on_prem_to_aws -e "@credentials.yaml"
+ansible-playbook cloud.gcp_ops.move_vm_from_on_prem_to_aws -e "@credentials.yml - i inventory.yml"
 ```

diff --git a/playbooks/move_vm_from_on_prem_to_aws/move_vm_from_on_prem_to_aws.yml b/playbooks/move_vm_from_on_prem_to_aws/move_vm_from_on_prem_to_aws.yml
index 9768840c..0b81b666 100644
--- a/playbooks/move_vm_from_on_prem_to_aws/move_vm_from_on_prem_to_aws.yml
+++ b/playbooks/move_vm_from_on_prem_to_aws/move_vm_from_on_prem_to_aws.yml
@@ -30,7 +30,7 @@
         clone_on_prem_vm_uri: "{{ clone_on_prem_vm_uri }}"
         clone_on_prem_vm_local_image_path: "{{ clone_on_prem_vm_local_image_path }}"
         clone_on_prem_vm_overwrite: "{{ clone_on_prem_vm_overwrite }}"
-      delegate_to: kvm
+      delegate_to: all
 
     - name: Import 'cloud.aws_ops.import_image_and_run_aws_instance' role
       ansible.builtin.import_role:

From e46f8c6d12997e35f34828cafc62b2b4d0577c20 Mon Sep 17 00:00:00 2001
From: Alina Buzachis
Date: Mon, 9 Oct 2023 15:48:13 +0200
Subject: [PATCH 13/26] Update main.yml

---
 roles/clone_on_prem_vm/tasks/main.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/roles/clone_on_prem_vm/tasks/main.yml b/roles/clone_on_prem_vm/tasks/main.yml
index d5d7ac5e..c79e0ed4 100644
--- a/roles/clone_on_prem_vm/tasks/main.yml
+++ b/roles/clone_on_prem_vm/tasks/main.yml
@@ -89,7 +89,7 @@
 
 # Privilege escalation is needed because the .qcow2 file is owned by root
 # when default hypervisor is used
-- name: Convert qcow2 to raw using qemu-img with priviledge escalation
+- name: Convert qcow2 to raw using qemu-img with privilege escalation
   ansible.builtin.command: |
     qemu-img convert -f qcow2 -O raw \
     {{ clone_on_prem_vm__clone_path }} \
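As a quick aside between these commits: the inventory file sketched in the README change above can be sanity-checked with an ad-hoc ping before the playbook is run. This is a minimal illustration, assuming the `inventory.yml` file name and the `kvm` host alias used in the example; it is not part of the patches themselves.

```shell
# Confirm Ansible can reach the host that will run virt-clone and qemu-img
ansible -i inventory.yml kvm -m ansible.builtin.ping
```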
From 57d40f0710c9beaf47b03e6df73fcb77b741603d Mon Sep 17 00:00:00 2001
From: Alina Buzachis
Date: Tue, 10 Oct 2023 16:18:29 +0200
Subject: [PATCH 14/26] Apply review comments

Signed-off-by: Alina Buzachis
---
 playbooks/move_vm_from_on_prem_to_aws/README.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/playbooks/move_vm_from_on_prem_to_aws/README.md b/playbooks/move_vm_from_on_prem_to_aws/README.md
index 4d6b1260..8f15712d 100644
--- a/playbooks/move_vm_from_on_prem_to_aws/README.md
+++ b/playbooks/move_vm_from_on_prem_to_aws/README.md
@@ -9,7 +9,7 @@ This playbook uses the ``cloud.aws_ops.clone_on_prem_vm`` role to clone an exist
 
 ## Playbook Variables
 
-For a fullo list of accepted variables see: [clone_on_prem_vm](../clone_on_prem_vm/README.md#Role-Variables) and respectively [import_image_and_run_aws_instance](../roles/import_image_and_run_aws_instance/REAME.md#Role-Variables).
+For a full list of accepted variables, see [clone_on_prem_vm](../clone_on_prem_vm/README.md#Role-Variables) and [import_image_and_run_aws_instance](../roles/import_image_and_run_aws_instance/README.md#Role-Variables), respectively.
 
 ## Example Usage
 
@@ -39,5 +39,5 @@ All the variables defined in section ``Playbook Variables`` can be defined insid
 Run the playbook:
 
 ```shell
-ansible-playbook cloud.gcp_ops.move_vm_from_on_prem_to_aws -e "@credentials.yml - i inventory.yml"
+ansible-playbook cloud.aws_ops.move_vm_from_on_prem_to_aws -e "@credentials.yml" -i inventory.yml
 ```

From b7112a80d295542404841bd03b0f0fd1c6fcd16f Mon Sep 17 00:00:00 2001
From: Alina Buzachis
Date: Thu, 12 Oct 2023 20:02:45 +0200
Subject: [PATCH 15/26] Update move_vm_from_on_prem_to_aws.yml

---
 .../move_vm_from_on_prem_to_aws.yml | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/playbooks/move_vm_from_on_prem_to_aws/move_vm_from_on_prem_to_aws.yml b/playbooks/move_vm_from_on_prem_to_aws/move_vm_from_on_prem_to_aws.yml
index 0b81b666..19019188 100644
--- a/playbooks/move_vm_from_on_prem_to_aws/move_vm_from_on_prem_to_aws.yml
+++ b/playbooks/move_vm_from_on_prem_to_aws/move_vm_from_on_prem_to_aws.yml
@@ -2,9 +2,6 @@
   hosts: localhost
   gather_facts: false
 
-  vars_files:
-    - vars/main.yml
-
   module_defaults:
     group/aws:
       aws_access_key: "{{ aws_access_key | default(omit) }}"

From adc7d3045f12f125a4531360bd981c772624904f Mon Sep 17 00:00:00 2001
From: Alina Buzachis
Date: Fri, 13 Oct 2023 12:36:35 +0200
Subject: [PATCH 16/26] Update move_vm_from_on_prem_to_aws.yml

---
 .../move_vm_from_on_prem_to_aws.yml | 8 --------
 1 file changed, 8 deletions(-)

diff --git a/playbooks/move_vm_from_on_prem_to_aws/move_vm_from_on_prem_to_aws.yml b/playbooks/move_vm_from_on_prem_to_aws/move_vm_from_on_prem_to_aws.yml
index 19019188..f4da2f62 100644
--- a/playbooks/move_vm_from_on_prem_to_aws/move_vm_from_on_prem_to_aws.yml
+++ b/playbooks/move_vm_from_on_prem_to_aws/move_vm_from_on_prem_to_aws.yml
@@ -10,14 +10,6 @@
       region: "{{ aws_region | default('us-east-1') }}"
 
   tasks:
-    - name: Add host to inventory
-      ansible.builtin.add_host:
-        name: "{{ kvm_host.name }}"
-        ansible_host: "{{ kvm_host.ansible_host }}"
-        ansible_user: "{{ kvm_host.ansible_user }}"
-        ansible_ssh_common_args: -o "UserKnownHostsFile=/dev/null" -o StrictHostKeyChecking=no -i {{ kvm_host.ansible_ssh_private_key_file }}
-        groups: "{{ kvm_host.groups }}"
-
     - name: Import 'cloud.aws_ops.clone_on_prem_vm' role
       ansible.builtin.import_role:
         name: cloud.aws_ops.clone_on_prem_vm

From 09e68f61a27b9747a1c1fad2784f40f6f5f68deb Mon Sep 17 00:00:00 2001
From: Alina Buzachis
Date: Fri, 13 Oct 2023 12:49:33 +0200
Subject: [PATCH 17/26] Update move_vm_from_on_prem_to_aws.yml

---
 .../move_vm_from_on_prem_to_aws/move_vm_from_on_prem_to_aws.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/playbooks/move_vm_from_on_prem_to_aws/move_vm_from_on_prem_to_aws.yml b/playbooks/move_vm_from_on_prem_to_aws/move_vm_from_on_prem_to_aws.yml
index f4da2f62..4658c170 100644
--- a/playbooks/move_vm_from_on_prem_to_aws/move_vm_from_on_prem_to_aws.yml
+++ b/playbooks/move_vm_from_on_prem_to_aws/move_vm_from_on_prem_to_aws.yml
@@ -19,7 +19,7 @@
         clone_on_prem_vm_uri: "{{ clone_on_prem_vm_uri }}"
         clone_on_prem_vm_local_image_path: "{{ clone_on_prem_vm_local_image_path }}"
         clone_on_prem_vm_overwrite: "{{ clone_on_prem_vm_overwrite }}"
-      delegate_to: all
+      delegate_to: all_hosts
 
     - name: Import 'cloud.aws_ops.import_image_and_run_aws_instance' role
      ansible.builtin.import_role:

From db70365aebd50f0350f4a5263217893443ed190a Mon Sep 17 00:00:00 2001
From: Alina Buzachis
Date: Fri, 13 Oct 2023 12:51:14 +0200
Subject: [PATCH 18/26] Update move_vm_from_on_prem_to_aws.yml

---
 .../move_vm_from_on_prem_to_aws/move_vm_from_on_prem_to_aws.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/playbooks/move_vm_from_on_prem_to_aws/move_vm_from_on_prem_to_aws.yml b/playbooks/move_vm_from_on_prem_to_aws/move_vm_from_on_prem_to_aws.yml
index 4658c170..fbcc5d1b 100644
--- a/playbooks/move_vm_from_on_prem_to_aws/move_vm_from_on_prem_to_aws.yml
+++ b/playbooks/move_vm_from_on_prem_to_aws/move_vm_from_on_prem_to_aws.yml
@@ -19,7 +19,7 @@
         clone_on_prem_vm_uri: "{{ clone_on_prem_vm_uri }}"
         clone_on_prem_vm_local_image_path: "{{ clone_on_prem_vm_local_image_path }}"
         clone_on_prem_vm_overwrite: "{{ clone_on_prem_vm_overwrite }}"
-      delegate_to: all_hosts
+      delegate_to: kvm
 
     - name: Import 'cloud.aws_ops.import_image_and_run_aws_instance' role
      ansible.builtin.import_role:

From 4f34046ff7d50b865b2503da47ce8db306a9b1f2 Mon Sep 17 00:00:00 2001
From: Alina Buzachis
Date: Tue, 17 Oct 2023 15:42:02 +0200
Subject: [PATCH 19/26] Update README.md

---
 roles/clone_on_prem_vm/README.md | 66 ++++++++++++++++++--------------
 1 file changed, 37 insertions(+), 29 deletions(-)

diff --git a/roles/clone_on_prem_vm/README.md b/roles/clone_on_prem_vm/README.md
index 988a5d90..8f9c4903 100644
--- a/roles/clone_on_prem_vm/README.md
+++ b/roles/clone_on_prem_vm/README.md
@@ -25,36 +25,44 @@ N/A
 Example Playbook
 ----------------
 
-    - hosts: localhost
-      gather_facts: false
-
+Create an `inventory.yml` file with information about the host running the KVM hypervisor.
+
+```yaml
+---
+all:
+  hosts:
+    kvm:
+      ansible_host: myhost
+      ansible_user: myuser
+      ansible_ssh_private_key_file: /path/to/private_key
+      groups: mygroup
+```
+
+All the variables defined in section ``Playbook Variables`` can be defined inside the ``vars/main.yml`` file.
+
+Create a ``playbook.ym`` file like this:
+
+```
+---
+- hosts: kvm
+  gather_facts: true
+
+  tasks:
+    - name: Import 'cloud.aws_ops.clone_on_prem_vm' role
+      ansible.builtin.import_role:
+        name: cloud.aws_ops.clone_on_prem_vm
       vars:
-        on_prem_source_vm_name: "ubuntu-guest"
-        on_prem_vm_image_name: "ubuntu-guest-image"
-        local_image_path: "~/images/"
-        kvm_host:
-          name: kvm
-          ansible_host: 192.168.1.117
-          ansible_user: vagrant
-          ansible_ssh_private_key_file: ~/.ssh/id_rsa.pub
-
-      tasks:
-        - name: Add host to inventory
-          ansible.builtin.add_host:
-            name: "{{ kvm_host.name }}"
-            ansible_host: "{{ kvm_host.ansible_host }}"
-            ansible_user: "{{ kvm_host.ansible_user }}"
-            ansible_ssh_common_args: -o "UserKnownHostsFile=/dev/null" -o StrictHostKeyChecking=no -i {{ kvm_host.ansible_ssh_private_key_file }}
-            groups: "libvirt"
-
-        - name: Import 'cloud.aws_ops.clone_on_prem_vm' role
-          ansible.builtin.import_role:
-            name: cloud.aws_ops.clone_on_prem_vm
-          vars:
-            clone_on_prem_vm_source_vm_name: "{{ on_prem_source_vm_name }}"
-            clone_on_prem_vm_image_name: "{{ on_prem_vm_image_name }}"
-            clone_on_prem_vm_local_image_path: "{{ local_image_path }}"
-          delegate_to: kvm
+        clone_on_prem_vm_source_vm_name: "{{ clone_on_prem_vm_source_vm_name }}"
+        clone_on_prem_vm_image_name: "{{ clone_on_prem_vm_image_name }}"
+        clone_on_prem_vm_local_image_path: "{{ clone_on_prem_vm_local_image_path }}"
+        clone_on_prem_vm_uri: "{{ clone_on_prem_vm_uri }}"
+```
+
+Run the playbook:
+
+```shell
+ansible-playbook playbook.yml -i inventory.yml
+```
 
 License
 -------

From 376986cbba123656f46752333eb55fa61a7b6e0c Mon Sep 17 00:00:00 2001
From: Alina Buzachis
Date: Tue, 17 Oct 2023 15:48:59 +0200
Subject: [PATCH 20/26] Update README.md

---
 .../README.md | 104 ++++++++++--------
 1 file changed, 57 insertions(+), 47 deletions(-)

diff --git a/roles/import_image_and_run_aws_instance/README.md b/roles/import_image_and_run_aws_instance/README.md
index 638e4c83..6413318e 100644
--- a/roles/import_image_and_run_aws_instance/README.md
+++ b/roles/import_image_and_run_aws_instance/README.md
@@ -1,5 +1,5 @@
 import_image_and_run_aws_instance
-=========
+=================================
 
 A role that imports a local .raw image into an Amazon Machine Image (AMI) and run an AWS EC2 instance.
 
@@ -36,13 +36,13 @@ Role Variables
 
 * **import_image_and_run_aws_instance_import_image_task_name**: (Required) The name you want to assign to the AWS EC2 import image task.
 * **import_image_and_run_aws_instance_bucket_name**: (Required) The name of the S3 bucket name where you want to upload the .raw image.
-**import_image_and_run_aws_instance_image_path**: (Required) The path where the .raw image is stored.
+* **import_image_and_run_aws_instance_image_path**: (Required) The path where the .raw image is stored.
 * **import_image_and_run_aws_instance_instance_name**: (Required) The name of the EC2 instance you want to create using the imported AMI.
 * **import_image_and_run_aws_instance_instance_type**: The EC2 instance type you want to use. Default: "t2.micro".
 * **import_image_and_run_aws_instances_keypair_name**: The name of the SSH access key to assign to the EC2 instance. It must exist in the region the instance is created. If not set, your default AWS account keypair will be used.
 * **import_image_and_run_aws_instance_security_groups**: A list of security group IDs or names to associate to the EC2 instance.
 * **import_image_and_run_aws_instance_vpc_subnet_id**: The subnet ID in which to launch the EC2 instance (VPC). If none is provided, M(amazon.aws.ec2_instance) will choose the default zone of the default VPC.
-**import_image_and_run_aws_instance_volumes** (dict): (Optional) A dictionary of a block device mappings, by default this will always use the AMI root device so the **instance_volumes** option is primarily for adding more storage. A mapping contains the (optional) keys:
+* **import_image_and_run_aws_instance_volumes** (dict): (Optional) A dictionary of a block device mappings, by default this will always use the AMI root device so the **instance_volumes** option is primarily for adding more storage. A mapping contains the (optional) keys:
     * **device_name** (str): The device name (for example, /dev/sdh or xvdh).
     * **ebs** (dict): Parameters used to automatically set up EBS volumes when the instance is launched.
         * **volume_type** (str): The volume type. Valid Values: standard, io1, io2, gp2, sc1, st1, gp3.
         * **volume_size** (int): The size of the volume, in GiBs.
         * **kms_key_id** (str): Identifier (key ID, key alias, ID ARN, or alias ARN) for a customer managed CMK under which the EBS volume is encrypted.
         * **iops** (str): The number of I/O operations per second (IOPS). For gp3, io1, and io2 volumes, this represents the number of IOPS that are provisioned for the volume. For gp2 volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting.
@@ -60,50 +60,60 @@ Example Playbook
 ----------------
 
 This role can be used together with the [cloud.aws_ops.clone_on_prem_vm](../clone_on_prem_vm/README.md) role as shown below.
 
-    - hosts: localhost
-      gather_facts: false
-
-      vars:
-        on_prem_source_vm_name: "ubuntu-guest"
-        on_prem_vm_image_name: "ubuntu-guest-image"
-        s3_bucket_name: "vm-s3-bucket"
-        instance_name: "vm-clone"
-        local_image_path: "~/images/"
-        kvm_host:
-          name: kvm
-          ansible_host: 192.168.1.117
-          ansible_user: vagrant
-          ansible_ssh_private_key_file: ~/.ssh/id_rsa.pub
-        instance_type: "t2.micro"
-        import_task_name: "import-clone"
-
-      tasks:
-        - name: Add host to inventory
-          ansible.builtin.add_host:
-            name: "{{ kvm_host.name }}"
-            ansible_host: "{{ kvm_host.ansible_host }}"
-            ansible_user: "{{ kvm_host.ansible_user }}"
-            ansible_ssh_common_args: -o "UserKnownHostsFile=/dev/null" -o StrictHostKeyChecking=no -i {{ kvm_host.ansible_ssh_private_key_file }}
-            groups: "libvirt"
-
-        - name: Import 'cloud.aws_ops.clone_on_prem_vm' role
-          ansible.builtin.import_role:
-            name: cloud.aws_ops.clone_on_prem_vm
-          vars:
-            clone_on_prem_vm_source_vm_name: "{{ on_prem_source_vm_name }}"
-            clone_on_prem_vm_image_name: "{{ on_prem_vm_image_name }}"
-            clone_on_prem_vm_local_image_path: "{{ local_image_path }}"
-          delegate_to: kvm
-
-        - name: Import 'cloud.aws_ops.import_image_and_run_aws_instance' role
-          ansible.builtin.import_role:
-            name: cloud.aws_ops.import_image_and_run_aws_instance
-          vars:
-            import_image_and_run_aws_instance_bucket_name: "{{ s3_bucket_name }}"
-            import_image_and_run_aws_instance_image_path: "{{ clone_on_prem_vm_local_image_path }}"
-            import_image_and_run_aws_instance_instance_name: "{{ instance_name }}"
-            import_image_and_run_aws_instance_instance_type: "{{ instance_type }}"
-            import_image_and_run_aws_instance_import_image_task_name: "{{ import_task_name }}"
+Create an `inventory.yml` file with information about the host running the KVM hypervisor.
+
+```yaml
+---
+all:
+  hosts:
+    kvm:
+      ansible_host: myhost
+      ansible_user: myuser
+      ansible_ssh_private_key_file: /path/to/private_key
+      groups: mygroup
+```
+
+All the variables defined in section ``Playbook Variables`` can be defined inside the ``vars.yml`` file.
+
+Create a ``playbook.ym`` file like this:
+
+```
+---
+- hosts: localhost
+  gather_facts: false
+
+  tasks:
+    - name: Import 'cloud.aws_ops.clone_on_prem_vm' role
+      ansible.builtin.import_role:
+        name: cloud.aws_ops.clone_on_prem_vm
+      vars:
+        clone_on_prem_vm_source_vm_name: "{{ clone_on_prem_vm_source_vm_name }}"
+        clone_on_prem_vm_image_name: "{{ clone_on_prem_vm_image_name }}"
+        clone_on_prem_vm_local_image_path: "{{ clone_on_prem_vm_local_image_path }}"
+        clone_on_prem_vm_uri: "{{ clone_on_prem_vm_uri }}"
+      delegate_to: kvm
+
+    - name: Import 'cloud.aws_ops.import_image_and_run_aws_instance' role
+      ansible.builtin.import_role:
+        name: cloud.aws_ops.import_image_and_run_aws_instance
+      vars:
+        import_image_and_run_aws_instance_bucket_name: "{{ import_image_and_run_aws_instance_bucket_name }}"
+        import_image_and_run_aws_instance_image_path: "{{ import_image_and_run_aws_instance_image_path }}"
+        import_image_and_run_aws_instance_instance_name: "{{ import_image_and_run_aws_instance_instance_name }}"
+        import_image_and_run_aws_instance_instance_type: "{{ import_image_and_run_aws_instance_instance_type }}"
+        import_image_and_run_aws_instance_import_image_task_name: "{{ import_image_and_run_aws_instance_import_image_task_name }}"
+        import_image_and_run_aws_instances_keypair_name: "{{ import_image_and_run_aws_instances_keypair_name }}"
+        import_image_and_run_aws_instance_security_groups: "{{ import_image_and_run_aws_instance_security_groups }}"
+        import_image_and_run_aws_instance_vpc_subnet_id: "{{ import_image_and_run_aws_instance_vpc_subnet_id }}"
+        import_image_and_run_aws_instance_volumes: "{{ import_image_and_run_aws_instance_volumes }}"
+```
+
+Run the playbook:
+
+```shell
+ansible-playbook playbook.yml -i inventory.yml -e "@vars.yml"
+```
+
 License
 -------
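While the role documented above waits for the import task, the same task can also be inspected from the AWS CLI. This is an optional cross-check under the assumption that the AWS CLI is configured for the same account; the task ID below is a placeholder, not a value produced by these patches.

```shell
# List the VM import task the role is polling; replace the placeholder ID
aws ec2 describe-import-image-tasks --import-task-ids import-ami-0123456789abcdef0
```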
From 9c7f86da710dc6d11f404a3a8b1873d31b1d42d3 Mon Sep 17 00:00:00 2001
From: Alina Buzachis
Date: Tue, 17 Oct 2023 15:50:36 +0200
Subject: [PATCH 21/26] Update README.md

---
 roles/clone_on_prem_vm/README.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/roles/clone_on_prem_vm/README.md b/roles/clone_on_prem_vm/README.md
index 8f9c4903..4d08d3e9 100644
--- a/roles/clone_on_prem_vm/README.md
+++ b/roles/clone_on_prem_vm/README.md
@@ -38,7 +38,7 @@ all:
       groups: mygroup
 ```
 
-All the variables defined in section ``Playbook Variables`` can be defined inside the ``vars/main.yml`` file.
+All the variables defined in section ``Playbook Variables`` can be defined inside the ``vars.yml`` file.
 
 Create a ``playbook.ym`` file like this:
 
@@ -61,7 +61,7 @@ Create a ``playbook.ym`` file like this:
 Run the playbook:
 
 ```shell
-ansible-playbook playbook.yml -i inventory.yml
+ansible-playbook playbook.yml -i inventory.yml -e "@vars.yml"
 ```
 
 License
 -------
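The READMEs updated above keep pointing the reader at a separate ``vars.yml`` file. A minimal sketch of what such a file might contain, using only variable names documented in these patches (all values are illustrative placeholders):

```yaml
---
# Example vars.yml; values are placeholders, adjust to your environment
clone_on_prem_vm_source_vm_name: ubuntu-guest
clone_on_prem_vm_image_name: ubuntu-guest-image
clone_on_prem_vm_local_image_path: ~/images/
import_image_and_run_aws_instance_bucket_name: vm-s3-bucket
import_image_and_run_aws_instance_import_image_task_name: import-clone
import_image_and_run_aws_instance_instance_name: vm-clone
import_image_and_run_aws_instance_instance_type: t2.micro
```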
From 8afe2feddf312ee030a5364f318be2bc2dd22284 Mon Sep 17 00:00:00 2001
From: Alina Buzachis
Date: Tue, 17 Oct 2023 15:51:17 +0200
Subject: [PATCH 22/26] Update README.md

---
 playbooks/move_vm_from_on_prem_to_aws/README.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/playbooks/move_vm_from_on_prem_to_aws/README.md b/playbooks/move_vm_from_on_prem_to_aws/README.md
index 8f15712d..9dd9ea68 100644
--- a/playbooks/move_vm_from_on_prem_to_aws/README.md
+++ b/playbooks/move_vm_from_on_prem_to_aws/README.md
@@ -34,10 +34,10 @@ all:
       groups: mygroup
 ```
 
-All the variables defined in section ``Playbook Variables`` can be defined inside the ``vars/main.yml`` file.
+All the variables defined in section ``Playbook Variables`` can be defined inside the ``vars.yml`` file.
 
 Run the playbook:
 
 ```shell
-ansible-playbook cloud.aws_ops.move_vm_from_on_prem_to_aws -e "@credentials.yml" -i inventory.yml
+ansible-playbook cloud.aws_ops.move_vm_from_on_prem_to_aws -e "@credentials.yml" -e "@vars.yml" -i inventory.yml
 ```

From ecb43f6121114c0f71c446ab89a8f687b7284855 Mon Sep 17 00:00:00 2001
From: Alina Buzachis
Date: Tue, 17 Oct 2023 15:55:00 +0200
Subject: [PATCH 23/26] Update main.yml

---
 roles/clone_on_prem_vm/tasks/main.yml | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/roles/clone_on_prem_vm/tasks/main.yml b/roles/clone_on_prem_vm/tasks/main.yml
index c79e0ed4..65a9c463 100644
--- a/roles/clone_on_prem_vm/tasks/main.yml
+++ b/roles/clone_on_prem_vm/tasks/main.yml
@@ -1,4 +1,9 @@
 ---
+- name: Fail when 'clone_on_prem_vm_source_vm_name' is undefined
+  ansible.builtin.fail:
+    msg: The name of the VM you want to clone must be defined as clone_on_prem_vm_source_vm_name
+  when: clone_on_prem_vm_source_vm_name is undefined
+
 - name: Gather package facts
   ansible.builtin.package_facts:
     manager: auto
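The guard added in the commit above could also be written with ``ansible.builtin.assert``, which bundles the check and the failure message in one task. This is an alternative sketch for comparison, not what the patch implements:

```yaml
- name: Ensure 'clone_on_prem_vm_source_vm_name' is defined
  ansible.builtin.assert:
    that:
      - clone_on_prem_vm_source_vm_name is defined
    fail_msg: The name of the VM you want to clone must be defined as clone_on_prem_vm_source_vm_name
```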
From fd9a487288eb245c08f943693815d7cf6412c84e Mon Sep 17 00:00:00 2001
From: Alina Buzachis
Date: Wed, 18 Oct 2023 11:17:58 +0200
Subject: [PATCH 24/26] Revision after running on awx

Signed-off-by: Alina Buzachis
---
 playbooks/move_vm_from_on_prem_to_aws/README.md        | 2 +-
 .../move_vm_from_on_prem_to_aws.yml                    | 2 +-
 roles/clone_on_prem_vm/README.md                       | 2 +-
 roles/clone_on_prem_vm/tasks/main.yml                  | 6 ++----
 roles/import_image_and_run_aws_instance/README.md      | 8 ++++----
 roles/import_image_and_run_aws_instance/tasks/main.yml | 4 ++--
 6 files changed, 11 insertions(+), 13 deletions(-)

diff --git a/playbooks/move_vm_from_on_prem_to_aws/README.md b/playbooks/move_vm_from_on_prem_to_aws/README.md
index 9dd9ea68..57e9947a 100644
--- a/playbooks/move_vm_from_on_prem_to_aws/README.md
+++ b/playbooks/move_vm_from_on_prem_to_aws/README.md
@@ -39,5 +39,5 @@ All the variables defined in section ``Playbook Variables`` can be defined insid
 Run the playbook:
 
 ```shell
-ansible-playbook cloud.aws_ops.move_vm_from_on_prem_to_aws -e "@credentials.yml" -e "@vars.yml" -i inventory.yml
+ansible-playbook cloud.aws_ops.move_vm_from_on_prem_to_aws.move_vm_from_on_prem_to_aws -e "@credentials.yml" -e "@vars.yml" -i inventory.yml
 ```

diff --git a/playbooks/move_vm_from_on_prem_to_aws/move_vm_from_on_prem_to_aws.yml b/playbooks/move_vm_from_on_prem_to_aws/move_vm_from_on_prem_to_aws.yml
index fbcc5d1b..d14b752e 100644
--- a/playbooks/move_vm_from_on_prem_to_aws/move_vm_from_on_prem_to_aws.yml
+++ b/playbooks/move_vm_from_on_prem_to_aws/move_vm_from_on_prem_to_aws.yml
@@ -26,7 +26,7 @@
         name: cloud.aws_ops.import_image_and_run_aws_instance
       vars:
         import_image_and_run_aws_instance_bucket_name: "{{ import_image_and_run_aws_instance_bucket_name }}"
-        import_image_and_run_aws_instance_image_path: "{{ import_image_and_run_aws_instance_image_path }}"
+        import_image_and_run_aws_instance_image_path: "{{ clone_on_prem_vm_raw_image_path }}"
         import_image_and_run_aws_instance_instance_name: "{{ import_image_and_run_aws_instance_instance_name }}"
         import_image_and_run_aws_instance_instance_type: "{{ import_image_and_run_aws_instance_instance_type }}"
         import_image_and_run_aws_instance_import_image_task_name: "{{ import_image_and_run_aws_instance_import_image_task_name }}"

diff --git a/roles/clone_on_prem_vm/README.md b/roles/clone_on_prem_vm/README.md
index 4d08d3e9..d850d17b 100644
--- a/roles/clone_on_prem_vm/README.md
+++ b/roles/clone_on_prem_vm/README.md
@@ -1,7 +1,7 @@
 clone_on_prem_vm
 ================
 
-A role to clone an existing on prem VM using the KVM hypervisor. The role sets the **clone_on_prem_vm_local_image_path** variable containing the path where the image was saved on localhost. This role requires privilege escalation because the .qcow2 file created by ``virt-clone`` is owned by root and ``qemu-img convert`` requires access to convert it to .raw.
+A role to clone an existing on prem VM using the KVM hypervisor. The role sets the **clone_on_prem_vm_raw_image_path** variable containing the path where the image was saved on localhost. This role requires privilege escalation because the .qcow2 file created by ``virt-clone`` is owned by root and ``qemu-img convert`` requires access to convert it to .raw.
 
 Requirements
 ------------

diff --git a/roles/clone_on_prem_vm/tasks/main.yml b/roles/clone_on_prem_vm/tasks/main.yml
index 65a9c463..8807f73f 100644
--- a/roles/clone_on_prem_vm/tasks/main.yml
+++ b/roles/clone_on_prem_vm/tasks/main.yml
@@ -136,11 +136,9 @@
   ansible.builtin.fetch:
     src: "{{ clone_on_prem_vm__raw_image_path }}"
     dest: "{{ clone_on_prem_vm__dir_localhost.path }}"
-    flat: yes
-    fail_on_missing: yes
     validate_checksum: true
   register: clone_on_prem_vm_fetch_to_localhost
 
-- name: Set 'clone_on_prem_vm_local_image_path'
+- name: Set 'clone_on_prem_vm_raw_image_path'
   ansible.builtin.set_fact:
-    clone_on_prem_vm_local_image_path: "{{ clone_on_prem_vm_fetch_to_localhost.dest }}"
+    clone_on_prem_vm_raw_image_path: "{{ clone_on_prem_vm_fetch_to_localhost.dest }}"

diff --git a/roles/import_image_and_run_aws_instance/README.md b/roles/import_image_and_run_aws_instance/README.md
index 6413318e..4985a4dd 100644
--- a/roles/import_image_and_run_aws_instance/README.md
+++ b/roles/import_image_and_run_aws_instance/README.md
@@ -81,7 +81,7 @@ Create a ``playbook.ym`` file like this:
 ---
 - hosts: localhost
   gather_facts: false
-  
+
   tasks:
     - name: Import 'cloud.aws_ops.clone_on_prem_vm' role
       ansible.builtin.import_role:
@@ -90,15 +90,15 @@ Create a ``playbook.ym`` file like this:
         clone_on_prem_vm_source_vm_name: "{{ clone_on_prem_vm_source_vm_name }}"
         clone_on_prem_vm_image_name: "{{ clone_on_prem_vm_image_name }}"
         clone_on_prem_vm_local_image_path: "{{ clone_on_prem_vm_local_image_path }}"
-        clone_on_prem_vm_uri: "{{ clone_on_prem_vm_uri }}" 
+        clone_on_prem_vm_uri: "{{ clone_on_prem_vm_uri }}"
       delegate_to: kvm
-  
+
     - name: Import 'cloud.aws_ops.import_image_and_run_aws_instance' role
       ansible.builtin.import_role:
         name: cloud.aws_ops.import_image_and_run_aws_instance
       vars:
         import_image_and_run_aws_instance_bucket_name: "{{ import_image_and_run_aws_instance_bucket_name }}"
-        import_image_and_run_aws_instance_image_path: "{{ import_image_and_run_aws_instance_image_path }}"
+        import_image_and_run_aws_instance_image_path: "{{ clone_on_prem_vm_raw_image_path }}"
         import_image_and_run_aws_instance_instance_name: "{{ import_image_and_run_aws_instance_instance_name }}"
         import_image_and_run_aws_instance_instance_type: "{{ import_image_and_run_aws_instance_instance_type }}"
         import_image_and_run_aws_instance_import_image_task_name: "{{ import_image_and_run_aws_instance_import_image_task_name }}"

diff --git a/roles/import_image_and_run_aws_instance/tasks/main.yml b/roles/import_image_and_run_aws_instance/tasks/main.yml
index 8d1f9655..e691db52 100644
--- a/roles/import_image_and_run_aws_instance/tasks/main.yml
+++ b/roles/import_image_and_run_aws_instance/tasks/main.yml
@@ -78,8 +78,8 @@
         Values: ["completed", "active"]
   register: import_image_and_run_aws_instance__import_image_info
   until: import_image_and_run_aws_instance__import_image_info.import_image[0].status == "completed"
-  delay: 3
-  retries: 30
+  delay: 10
+  retries: 300
 
 - name: Set 'import_image_and_run_aws_instance__ami_id' and 'import_image_and_run_aws_instance__snapshot_id'
   ansible.builtin.set_fact:
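For context on the retry change in the commit above: the old values polled the import task for at most 30 x 3 s = 90 seconds, while the new ones allow up to 300 x 10 s = 3000 seconds (roughly 50 minutes), which is more realistic for an EC2 image import. The resulting wait loop, as committed, behaves like this:

```yaml
# Polls the registered import-task result until it reports "completed";
# worst case 300 retries x 10 s delay = 3000 s (~50 min) before giving up.
until: import_image_and_run_aws_instance__import_image_info.import_image[0].status == "completed"
delay: 10
retries: 300
```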
From 584e6073804bf2ae25f116682ad7f4c98a3388e9 Mon Sep 17 00:00:00 2001
From: Alina Buzachis
Date: Tue, 24 Oct 2023 21:38:57 +0200
Subject: [PATCH 25/26] Update README.md

---
 roles/import_image_and_run_aws_instance/README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/roles/import_image_and_run_aws_instance/README.md b/roles/import_image_and_run_aws_instance/README.md
index 4985a4dd..e4ff6d3f 100644
--- a/roles/import_image_and_run_aws_instance/README.md
+++ b/roles/import_image_and_run_aws_instance/README.md
@@ -75,7 +75,7 @@ all:
 
 All the variables defined in section ``Playbook Variables`` can be defined inside the ``vars.yml`` file.
 
-Create a ``playbook.ym`` file like this:
+Create a ``playbook.yml`` file like this:
 
 ```
 ---

From 1c39b43aa8030eb00b01b81bd404f211c61cd8db Mon Sep 17 00:00:00 2001
From: Alina Buzachis
Date: Tue, 24 Oct 2023 21:39:48 +0200
Subject: [PATCH 26/26] Update README.md

---
 roles/clone_on_prem_vm/README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/roles/clone_on_prem_vm/README.md b/roles/clone_on_prem_vm/README.md
index d850d17b..2a1da0ab 100644
--- a/roles/clone_on_prem_vm/README.md
+++ b/roles/clone_on_prem_vm/README.md
@@ -40,7 +40,7 @@ all:
 
 All the variables defined in section ``Playbook Variables`` can be defined inside the ``vars.yml`` file.
 
-Create a ``playbook.ym`` file like this:
+Create a ``playbook.yml`` file like this:
 
 ```
 ---