From d76b055ed4e23af97bb7b58101a18570208b144f Mon Sep 17 00:00:00 2001
From: Allar Viik
Date: Fri, 15 Sep 2023 13:24:12 +0300
Subject: [PATCH 1/7] Migrated Providentia inventory plugin from clarified.core
 collection

---
 nova/core/plugins/inventory/providentia_v3.py | 209 ++++++++++++++++++
 1 file changed, 209 insertions(+)
 create mode 100644 nova/core/plugins/inventory/providentia_v3.py

diff --git a/nova/core/plugins/inventory/providentia_v3.py b/nova/core/plugins/inventory/providentia_v3.py
new file mode 100644
index 00000000..8d554a2f
--- /dev/null
+++ b/nova/core/plugins/inventory/providentia_v3.py
@@ -0,0 +1,209 @@
+DOCUMENTATION = """
+    name: providentia_v3
+    plugin_type: inventory
+    short_description: Providentia inventory source
+    requirements:
+        - requests >= 2.18.4
+        - requests_oauthlib
+        - oauthlib
+    description:
+        - Get inventory hosts and groups from Providentia.
+        - Uses a YAML configuration file that ends with providentia.(yml|yaml).
+    options:
+        plugin:
+            description: Token that ensures this is a source file for the 'providentia' plugin.
+            required: True
+        providentia_host:
+            description: Root URL to Providentia.
+            type: string
+            required: True
+        exercise:
+            description: Exercise abbreviation which defines the configuration to populate the inventory with.
+            type: string
+            required: True
+        sso_token_url:
+            description: The endpoint where a token may be obtained for Providentia.
+            type: string
+            required: True
+        sso_client_id:
+            description: SSO client id for Providentia.
+            type: string
+            default: "Providentia"
+        credentials_lookup_env:
+            description: ENV var used to look up the Providentia credentials KeePass path.
+            type: string
+            default: KEEPASS_DEPLOYER_CREDENTIALS_PATH
+            required: False
+"""
+
+import os
+import json
+import socket
+import aiohttp
+import asyncio
+from oauthlib.oauth2 import LegacyApplicationClient
+from requests_oauthlib import OAuth2Session
+from ansible.plugins.inventory import BaseInventoryPlugin
+from ansible.errors import AnsibleError
+from ansible.utils.vars import combine_vars, load_extra_vars
+
+class InventoryModule(BaseInventoryPlugin):
+    NAME = 'providentia_v3'
+
+    def verify_file(self, path):
+        if super(InventoryModule, self).verify_file(path):
+            # Only accept configuration files that match the documented naming scheme
+            return path.endswith(('providentia.yml', 'providentia.yaml'))
+        return False
+
+    def parse(self, inventory, loader, path, cache=True):
+        super(InventoryModule, self).parse(inventory, loader, path)
+        self._read_config_data(path)
+        # Merge extra vars
+        self._options = combine_vars(self._options, load_extra_vars(loader))
+
+        asyncio.run(self.run())
+
+    async def run(self):
+        self.init_inventory()
+        await self.store_access_token()
+
+        async with aiohttp.ClientSession() as session:
+            self._session = session
+            await self.fetch_environment()
+            await self.fetch_groups()
+            await self.fetch_hosts()
+
+    def init_inventory(self):
+        self.inventory.add_group("all")
+
+        self.inventory.set_variable("all", "providentia_api_version", 3)
+
+    async def store_access_token(self):
+        keepass_creds = os.environ.get(self.get_option('credentials_lookup_env'), "").strip()
+        sso_creds = self.fetch_keepass_creds(keepass_creds)
+
+        self._access_token = self.fetch_access_token(sso_creds)
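+
+    # The method below assumes the ansible-keepass agent is already listening on
+    # /tmp/ansible-keepass.sock. As used here, the wire protocol is one JSON
+    # request per attribute, e.g. {"attr": "username", "path": "<keepass-path>"},
+    # answered by a JSON object with "status" and "text" fields.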
+    def fetch_keepass_creds(self, creds_path):
+        kp_soc = "/tmp/ansible-keepass.sock"
+        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+        sock.connect(kp_soc)
+
+        username = {'attr': "username", 'path': creds_path}
+        sock.send(json.dumps(username).encode())
+        username = json.loads(sock.recv(1024).decode())
+
+        password = {'attr': "password", 'path': creds_path}
+        sock.send(json.dumps(password).encode())
+        password = json.loads(sock.recv(1024).decode())
+
+        sock.close()
+
+        if username['status'] == 'error' or password['status'] == 'error':
+            raise AnsibleError('Error retrieving credentials from KeePass')
+
+        return {
+            'username': username['text'],
+            'password': password['text']
+        }
+
+    async def fetch_environment(self):
+        event = await self.fetch_from_providentia('')
+        for key, value in event['result'].items():
+            self.inventory.set_variable("all", key, value)
+
+    async def fetch_groups(self):
+        groups = await self.fetch_from_providentia('tags')
+
+        # Add groups to inventory
+        for group_data in groups['result']:
+            group = group_data['id']
+            group_vars = group_data['config_map']
+            priority = group_data.get('priority')
+
+            self.inventory.add_group(group)
+
+            # Add group specific variables to group
+            for key, value in group_vars.items():
+                self.inventory.set_variable(group, key, value)
+
+            if priority:
+                self.inventory.set_variable(group, 'ansible_group_priority', int(priority))
+
+        # Add child groups to inventory
+        # This is done in a separate loop because groups can reference child
+        # groups that have not been added to the inventory yet
+        for group_data in groups['result']:
+            group = group_data['id']
+            group_children = group_data['children']
+
+            for child_group in group_children:
+                self.inventory.add_child(group, child_group)
+
+    async def fetch_hosts(self):
+        hosts = await self.fetch_from_providentia('inventory')
+
+        # Keys that are excluded from host variables to avoid endless recursion and overwriting
+        excluded_keys = ["id", "instances"]
+
+        # Build the filtered parent vars from the first host as a template,
+        # since all of the hosts have the same keys
+        filtered_parent_vars = {key: value for key, value in hosts['result'][0].items() if key not in excluded_keys}
+
+        # Add hosts to inventory
+        for host in hosts['result']:
+            for host_instance in host.get('instances', []):
+                host_instance_id = host_instance['id']
+                self.inventory.add_host(host_instance_id)
+
+                self.inventory.set_variable(host_instance_id, "main_id", host['id'])
+
+                for var_name in filtered_parent_vars:
+                    if var_name in host:
+                        self.inventory.set_variable(host_instance_id, var_name, host[var_name])
+
+                for key, value in host_instance.items():
+                    self.inventory.set_variable(host_instance_id, key, value)
+
+                for group in host.get('tags', []):
+                    self.inventory.add_child(group, host_instance_id)
+
+                for group in host_instance.get('tags', []):
+                    self.inventory.add_child(group, host_instance_id)
+
+    async def fetch_from_providentia(self, endpoint=""):
+        providentia_host = self.get_option('providentia_host')
+        exercise = self.get_option('exercise')
+
+        url = f"{providentia_host}/api/v3/{exercise}/{endpoint}"
+
+        headers = {
+            'Authorization': f"{self._access_token['token_type']} {self._access_token['access_token']}"
+        }
+        async with self._session.get(url, headers=headers) as response:
+            if response.status == 200:
+                return await response.json()
+
+            if response.status == 401:
+                raise AnsibleError('Providentia responded with 401: Unauthenticated')
+
+            if response.status == 403:
+                raise AnsibleError('Requested token is not authorized to perform this action')
+
+            if response.status == 404:
+                raise AnsibleError('Providentia responded with 404: not found')
+
+            if response.status == 500:
+                raise AnsibleError('Providentia responded with 500: server error')
+
+            raise AnsibleError(f'Providentia responded with unexpected status {response.status}')
+
+    def fetch_access_token(self, creds):
+        client_id = self.get_option('sso_client_id')
+        oauth = OAuth2Session(client=LegacyApplicationClient(client_id=client_id))
+        token = oauth.fetch_token(
+            token_url=self.get_option('sso_token_url'),
+            username=creds['username'],
+            password=creds['password'],
+            client_id=client_id)
+
+        return token
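
An illustrative source file for this plugin (the file name must match the documented providentia.(yml|yaml) pattern; the URLs and exercise abbreviation below are placeholders):

    # my_exercise.providentia.yml
    plugin: nova.core.providentia_v3
    providentia_host: https://providentia.example.com
    exercise: XS
    sso_token_url: https://sso.example.com/auth/realms/example/protocol/openid-connect/token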
From 0a7443da857efcf49a429e7e7317d99776f57834 Mon Sep 17 00:00:00 2001
From: Allar Viik
Date: Fri, 15 Sep 2023 13:32:06 +0300
Subject: [PATCH 2/7] Migrated create role from clarified.core

---
 nova/core/roles/create/README.md              |  27 +++
 nova/core/roles/create/defaults/main.yml      | 119 ++++++++++
 nova/core/roles/create/tasks/aws/create.yml   |  83 +++++++
 nova/core/roles/create/tasks/aws/main.yml     |  10 +
 nova/core/roles/create/tasks/aws/remove.yml   |  27 +++
 .../roles/create/tasks/aws/setup_vars.yml     |   4 +
 .../core/roles/create/tasks/external/main.yml |   4 +
 .../core/roles/create/tasks/linode/create.yml |  24 ++
 nova/core/roles/create/tasks/linode/main.yml  |  21 ++
 .../roles/create/tasks/linode/rebuild.yml     |  45 ++++
 .../core/roles/create/tasks/linode/remove.yml |  21 ++
 nova/core/roles/create/tasks/main.yml         |  27 +++
 .../tasks/vmware_workstation/create.yml       | 172 ++++++++++++++
 .../create/tasks/vmware_workstation/main.yml  |  24 ++
 .../tasks/vmware_workstation/remove.yml       |  58 +++++
 .../roles/create/tasks/vsphere/create.yml     | 211 ++++++++++++++++++
 .../roles/create/tasks/vsphere/extra_nics.yml |  17 ++
 nova/core/roles/create/tasks/vsphere/main.yml |  11 +
 .../roles/create/tasks/vsphere/remove.yml     |  25 +++
 .../roles/create/tasks/vsphere/setup_vars.yml |  17 ++
 20 files changed, 947 insertions(+)
 create mode 100644 nova/core/roles/create/README.md
 create mode 100644 nova/core/roles/create/defaults/main.yml
 create mode 100644 nova/core/roles/create/tasks/aws/create.yml
 create mode 100644 nova/core/roles/create/tasks/aws/main.yml
 create mode 100644 nova/core/roles/create/tasks/aws/remove.yml
 create mode 100644 nova/core/roles/create/tasks/aws/setup_vars.yml
 create mode 100644 nova/core/roles/create/tasks/external/main.yml
 create mode 100644 nova/core/roles/create/tasks/linode/create.yml
 create mode 100644 nova/core/roles/create/tasks/linode/main.yml
 create mode 100644 nova/core/roles/create/tasks/linode/rebuild.yml
 create mode 100644 nova/core/roles/create/tasks/linode/remove.yml
 create mode 100644 nova/core/roles/create/tasks/main.yml
 create mode 100644 nova/core/roles/create/tasks/vmware_workstation/create.yml
 create mode 100644 nova/core/roles/create/tasks/vmware_workstation/main.yml
 create mode 100644 nova/core/roles/create/tasks/vmware_workstation/remove.yml
 create mode 100644 nova/core/roles/create/tasks/vsphere/create.yml
 create mode 100644 nova/core/roles/create/tasks/vsphere/extra_nics.yml
 create mode 100644 nova/core/roles/create/tasks/vsphere/main.yml
 create mode 100644 nova/core/roles/create/tasks/vsphere/remove.yml
 create mode 100644 nova/core/roles/create/tasks/vsphere/setup_vars.yml

diff --git a/nova/core/roles/create/README.md b/nova/core/roles/create/README.md
new file mode 100644
index 00000000..4725faf4
--- /dev/null
+++ b/nova/core/roles/create/README.md
@@ -0,0 +1,27 @@
+# Create
+
+This role is used to create virtual machines in different environments. Currently supported environments are:
+
+- AWS
+- Linode
+- VMware vSphere
+- VMware Workstation
+
+## Requirements
+
+None.
+
+## Role Variables
+
+Refer to the [defaults/main.yml](https://github.com/novateams/nova.core/blob/main/nova/core/roles/create/defaults/main.yml) file for a list of variables and their default values.
+
+## Dependencies
+
+Depending on the environment you want to create the VM in, you will need to install the following Ansible collections:
+
+- amazon.aws
+- community.aws
+- vmware.vmware_rest
+- community.vmware
+
+## Example
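+
+A minimal, illustrative playbook (hypothetical values; the role also expects inventory-supplied variables such as `vm_name`, `vm_template` and, for vSphere, `datacenter`, `cluster`, `datastore` and `folder`):
+
+```yaml
+- hosts: all
+  gather_facts: false
+  vars:
+    infra_env: vsphere # one of: vsphere, vmware_workstation, aws, linode, external
+    deploy_mode: deploy # or redeploy / undeploy
+  roles:
+    - nova.core.create
+```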
diff --git a/nova/core/roles/create/defaults/main.yml b/nova/core/roles/create/defaults/main.yml
new file mode 100644
index 00000000..f0081057
--- /dev/null
+++ b/nova/core/roles/create/defaults/main.yml
@@ -0,0 +1,119 @@
+---
+####################
+# vSphere defaults #
+####################
+
+# Networking
+extra_interfaces: false
+
+default_interface:
+  - name: "{{ interfaces[0].cloud_id }}"
+    device_type: "{{ vmware_nic_type }}"
+extra_network_interfaces: []
+
+# Hardware
+cpus: 2
+ram_gb: 2
+hardware_cpu:
+hardware_ram:
+vmware_nic_type: vmxnet3
+vmware_scsi_controller_type: paravirtual
+
+# Deployment
+vapp_options: []
+immutable: false
+no_undeploy: false
+
+# Linked clones
+linked_clone_status: false
+linked_clone_snapshot_name: LinkedCloneSource # Case-sensitive default value
+
+# Connection
+real_connection_plugin: smart
+
+# Connection mode
+primary_network: "{{ interfaces | selectattr('connection', 'equalto', true) | first }}"
+connection_ip: "{{ connection_address | default(primary_network.addresses | selectattr('connection', 'equalto', true) | map(attribute='address') | first | default(omit) | ansible.utils.ipaddr('address')) }}"
+connection_mode: "{{ primary_network.addresses | selectattr('connection', 'equalto', true) | map(attribute='mode') | first }}"
+connection_mode_dhcp: "{{ true if connection_mode | regex_search('.*dhcp.*') else false }}"
+
+# Video memory
+video_increase_memory: false
+video_memory: 64
+video_3d: false
+video_3d_memory: 256
+
+vm_description: Created on {{ ansible_date_time.iso8601 }} by {{ deployer_username }} - {{ inventory_hostname }}
+template_description: Created on {{ ansible_date_time.iso8601 }}
+
+######################
+# VMware Workstation #
+######################
+
+# VM paths
+template_export_folder: /home/{{ lookup('env', 'USER') }}/vmware/templates
+local_vmx_template_path: "{{ template_export_folder }}/{{ vm_template }}/{{ vm_template }}.vmx"
+
+local_vm_folder: /home/{{ lookup('env', 'USER') }}/vmware
+local_vmx_path: /home/{{ lookup('env', 'USER') }}/vmware/{{ custom_vm_name | default(vm_name) }}/{{ custom_vm_name | default(vm_name) }}.vmx
+
+# Shared folders
+enable_shared_folder: true
+host_shared_folder_path: /home/{{ ansible_user }}/Documents/vm_shares/{{ custom_vm_name | default(vm_name) }} # In the VMware Workstation case the ansible_user is the host's username
+supported_shared_folder_os: # Operating systems that support shared folders between host and guest
+  - os_windows
+  - os_ubuntu
+  - os_kali
+
+################
+# AWS defaults #
+################
+
+# Temporary ssh key path on your container
+temp_ssh_key_path: /tmp/{{ project_fullname }}_{{ inventory_hostname }}_aws_key
+
+# AWS security group rules
+aws_security_group_rules:
+  - proto: TCP
+    to_port: 22
+    from_port: 22
+    cidr_ip:
+      - "0.0.0.0/0"
+    rule_desc: Internet IPv4 SSH
+  - proto: TCP
+    to_port: 80
+    from_port: 80
+    cidr_ip:
+      - "0.0.0.0/0"
+    rule_desc: Internet IPv4 HTTP
+  - proto: TCP
+    to_port: 443
+    from_port: 443
+    cidr_ip:
+      - "0.0.0.0/0"
+    rule_desc: Internet IPv4 HTTPS
+  - proto: TCP
+    to_port: 22
+    from_port: 22
+    cidr_ipv6:
+      - ::/0
+    rule_desc: Internet IPv6 SSH
+  - proto: TCP
+    to_port: 80
+    from_port: 80
+    cidr_ipv6:
+      - ::/0
+    rule_desc: Internet IPv6 HTTP
+  - proto: TCP
+    to_port: 443
+    from_port: 443
+    cidr_ipv6:
+      - ::/0
+    rule_desc: Internet IPv6 HTTPS
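+
+# An illustrative override (hypothetical values): restrict SSH to a management
+# network by redefining aws_security_group_rules in group_vars or host_vars:
+#   aws_security_group_rules:
+#     - proto: TCP
+#       from_port: 22
+#       to_port: 22
+#       cidr_ip:
+#         - "203.0.113.0/24"
+#       rule_desc: Management SSH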
+
+###################
+# Linode defaults #
+###################
+
+# The default root password for the Linode VM
+linode_vm_password: "{{ lookup('community.hashi_vault.hashi_vault', vault_lookup_fragment + 'secret={{ environment_name }}/data/{{ project_fullname }}:{{ inventory_hostname }}_{{ admin_account }}') }}"
+rebuild: false # Use the ctp-rebuild alias to rebuild the VM with a different size while keeping the data

diff --git a/nova/core/roles/create/tasks/aws/create.yml b/nova/core/roles/create/tasks/aws/create.yml
new file mode 100644
index 00000000..fa72979c
--- /dev/null
+++ b/nova/core/roles/create/tasks/aws/create.yml
@@ -0,0 +1,83 @@
+---
+- block:
+    - name: Getting all OS images...
+      amazon.aws.ec2_ami_info:
+        owners: "{{ ami_owner_id }}"
+        filters:
+          name: "{{ ami_name_search_string }}"
+          architecture: x86_64
+      register: found_amis
+
+    - name: Sorting images by creation_date...
+      ansible.builtin.set_fact:
+        all_images: "{{ found_amis.images | sort(attribute='creation_date') }}"
+
+    - name: Gathering information about all instances in {{ aws_defaults.region }}...
+      amazon.aws.ec2_instance_info:
+        filters:
+          tag:Name: "{{ custom_vm_name | default(vm_name) }}"
+      register: ec2_instance_info
+
+    - name: Sorting instances by launch_time...
+      ansible.builtin.set_fact:
+        existing_instances: "{{ ec2_instance_info.instances | sort(attribute='launch_time') }}"
+
+    - name: Setting fresh_deploy as fact...
+      ansible.builtin.set_fact:
+        fresh_deploy: true
+      when: existing_instances == [] or existing_instances[-1].state.name == 'terminated'
+
+    - name: Removing existing ssh temp key for {{ hostname }}... # In case it was left over from a previous deploy
+      amazon.aws.ec2_key:
+        name: "{{ custom_vm_name | default(vm_name) }}"
+        state: absent
+
+    - name: Including fresh deploy tasks...
+      block:
+        - name: Creating a temp ssh key for {{ hostname }}...
+          amazon.aws.ec2_key:
+            name: "{{ custom_vm_name | default(vm_name) }}"
+          register: aws_temp_private_key
+
+        - name: Saving {{ hostname }} temp private key to file...
+          ansible.builtin.copy:
+            content: "{{ aws_temp_private_key.key.private_key }}"
+            dest: "{{ temp_ssh_key_path }}"
+            mode: "600"
+      when: fresh_deploy
+
+    - name: Creating AWS security group for {{ custom_vm_name | default(vm_name) }}...
+      amazon.aws.ec2_security_group:
+        name: "{{ custom_vm_name | default(vm_name) }}"
+        description: Security group for {{ custom_vm_name | default(vm_name) }}
+        rules: "{{ aws_security_group_rules }}"
+        state: present
+
+    - name: Launching {{ hostname }} EC2 instance...
+      amazon.aws.ec2_instance:
+        name: "{{ custom_vm_name | default(vm_name) }}"
+        key_name: "{{ custom_vm_name | default(vm_name) }}"
+        vpc_subnet_id: "{{ ec2_subnet_id | default(omit) }}"
+        instance_type: "{{ aws_vm_size | default('t3.micro') }}"
+        security_group: "{{ aws_security_group | default(custom_vm_name) | default(vm_name) }}"
+        network:
+          assign_public_ip: true
+        image_id: "{{ all_images[-1].image_id }}"
+      register: created_ec2_instance
+      until:
+        - created_ec2_instance.instances[0].public_ip_address is defined
+      retries: 24
+      delay: 5
+
+    - name: Setting primary IPv4 as fact...
+      ansible.builtin.set_fact:
+        primary_ipv4: "{{ created_ec2_instance.instances[0].public_ip_address }}"
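+
+    # The AWS-side key pair is only needed at launch time; the instance keeps the
+    # injected public key and the private key was already saved to
+    # temp_ssh_key_path above, so the key pair can be deleted right away.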
+    - name: Removing created ssh temp key for {{ hostname }}...
+      amazon.aws.ec2_key:
+        name: "{{ custom_vm_name | default(vm_name) }}"
+        state: absent
+
+  delegate_to: localhost
+  become: false

diff --git a/nova/core/roles/create/tasks/aws/main.yml b/nova/core/roles/create/tasks/aws/main.yml
new file mode 100644
index 00000000..8570477c
--- /dev/null
+++ b/nova/core/roles/create/tasks/aws/main.yml
@@ -0,0 +1,10 @@
+---
+- name: Setting required facts for deploy...
+  ansible.builtin.include_tasks: setup_vars.yml
+
+- name: Starting the removal process...
+  ansible.builtin.include_tasks: remove.yml
+  when: deploy_mode == 'undeploy' or deploy_mode == 'redeploy'
+  # The play will end here if we are undeploying
+
+- name: Starting the cloning process...
+  ansible.builtin.include_tasks: create.yml

diff --git a/nova/core/roles/create/tasks/aws/remove.yml b/nova/core/roles/create/tasks/aws/remove.yml
new file mode 100644
index 00000000..6be2595f
--- /dev/null
+++ b/nova/core/roles/create/tasks/aws/remove.yml
@@ -0,0 +1,27 @@
+---
+- name: Terminating {{ hostname }} EC2 instance...
+  amazon.aws.ec2_instance:
+    name: "{{ custom_vm_name | default(vm_name) }}"
+    state: terminated
+  delegate_to: localhost
+  when:
+    - not no_undeploy
+    - "'no_undeploy' not in group_names"
+
+- name: Deleting AWS security group for {{ custom_vm_name | default(vm_name) }}...
+  amazon.aws.ec2_security_group:
+    name: "{{ custom_vm_name | default(vm_name) }}"
+    state: absent
+  delegate_to: localhost
+  when:
+    - not no_undeploy
+    - "'no_undeploy' not in group_names"
+
+- name: Warning...
+  ansible.builtin.debug:
+    msg: "{{ inventory_hostname }} is in a no_undeploy zone or has a no_undeploy flag and won't be removed!"
+  when: no_undeploy or 'no_undeploy' in group_names
+
+- name: Stop executing when we are undeploying
+  ansible.builtin.meta: end_host
+  when: deploy_mode == 'undeploy'

diff --git a/nova/core/roles/create/tasks/aws/setup_vars.yml b/nova/core/roles/create/tasks/aws/setup_vars.yml
new file mode 100644
index 00000000..cd66833f
--- /dev/null
+++ b/nova/core/roles/create/tasks/aws/setup_vars.yml
@@ -0,0 +1,4 @@
+---
+- name: Setting the correct admin_account for the AWS template...
+  ansible.builtin.set_fact:
+    admin_account: "{{ aws_template_username }}"

diff --git a/nova/core/roles/create/tasks/external/main.yml b/nova/core/roles/create/tasks/external/main.yml
new file mode 100644
index 00000000..c1b959d7
--- /dev/null
+++ b/nova/core/roles/create/tasks/external/main.yml
@@ -0,0 +1,4 @@
+---
+- name: Info...
+  ansible.builtin.debug:
+    msg: Bypassing the create_vm role for external machines.

diff --git a/nova/core/roles/create/tasks/linode/create.yml b/nova/core/roles/create/tasks/linode/create.yml
new file mode 100644
index 00000000..25ea88a5
--- /dev/null
+++ b/nova/core/roles/create/tasks/linode/create.yml
@@ -0,0 +1,24 @@
+---
+- name: Creating {{ custom_vm_name | default(vm_name) }} in Linode...
+  linode.cloud.instance:
+    api_token: "{{ linode_api_token }}"
+    label: "{{ custom_vm_name | default(vm_name) }}"
+    type: "{{ linode_vm_type | default('g6-dedicated-2') }}"
+    region: "{{ linode_vm_region | default('eu-central') }}"
+    image: "{{ linode_image }}"
+    root_pass: "{{ linode_vm_password }}"
+    tags:
+      - "{{ project_fullname | default(omit) }}"
+    state: present
+  register: linode_vm
+  delegate_to: localhost
+
+- name: Setting fresh_deploy as fact...
+  ansible.builtin.set_fact:
+    fresh_deploy: true
+  when: linode_vm.changed
+
+- name: Setting primary IPs as fact...
+  ansible.builtin.set_fact:
+    primary_ipv4: "{{ linode_vm.instance.ipv4[0] | ansible.utils.ipaddr('address') }}"
+    primary_ipv6: "{{ linode_vm.instance.ipv6 | ansible.utils.ipaddr('address') }}"

diff --git a/nova/core/roles/create/tasks/linode/main.yml b/nova/core/roles/create/tasks/linode/main.yml
new file mode 100644
index 00000000..8a9258fa
--- /dev/null
+++ b/nova/core/roles/create/tasks/linode/main.yml
@@ -0,0 +1,21 @@
+---
+- block:
+    - name: INFO
+      ansible.builtin.debug:
+        msg: KEEPASS_LINODE_API_TOKEN is not defined in .makerc-vars.
+
+    - name: Stopping play...
+      ansible.builtin.meta: end_host
+  when: lookup('env', 'KEEPASS_LINODE_API_TOKEN') | length == 0
+
+- name: Starting the rebuild process...
+  ansible.builtin.include_tasks: rebuild.yml
+  when: rebuild
+
+- name: Starting the removal process...
+  ansible.builtin.include_tasks: remove.yml
+  when: deploy_mode == 'undeploy' or deploy_mode == 'redeploy'
+  # The play will end here if we are undeploying
+
+- name: Starting the cloning process...
+  ansible.builtin.include_tasks: create.yml

diff --git a/nova/core/roles/create/tasks/linode/rebuild.yml b/nova/core/roles/create/tasks/linode/rebuild.yml
new file mode 100644
index 00000000..588d4d19
--- /dev/null
+++ b/nova/core/roles/create/tasks/linode/rebuild.yml
@@ -0,0 +1,45 @@
+---
+- name: Getting {{ custom_vm_name | default(vm_name) }} info...
+  linode.cloud.instance_info:
+    api_token: "{{ linode_api_token }}"
+    label: "{{ custom_vm_name | default(vm_name) }}"
+  register: linode_vm
+  delegate_to: localhost
+
+- name: Rebuilding {{ custom_vm_name | default(vm_name) }}...
+  ansible.builtin.uri:
+    url: https://api.linode.com/v4/linode/instances/{{ linode_vm.instance.id }}/rebuild
+    method: POST
+    headers:
+      Content-Type: application/json
+      Authorization: Bearer {{ linode_api_token }}
+    body:
+      image: "{{ linode_image }}"
+      root_pass: "{{ linode_vm_password }}"
+    body_format: json
+  delegate_to: localhost
+
+# This is required because there is a delay between the POST request and the actual rebuild
+- name: Waiting until {{ custom_vm_name | default(vm_name) }} starts rebuilding...
+  linode.cloud.instance_info:
+    api_token: "{{ linode_api_token }}"
+    label: "{{ custom_vm_name | default(vm_name) }}"
+  register: linode_rebuild
+  until: linode_rebuild.instance.status == "rebuilding"
+  retries: 60
+  delay: 1
+  delegate_to: localhost
+
+- name: Waiting until {{ custom_vm_name | default(vm_name) }} rebuilding is complete...
+  linode.cloud.instance_info:
+    api_token: "{{ linode_api_token }}"
+    label: "{{ custom_vm_name | default(vm_name) }}"
+  register: linode_rebuild_state
+  until: linode_rebuild_state.instance.status == "running"
+  retries: 60
+  delay: 1
+  delegate_to: localhost
+
+- name: Setting fresh_deploy fact...
+  ansible.builtin.set_fact:
+    fresh_deploy: true
diff --git a/nova/core/roles/create/tasks/linode/remove.yml b/nova/core/roles/create/tasks/linode/remove.yml
new file mode 100644
index 00000000..be6bbc76
--- /dev/null
+++ b/nova/core/roles/create/tasks/linode/remove.yml
@@ -0,0 +1,21 @@
+---
+- name: Deleting {{ custom_vm_name | default(vm_name) }} from Linode...
+  linode.cloud.instance:
+    api_token: "{{ linode_api_token }}"
+    label: "{{ custom_vm_name | default(vm_name) }}"
+    state: absent
+  delegate_to: localhost
+  when:
+    - not no_undeploy
+    - "'no_undeploy' not in group_names"
+    - not immutable
+    - "'immutable' not in group_names"
+
+- name: Warning...
+  ansible.builtin.debug:
+    msg: "{{ inventory_hostname }} has no_undeploy or immutable set and won't be removed!"
+  when: immutable or no_undeploy or 'immutable' in group_names or 'no_undeploy' in group_names
+
+- name: Stop executing when we are undeploying
+  ansible.builtin.meta: end_host
+  when: deploy_mode == 'undeploy'

diff --git a/nova/core/roles/create/tasks/main.yml b/nova/core/roles/create/tasks/main.yml
new file mode 100644
index 00000000..247d511e
--- /dev/null
+++ b/nova/core/roles/create/tasks/main.yml
@@ -0,0 +1,27 @@
+---
+- block:
+    - name: Including vSphere environment tasks...
+      ansible.builtin.include_tasks: vsphere/main.yml
+      when: infra_env == 'vsphere'
+
+    - name: Including VMware Workstation environment tasks...
+      ansible.builtin.include_tasks: vmware_workstation/main.yml
+      when: infra_env == 'vmware_workstation'
+
+    - name: Including AWS EC2 environment tasks...
+      ansible.builtin.include_tasks: aws/main.yml
+      when: infra_env == 'aws'
+
+    - name: Including Linode environment tasks...
+      ansible.builtin.include_tasks: linode/main.yml
+      when: infra_env == 'linode'
+
+    - name: Including external environment tasks...
+      ansible.builtin.include_tasks: external/main.yml
+      when: infra_env == 'external'
+
+  when: customization_context == "host" # The alternative is "container", in which case this role is not used to create the machine
+
+- name: Stopping play...
+  ansible.builtin.meta: end_host
+  when: (just_create) or (deploy_mode == "undeploy")

diff --git a/nova/core/roles/create/tasks/vmware_workstation/create.yml b/nova/core/roles/create/tasks/vmware_workstation/create.yml
new file mode 100644
index 00000000..75d30bf5
--- /dev/null
+++ b/nova/core/roles/create/tasks/vmware_workstation/create.yml
@@ -0,0 +1,172 @@
+---
+- name: Including VMware Workstation tasks...
+  become: false
+  block:
+    - name: Making sure that {{ template_export_folder }} exists...
+      ansible.builtin.file:
+        path: "{{ template_export_folder }}"
+        state: directory
+        recurse: true
+
+    - name: Checking if the VM template is already present...
+      ansible.builtin.stat:
+        path: "{{ local_vmx_template_path }}"
+      register: vm_template_presence
+
+    - name: Downloading {{ vm_template }} if it does not exist or is older than 30 days...
+      when:
+        - not vm_template_presence.stat.exists or ((ansible_date_time.epoch | int) - (vm_template_presence.stat.ctime | round) >= 2592000) # 30 days
+        - not role_only
+        - not role_only_wp
+      block:
+        - name: Removing existing {{ vm_template }} template...
+          ansible.builtin.file:
+            path: "{{ template_export_folder }}/{{ vm_template }}"
+            state: absent
+
+        - name: Downloading {{ vm_template }}...
+          ansible.builtin.shell: ovftool --noSSLVerify -tt=vmx -n={{ vm_template }} 'vi://{{ vmware_defaults.username }}:{{ vmware_defaults.password | urlencode() }}@{{ templates_path }}/{{ vm_template }}' {{ template_export_folder }}
+
+    - name: Checking if {{ custom_vm_name | default(vm_name) }} already exists...
+      ansible.builtin.stat:
+        path: "{{ local_vmx_path }}"
+      register: vm_presence
+
+    - name: Checking for the correct deploy method with a non-existing VM...
+      when:
+        - not vm_presence.stat.exists
+        - role_only or role_only_wp
+      block:
+        - ansible.builtin.debug:
+            msg: "{{ custom_vm_name | default(vm_name) }} doesn't exist, run ctp-deploy or ctp-redeploy first!"
+          failed_when:
+            - not vm_presence.stat.exists
+            - role_only or role_only_wp
+
+    - name: Creating and configuring {{ custom_vm_name | default(vm_name) }}...
+      when: not vm_presence.stat.exists
+      block:
+        - name: Setting fresh_deploy fact...
+          ansible.builtin.set_fact:
+            fresh_deploy: true
+
+        - name: Creating {{ custom_vm_name | default(vm_name) }} from {{ vm_template }}...
+          ansible.builtin.shell: ovftool -tt=vmx -n={{ custom_vm_name | default(vm_name) }} {{ local_vmx_template_path }} {{ local_vm_folder }}
+
+        - name: Configuring {{ custom_vm_name | default(vm_name) }} virtual machine...
+          ansible.builtin.lineinfile:
+            path: "{{ local_vmx_path }}"
+            regex: "{{ item.regex }}"
+            line: "{{ item.line }}"
+            state: present
+          loop_control:
+            label: "{{ item.line }}"
+          loop:
+            # Sets the network adapter to NAT
+            - regex: ethernet0.connectionType.*
+              line: ethernet0.connectionType = "nat"
+
+            # Otherwise a Windows deploy will sometimes fail because of a running sysprep
+            - regex: tools.upgrade.policy.*
+              line: tools.upgrade.policy = "manual"
+
+            # This fixes a Windows BSOD when importing from vSphere
+            - regex: hpet0.present.*
+              line: hpet0.present = "TRUE"
+
+            # This fixes the issue where the link keeps flapping https://github.com/mkubecek/vmware-host-modules/issues/54
+            - regex: vmnat.linkStatePropagation.disable.*
+              line: vmnat.linkStatePropagation.disable = "TRUE"
+
+            # Enabling nested virtualization
+            - regex: vhv.enable.*
+              line: vhv.enable = "TRUE"
+
+        - name: Including shared folder tasks...
+          when:
+            - enable_shared_folder
+            - supported_shared_folder_os | intersect(group_names) | length > 0
+            - not role_only
+          block:
+            - name: Sharing VM folder with host...
+              ansible.builtin.lineinfile:
+                path: "{{ local_vmx_path }}"
+                regex: "{{ item.regex }}"
+                line: "{{ item.line }}"
+                state: present
+              loop_control:
+                label: "{{ item.line }}"
+              loop:
+                - regex: isolation.tools.hgfs.disable.*
+                  line: isolation.tools.hgfs.disable = "FALSE"
+                - regex: sharedFolder0.present.*
+                  line: sharedFolder0.present = "TRUE"
+                - regex: sharedFolder0.enabled.*
+                  line: sharedFolder0.enabled = "TRUE"
+                - regex: sharedFolder0.readAccess.*
+                  line: sharedFolder0.readAccess = "TRUE"
+                - regex: sharedFolder0.writeAccess.*
+                  line: sharedFolder0.writeAccess = "TRUE"
+                - regex: sharedFolder0.hostPath.*
+                  line: sharedFolder0.hostPath = "{{ host_shared_folder_path }}"
+                - regex: sharedFolder0.guestName.*
+                  line: sharedFolder0.guestName = "{{ custom_vm_name | default(vm_name) }}"
+                - regex: sharedFolder0.expiration.*
+                  line: sharedFolder0.expiration = "never"
+                - regex: sharedFolder.maxNum.*
+                  line: sharedFolder.maxNum = "1"
+
+            - name: Creating a shared folder on the host...
+              ansible.builtin.file:
+                path: "{{ host_shared_folder_path }}"
+                state: directory
+                recurse: true
+
+        - name: Fixing Darwin compatibility...
+          ansible.builtin.lineinfile:
+            path: "{{ local_vmx_path }}"
+            regex: "{{ item.regex }}"
+            line: "{{ item.line }}"
+            state: present
+          loop_control:
+            label: "{{ item.line }}"
+          loop:
+            - regex: guestos.*
+              line: guestos = "darwin20-64"
+          when: "'os_macos' in group_names"
+
+    - name: Waiting for user to manually start VMware Workstation... # For some reason it doesn't start automatically
+      ansible.builtin.shell: |
+        if [ -z "$(ps -fC vmware --no-headers)" ]; then
+          echo "Waiting for VMware..."
+        else
+          echo "Moving on..."
+        fi
+      register: vmware_workstation_running
+      retries: 90
+      delay: 10
+      until: vmware_workstation_running.stdout == "Moving on..."
+
+    - name: Starting {{ inventory_hostname }} and waiting for the IP address...
+      ansible.builtin.shell: DISPLAY={{ lookup('env', 'DISPLAY') }} vmrun -T ws start {{ local_vmx_path }}
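+
+    # vmrun getGuestIPAddress (used below) needs VMware Tools running in the
+    # guest; early in boot it can fail or return "unknown", hence the
+    # rc/stdout/ipv4 checks in the until conditions.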
+    - name: Waiting for {{ inventory_hostname }} IP address...
+      ansible.builtin.shell: vmrun getGuestIPAddress {{ local_vmx_path }}
+      register: vmip
+      retries: 60
+      delay: 3
+      until:
+        - vmip.rc == 0
+        - vmip.stdout != "unknown"
+        - vmip.stdout | ansible.utils.ipv4() != false
+
+    - name: Enabling shared folders... # Needs to be enabled here again because otherwise VMware Workstation disables shared folders for new machines on first run
+      ansible.builtin.shell: vmrun enableSharedFolders {{ local_vmx_path }}
+      when:
+        - enable_shared_folder
+        - supported_shared_folder_os | intersect(group_names) | length > 0
+
+    - name: Setting {{ vmip.stdout }} for connection to {{ inventory_hostname }}...
+      ansible.builtin.set_fact:
+        ansible_host: "{{ vmip.stdout }}"
+        primary_ipv4: "{{ vmip.stdout }}"

diff --git a/nova/core/roles/create/tasks/vmware_workstation/main.yml b/nova/core/roles/create/tasks/vmware_workstation/main.yml
new file mode 100644
index 00000000..800dd991
--- /dev/null
+++ b/nova/core/roles/create/tasks/vmware_workstation/main.yml
@@ -0,0 +1,24 @@
+---
+- name: Including SSH enabled checks block...
+  when: lookup('env', 'ALLOW_HOST_SSH_ACCESS') != 'true'
+  block:
+    - name: INFO
+      ansible.builtin.debug:
+        msg: "ALLOW_HOST_SSH_ACCESS is not true in .makerc-vars, skipping VMware Workstation tasks..."
+
+    - name: Stopping play...
+      ansible.builtin.meta: end_host
+
+- name: Setting correct connection parameters for VMware Workstation...
+  ansible.builtin.set_fact:
+    ansible_user: "{{ lookup('env', 'CONTAINER_USER_NAME') }}"
+    ansible_connection: ssh
+    ansible_shell_type: sh
+    ansible_python_interpreter: /usr/bin/python3
+
+- name: Starting the removal process...
+  ansible.builtin.include_tasks: remove.yml
+  when: deploy_mode == 'undeploy' or deploy_mode == 'redeploy'
+
+- name: Starting the cloning process...
+  ansible.builtin.include_tasks: create.yml

diff --git a/nova/core/roles/create/tasks/vmware_workstation/remove.yml b/nova/core/roles/create/tasks/vmware_workstation/remove.yml
new file mode 100644
index 00000000..651e6f47
--- /dev/null
+++ b/nova/core/roles/create/tasks/vmware_workstation/remove.yml
@@ -0,0 +1,58 @@
+---
+- block:
+    - name: Checking if {{ inventory_hostname }} exists...
+      ansible.builtin.stat:
+        path: "{{ local_vmx_path }}"
+      register: vm_exists
+
+    - block:
+        - name: Listing running VMs...
+          ansible.builtin.shell: vmrun -T ws list
+          register: running_vms
+
+        - name: Stopping {{ inventory_hostname }}...
+          ansible.builtin.shell: vmrun -T ws stop {{ local_vmx_path }} hard
+          when: local_vmx_path in running_vms.stdout_lines
+
+        - name: Waiting for user to manually stop VMware Workstation... # Otherwise the remove command gets an insufficient permissions error
+          ansible.builtin.shell: |
+            if [ -z "$(ps -fC vmware --no-headers)" ]; then
+              echo "Moving on..."
+            else
+              echo "Waiting for VMware to stop..."
+            fi
+          register: vmware_workstation_running
+          retries: 30
+          delay: 5
+          until: vmware_workstation_running.stdout == "Moving on..."
+
+        - name: Removing {{ inventory_hostname }}...
+          ansible.builtin.shell: vmrun -T ws deleteVM {{ local_vmx_path }}
+
+        - name: Stopping play for remove...
+          ansible.builtin.meta: end_host
+          when: deploy_mode == 'undeploy'
+      when: vm_exists.stat.exists
+
+    - name: Removing {{ inventory_hostname }} folder...
+      ansible.builtin.file:
+        path: "{{ local_vm_folder }}/{{ custom_vm_name | default(vm_name) }}"
+        state: absent
+
+  when:
+    - not no_undeploy
+    - "'no_undeploy' not in group_names"
+    - not immutable
+    - "'immutable' not in group_names"
+  vars:
+    ansible_become: false # Using this because set_fact would cause problems with later becomes
+
+- name: Warning...
+  ansible.builtin.debug:
+    msg: "{{ inventory_hostname }} has no_undeploy or immutable set and won't be removed!"
+  when: immutable or no_undeploy or 'immutable' in group_names or 'no_undeploy' in group_names
+
+- name: Stopping play...
+  ansible.builtin.meta: end_host
+  when: deploy_mode == 'undeploy'

diff --git a/nova/core/roles/create/tasks/vsphere/create.yml b/nova/core/roles/create/tasks/vsphere/create.yml
new file mode 100644
index 00000000..acfb0d7f
--- /dev/null
+++ b/nova/core/roles/create/tasks/vsphere/create.yml
@@ -0,0 +1,211 @@
+---
+# All of the vmware.vmware_rest tasks have retries for stability
+- name: Looking up the VM...
+  vmware.vmware_rest.vcenter_vm_info:
+    filter_names: "{{ custom_vm_name | default(vm_name) }}"
+  register: vcenter_vm_info
+  until: not vcenter_vm_info.failed
+  retries: 5
+  delay: 2
+  delegate_to: localhost
+  become: false
+
+- name: Setting fresh_deploy fact...
+  ansible.builtin.set_fact:
+    fresh_deploy: true
+  when: vcenter_vm_info.value == []
+
+- block:
+    - ansible.builtin.debug:
+        msg: Machine doesn't exist, use deploy or redeploy first!
+      failed_when:
+        - fresh_deploy
+        - role_only or role_only_wp
+  when:
+    - fresh_deploy
+    - role_only or role_only_wp
+
+- block:
+    - name: Checking that nothing is responding on {{ connection_ip }}...
+      local_action: shell ping -q -c 2 -W 2 {{ connection_ip }}
+      register: res
+      failed_when: not (('0 received' in res.stdout) or ('0 packets received' in res.stdout))
+      changed_when: false
+      when: not connection_mode_dhcp
+
+    - name: Looking up the VM template...
+      vmware.vmware_rest.vcenter_vm_info:
+        filter_names: "{{ vm_template if not template else base_vm_template }}"
+      register: template_search_result
+      until: not template_search_result.failed
+      retries: 5
+      delay: 2
+
+    - name: Getting information about the VM template...
+      vmware.vmware_rest.vcenter_vm_info:
+        vm: "{{ template_search_result.value[0].vm }}"
+      register: vm_template_information
+      until: not vm_template_information.failed
+      retries: 5
+      delay: 2
+
+    - name: Getting the template OS disk size...
+      ansible.builtin.set_fact:
+        template_disk_size: "{{ vm_template_information.value.disks[(vm_template_information.value.disks | first)].capacity // (1024 * 1024) | int }}"
+
+    # This task can be removed when local inventories are moved to Providentia
+    - name: Setting OS disk size...
+      ansible.builtin.set_fact:
+        os_disk_size: "{{ os_disk_size_gb * 1024 }}"
+      when:
+        - os_disk_size_gb is defined
+        - hardware_primary_disk_size is not defined
+
+    - name: Setting OS disk size...
+      ansible.builtin.set_fact:
+        os_disk_size: "{{ hardware_primary_disk_size * 1024 }}"
+      when: hardware_primary_disk_size is defined
+
+    - name: Setting OS disk size...
+      ansible.builtin.set_fact:
+        os_disk_size: "{{ template_disk_size }}"
+      when:
+        - os_disk_size_gb is not defined
+        - hardware_primary_disk_size is not defined
+
+    - name: ERROR...
+      ansible.builtin.debug:
+        msg: Your OS disk size {{ os_disk_size }}MB cannot be smaller than the template {{ template_disk_size }}MB!
+      when: template_disk_size | int > os_disk_size | int
+      failed_when: template_disk_size | int > os_disk_size | int
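+
+    # Linked clones are created from the snapshot named in
+    # linked_clone_snapshot_name (default: LinkedCloneSource) and only work when
+    # the clone keeps the template's disk size, hence the check below.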
+    - name: Checking if linked clone is possible...
+      when:
+        - linked_clone_status
+        - template_disk_size | int < os_disk_size | int
+      block:
+        - name: Info...
+          ansible.builtin.debug:
+            msg: |
+              Your OS disk size {{ os_disk_size }}MB is bigger than the template {{ template_disk_size }}MB.
+              Disabling linked clone.
+
+        - name: Disabling linked clone...
+          ansible.builtin.set_fact:
+            linked_clone_status: false
+
+    - name: Cloning VM...
+      community.vmware.vmware_guest:
+        wait_for_ip_address: "{{ primary_network.ipv4 is not defined }}"
+        datacenter: "{{ datacenter }}"
+        name: "{{ custom_vm_name | default(vm_name) }}"
+        template: "{{ base_vm_template if template else vm_template }}"
+        folder: "{{ folder }}"
+        state: poweredon
+        cluster: "{{ cluster }}"
+        datastore: "{{ datastore }}"
+        disk:
+          - size_mb: "{{ os_disk_size }}"
+            controller_type: "{{ vmware_scsi_controller_type }}"
+            controller_number: 0
+            unit_number: 0
+        resource_pool: "{{ resource_pool | default(omit) }}"
+        hardware:
+          num_cpus: "{{ cpus if not hardware_cpu else hardware_cpu }}"
+          num_cpu_cores_per_socket: "{{ cpus if not hardware_cpu else hardware_cpu }}"
+          memory_mb: "{{ ram | default((ram_gb | int * 1024) | round(0) | int) if not hardware_ram else (hardware_ram | int * 1024) | round(0) | int }}"
+          memory_reservation_lock: false
+          mem_reservation: 0
+        cdrom:
+          - controller_number: 0 # Unmounting existing ISO
+            unit_number: 0
+            type: none
+          - controller_number: 0 # Removing extra cdrom left from packer
+            unit_number: 1
+            state: absent
+        advanced_settings:
+          - key: isolation.tools.copy.disable
+            value: "FALSE"
+          - key: isolation.tools.paste.disable
+            value: "FALSE"
+          - key: isolation.tools.setGUIOptions.enable
+            value: "TRUE"
+        linked_clone: "{{ linked_clone_status }}"
+        snapshot_src: "{{ linked_clone_snapshot_name if linked_clone_status else omit }}"
+        annotation: "{{ template_description if template else vm_description }}"
+        vapp_properties: "{{ vapp_options }}"
+      register: vmware_guest
+
+    - name: Increasing video memory
+      community.vmware.vmware_guest_video:
+        name: "{{ custom_vm_name | default(vm_name) }}"
+        folder: "{{ folder }}"
+        datacenter: "{{ datacenter }}"
+        video_memory_mb: "{{ video_memory }}"
+        enable_3D: "{{ video_3d }}"
+        memory_3D_mb: "{{ video_3d_memory if video_3d else omit }}"
+        renderer_3D: "{{ 'automatic' if video_3d else omit }}"
+        use_auto_detect: false
+      when: video_increase_memory
+
+    - name: Looking up the created VM...
+      vmware.vmware_rest.vcenter_vm_info:
+        filter_names: "{{ custom_vm_name | default(vm_name) }}"
+      register: created_vm_info
+      until: not created_vm_info.failed
+      retries: 5
+      delay: 2
+
+    - name: Getting {{ custom_vm_name | default(vm_name) }} interfaces...
+      vmware.vmware_rest.vcenter_vm_hardware_ethernet_info:
+        vm: "{{ created_vm_info.value[0].vm }}"
+      register: existing_nic
+      until: not existing_nic.failed
+      retries: 5
+      delay: 2
+
+    - name: Getting the default network name...
+      vmware.vmware_rest.vcenter_network_info:
+        filter_names: "{{ default_interface[0].name }}"
+      register: default_network
+      until: not default_network.failed
+      retries: 5
+      delay: 2
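+
+    # Illustrative shape of the inventory-supplied `interfaces` list that
+    # default_interface and primary_network in defaults/main.yml select from
+    # (names and addresses are placeholders):
+    #   interfaces:
+    #     - cloud_id: example-network
+    #       connection: true
+    #       addresses:
+    #         - address: 10.0.0.10/24
+    #           mode: static
+    #           connection: true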
+    - name: Connecting the default interface...
+      vmware.vmware_rest.vcenter_vm_hardware_ethernet:
+        vm: "{{ created_vm_info.value[0].vm }}"
+        type: "{{ default_interface[0].device_type | upper }}"
+        nic: "{{ existing_nic.value[0].nic }}"
+        backing:
+          type: "{{ default_network.value[0].type }}"
+          network: "{{ default_network.value[0].network }}"
+        start_connected: true
+      register: add_default_nic
+      until: not add_default_nic.failed
+      retries: 5
+      delay: 2
+
+    - name: Looping over the extra_nics task... # Using a loop so that the NIC order stays intact
+      ansible.builtin.include_tasks: extra_nics.yml
+      loop: "{{ extra_network_interfaces | map(attribute='name') }}"
+      loop_control:
+        index_var: loop_index
+      when: extra_interfaces
+
+  delegate_to: localhost
+  become: false
+  when:
+    - fresh_deploy
+    - not manual_fresh_deploy # Since this block can only be run once, during the clone
+
+- name: Starting VM...
+  community.vmware.vmware_guest_powerstate:
+    state: powered-on
+    name: "{{ custom_vm_name | default(vm_name) }}"
+    folder: "{{ folder }}"
+  delegate_to: localhost
+  become: false
+  when:
+    - fresh_deploy or vcenter_vm_info.value[0].power_state != "POWERED_ON"
+    - not just_create

diff --git a/nova/core/roles/create/tasks/vsphere/extra_nics.yml b/nova/core/roles/create/tasks/vsphere/extra_nics.yml
new file mode 100644
index 00000000..f0e5c20c
--- /dev/null
+++ b/nova/core/roles/create/tasks/vsphere/extra_nics.yml
@@ -0,0 +1,17 @@
+---
+- name: Getting extra network interfaces...
+  vmware.vmware_rest.vcenter_network_info:
+    filter_names: "{{ item }}"
+  register: extra_nics
+  until: not extra_nics.failed
+  retries: 5
+  delay: 2
+
+- name: Adding {{ extra_nics.value[0].name }} network interface...
+  vmware.vmware_rest.vcenter_vm_hardware_ethernet:
+    vm: "{{ created_vm_info.value[0].vm }}"
+    type: "{{ extra_network_interfaces[loop_index].device_type | default('vmxnet3') | upper }}"
+    backing:
+      type: "{{ extra_nics.value[0].type }}"
+      network: "{{ extra_nics.value[0].network }}"
+    start_connected: true

diff --git a/nova/core/roles/create/tasks/vsphere/main.yml b/nova/core/roles/create/tasks/vsphere/main.yml
new file mode 100644
index 00000000..d02fcff4
--- /dev/null
+++ b/nova/core/roles/create/tasks/vsphere/main.yml
@@ -0,0 +1,11 @@
+---
+- name: Setting required facts for deploy...
+  ansible.builtin.include_tasks: setup_vars.yml
+  when: deploy_mode == 'deploy' or deploy_mode == 'redeploy'
+
+- name: Including vSphere VM removal tasks...
+  ansible.builtin.include_tasks: remove.yml
+  when: deploy_mode == 'undeploy' or deploy_mode == 'redeploy'
+
+- name: Including vSphere VM creation tasks...
+  ansible.builtin.include_tasks: create.yml

diff --git a/nova/core/roles/create/tasks/vsphere/remove.yml b/nova/core/roles/create/tasks/vsphere/remove.yml
new file mode 100644
index 00000000..2ab5b8a2
--- /dev/null
+++ b/nova/core/roles/create/tasks/vsphere/remove.yml
@@ -0,0 +1,25 @@
+---
+- name: Removing VM...
+  community.vmware.vmware_guest:
+    datacenter: "{{ datacenter }}"
+    name: "{{ custom_vm_name | default(vm_name) }}"
+    cluster: "{{ cluster }}"
+    resource_pool: "{{ resource_pool | default(omit) }}"
+    folder: "{{ folder }}"
+    state: absent
+    force: true
+  delegate_to: localhost
+  when:
+    - not no_undeploy
+    - "'no_undeploy' not in group_names"
+    - not immutable
+    - "'immutable' not in group_names"
+
+- name: Warning...
+  ansible.builtin.debug:
+    msg: "{{ inventory_hostname }} has no_undeploy or immutable set and won't be removed!"
+  when: immutable or no_undeploy or 'immutable' in group_names or 'no_undeploy' in group_names
+
+- name: Stopping play...
+  ansible.builtin.meta: end_host
+  when: deploy_mode == 'undeploy'

diff --git a/nova/core/roles/create/tasks/vsphere/setup_vars.yml b/nova/core/roles/create/tasks/vsphere/setup_vars.yml
new file mode 100644
index 00000000..889d1238
--- /dev/null
+++ b/nova/core/roles/create/tasks/vsphere/setup_vars.yml
@@ -0,0 +1,17 @@
+---
+- name: Checking if the VM has extra network interfaces...
+  ansible.builtin.set_fact:
+    extra_interfaces: true
+  when: interfaces | length > 1
+
+- name: Creating a list of extra NICs...
+  ansible.builtin.set_fact:
+    extra_network_interfaces: "{{ extra_network_interfaces + [merged] }}"
+  loop: "{{ interfaces[1:] }}"
+  loop_control:
+    label: "{{ item.cloud_id }}"
+  vars:
+    merged:
+      name: "{{ item.cloud_id }}"
+      device_type: "{{ vmware_nic_type }}"
+  when: extra_interfaces

From 147092ec9e1ae5035d7fe9483f6845c316c64665 Mon Sep 17 00:00:00 2001
From: Allar Viik
Date: Fri, 15 Sep 2023 13:33:03 +0300
Subject: [PATCH 3/7] Updated galaxy.yml with the current collection
 information.

---
 nova/core/galaxy.yml | 56 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 56 insertions(+)
 create mode 100644 nova/core/galaxy.yml

diff --git a/nova/core/galaxy.yml b/nova/core/galaxy.yml
new file mode 100644
index 00000000..ae199c05
--- /dev/null
+++ b/nova/core/galaxy.yml
@@ -0,0 +1,56 @@
+### REQUIRED
+# The namespace of the collection. This can be a company/brand/organization or product namespace under which all
+# content lives. May only contain alphanumeric lowercase characters and underscores. Namespaces cannot start with
+# underscores or numbers and cannot contain consecutive underscores
+namespace: nova
+
+# The name of the collection. Has the same character restrictions as 'namespace'
+name: core
+
+# The version of the collection. Must be compatible with semantic versioning
+version: 0.0.1
+
+# The path to the Markdown (.md) readme file. This path is relative to the root of the collection
+readme: README.md
+
+# A list of the collection's content authors. Can be just the name or in the format 'Full Name <email> (url)
+# @nicks:irc/im.site#channel'
+authors:
+  - https://github.com/novateams
+
+### OPTIONAL but strongly recommended
+# A short summary description of the collection
+description: This is a collection of public roles and plugins that are developed by the Nova team. These roles go very well with Catapult https://github.com/ClarifiedSecurity/catapult but can also be used separately.
+
+# The license or list of licenses for the collection. This key is mutually exclusive with 'license_file'
+license:
+  - AGPL-3.0-or-later
+
+# A list of tags you want to associate with the collection for indexing/searching. A tag name has the same character
+# requirements as 'namespace' and 'name'
+tags: []
+
+# Collections that this collection requires to be installed for it to be usable. The key of the dict is the
+# collection label 'namespace.name'. The value is a version range
+# L(specifiers,https://python-semanticversion.readthedocs.io/en/latest/#requirement-specification). Multiple version
+# range specifiers can be set and are separated by ','
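+# Example (illustrative only — this collection currently declares no dependencies,
+# though its roles use modules from collections such as ansible.utils and community.vmware):
+# dependencies:
+#   "ansible.utils": ">=2.0.0"
+#   "community.vmware": ">=3.0.0"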
+dependencies: {}
+
+# The URL of the originating SCM repository
+repository: https://github.com/novateams/nova.core
+
+# The URL to any online docs
+documentation: COMING SOON
+
+# The URL to the homepage of the collection/project
+homepage: https://github.com/novateams/nova.core
+
+# The URL to the collection issue tracker
+issues: https://github.com/novateams/nova.core/issues
+
+# A list of file glob-like patterns used to filter any files or directories that should not be included in the build
+# artifact. A pattern is matched from the relative path of the file or directory of the collection directory. This
+# uses 'fnmatch' to match the files or directories. Some directories and files like 'galaxy.yml', '*.pyc', '*.retry',
+# and '.git' are always filtered
+build_ignore: []

From f2746cd6f83e981b81e5f296df36ca64d94e8b57 Mon Sep 17 00:00:00 2001
From: Allar Viik
Date: Fri, 15 Sep 2023 13:33:39 +0300
Subject: [PATCH 4/7] Set the correct Ansible version in runtime.yml

---
 nova/core/meta/runtime.yml | 2 ++
 1 file changed, 2 insertions(+)
 create mode 100644 nova/core/meta/runtime.yml

diff --git a/nova/core/meta/runtime.yml b/nova/core/meta/runtime.yml
new file mode 100644
index 00000000..d4e983a0
--- /dev/null
+++ b/nova/core/meta/runtime.yml
@@ -0,0 +1,2 @@
+---
+requires_ansible: ">=2.13.12"

From 12b7aa5c8ed1639696fc1438a02284787f585909 Mon Sep 17 00:00:00 2001
From: Allar Viik
Date: Fri, 15 Sep 2023 13:33:59 +0300
Subject: [PATCH 5/7] Updated .gitignore & README.md

---
 .gitignore | 2 ++
 README.md  | 8 ++++++--
 2 files changed, 8 insertions(+), 2 deletions(-)
 create mode 100644 .gitignore

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 00000000..6a27afd5
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,2 @@
+.githooks
+.vscode

diff --git a/README.md b/README.md
index 67c2bc2a..534ca689 100644
--- a/README.md
+++ b/README.md
@@ -1,2 +1,6 @@
-# nova.core
-Ansible collection for roles and plugins
+# Ansible Collection - nova.core
+
+This is an Ansible collection of roles and an inventory plugin for [Providentia](https://github.com/ClarifiedSecurity/Providentia), developed by the Nova team. The collection is the culmination of years of cyber defense exercises and is maintained by:
+
+- [Clarified Security](https://www.clarifiedsecurity.com)
+- [CCDCOE](https://ccdcoe.org/)

From 2f7f73c9eba8e4ed977d0f83da7c864b5aa57945 Mon Sep 17 00:00:00 2001
From: Allar Viik
Date: Fri, 15 Sep 2023 13:35:48 +0300
Subject: [PATCH 6/7] Added contribution info

---
 .github/CONTRIBUTING.md | 7 +++++++
 1 file changed, 7 insertions(+)
 create mode 100644 .github/CONTRIBUTING.md

diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md
new file mode 100644
index 00000000..b2a922d8
--- /dev/null
+++ b/.github/CONTRIBUTING.md
@@ -0,0 +1,7 @@
+# Contributing to the Project
+
+By making contributions to this project, you acknowledge that these contributions are either your original work or have been authorized by your employer. Additionally, you grant an unrestricted, perpetual, and irrevocable copyright license to all present and future users and developers of the project. This license is granted in accordance with the existing license of the project.
+
+Please note that by submitting your contributions, you affirm that you have the necessary rights to grant this license and that your contributions will be made available under the terms and conditions of the project's existing license.
+
+Thank you for your contributions to the project!

From 20d2d1c7daab328ce0773199ca11c52f32f325c4 Mon Sep 17 00:00:00 2001
From: Allar Viik
Date: Fri, 15 Sep 2023 13:36:51 +0300
Subject: [PATCH 7/7] Added action to automatically update the collection
 version on push to main

---
 .github/workflows/release.yml | 56 +++++++++++++++++++++++++++++++++++
 1 file changed, 56 insertions(+)
 create mode 100644 .github/workflows/release.yml

diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
new file mode 100644
index 00000000..567fe9f1
--- /dev/null
+++ b/.github/workflows/release.yml
@@ -0,0 +1,56 @@
+name: Updating collection version, git tag and release
+
+on:
+  push:
+    branches:
+      - main
+
+jobs:
+  version_collection_and_tag:
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v2
+        with:
+          fetch-depth: 0
+
+      - name: Configuring collection & tag versions
+        run: |
+          target_file="nova/core/galaxy.yml"
+
+          # Configuring git
+          git config --global user.name "Nova CI"
+          git config --global user.email "Nova@example.com"
+
+          # Updating the version in the galaxy.yml file
+          version_row_old=$(grep "version: " $target_file)
+          version=$(echo $version_row_old | cut -d: -f2)
+          major=$(echo $version | cut -d. -f1)
+          minor=$(echo $version | cut -d. -f2)
+          patch=$(echo $version | cut -d. -f3)
+          patch_new=$(( $patch+1 ))
+          version_row_new="version: $major.$minor.$patch_new"
+          sed -i "s/$version_row_old/$version_row_new/" $target_file
+
+          TAG_NAME="v$major.$minor.$patch_new"
+          echo "LATEST_TAG=$TAG_NAME" >> $GITHUB_ENV
+
+          # Adding the changed file to git
+          git add $target_file
+
+          # Committing the change
+          git commit -m "Set nova.core collection version to $major.$minor.$patch_new"
+          git push
+
+          # Tagging and pushing the change
+          git tag $TAG_NAME
+          git push origin $TAG_NAME
+
+          # Creating temp changelog file
+          git log --pretty=format:"- %s" $(git describe --tags --abbrev=0 HEAD^^)..HEAD > CHANGELOG.md
+
+      - uses: ncipollo/release-action@v1
+        with:
+          tag: ${{ env.LATEST_TAG }}
+          bodyFile: CHANGELOG.md
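
Once the release workflow has tagged a version, consumers can pin the collection in a requirements.yml — a minimal sketch, assuming the collection is published to a reachable Galaxy server (the version shown is illustrative):

    collections:
      - name: nova.core
        version: ">=0.0.1"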