diff --git a/provider/bootc_image_builder/aws_utils.py b/provider/bootc_image_builder/aws_utils.py
new file mode 100644
index 00000000000..9e306e75c84
--- /dev/null
+++ b/provider/bootc_image_builder/aws_utils.py
@@ -0,0 +1,197 @@
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# Copyright Red Hat
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+# Author: chwen@redhat.com
+#
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+"""Helper functions for AWS management"""
+
+import logging
+import os
+import shutil
+import textwrap
+import pathlib
+
+from avocado.utils import process
+from avocado.core import exceptions
+
+LOG = logging.getLogger('avocado.' + __name__)
+
+
+def install_aws_cli_tool(params):
+    """
+    Download the AWS command line tool and install it
+
+    :param params: one dictionary wrapping all parameters
+    """
+    vm_arch_name = params.get("vm_arch_name", "x86_64")
+    aws_install_cmd = f"curl https://awscli.amazonaws.com/awscli-exe-linux-{vm_arch_name}.zip -o awscliv2.zip" \
+                      f" && unzip awscliv2.zip -d ./ && ./aws/install"
+    if not os.path.exists("/usr/local/bin/aws"):
+        # Clean up leftovers from a previous, possibly broken, installation
+        if os.path.exists("/usr/local/aws-cli"):
+            shutil.rmtree("/usr/local/aws-cli")
+        if os.path.exists("/usr/local/bin/aws_completer"):
+            # aws_completer is a plain file, so it cannot be removed with rmtree
+            os.remove("/usr/local/bin/aws_completer")
+        if os.path.exists("aws"):
+            shutil.rmtree("aws")
+        if os.path.exists("awscliv2.zip"):
+            os.remove("awscliv2.zip")
+        process.run(aws_install_cmd, shell=True, ignore_status=False)
+
+
+def create_aws_credentials_file(aws_access_key_id, aws_access_key):
+    """
+    Create AWS credentials file
+
+    :param aws_access_key_id: AWS access key id
+    :param aws_access_key: AWS access key
+    """
+    folder = os.path.expanduser("~/.aws")
+    if not os.path.exists(folder):
+        os.mkdir(folder)
+    secret_path = pathlib.Path(folder) / "credentials"
+    secret_path.write_text(textwrap.dedent(f"""
+        [default]
+        AWS_ACCESS_KEY_ID={aws_access_key_id}
+        AWS_SECRET_ACCESS_KEY={aws_access_key}
+        """), encoding="utf8")
+
+    return os.path.join(folder, "credentials")
+
+
+def create_aws_config_file(aws_region):
+    """
+    Create AWS configuration file
+
+    :param aws_region: AWS region
+    """
+    folder = os.path.expanduser("~/.aws")
+    if not os.path.exists(folder):
+        os.mkdir(folder)
+    secret_path = pathlib.Path(folder) / "config"
+    secret_path.write_text(textwrap.dedent(f"""
+        [default]
+        region={aws_region}
+        """), encoding="utf8")
+
+    return os.path.join(folder, "config")
+
+
+def delete_aws_ami_id(params):
+    """
+    Deregister the AWS AMI
+
+    :param params: one dictionary wrapping various parameters
+    """
+    ami_id = params.get("aws_ami_id")
+    if ami_id:
+        delete_cmd = "aws ec2 deregister-image --image-id %s" % ami_id
+        process.run(delete_cmd, shell=True, verbose=True, ignore_status=True)
+
+
+def delete_aws_ami_snapshot_id(params):
+    """
+    Delete the snapshot backing the AWS AMI
+
+    :param params: one dictionary wrapping various parameters
+    """
+    aws_ami_snapshot_id = params.get("aws_ami_snapshot_id")
+    if aws_ami_snapshot_id:
+        delete_cmd = "aws ec2 delete-snapshot --snapshot-id %s" % aws_ami_snapshot_id
+        process.run(delete_cmd, shell=True, verbose=True, ignore_status=True)
+
+
+def delete_aws_key_pair(params):
+    """
+    Delete AWS key pair
+
+    :param params: one dictionary wrapping various parameters
+    """
+    aws_key_name = params.get("aws_key_name")
+    delete_key_pair_cmd = f"aws ec2 delete-key-pair --key-name {aws_key_name}"
+    process.run(delete_key_pair_cmd, shell=True, ignore_status=True)
+
+
+def import_aws_key_pair(params):
+    """
+    Import key into AWS key pair
+
+    :param params: one dictionary wrapping various parameters
+    """
+    aws_key_name = params.get("aws_key_name")
+    check_key_pair_cmd = f"aws ec2 describe-key-pairs --key-name {aws_key_name}"
+    status = process.run(check_key_pair_cmd, shell=True, ignore_status=True).exit_status
+    if status != 0:
+        public_key_path = os.path.join(os.path.expanduser("~/.ssh/"), "id_rsa.pub")
+        import_cmd = f"aws ec2 import-key-pair --key-name {aws_key_name} --public-key-material fileb://{public_key_path}"
+        process.run(import_cmd, shell=True, verbose=True, ignore_status=True)
+
+
+def create_aws_instance(params):
+    """
+    Create AWS instance
+
+    :param params: one dictionary wrapping various parameters
+    """
+    import_aws_key_pair(params)
+    vm_name = params.get("vm_name_bootc")
+    aws_key_name = params.get("aws_key_name")
+    aws_ami_id = params.get("aws_ami_id")
+    aws_subnet_id = params.get("aws_subnet_id")
+    aws_security_group = params.get("aws_security_group")
+    aws_instance_type = params.get("aws_instance_type")
+    create_aws_instance_cmd = "aws ec2 run-instances --image-id %s --count 1" \
+                              " --security-group-ids %s" \
+                              " --instance-type %s --key-name %s --subnet-id %s" \
+                              " --associate-public-ip-address --tag-specifications 'ResourceType=instance,Tags=[{Key=Name,Value=%s}]'" \
+                              " --block-device-mappings '[{\"DeviceName\":\"/dev/xvda\",\"Ebs\":{\"VolumeSize\":20,\"VolumeType\":\"gp2\"}}]'" \
+                              " --query 'Instances[0].InstanceId' --output text" % (aws_ami_id, aws_security_group, aws_instance_type, aws_key_name, aws_subnet_id, vm_name)
+    instance_id = process.run(create_aws_instance_cmd, shell=True, ignore_status=True).stdout_text.strip()
+    params.update({"aws_instance_id": instance_id})
+    return instance_id
+
+
+def wait_aws_instance_running(params):
+    """
+    Wait for AWS instance to reach the running state
+
+    :param params: one dictionary wrapping various parameters
+    """
+    aws_instance_id = params.get("aws_instance_id")
+    if aws_instance_id:
+        wait_aws_instance_cmd = f"timeout 30 aws ec2 wait instance-running --instance-ids {aws_instance_id}"
+        process.run(wait_aws_instance_cmd, shell=True, ignore_status=True)
+
+
+def get_aws_instance_privateip(params):
+    """
+    Get AWS instance private ip
+
+    :param params: one dictionary wrapping various parameters
+    """
+    wait_aws_instance_running(params)
+    aws_instance_id = params.get("aws_instance_id")
+    if aws_instance_id:
+        get_aws_instance_privateip_cmd = f"aws ec2 describe-instances --instance-ids {aws_instance_id}" \
+                                         f" --query 'Reservations[*].Instances[*].PrivateIpAddress' --output text"
+        private_ip = process.run(get_aws_instance_privateip_cmd, shell=True, ignore_status=True).stdout_text.strip()
+        return private_ip
+    else:
+        raise exceptions.TestFail("AWS instance does not exist yet")
+
+
+def terminate_aws_instance(params):
+    """
+    Terminate AWS instance
+
+    :param params: one dictionary wrapping various parameters
+    """
+    aws_instance_id = params.get("aws_instance_id")
+    if aws_instance_id:
+        terminate_aws_instance_cmd = f"aws ec2 terminate-instances --instance-ids {aws_instance_id}"
+        process.run(terminate_aws_instance_cmd, shell=True, ignore_status=True)
+        wait_instance_terminated = f"timeout 20 aws ec2 wait instance-terminated --instance-ids {aws_instance_id}"
+        process.run(wait_instance_terminated, shell=True, ignore_status=True)
diff --git a/provider/bootc_image_builder/bootc_image_build_utils.py b/provider/bootc_image_builder/bootc_image_build_utils.py
new file mode 100644
index 00000000000..0ee68c73751
--- /dev/null
+++ b/provider/bootc_image_builder/bootc_image_build_utils.py
@@ -0,0 +1,713 @@
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# Copyright Red Hat
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+# Author: chwen@redhat.com
+#
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+"""Helper functions for bootc image builder"""
+
+import logging
+import json
+import os
+import random
+import shutil
+import time
+import textwrap
+import pathlib
+
+from avocado.utils import path, process
+from avocado.core import exceptions
+from virttest import utils_package
+from virttest import remote
+
+from provider.bootc_image_builder import aws_utils
+
+LOG = logging.getLogger('avocado.' + __name__)
+
+
+def install_bib_packages():
+    """
+    Install the packages required by bootc image builder
+
+    """
+    package_list = ["podman", "skopeo", "virt-install", "curl", "virt-manager"]
+    for pkg in package_list:
+        try:
+            path.find_command(pkg)
+        except path.CmdNotFoundError:
+            utils_package.package_install(pkg)
+
+
+def podman_command_build(bib_image_url, disk_image_type, image_ref, config=None, local_container=False, tls_verify="true", chownership=None,
+                         key_store_mounted=None, target_arch=None, rootfs=None, options=None, **dargs):
+    """
+    Use podman run command to launch bootc image builder
+
+    :param bib_image_url: bootc image builder url
+    :param disk_image_type: image type to build [qcow2, ami] (default "qcow2")
+    :param image_ref: image reference
+    :param config: config file
+    :param local_container: whether to use a local container image
+    :param tls_verify: whether to verify the TLS connection
+    :param chownership: ownership ("uid:gid") to set on the output, if any
+    :param key_store_mounted: keystore folder to mount, if any
+    :param target_arch: architecture to build for, if any
+    :param rootfs: rootfs type to use, if any
+    :param options: additional options if needed
+    :param dargs: standardized function API keywords
+    :return: CmdResult object
+    """
+    if not os.path.exists("/var/lib/libvirt/images/output"):
+        os.makedirs("/var/lib/libvirt/images/output")
+    cmd = "sudo podman run --rm -it --privileged --pull=newer --security-opt label=type:unconfined_t -v /var/lib/libvirt/images/output:/output"
+    if config:
+        cmd += " -v %s:/config.json " % config
+
+    if local_container:
+        cmd += " -v /var/lib/containers/storage:/var/lib/containers/storage "
+
+    if key_store_mounted:
+        cmd += " -v %s " % key_store_mounted
+
+    if dargs.get('aws.secrets'):
+        cmd += " --env-file=%s " % dargs.get('aws.secrets')
+
+    cmd += " %s " \
+           " --type %s --tls-verify=%s " % (bib_image_url, disk_image_type, tls_verify)
+
+    if config:
+        cmd += " --config /config.json "
+
+    if target_arch:
+        cmd += " --target-arch=%s " % target_arch
+
+    aws_ami_name = dargs.get('aws_ami_name')
+    if aws_ami_name:
+        # Append a random suffix so repeated runs do not collide on AMI name
+        random_int = random.randint(1, 1000)
+        vm_arch_name = dargs.get("vm_arch_name", "x86_64")
+        LOG.debug(f"vm_arch_name value in podman build is: {vm_arch_name}")
+        aws_ami_name = f"{aws_ami_name}_{vm_arch_name}_{random_int}"
+        cmd += f" --aws-ami-name {aws_ami_name} --aws-bucket {dargs.get('aws_bucket')} --aws-region {dargs.get('aws_region')} "
+
+    if local_container:
+        cmd += " --local %s " % image_ref
+    else:
+        cmd += " %s " % image_ref
+
+    if chownership:
+        cmd += " --chown %s " % chownership
+
+    if rootfs:
+        cmd += " --rootfs %s " % rootfs
+
+    if options is not None:
+        cmd += " %s" % options
+
+    debug = dargs.get("debug", True)
+
+    ignore_status = dargs.get("ignore_status", False)
+    timeout = int(dargs.get("timeout", "1800"))
+    LOG.debug("the whole podman command: %s\n" % cmd)
+
+    ret = process.run(
+        cmd, timeout=timeout, verbose=debug, ignore_status=ignore_status, shell=True)
+
+    return ret
+
+
+def podman_login(podman_username, podman_password, registry):
+    """
+    Use podman to log in to a registry
+
+    :param podman_username: podman username
+    :param podman_password: podman password
+    :param registry: registry to login
+    """
+    command = "sudo podman login -u='%s' -p='%s' %s " % (podman_username, podman_password, registry)
+    process.run(
+        command, timeout=60, verbose=True, ignore_status=False, shell=True)
+
+
+def podman_push(podman_username, podman_password, registry, container_url):
+    """
+    Use podman to push an image to a registry
+
+    :param podman_username: podman username
+    :param podman_password: podman password
+    :param registry: registry to login
+    :param container_url: image url
+    """
+    podman_login(podman_username, podman_password, registry)
+    command = "sudo podman push %s " % container_url
+    process.run(
+        command, timeout=1200, verbose=True, ignore_status=False, shell=True)
+
+
+def create_config_json_file(folder, username, password):
+    """
+    Create the config.json file consumed by bootc image builder
+
+    :param folder: the folder that config.json resides in
+    :param username: user name
+    :param password: user password
+    """
+    public_key_path = os.path.join(os.path.expanduser("~/.ssh/"), "id_rsa.pub")
+    if not os.path.exists(public_key_path):
+        LOG.debug("public key doesn't exist, will help create one")
+        key_gen_cmd = "ssh-keygen -q -t rsa -N '' <<< $'\ny' >/dev/null 2>&1"
+        process.run(key_gen_cmd, shell=True, ignore_status=False)
+
+    with open(public_key_path, 'r') as ssh:
+        key_value = ssh.read().rstrip()
+    cfg = {
+        "blueprint": {
+            "customizations": {
+                "user": [
+                    {
+                        "name": username,
+                        "password": password,
+                        "groups": ["wheel"],
+                        "key": "%s" % key_value,
+                    },
+                ],
+                "kernel": {"append": "mitigations=auto,nosmt"},
+            },
+        },
+    }
+    LOG.debug("config.json content: %s", cfg)
+    config_json_path = pathlib.Path(folder) / "config.json"
+    config_json_path.write_text(json.dumps(cfg), encoding="utf-8")
+    return os.path.join(folder, "config.json")
+
+
+def create_aws_secret_file(folder, aws_access_key_id, aws_access_key):
+    """
+    Create aws secret key file
+
+    :param folder: folder that holds the secret file
+    :param aws_access_key_id: aws access key id
+    :param aws_access_key: aws access key
+    """
+    secret_path = pathlib.Path(folder) / "aws.secrets"
+    secret_path.write_text(textwrap.dedent(f"""
+        AWS_ACCESS_KEY_ID={aws_access_key_id}
+        AWS_SECRET_ACCESS_KEY={aws_access_key}
+        """), encoding="utf8")
+
+    return os.path.join(folder, "aws.secrets")
+
+
+def create_and_build_container_file(params):
+    """
+    Create container file and build container tag
+
+    :param params: one dictionary to wrap up all parameters
+    """
+    folder = params.get("container_base_folder")
+    build_container = params.get("build_container")
+    container_tag = params.get("container_url")
+
+    # clean up existing image
+    clean_image_cmd = "sudo podman rmi %s" % container_tag
+    process.run(clean_image_cmd, shell=True, ignore_status=True)
+    etc_config = ''
+    dnf_vmware_tool = ''
+
+    # create VMware tool
+    if params.get("add_vmware_tool") == "yes":
+        vmware_tool_path = os.path.join(folder, "etc/vmware-tools/")
+        if not os.path.exists(vmware_tool_path):
+            os.makedirs(vmware_tool_path)
+        etc_config = "COPY etc/ /etc/"
+        dnf_vmware_tool = "dnf -y install open-vm-tools && dnf clean all && systemctl enable vmtoolsd.service && "
+
+        download_vmware_config_cmd = "curl https://gitlab.com/fedora/bootc/" \
+                                     "examples/-/raw/main/vmware/etc/vmware-tools/tools.conf > %s/tools.conf" % vmware_tool_path
+        process.run(download_vmware_config_cmd, shell=True, verbose=True, ignore_status=True)
+
+    container_path = pathlib.Path(folder) / "Containerfile_tmp"
+    shutil.copy("/etc/yum.repos.d/beaker-BaseOS.repo", folder)
+    shutil.copy("/etc/yum.repos.d/beaker-AppStream.repo", folder)
+    container_file_content = f"""\n
+        FROM {build_container}
+        {etc_config}
+        COPY beaker-BaseOS.repo /etc/yum.repos.d/
+        COPY beaker-AppStream.repo /etc/yum.repos.d/
+        RUN {dnf_vmware_tool} dnf install -y vim && dnf clean all
+        """
+
+    custom_repo = params.get("custom_repo")
+    if custom_repo:
+        repo_path = pathlib.Path(folder) / "rhel-9.4.repo"
+        repo_prefix = "rhel-9.4"
+        if "rhel-9.5" in custom_repo:
+            repo_path = pathlib.Path(folder) / "rhel-9.5.repo"
+            repo_prefix = "rhel-9.5"
+        compose_url = params.get("compose_url")
+        baseurl = get_baseurl_from_repo_file("/etc/yum.repos.d/beaker-AppStream.repo")
+        if baseurl:
+            compose_url = baseurl
+        vm_arch_name = params.get("vm_arch_name", "x86_64")
+        repo_content = f"""\n
+            [{repo_prefix}-baseos]
+            baseurl={compose_url}/compose/BaseOS/{vm_arch_name}/os/
+            enabled=1
+            gpgcheck=0
+            sslverify=0\n
+            [{repo_prefix}-appstream]
+            baseurl={compose_url}/compose/AppStream/{vm_arch_name}/os/
+            enabled=1
+            gpgcheck=0
+            sslverify=0\n
+            """
+        nfv_repo_content = f"""
+            [{repo_prefix}-nfv]
+            baseurl={compose_url}/compose/NFV/{vm_arch_name}/os/
+            enabled=1
+            gpgcheck=0
+            sslverify=0\n
+            """
+        # the NFV compose only exists on x86_64
+        if "x86_64" in vm_arch_name:
+            repo_content = repo_content + nfv_repo_content
+        repo_path.write_text(textwrap.dedent(repo_content), encoding="utf8")
+        container_file_content = f"""\n
+            FROM {build_container}
+            COPY {custom_repo} /etc/yum.repos.d/
+            RUN dnf clean all
+            """
+
+    container_path.write_text(textwrap.dedent(container_file_content), encoding="utf8")
+    build_cmd = "sudo podman build -t %s -f %s" % (container_tag, str(container_path))
+    process.run(build_cmd, shell=True, ignore_status=False)
+
+
+def create_and_start_vmware_vm(params):
+    """
+    prepare environment, upload vmdk, create and start vm
+
+    :param params: one dictionary wrapping various parameters
+    """
+    image_type = params.get("disk_image_type")
+    try:
+        install_vmware_govc_tool(params)
+        setup_vCenter_env(params)
+        if image_type == "vmdk":
+            import_vmdk_to_vCenter(params)
+        elif image_type == "anaconda-iso":
+            import_iso_to_vCenter(params)
+        create_vm_in_vCenter(params)
+        if image_type == "vmdk":
+            attach_disk_to_vm(params)
+        elif image_type == "anaconda-iso":
+            attach_iso_to_vm(params)
+            create_vmdk_on_vm(params)
+
+        power_on_vm(params)
+        add_vmware_tool = "yes" == params.get("add_vmware_tool")
+        if add_vmware_tool:
+            verify_ssh_login_vm(params)
+    finally:
+        delete_vm_if_present(params)
+
+
+def create_and_start_cloud_vm(params):
+    """
+    prepare environment, create and start VM in cloud
+
+    :param params: one dictionary wrapping various parameters
+    """
+    try:
+        aws_utils.create_aws_instance(params)
+        verify_ssh_login_vm(params)
+    finally:
+        cleanup_aws_env(params)
+
+
+def install_vmware_govc_tool(params):
+    """
+    Download VMware govc tool and install it
+
+    :param params: one dictionary wrapping various parameters
+    """
+    vm_arch_name = params.get("vm_arch_name", "x86_64")
+    # govc release assets name the 64-bit Arm build "arm64", not "aarch64"
+    if "aarch64" in vm_arch_name:
+        vm_arch_name = "arm64"
+    govc_install_cmd = f"curl -L -o - 'https://github.com/vmware/govmomi/releases/latest/download/govc_Linux_{vm_arch_name}.tar.gz' " \
+                       f"| tar -C /usr/local/bin -xvzf - govc"
+    if not os.path.exists("/usr/local/bin/govc"):
+        process.run(govc_install_cmd, shell=True, ignore_status=False)
+
+
+def setup_vCenter_env(params):
+    """
+    Export the vCenter connection settings that govc expects
+
+    :param params: one dictionary wrapping various parameters
+    """
+    # vCenter information
+    os.environ["GOVC_URL"] = params.get("GOVC_URL")
+    os.environ["GOVC_USERNAME"] = params.get("GOVC_USERNAME")
+    os.environ["GOVC_PASSWORD"] = params.get("GOVC_PASSWORD")
+    os.environ["DATA_CENTER"] = params.get("DATA_CENTER")
+    os.environ["GOVC_DATASTORE"] = params.get("DATA_STORE")
+    os.environ["GOVC_INSECURE"] = "true"
+    process.run("govc about", shell=True, ignore_status=False)
+
+
+def import_vmdk_to_vCenter(params):
+    """
+    import vmdk into vCenter
+
+    :param params: one dictionary wrapping various parameters
+    """
+    delete_datastore_if_existed(params)
+    import_cmd = f"govc import.vmdk -force=true {params.get('vm_disk_image_path')}"
+    process.run(import_cmd, shell=True, verbose=True, ignore_status=False)
+
+
+def create_vmdk_on_vm(params):
+    """
+    create empty vmdk on VM
+
+    :param params: one dictionary wrapping various parameters
+    """
+    vm_name = params.get("vm_name_bootc")
+    create_vmdk_cmd = f"govc vm.disk.create -vm {vm_name} -name {vm_name}.vmdk -size 10G"
+    process.run(create_vmdk_cmd, shell=True, verbose=True, ignore_status=False)
+
+
+def import_iso_to_vCenter(params):
+    """
+    import iso into vCenter
+
+    :param params: one dictionary wrapping various parameters
+    """
+    vm_name = params.get("vm_name_bootc")
+    iso_location = params.get("vm_disk_image_path")
+    check_iso_cmd = f"govc datastore.ls {vm_name}"
+    if process.run(check_iso_cmd, shell=True, verbose=True, ignore_status=True).exit_status == 0:
+        delete_iso_cmd = f"govc datastore.rm -f {vm_name}"
+        process.run(delete_iso_cmd, shell=True, verbose=True, ignore_status=False)
+
+    import_iso_cmd = f"govc datastore.upload {iso_location} {vm_name}"
+    process.run(import_iso_cmd, shell=True, verbose=True, ignore_status=False)
+
+
+def create_vm_in_vCenter(params):
+    """
+    create VM in vCenter
+
+    :param params: one dictionary wrapping various parameters
+    """
+    create_cmd = "govc vm.create -net='VM Network' -on=false -c=2 " \
+                 "-m=4096 -g=centos9_64Guest -firmware=%s %s" % (params.get("firmware"), params.get("vm_name_bootc"))
+    process.run(create_cmd, shell=True, verbose=True, ignore_status=False)
+
+
+def attach_disk_to_vm(params):
+    """
+    attach disk to VM in vCenter
+
+    :param params: one dictionary wrapping various parameters
+    """
+    vm_name = params.get("vm_name_bootc")
+    attach_cmd = "govc vm.disk.attach -vm %s" \
+                 " -controller %s -link=false -disk=%s/%s.vmdk" % (vm_name, params.get("controller"), vm_name, vm_name)
+    process.run(attach_cmd, shell=True, verbose=True, ignore_status=False)
+
+
+def attach_iso_to_vm(params):
+    """
+    attach ISO to VM in vCenter
+
+    :param params: one dictionary wrapping various parameters
+    """
+    vm_name = params.get("vm_name_bootc")
+    add_cdrom_cmd = f"govc device.cdrom.add -vm {vm_name}"
+    device_id = process.run(add_cdrom_cmd, shell=True, verbose=True, ignore_status=False).stdout_text.strip()
+
+    # insert the uploaded ISO into the new CDROM device
+    attach_cmd = f"govc device.cdrom.insert -vm {vm_name} -device {device_id} {vm_name}"
+    process.run(attach_cmd, shell=True, verbose=True, ignore_status=False)
+
+
+def power_on_vm(params):
+    """
+    power on VM in vCenter
+
+    :param params: one dictionary wrapping various parameters
+    """
""" + vm_name = params.get("vm_name_bootc") + wait_boot_time = int(params.get("wait_boot_time", "40")) + power_on_cmd = "govc vm.power -on=true %s" % vm_name + process.run(power_on_cmd, shell=True, verbose=True, ignore_status=False) + time.sleep(wait_boot_time) + state_cmd = "govc vm.info -json %s |jq -r .virtualMachines[0].summary.runtime.powerState" % vm_name + result = process.run(state_cmd, shell=True, verbose=True, ignore_status=False).stdout_text.strip() + if result not in "poweredOn": + raise exceptions.TestFail(f"The VM state is not powered on, real state is: {result}") + + +def verify_ssh_login_vm(params): + """ + Verify ssh login VM successfully + + @param params: one dictionary wrapping various parameter + """ + ip_address = params.get("ip_address") + disk_image_type = params.get("disk_image_type") + aws_config_dict = eval(params.get("aws_config_dict", '{}')) + if ip_address is None: + if disk_image_type in ['ami'] and len(aws_config_dict) != 0: + ip_address = aws_utils.get_aws_instance_privateip(params) + else: + ip_address = get_vm_ip_address(params) + user = params.get("os_username") + passwd = params.get("os_password") + vm_params = {} + vm_params.update({"server_ip": ip_address}) + vm_params.update({"server_user": user}) + vm_params.update({"server_pwd": passwd}) + vm_params.update({"vm_ip": ip_address}) + vm_params.update({"vm_user": user}) + vm_params.update({"vm_pwd": passwd}) + remote_vm_obj = remote.VMManager(vm_params) + remote_vm_obj.check_network() + remote_vm_obj.setup_ssh_auth() + result = remote_vm_obj.cmd_status_output("whoami")[1].strip() + LOG.debug(f" remote VM test is: {result} ") + if result not in user: + raise exceptions.TestFail(f"The expected user name should be: {user}, but actually is: {result}") + + +def get_vm_ip_address(params): + """ + Get VM ip_address in vCenter + + @param params: one dictionary wrapping various parameter + """ + vm_name = params.get("vm_name_bootc") + get_ip_cmd = "govc vm.ip -v4 -wait=3m %s" % vm_name + result = process.run(get_ip_cmd, shell=True, verbose=True, ignore_status=False) + LOG.debug("result wcf: {result.stdout_text}") + if result.stdout_text.strip() == "" or result.stdout_text.strip() is None: + raise exceptions.TestFail(f"Can not get ip address") + return result.stdout_text.strip() + + +def delete_vm_if_present(params): + """ + delete vm if present + + @param params: one dictionary wrapping various parameter + """ + vm_name = params.get("vm_name_bootc") + find_cmd = "govc find / -type m -name %s" % vm_name + cmd_result = process.run(find_cmd, shell=True, verbose=True, ignore_status=True).stdout_text + LOG.debug(f"find vm in vsphere is:{cmd_result}") + if cmd_result: + vm_path = cmd_result.strip() + delete_cmd = "govc vm.destroy %s && govc datastore.rm -f %s" % (vm_name, vm_name) + process.run(delete_cmd, shell=True, verbose=True, ignore_status=True) + + +def delete_datastore_if_existed(params): + """ + delete data store if existed + + @param params: one dictionary wrapping various parameter + """ + vm_name = params.get("vm_name_bootc") + find_cmd = "govc datastore.ls %s" % vm_name + cmd_result = process.run(find_cmd, shell=True, verbose=True, ignore_status=True).stdout_text + if cmd_result: + delete_cmd = "govc datastore.rm -f %s" % vm_name + process.run(delete_cmd, shell=True, verbose=True, ignore_status=True) + + +def parse_container_url(params): + """ + Parse repository information from container url + + @param params: wrapped dictionary containing url + """ + container_url = params.get("container_url") + 
+    repository_info = container_url.split('/')[-1]
+    repository_name = repository_info.split(':')[0]
+    if "localhost" in container_url:
+        repository_name = "localhost-%s" % repository_name
+    return repository_name
+
+
+def convert_disk_image_name(params):
+    """
+    Convert disk type image name
+
+    :param params: wrapped dictionary containing parameters
+    """
+    repository_name = parse_container_url(params)
+    origin_disk_name, extension = os.path.splitext(params.get("output_name"))
+    dest_disk_name = "%s-%s%s" % (origin_disk_name, repository_name, extension)
+    return dest_disk_name
+
+
+def virt_install_vm(params):
+    """
+    Use virt-install tool to install vm
+
+    :param params: one dictionary containing parameters
+    """
+    vm_name = params.get("vm_name_bootc")
+    disk_image_type = params.get("disk_image_type")
+    image_ref = params.get("image_ref")
+    disk_path = params.get("vm_disk_image_path")
+    iso_install_path = params.get("iso_install_path")
+    disk_format = params.get("disk_image_type")
+    firmware = params.get("firmware")
+    ovmf_code_path = params.get("ovmf_code_path")
+    ovmf_vars_path = params.get("ovmf_vars_path")
+    boot_option = ''
+    vm_arch_name = params.get("vm_arch_name", "x86_64")
+    machine_type = " --machine q35 "
+    secure_boot_feature0_enable = "yes"
+    if "aarch64" in vm_arch_name:
+        machine_type = ""
+        secure_boot_feature0_enable = "no"
+    if firmware in ['efi']:
+        if image_ref in ["centos", "fedora"]:
+            boot_option = f"--boot uefi,firmware.feature0.name=secure-boot,firmware.feature0.enabled={secure_boot_feature0_enable}," \
+                          f"firmware.feature1.name=enrolled-keys,firmware.feature1.enabled=no"
+        else:
+            boot_option = " --boot uefi"
+    if disk_image_type in ["anaconda-iso"]:
+        if os.path.exists(iso_install_path):
+            os.remove(iso_install_path)
+        cmd = ("virt-install --name %s"
+               " --disk path=%s,bus=virtio,format=qcow2,size=12"
+               " --vcpus 3 --memory 3096"
+               " --osinfo detect=on,require=off"
+               " --graphics vnc"
+               " --video virtio"
+               " --wait 10"
+               " --cdrom %s"
+               " --debug"
+               " %s "
+               " %s "
+               " --noreboot" %
+               (vm_name, iso_install_path, disk_path, machine_type, boot_option))
+    else:
+        cmd = ("virt-install --name %s"
+               " --disk path=%s,bus=virtio,format=%s"
+               " --import "
+               " --vcpus 3 --memory 3096"
+               " --osinfo detect=on,require=off"
+               " --graphics vnc --video virtio --noautoconsole --serial pty"
+               " --wait 10"
+               " --debug"
+               " %s "
+               " %s "
+               " --noreboot" %
+               (vm_name, disk_path, disk_format, machine_type, boot_option))
+    process.run(cmd, shell=True, verbose=True, ignore_status=True)
+
+
+def create_qemu_vm(params, env, test):
+    """
+    prepare environment, virt install, and login vm
+
+    :param params: one dictionary wrapping various parameters
+    :param env: environment
+    :param test: test case itself
+    """
+    # initialize vm so the finally block never hits an unbound variable
+    vm = None
+    try:
+        virt_install_vm(params)
+        vm_name = params.get("vm_name_bootc")
+        env.create_vm(vm_type='libvirt', target=None, name=vm_name, params=params, bindir=test.bindir)
+        vm = env.get_vm(vm_name)
+        if vm.is_dead():
+            LOG.debug("VM is dead, starting")
+            vm.start()
+        ip_address = vm.wait_for_get_address(nic_index=0)
+        params.update({"ip_address": ip_address.strip()})
+        verify_ssh_login_vm(params)
+        LOG.debug(f"the VM ip address is: {ip_address}")
+    finally:
+        if vm and vm.is_alive():
+            vm.destroy(gracefully=False)
+            vm.undefine(options='--nvram')
+
+
+def get_group_and_user_ids(folder_path):
+    """
+    Get the group id and user id owning a folder
+
+    :param folder_path: path to the folder
+    :return: (gid, uid) tuple, or (None, None) on error
+    """
+    try:
+        stat_info = os.stat(folder_path)
+        gid = stat_info.st_gid
+        uid = stat_info.st_uid
+        return gid, uid
+    except FileNotFoundError:
+        LOG.debug(f"Folder '{folder_path}' not found.")
+        return None, None
+    except Exception as ex:
+        LOG.debug(f"Error occurred: {ex}")
+        return None, None
+
+
+def prepare_aws_env(params):
+    """
+    One method to prepare AWS environment for image build
+
+    :param params: one collective object representing wrapped parameters
+    """
+    aws_access_key_id = params.get("aws_access_key_id")
+    aws_access_key = params.get("aws_access_key")
+    aws_secret_folder = params.get("aws_secret_folder")
+    aws_region = params.get("aws_region")
+    create_aws_secret_file(aws_secret_folder, aws_access_key_id, aws_access_key)
+    aws_utils.create_aws_credentials_file(aws_access_key_id, aws_access_key)
+    aws_utils.create_aws_config_file(aws_region)
+    aws_utils.install_aws_cli_tool(params)
+
+
+def cleanup_aws_env(params):
+    """
+    One method to clean up AWS environment for image build
+
+    :param params: one collective object representing wrapped parameters
+    """
+    aws_utils.terminate_aws_instance(params)
+    aws_utils.delete_aws_ami_id(params)
+    aws_utils.delete_aws_ami_snapshot_id(params)
+
+
+def cleanup_aws_ami_and_snapshot(params):
+    """
+    One method to clean up AWS AMI and snapshot for image build
+
+    :param params: one collective object representing wrapped parameters
+    """
+    aws_utils.delete_aws_ami_id(params)
+    aws_utils.delete_aws_ami_snapshot_id(params)
+
+
+def get_baseurl_from_repo_file(repo_file_path):
+    """
+    One method to get compose url from current repository file
+
+    :param repo_file_path: file path to repository
+    """
+    try:
+        with open(repo_file_path, 'r') as f:
+            for line in f:
+                line = line.strip()
+                if line.startswith("baseurl"):
+                    baseurl = line.split("=")[1].strip()
+                    return baseurl.split('/compose/')[0].strip()
+        return None
+    except FileNotFoundError:
+        return None
diff --git a/virttools/tests/cfg/bootc_image_builder/bootc_disk_image_build.cfg b/virttools/tests/cfg/bootc_image_builder/bootc_disk_image_build.cfg
new file mode 100644
index 00000000000..02ab2179dd2
--- /dev/null
+++ b/virttools/tests/cfg/bootc_image_builder/bootc_disk_image_build.cfg
@@ -0,0 +1,126 @@
+- bootc_image_builder.bib.disk_image_generation:
+    type = bootc_disk_image_build
+    only x86_64, aarch64
+    start_vm = "no"
+    take_regular_screendumps = "no"
+    output_base_folder = "/var/lib/libvirt/images/output"
+    # vCenter information
+    GOVC_URL = "example_url"
+    GOVC_USERNAME = "username"
+    GOVC_PASSWORD = "userpassword"
+    DATA_CENTER = "example_datacenter"
+    DATA_STORE = "example_datastore"
+    bib_image_url = "quay.io/centos-bootc/bootc-image-builder:latest"
+    registry = "quay.io"
+    variants:
+        - tls_verify_enable:
+            enable_tls_verify = "true"
+        - tls_verify_disable:
+            enable_tls_verify = "false"
+    variants config_json:
+        - use_config_json:
+            config_file_path = "/var/lib/libvirt/images"
+            os_username = "alice"
+            os_password = "bob"
+        - unuse_config_json:
+    variants image_ref:
+        - centos:
+            variants centos_bootc_image:
+                - centos9:
+                    container_url = "quay.io/centos-bootc/centos-bootc:stream9"
+                    only use_config_json..tls_verify_disable
+                - centos10:
+                    container_url = "quay.io/centos-bootc/centos-bootc:stream10"
+                    only upstream_bib..use_config_json..tls_verify_enable, rhel_9.5_nightly_bib..use_config_json..tls_verify_enable
+                    rootfs = "xfs"
+        - fedora:
+            variants fedora_bootc_image:
+                - fedora_40:
+                    only upstream_bib..tls_verify_enable, rhel_9.4_nightly_bib..tls_verify_disable
+                    no anaconda-iso..rhel_9.4_nightly_bib..tls_verify_disable
+                    ownership = "107:107"
+                    container_url = "quay.io/fedora/fedora-bootc:40"
+                    rootfs = "ext4"
+                    qcow..upstream_bib:
+                        rootfs = "xfs"
+                - fedora_latest:
+                    only upstream_bib..tls_verify_disable
+                    container_url = "quay.io/fedora/fedora-bootc:latest"
+                    rootfs = "xfs"
+                    raw..upstream_bib:
+                        rootfs = "ext4"
+        - local_image:
+            container_base_folder = "/var/lib/libvirt/images"
+            container_url = "localhost/bootc:eln"
+            local_container = "yes"
+            build_container = "registry-proxy.engineering.redhat.com/rh-osbs/rhel9-rhel_bootc:rhel-9.4"
+            rhel_9.5_nightly_bib:
+                build_container = "registry-proxy.engineering.redhat.com/rh-osbs/rhel9-rhel_bootc:rhel-9.5"
+        - rhel_9.4:
+            build_container = "registry.redhat.io/rhel9/rhel-bootc:9.4"
+            container_url = "quay.io/wenbaoxin/rhel9test"
+            only rhel_9.4_bib
+        - rhel_9.5_nightly:
+            container_url = "registry-proxy.engineering.redhat.com/rh-osbs/rhel9-rhel_bootc:rhel-9.5"
+            only rhel_9.5_nightly_bib
+            no anaconda-iso
+        - cross_build:
+            container_url = "quay.io/centos-bootc/centos-bootc:stream9"
+            target_arch = "aarch64"
+            only qcow..upstream_bib..use_config_json..tls_verify_enable
+    variants bib_ref:
+        - upstream_bib:
+            bib_image_url = "quay.io/centos-bootc/bootc-image-builder:latest"
+        - rhel_9.4_bib:
+            podman_redhat_username = "podman_redhat_username"
+            podman_redhat_password = "podman_redhat_password"
+            redhat_registry = "registry.redhat.io"
+            container_base_folder = "/var/lib/libvirt/images"
+            bib_image_url = "registry.redhat.io/rhel9/bootc-image-builder:9.4"
+            podman_quay_username = "podman_quay_username"
+            podman_quay_password = "podman_quay_password"
+            only rhel_9.4..use_config_json..tls_verify_enable
+            anaconda-iso..rhel_9.4:
+                custom_repo = "rhel-9.4.repo"
+        - rhel_9.4_nightly_bib:
+            bib_image_url = "registry-proxy.engineering.redhat.com/rh-osbs/rhel9-rhel_bootc-image-builder:rhel-9.4"
+            key_store_mounted = "/etc/pki:/etc/pki"
+            anaconda-iso..local_image:
+                custom_repo = "rhel-9.4.repo"
+                compose_url = "example_compose_url"
+            no cross_build
+        - rhel_9.5_nightly_bib:
+            bib_image_url = "registry-proxy.engineering.redhat.com/rh-osbs/rhel9-rhel_bootc-image-builder:rhel-9.5"
+            key_store_mounted = "/etc/pki:/etc/pki"
+            anaconda-iso..local_image:
+                custom_repo = "rhel-9.5.repo"
+                compose_url = "example_compose_url"
+    variants:
+        - ami:
+            disk_image_type = "ami"
+            output_sub_folder = "image"
+            output_name = "disk.raw"
+            aws_secret_folder = "/var/lib/libvirt/images"
+            aws_access_key_id = "example_aws_access_key_id"
+            aws_access_key = "example_aws_access_key"
+            aws_region = "us-east-1"
+            aws_ami_name = "build_${bib_ref}-${image_ref}-component-bootc-${disk_image_type}"
+            use_config_json..tls_verify_enable:
+                aws_config_dict = "{'aws.secrets':'${aws_secret_folder}/aws.secrets','aws_ami_name':'${aws_ami_name}','aws_bucket':'bib-component-test','aws_region':'${aws_region}'}"
+        - qcow:
+            disk_image_type = "qcow2"
+            output_sub_folder = "qcow2"
+            output_name = "disk.qcow2"
+        - vmdk:
+            disk_image_type = "vmdk"
+            output_sub_folder = "vmdk"
+            output_name = "disk.vmdk"
+        - anaconda-iso:
+            disk_image_type = "anaconda-iso"
+            output_sub_folder = "bootiso"
+            output_name = "install.iso"
+        - raw:
+            disk_image_type = "raw"
+            output_sub_folder = "image"
+            output_name = "disk.raw"
diff --git a/virttools/tests/cfg/bootc_image_builder/bootc_disk_image_install.cfg b/virttools/tests/cfg/bootc_image_builder/bootc_disk_image_install.cfg
new file mode 100644
index 00000000000..41d8ed81590
--- /dev/null
+++ b/virttools/tests/cfg/bootc_image_builder/bootc_disk_image_install.cfg
@@ -0,0 +1,137 @@
+- bootc_image_builder.bib.disk_image_install:
+    type = bootc_disk_image_install
+    only x86_64, aarch64
+    start_vm = "no"
+    take_regular_screendumps = "no"
+    output_base_folder = "/var/lib/libvirt/images/output"
+    libvirt_base_folder = "/var/lib/libvirt/images"
+    # vCenter information
+    GOVC_URL = "example_url"
+    GOVC_USERNAME = "username"
+    GOVC_PASSWORD = "userpassword"
+    DATA_CENTER = "example_datacenter"
+    DATA_STORE = "example_datastore"
+    bib_image_url = "quay.io/centos-bootc/bootc-image-builder:latest"
+    registry = "quay.io"
+    ownership = "107:107"
+    config_file_path = "/var/lib/libvirt/images"
+    os_username = "alice"
+    os_password = "bob"
+    variants:
+        - efi:
+            controller = "scsi"
+            firmware = "efi"
+            aws_instance_type = "t3.medium"
+            aarch64:
+                aws_instance_type = "c7g.medium"
+        - bios:
+            controller = "ide"
+            firmware = "bios"
+            aws_instance_type = "m4.large"
+            aarch64:
+                aws_instance_type = "m6g.medium"
+    variants image_ref:
+        - centos:
+            variants centos_bootc_image:
+                - centos9:
+                    container_url = "quay.io/centos-bootc/centos-bootc:stream9"
+                - centos10:
+                    container_url = "quay.io/centos-bootc/centos-bootc:stream10"
+                    only upstream_bib, rhel_9.5_nightly_bib
+                    rootfs = "ext4"
+        - fedora:
+            variants fedora_bootc_image:
+                - fedora_40:
+                    only upstream_bib, rhel_9.4_nightly_bib
+                    no anaconda-iso..rhel_9.4_nightly_bib
+                    container_url = "quay.io/fedora/fedora-bootc:40"
+                    rootfs = "ext4"
+                    qcow..upstream_bib:
+                        rootfs = "xfs"
+                - fedora_latest:
+                    only upstream_bib
+                    container_url = "quay.io/fedora/fedora-bootc:latest"
+                    rootfs = "xfs"
+                    raw..upstream_bib:
+                        rootfs = "ext4"
+        - rhel_9.4:
+            build_container = "registry.redhat.io/rhel9/rhel-bootc:9.4"
+            container_url = "quay.io/wenbaoxin/rhel9test"
+            only rhel_9.4_bib
+        - local_image:
+            container_base_folder = "/var/lib/libvirt/images"
+            container_url = "localhost/bootc:eln"
+            local_container = "yes"
+            build_container = "registry-proxy.engineering.redhat.com/rh-osbs/rhel9-rhel_bootc:rhel-9.4"
+            rhel_9.5_nightly_bib:
+                build_container = "registry-proxy.engineering.redhat.com/rh-osbs/rhel9-rhel_bootc:rhel-9.5"
+        - rhel_9.5_nightly:
+            container_url = "registry-proxy.engineering.redhat.com/rh-osbs/rhel9-rhel_bootc:rhel-9.5"
+            enable_tls_verify = "false"
+            only rhel_9.5_nightly_bib
+            no anaconda-iso
+    variants bib_ref:
+        - upstream_bib:
+            bib_image_url = "quay.io/centos-bootc/bootc-image-builder:latest"
+        - rhel_9.4_bib:
+            podman_redhat_username = "podman_redhat_username"
+            podman_redhat_password = "podman_redhat_password"
+            redhat_registry = "registry.redhat.io"
+            container_base_folder = "/var/lib/libvirt/images"
+            bib_image_url = "registry.redhat.io/rhel9/bootc-image-builder:9.4"
+            podman_quay_username = "podman_quay_username"
+            podman_quay_password = "podman_quay_password"
+            anaconda-iso..rhel_9.4:
+                custom_repo = "rhel-9.4.repo"
+            only rhel_9.4
+        - rhel_9.4_nightly_bib:
+            bib_image_url = "registry-proxy.engineering.redhat.com/rh-osbs/rhel9-rhel_bootc-image-builder:rhel-9.4"
+            key_store_mounted = "/etc/pki:/etc/pki"
+            anaconda-iso..local_image:
+                custom_repo = "rhel-9.4.repo"
+                compose_url = "example_compose_url"
+        - rhel_9.5_nightly_bib:
+            bib_image_url = "registry-proxy.engineering.redhat.com/rh-osbs/rhel9-rhel_bootc-image-builder:rhel-9.5"
+            key_store_mounted = "/etc/pki:/etc/pki"
+            anaconda-iso..local_image:
+                custom_repo = "rhel-9.5.repo"
+                compose_url = "example_compose_url"
+    variants:
+        - ami:
+            disk_image_type = "ami"
+            output_sub_folder = "image"
+            output_name = "disk.raw"
+            aws_secret_folder = "/var/lib/libvirt/images"
+            aws_access_key_id = "example_aws_access_key_id"
+            aws_access_key = "example_aws_access_key"
+            aws_region = "us-east-1"
"install_${bib_ref}-${image_ref}-component-bootc-${disk_image_type}" + aws_key_name = "component-bootc-key" + aws_security_group = "example_sg" + aws_vpc_id = "example_vpc" + aws_subnet_id = "example_subnet" + aws_config_dict = "{'aws.secrets':'${aws_secret_folder}/aws.secrets','aws_ami_name':'${aws_ami_name}','aws_bucket':'bib-component-test','aws_region':'${aws_region}'}" + rhel_9.5_nightly_bib..centos.centos9, rhel_9.5_nightly_bib..fedora_eln, rhel_9.5_nightly_bib..local_image: + aws_config_dict = {} + - qcow: + disk_image_type = "qcow2" + output_sub_folder = "qcow2" + output_name = "disk.qcow2" + - vmdk: + disk_image_type = "vmdk" + output_sub_folder = "vmdk" + output_name = "disk.vmdk" + local_image: + add_vmware_tool = "yes" + rhel_9.4: + add_vmware_tool = "yes" + - anaconda-iso: + wait_boot_time = 240 + disk_image_type = "anaconda-iso" + output_sub_folder = "bootiso" + output_name = "install.iso" + - raw: + disk_image_type = "raw" + output_sub_folder = "image" + output_name = "disk.raw" diff --git a/virttools/tests/src/bootc_image_builder/bootc_disk_image_build.py b/virttools/tests/src/bootc_image_builder/bootc_disk_image_build.py new file mode 100644 index 00000000000..6fabffecf5f --- /dev/null +++ b/virttools/tests/src/bootc_image_builder/bootc_disk_image_build.py @@ -0,0 +1,119 @@ +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# +# Copyright Red Hat +# +# SPDX-License-Identifier: GPL-2.0 + +# Author: Chunfu Wen +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +import logging +import re +import os + +from avocado.utils import distro +from provider.bootc_image_builder import bootc_image_build_utils as bib_utils + +LOG = logging.getLogger('avocado.' + __name__) +cleanup_files = [] + + +def validate_bib_output(params, test): + """ + Common method to check whether image build output exists + + :param params: one collective object representing wrapped parameters + :param test: test object + """ + base_folder = params.get("output_base_folder") + output_sub_folder = params.get("output_sub_folder") + output_name = params.get("output_name") + ownership = params.get("ownership") + full_path = os.path.join(base_folder, output_sub_folder, output_name) + if not os.path.exists(full_path): + test.fail("bootc image build fail to generate outputs for image type: %s" % params.get("disk_image_type")) + if ownership: + formatted_group_user = ':'.join([f"{item}" for item in bib_utils.get_group_and_user_ids(base_folder)]) + if formatted_group_user != ownership: + test.fail(f"The output folder:{base_folder} has wrong setting in group and user ids: {formatted_group_user}") + + +def prepare_env_and_execute_bib(params, test): + """ + One method to prepare environment for image build + + :param params: one collective object representing wrapped parameters + :param test: test object + """ + disk_image_type = params.get("disk_image_type") + bib_image_url = params.get("bib_image_url", "quay.io/centos-bootc/bootc-image-builder:latest") + image_ref = params.get("image_ref") + bib_ref = params.get("bib_ref") + container_url = params.get("container_url") + local_container = "yes" == params.get("local_container") + build_container = params.get("build_container") + + enable_tls_verify = params.get("enable_tls_verify") + config_json = params.get("config_json") + config_json_file = None + + ownership = params.get("ownership") + key_store_mounted = params.get("key_store_mounted") + target_arch = params.get("target_arch") + roofs = params.get("roofs") + + 
+    aws_config_dict = eval(params.get("aws_config_dict", '{}'))
+
+    if image_ref in ['cross_build'] and distro.detect().name in ['rhel']:
+        test.cancel("rhel doesn't support cross build, it is supported on fedora only")
+
+    bib_utils.install_bib_packages()
+    if config_json == "use_config_json":
+        config_json_file = bib_utils.create_config_json_file(params.get("config_file_path"),
+                                                             params.get("os_username"), params.get("os_password"))
+    if disk_image_type in ["ami"]:
+        bib_utils.prepare_aws_env(params)
+    # pull base image and build local image after change
+    if build_container:
+        if bib_ref == "rhel_9.4_bib":
+            bib_utils.podman_login(params.get("podman_redhat_username"), params.get("podman_redhat_password"),
+                                   params.get("redhat_registry"))
+        bib_utils.create_and_build_container_file(params)
+        if bib_ref == "rhel_9.4_bib":
+            bib_utils.podman_push(params.get("podman_quay_username"), params.get("podman_quay_password"),
+                                  params.get("registry"), container_url)
+
+    result = bib_utils.podman_command_build(bib_image_url, disk_image_type, container_url, config_json_file,
+                                            local_container, enable_tls_verify, ownership,
+                                            key_store_mounted, target_arch, rootfs, None, **aws_config_dict)
+    if disk_image_type in ['ami'] and len(aws_config_dict) != 0:
+        match_ami_id_obj = re.search(r'AMI registered:\s(.*)', result.stdout_text)
+        if match_ami_id_obj is None:
+            test.fail("Failed to get AWS AMI id")
+        aws_ami_id = match_ami_id_obj.group(1).strip()
+        LOG.debug(f"aws_ami_id is: {aws_ami_id}")
+        params.update({"aws_ami_id": aws_ami_id})
+        match_aws_ami_snapshot_id_obj = re.search(r'Snapshot ID:\s(.*)', result.stdout_text)
+        if match_aws_ami_snapshot_id_obj is None:
+            test.fail("Failed to get AWS AMI snapshot id")
+        aws_ami_snapshot_id = match_aws_ami_snapshot_id_obj.group(1).strip()
+        LOG.debug(f"aws_ami_snapshot_id is: {aws_ami_snapshot_id}")
+        params.update({"aws_ami_snapshot_id": aws_ami_snapshot_id})
+        bib_utils.cleanup_aws_ami_and_snapshot(params)
+
+
+def run(test, params, env):
+    """
+    Test bootc image builder disk image generation.
+    """
+    try:
+        prepare_env_and_execute_bib(params, test)
+        # validate build output
+        validate_bib_output(params, test)
+    finally:
+        # Clean up files
+        for file_path in cleanup_files:
+            if os.path.exists(file_path):
+                os.remove(file_path)
diff --git a/virttools/tests/src/bootc_image_builder/bootc_disk_image_install.py b/virttools/tests/src/bootc_image_builder/bootc_disk_image_install.py
new file mode 100644
index 00000000000..cc1cb7f6158
--- /dev/null
+++ b/virttools/tests/src/bootc_image_builder/bootc_disk_image_install.py
@@ -0,0 +1,137 @@
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# Copyright Red Hat
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+# Author: Chunfu Wen
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+import logging
+import re
+import os
+import shutil
+
+from virttest import virsh
+from provider.bootc_image_builder import bootc_image_build_utils as bib_utils
+
+LOG = logging.getLogger('avocado.' + __name__)
+cleanup_files = []
+
+
+def validate_bib_output(params, test):
+    """
+    Common method to check whether image build output exists
+
+    :param params: one collective object representing wrapped parameters
+    :param test: test object
+    """
+    base_folder = params.get("output_base_folder")
+    libvirt_base_folder = params.get("libvirt_base_folder")
+    output_sub_folder = params.get("output_sub_folder")
+    output_name = params.get("output_name")
+    bib_ref = params.get("bib_ref")
+    firmware = params.get("firmware")
+
+    full_path = os.path.join(base_folder, output_sub_folder, output_name)
+    if not os.path.exists(full_path):
+        test.fail("bootc image builder failed to generate output for image type: %s" % params.get("disk_image_type"))
+    converted_disk_image = f"install_{bib_ref}_{firmware}_{bib_utils.convert_disk_image_name(params)}"
+    disk_name, _ = os.path.splitext(converted_disk_image)
+    full_path_dest = os.path.join(libvirt_base_folder, converted_disk_image)
+    shutil.move(full_path, full_path_dest)
+    LOG.debug("vm_disk_image_path: %s", full_path_dest)
+    LOG.debug("vm_name_bootc: %s", disk_name)
+    cleanup_files.append(full_path_dest)
+    params.update({'vm_disk_image_path': full_path_dest})
+    params.update({'vm_name_bootc': disk_name})
+
+    iso_install_path = os.path.join(libvirt_base_folder, f"{disk_name}_{firmware}.qcow2")
+    params.update({'iso_install_path': iso_install_path})
+    cleanup_files.append(iso_install_path)
+
+
+def prepare_env_and_execute_bib(params, test):
+    """
+    One method to prepare environment for image build
+
+    :param params: one collective object representing wrapped parameters
+    :param test: test object
+    """
+    disk_image_type = params.get("disk_image_type")
+    bib_image_url = params.get("bib_image_url", "quay.io/centos-bootc/bootc-image-builder:latest")
+    image_ref = params.get("image_ref")
+    bib_ref = params.get("bib_ref")
+    container_url = params.get("container_url")
+    local_container = "yes" == params.get("local_container")
+    build_container = params.get("build_container")
+    add_vmware_tool = "yes" == params.get("add_vmware_tool")
+
+    enable_tls_verify = params.get("enable_tls_verify", "true")
+    ownership = params.get("ownership")
+    key_store_mounted = params.get("key_store_mounted")
+    rootfs = params.get("rootfs")
+    aws_config_dict = eval(params.get("aws_config_dict", '{}'))
+
+    bib_utils.install_bib_packages()
+    config_json_file = bib_utils.create_config_json_file(params.get("config_file_path"),
+                                                         params.get("os_username"), params.get("os_password"))
+    if disk_image_type in ["ami"]:
+        bib_utils.prepare_aws_env(params)
+    # pull base image and build local image after change
+    if build_container:
+        if bib_ref == "rhel_9.4_bib":
+            bib_utils.podman_login(params.get("podman_redhat_username"), params.get("podman_redhat_password"),
+                                   params.get("redhat_registry"))
+        bib_utils.create_and_build_container_file(params)
+        if bib_ref == "rhel_9.4_bib":
+            ownership = None
+            bib_utils.podman_push(params.get("podman_quay_username"), params.get("podman_quay_password"),
+                                  params.get("registry"), container_url)
+
+    result = bib_utils.podman_command_build(bib_image_url, disk_image_type, container_url, config_json_file,
+                                            local_container, enable_tls_verify, ownership,
+                                            key_store_mounted, None, rootfs, None, **aws_config_dict)
+    if disk_image_type in ['ami'] and len(aws_config_dict) != 0:
+        match_ami_id_obj = re.search(r'AMI registered:\s(.*)', result.stdout_text)
+        if match_ami_id_obj is None:
+            test.fail("Failed to get AWS AMI id")
+        aws_ami_id = match_ami_id_obj.group(1).strip()
LOG.debug(f"aws_ami_id is: {aws_ami_id}") + params.update({"aws_ami_id": aws_ami_id}) + match_aws_ami_snapshot_id_obj = re.search(r'Snapshot ID:\s(.*)', result.stdout_text) + if match_aws_ami_snapshot_id_obj is None: + test.fail("Failed to get AWS AMI snapshot id") + aws_ami_snapshot_id = match_aws_ami_snapshot_id_obj.group(1).strip() + LOG.debug(f"aws_ami_snapshot_id is: {aws_ami_snapshot_id}") + params.update({"aws_ami_snapshot_id": aws_ami_snapshot_id}) + + +def run(test, params, env): + """ + Test install disk image generated by boot container image builder. + """ + disk_image_type = params.get("disk_image_type") + aws_config_dict = eval(params.get("aws_config_dict", '{}')) + try: + prepare_env_and_execute_bib(params, test) + # validate build output + validate_bib_output(params, test) + if disk_image_type in ["vmdk"]: + bib_utils.create_and_start_vmware_vm(params) + elif disk_image_type in ["qcow2", "raw", "anaconda-iso"]: + bib_utils.create_qemu_vm(params, env, test) + elif disk_image_type in ["ami"]: + if len(aws_config_dict) != 0: + bib_utils.create_and_start_cloud_vm(params) + else: + bib_utils.create_qemu_vm(params, env, test) + except Exception as ex: + raise ex + finally: + vm_name = params.get("vm_name_bootc") + if vm_name and vm_name in virsh.dom_list().stdout_text: + virsh.undefine(vm_name, options="--nvram", ignore_status=True) + # Clean up files + for file_path in cleanup_files: + if os.path.exists(file_path): + os.remove(file_path)