diff --git a/libvirt/tests/cfg/virsh_cmd/domain/virsh_migrate_stress.cfg b/libvirt/tests/cfg/virsh_cmd/domain/virsh_migrate_stress.cfg
index f11ab110234..9a9bc942493 100644
--- a/libvirt/tests/cfg/virsh_cmd/domain/virsh_migrate_stress.cfg
+++ b/libvirt/tests/cfg/virsh_cmd/domain/virsh_migrate_stress.cfg
@@ -28,14 +28,32 @@
     virsh_migrate_back = "yes"
     virsh_migrated_state = "running"
     variants:
-        - set_vcpu_1:
-            smp = 2
-        - set_vcpu_2:
-            smp = 4
-        - set_memory_1:
-            mem = 2048
-        - set_memory_2:
-            mem = 4096
+        - @normal:
+        - with_macvtap:
+            macvtap_migration = "yes"
+            host_pf_filter = "Mellanox Technologies MT28800 Family \[ConnectX-5 Ex\]"
+            host_iface_name = "enP1p12s0f0"
+            per_vm_macvtap_iface_count = 1
+            variants:
+                - hotplug:
+                    plug_operation = "hotplug"
+                - coldplug:
+                    plug_operation = "coldplug"
+            variants:
+                - plug_before_unplug_before_migration:
+                    virsh_plug_before = "yes"
+                    virsh_unplug_before = "yes"
+                - plug_before_unplug_after_migration:
+                    virsh_plug_before = "yes"
+                    virsh_unplug_after = "yes"
+                - plug_after_unplug_after_migration:
+                    virsh_plug_after = "yes"
+                    virsh_unplug_after = "yes"
+                - unplug_before_hotplug_unplug_after:
+                    virsh_plug_before = "yes"
+                    virsh_unplug_before = "yes"
+                    virsh_plug_after = "yes"
+                    virsh_unplug_after = "yes"
     variants:
         - precopy:
             # In precopy it takes more time to converge, so let it suspend after 30min and
diff --git a/libvirt/tests/src/virsh_cmd/domain/virsh_migrate_stress.py b/libvirt/tests/src/virsh_cmd/domain/virsh_migrate_stress.py
index a59ff51f2de..f6df6bbbf23 100644
--- a/libvirt/tests/src/virsh_cmd/domain/virsh_migrate_stress.py
+++ b/libvirt/tests/src/virsh_cmd/domain/virsh_migrate_stress.py
@@ -4,8 +4,10 @@
 from virttest import utils_test
 from virttest import utils_misc
 from virttest import utils_package
-from virttest import remote
-from virttest.libvirt_xml import vm_xml
+from virttest import utils_net
+from virttest import libvirt_xml
+from virttest import virsh
+from virttest import test_setup
 from virttest.staging import utils_memory
@@ -17,7 +19,7 @@ def set_cpu_memory(vm_name, cpu, memory):
     :param cpu: No of vcpus to be configured
     :param memory: Memory for VM to be configured
     """
-    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
+    vmxml = libvirt_xml.vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
     vmxml.vcpu = cpu
     # To avoid exceeded current memory
     vmxml.max_mem = memory
@@ -126,6 +128,68 @@
     migrate_setup.migrate_pre_setup(srcuri, params, cleanup=True)
 
 
+def macvtap_plug_unplug(test, vms, macvtap_xml, hotplug=False, unplug=False):
+    """
+    Method to perform macvtap hotplug/hotunplug and coldplug/coldunplug
+
+    :param test: avocado test object, used to report failures
+    :param vms: VM objects
+    :param macvtap_xml: macvtap xml dict of VMs
+    :param hotplug: True to perform hotplug, False to perform coldplug
+    :param unplug: True and hotplug as True to perform hotunplug
+                   True and hotplug as False to perform coldunplug
+    :raise: TestFail if the operation fails
+    """
+    for vm in vms:
+        xml_list = macvtap_xml[vm.name]
+        # perform coldplug
+        if not hotplug:
+            if vm.is_alive():
+                vm.destroy()
+            vmxml = libvirt_xml.vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
+            func = vmxml.add_device
+            # perform coldunplug
+            if unplug:
+                func = vmxml.remove_device
+            for xml in xml_list:
+                try:
+                    func(xml)
+                except Exception as info:
+                    test.fail(info)
+            # sync() persists the edited XML into the domain definition
+            vmxml.sync()
+            if not vm.is_alive():
+                vm.start()
+                vm.wait_for_login()
+        # perform hotplug
+        if hotplug:
+            func = virsh.attach_device
+            # perform hotunplug
+            if unplug:
+                func = virsh.detach_device
+            for xml in xml_list:
+                ret = func(vm.name, xml.xml, flag_str="--live", debug=True)
+                utils_test.libvirt.check_result(ret)
+
+
+def create_macvtap_xml(iface, params):
+    """
+    Method to create Macvtap interface xml
+
+    :param iface: macvtap interface name
+    :param params: Test dict params
+    :return: macvtap xml object
+    """
+    mode = params.get('macvtap_mode', 'passthrough')
+    model = params.get('macvtap_model', 'virtio')
+    macvtap_type = params.get('macvtap_type', 'direct')
+    macvtap = libvirt_xml.devices.interface.Interface(macvtap_type)
+    macvtap.mac_address = utils_net.generate_mac_address_simple()
+    macvtap.model = model
+    macvtap.source = {'dev': iface, 'mode': mode}
+    return macvtap
+
+
 def run(test, params, env):
     """
     Test migration under stress.
@@ -155,9 +216,25 @@
     migration_type = params.get("migration_type")
     start_migration_vms = params.get("start_migration_vms", "yes") == "yes"
     thread_timeout = int(params.get("thread_timeout", 120))
+    host_iface_name = params.get("host_iface_name", "enP1p12s0f0")
+    host_pf_filter = params.get("host_pf_filter", "Mellanox Technologies")
+    # No of macvtap interfaces required per VM
+    vm_ifaces = int(params.get("per_vm_macvtap_iface_count", 1))
+    macvtap_migration = params.get("plug_operation", "")  # "hotplug"/"coldplug"; empty disables the macvtap flow
+    plug_before = params.get("virsh_plug_before", "no") == "yes"
+    unplug_before = params.get("virsh_unplug_before", "no") == "yes"
+    plug_after = params.get("virsh_plug_after", "no") == "yes"
+    unplug_after = params.get("virsh_unplug_after", "no") == "yes"
+    ubuntu_dep = ['build-essential', 'git']
     hstress = rstress = None
     vstress = {}
+    vmxml_dict = {}
+
+    # backup vm xml
+    for vm in vms:
+        vmxml_dict[vm.name] = libvirt_xml.vm_xml.VMXML.new_from_dumpxml(vm.name)
+        params["source_dist_img"] = "%s-nfs-img" % vm.name  # NOTE(review): overwritten on every pass, last VM wins — confirm intent
 
     # Set vm_bytes for start_cmd
     mem_total = utils_memory.memtotal()
@@ -169,6 +246,68 @@
     if "vm-bytes" in stress_args:
         params["%s_args" % stress_tool] = stress_args % vm_bytes
 
+    server_dict = {'server_ip': params['remote_ip'],
+                   'server_pwd': params['remote_pwd'],
+                   'server_user': params.get('remote_user', 'root')}
+    server_session = test_setup.remote_session(server_dict)
+    cmd = "ip link show | grep '^[0-9][0-9]*:' | awk '{print $2}'"  # single-quote awk so the remote shell keeps $2 literal
+    remote_ifaces = server_session.cmd_output(cmd).split()  # NOTE(review): unused in this patch's hunks — verify it is needed
+
+    if macvtap_migration:
+        iface_dict = {}
+        iface_list = []
+        macvtap_xml = {}
+        source_assignable = test_setup.PciAssignable(pf_filter_re=host_pf_filter)
+        target_assignable = test_setup.PciAssignable(pf_filter_re=host_pf_filter,
+                                                     session=server_session)
+        source_pfs = source_assignable.get_pf_ids()
+        target_pfs = target_assignable.get_pf_ids()
+        logging.debug("source PFs are: %s", ' '.join(map(str, source_pfs)))
+        logging.debug("target PFs are: %s", ' '.join(map(str, target_pfs)))
+        # list.sort() returns None, so the original comparison never fired;
+        # compare sorted copies and also bail out when no PF matched
+        if not source_pfs or sorted(source_pfs) != sorted(target_pfs):
+            test.cancel("For migration to work PFs should be in same slot "
+                        "in source and target so that VFs created out of "
+                        "it will be same")
+
+        # create VFs in source and target based on no of VMs and no of
+        # interfaces required for each VM
+        nr_vfs = (len(vms) * vm_ifaces) // len(source_pfs)
+        for pf in source_pfs:
+            # initialize it to 0
+            source_assignable.set_vf(pf)
+            target_assignable.set_vf(pf)
+            # set actual vfs
+            source_assignable.set_vf(pf, nr_vfs)
+            target_assignable.set_vf(pf, nr_vfs)
+        pf_vf_info = source_assignable.get_pf_vf_info()
+        # map vf from each pf to every VM; each VM gets its own list
+        for pf in source_pfs:
+            for vf in pf_vf_info[pf]:
+                for vm_index in range(len(vms)):
+                    iface = utils_misc.get_interface_from_pci_id(vf[vm_index])  # NOTE(review): assumes get_pf_vf_info() yields per-VM indexable VF ids — verify
+                    iface_dict.setdefault(vms[vm_index].name, [])
+                    iface_dict[vms[vm_index].name].append(iface)
+        # create xml for vfs associated with VM
+        for vm in vms:
+            macvtap_xml_list = []
+            for iface in iface_dict[vm.name]:
+                xml = create_macvtap_xml(iface, params)
+                macvtap_xml_list.append(xml)
+            macvtap_xml[vm.name] = macvtap_xml_list
+
+        # perform hotplug/hotunplug before migration
+        if macvtap_migration == "hotplug":
+            if plug_before:
+                macvtap_plug_unplug(test, vms, macvtap_xml, hotplug=True)
+            if unplug_before:
+                macvtap_plug_unplug(test, vms, macvtap_xml, hotplug=True,
+                                    unplug=True)
+        # perform coldplug/coldunplug before migration
+        elif macvtap_migration == "coldplug":
+            if plug_before:
+                macvtap_plug_unplug(test, vms, macvtap_xml)
+            if unplug_before:
+                macvtap_plug_unplug(test, vms, macvtap_xml, unplug=True)
 
     # Ensure stress tool is available in host
     if host_stress:
host_stress: # remove package manager installed tool to avoid conflict @@ -184,11 +323,7 @@ def run(test, params, env): if remote_stress: try: - server_ip = params['remote_ip'] - server_pwd = params['remote_pwd'] - server_user = params.get('remote_user', 'root') - remote_session = remote.wait_for_login('ssh', server_ip, '22', server_user, - server_pwd, r"[\#\$]\s*$") + remote_session = test_setup.remote_session(server_dict) # remove package manager installed tool to avoid conflict if not utils_package.package_remove(stress_tool, session=remote_session): logging.error("Existing %s is not removed") @@ -232,6 +367,22 @@ def run(test, params, env): do_stress_migration(vms, src_uri, dest_uri, migration_type, test, params, thread_timeout) + + if macvtap_migration: + # perform hotplug/coldplug after migration + if macvtap_migration == "hotplug": + if plug_after: + macvtap_plug_unplug(test, vms, macvtap_xml, hotplug=True) + if unplug_after: + macvtap_plug_unplug(test, vms, macvtap_xml, hotplug=True, + unplug=True) + # perform coldplug/coldunplug before migration + elif macvtap_migration == "coldplug": + if plug_after: + macvtap_plug_unplug(test, vms, macvtap_xml) + if unplug_after: + macvtap_plug_unplug(test, vms, macvtap_xml, unplug=True) + finally: logging.debug("Cleanup vms...") params["connect_uri"] = src_uri @@ -254,3 +405,11 @@ def run(test, params, env): if hstress: hstress.unload_stress() + + for source_file in params.get("source_file_list", []): + utils_test.libvirt.delete_local_disk("file", path=source_file) + + # define the backup xml + if vmxml_dict: + for key in vmxml_dict.keys(): + vmxml_dict[key].define()