diff --git a/lisa/features/disks.py b/lisa/features/disks.py
index 59aea145be..8a5e6bf9a5 100644
--- a/lisa/features/disks.py
+++ b/lisa/features/disks.py
@@ -3,9 +3,7 @@
 import re
 from functools import partial
-from typing import Any, Dict, List, Optional, Type
-
-from assertpy.assertpy import assert_that
+from typing import Any, Dict, List, Optional, Type, Union
 
 from lisa import schema
 from lisa.feature import Feature
@@ -27,22 +25,25 @@ def can_disable(cls) -> bool:
     def enabled(self) -> bool:
         return True
 
-    def get_partition_with_mount_point(self, mount_point: str) -> PartitionInfo:
+    def get_partition_with_mount_point(
+        self, mount_point: str
+    ) -> Union[PartitionInfo, None]:
         partition_info = self._node.tools[Mount].get_partition_info()
         matched_partitions = [
             partition
             for partition in partition_info
             if partition.mount_point == mount_point
         ]
-        assert_that(
-            matched_partitions,
-            f"Exactly one partition with mount point {mount_point} should be present",
-        ).is_length(1)
-        partition = matched_partitions[0]
-        self._log.debug(f"disk: {partition}, mount_point: {mount_point}")
+        if matched_partitions:
+            partition = matched_partitions[0]
+            self._log.debug(f"disk: {partition}, mount_point: {mount_point}")
+            return partition
+        else:
+            return None
 
-        return partition
+    def check_resource_disk_mounted(self) -> bool:
+        return False
 
     def get_raw_data_disks(self) -> List[str]:
         raise NotImplementedError
@@ -71,6 +72,12 @@ def _initialize(self, *args: Any, **kwargs: Any) -> None:
     def get_resource_disk_mount_point(self) -> str:
         raise NotImplementedError
 
+    def get_resource_disks(self) -> List[str]:
+        return []
+
+    def get_resource_disk_type(self) -> schema.ResourceDiskType:
+        return schema.ResourceDiskType.SCSI
+
     def get_luns(self) -> Dict[str, int]:
         raise NotImplementedError
 
@@ -98,25 +105,35 @@ def get_os_boot_partition(self) -> Optional[PartitionInfo]:
                 break
         return boot_partition
 
-    # Get disk controller type from the VM by checking the boot partition
-    def get_os_disk_controller_type(self) -> schema.DiskControllerType:
-        boot_partition = self.get_os_boot_partition()
-        assert boot_partition, "'boot_partition' must not be 'None'"
+    def get_disk_type(self, disk: str) -> schema.StorageInterfaceType:
         if isinstance(self._node.os, BSD):
-            if boot_partition.disk.startswith("da"):
-                os_disk_controller_type = schema.DiskControllerType.SCSI
-            elif boot_partition.disk.startswith("nvd"):
-                os_disk_controller_type = schema.DiskControllerType.NVME
+            # Sample disk names in FreeBSD:
+            # /dev/da1p1 -> SCSI
+            # /dev/nvd1p1 -> NVME
+            if "da" in disk:
+                disk_type = schema.StorageInterfaceType.SCSI
+            elif ("nvd" in disk) or ("nvme" in disk):
+                disk_type = schema.StorageInterfaceType.NVME
             else:
-                raise LisaException(f"Unknown OS boot disk type {boot_partition.disk}")
+                raise LisaException(f"Unknown disk type {disk}")
         else:
-            if boot_partition.disk.startswith("nvme"):
-                os_disk_controller_type = schema.DiskControllerType.NVME
-            elif boot_partition.disk.startswith("sd"):
-                os_disk_controller_type = schema.DiskControllerType.SCSI
+            # Sample disk names in Linux:
+            # /dev/sda1 -> SCSI
+            # /dev/nvme0n1p1 -> NVME
+            if "nvme" in disk:
+                disk_type = schema.StorageInterfaceType.NVME
+            elif "sd" in disk:
+                disk_type = schema.StorageInterfaceType.SCSI
            else:
-                raise LisaException(f"Unknown OS boot disk type {boot_partition.disk}")
-        return os_disk_controller_type
+                raise LisaException(f"Unknown disk type {disk}")
+        return disk_type
+
+    # Get disk controller type from the VM by checking the boot partition
+    def get_os_disk_controller_type(self) -> schema.DiskControllerType:
+        boot_partition = self.get_os_boot_partition()
+        assert boot_partition, "'boot_partition' must not be 'None'"
+        os_disk_controller_type = self.get_disk_type(boot_partition.disk)
+        return schema.DiskControllerType(os_disk_controller_type)
 
 
 DiskEphemeral = partial(
diff --git a/lisa/features/nvme.py b/lisa/features/nvme.py
index dae3ecc979..4af3bd0fee 100644
--- a/lisa/features/nvme.py
+++ b/lisa/features/nvme.py
@@ -16,6 +16,7 @@
 from lisa.tools import Ls, Lspci, Nvmecli
 from lisa.tools.lspci import PciDevice
 from lisa.util import field_metadata, get_matched_str
+from lisa.util.constants import DEVICE_TYPE_NVME
 
 
 class Nvme(Feature):
@@ -42,6 +43,9 @@ class Nvme(Feature):
     # /dev/nvme0n1p15 -> /dev/nvme0n1
     NVME_NAMESPACE_PATTERN = re.compile(r"/dev/nvme[0-9]+n[0-9]+", re.M)
 
+    # /dev/nvme0n1p15 -> /dev/nvme0
+    NVME_DEVICE_PATTERN = re.compile(r"/dev/nvme[0-9]+", re.M)
+
     _pci_device_name = "Non-Volatile memory controller"
     _ls_devices: str = ""
 
@@ -63,7 +67,7 @@ def get_devices(self) -> List[str]:
             matched_result = self._device_pattern.match(row)
             if matched_result:
                 devices_list.append(matched_result.group("device_name"))
-        return devices_list
+        return self._remove_nvme_os_disk(devices_list)
 
     def get_namespaces(self) -> List[str]:
         namespaces = []
@@ -75,10 +79,28 @@ def get_namespaces(self) -> List[str]:
             matched_result = self._namespace_pattern.match(row)
             if matched_result:
                 namespaces.append(matched_result.group("namespace"))
-        return namespaces
+        return self._remove_nvme_os_disk(namespaces)
+
+    # With disk controller type NVMe, the OS disk, along with all remote
+    # iSCSI devices, appears as NVMe.
+    # Removing the OS disk from the list of NVMe devices also removes the
+    # remote non-NVMe disks.
+    def _remove_nvme_os_disk(self, disk_list: List[str]) -> List[str]:
+        if (
+            self._node.features[Disk].get_os_disk_controller_type()
+            == schema.DiskControllerType.NVME
+        ):
+            os_disk_nvme_device = self._get_os_disk_nvme_device()
+            # Remove the OS disk/device from the list.
+            for disk in disk_list:
+                if os_disk_nvme_device in disk:
+                    disk_list.remove(disk)
+                    break
+        return disk_list
 
     def get_namespaces_from_cli(self) -> List[str]:
-        return self._node.tools[Nvmecli].get_namespaces()
+        namespaces_list = self._node.tools[Nvmecli].get_namespaces()
+        return self._remove_nvme_os_disk(namespaces_list)
 
     def get_os_disk_nvme_namespace(self) -> str:
         node_disk = self._node.features[Disk]
@@ -93,10 +115,23 @@ def get_os_disk_nvme_namespace(self) -> str:
         )
         return os_partition_namespace
 
+    # This method returns the NVMe device name of the OS disk.
+    def _get_os_disk_nvme_device(self) -> str:
+        os_disk_nvme_device = ""
+        os_disk_nvme_namespace = self.get_os_disk_nvme_namespace()
+        # Sample os_boot_partition when disk controller type is NVMe:
+        # name: /dev/nvme0n1p15, disk: nvme, mount_point: /boot/efi, type: vfat
+        if os_disk_nvme_namespace:
+            os_disk_nvme_device = get_matched_str(
+                os_disk_nvme_namespace,
+                self.NVME_DEVICE_PATTERN,
+            )
+        return os_disk_nvme_device
+
     def get_devices_from_lspci(self) -> List[PciDevice]:
         devices_from_lspci = []
         lspci_tool = self._node.tools[Lspci]
-        device_list = lspci_tool.get_devices()
+        device_list = lspci_tool.get_devices_by_type(DEVICE_TYPE_NVME)
         devices_from_lspci = [
             x for x in device_list if self._pci_device_name == x.device_class
         ]
@@ -108,16 +143,6 @@ def get_raw_data_disks(self) -> List[str]:
 
     def get_raw_nvme_disks(self) -> List[str]:
         # This routine returns Local NVMe devices as a list.
         nvme_namespaces = self.get_namespaces()
-
-        # With disk controller type NVMe, OS disk appears as NVMe.
-        # It should be removed from the list of disks for NVMe tests as it is
-        # not an actual NVMe device.
-        # disk_controller_type == NVME
-        node_disk = self._node.features[Disk]
-        if node_disk.get_os_disk_controller_type() == schema.DiskControllerType.NVME:
-            os_disk_nvme_namespace = self.get_os_disk_nvme_namespace()
-            # Removing OS disk from the list.
-            nvme_namespaces.remove(os_disk_nvme_namespace)
         return nvme_namespaces
 
     def _get_device_from_ls(self, force_run: bool = False) -> None:
diff --git a/lisa/schema.py b/lisa/schema.py
index 986fbfe717..431dca0dd6 100644
--- a/lisa/schema.py
+++ b/lisa/schema.py
@@ -431,9 +431,19 @@ class DiskType(str, Enum):
 ]
 
 
+class StorageInterfaceType(str, Enum):
+    SCSI = constants.STORAGE_INTERFACE_TYPE_SCSI
+    NVME = constants.STORAGE_INTERFACE_TYPE_NVME
+
+
 class DiskControllerType(str, Enum):
-    SCSI = "SCSI"
-    NVME = "NVMe"
+    SCSI = constants.STORAGE_INTERFACE_TYPE_SCSI
+    NVME = constants.STORAGE_INTERFACE_TYPE_NVME
+
+
+class ResourceDiskType(str, Enum):
+    SCSI = constants.STORAGE_INTERFACE_TYPE_SCSI
+    NVME = constants.STORAGE_INTERFACE_TYPE_NVME
 
 
 disk_controller_type_priority: List[DiskControllerType] = [
diff --git a/lisa/sut_orchestrator/azure/features.py b/lisa/sut_orchestrator/azure/features.py
index db8646edef..ab73c5d8a2 100644
--- a/lisa/sut_orchestrator/azure/features.py
+++ b/lisa/sut_orchestrator/azure/features.py
@@ -1897,6 +1897,42 @@ def remove_data_disk(self, names: Optional[List[str]] = None) -> None:
         self._node.capability.disk.data_disk_count -= len(names)
         self._node.close()
 
+    # Verify that the resource disk is mounted.
+    # Returns successfully if a disk matching the mount point is present;
+    # raises an exception if the resource disk is not mounted.
+    # In Azure, only SCSI resource disks are mounted by default, not NVMe disks.
+    def check_resource_disk_mounted(self) -> bool:
+        resource_disk_mount_point = self.get_resource_disk_mount_point()
+        resourcedisk = self.get_partition_with_mount_point(resource_disk_mount_point)
+        if not resourcedisk:
+            raise LisaException(
+                f"Resource disk is not mounted at {resource_disk_mount_point}"
+            )
+        return True
+
+    # Get the resource disk type.
+    # Returns the type of the resource disk(s) available on the VM;
+    # raises an exception if no resource disk is available.
+    def get_resource_disk_type(self) -> schema.ResourceDiskType:
+        resource_disks = self.get_resource_disks()
+        if not resource_disks:
+            raise LisaException("No resource disks are available on the VM")
+        return schema.ResourceDiskType(
+            self._node.features[Disk].get_disk_type(disk=resource_disks[0])
+        )
+
+    def get_resource_disks(self) -> List[str]:
+        resource_disk_list = []
+        resource_disk_mount_point = self.get_resource_disk_mount_point()
+        resourcedisk = self._node.features[Disk].get_partition_with_mount_point(
+            resource_disk_mount_point
+        )
+        if resourcedisk:
+            resource_disk_list = [resourcedisk.name]
+        else:
+            resource_disk_list = self._node.features[Nvme].get_raw_nvme_disks()
+        return resource_disk_list
+
     def get_resource_disk_mount_point(self) -> str:
         # get customize mount point from cloud-init configuration file from /etc/cloud/
         # if not found, use default mount point /mnt for cloud-init
diff --git a/lisa/util/constants.py b/lisa/util/constants.py
index 771c5317eb..b4ff441ce6 100644
--- a/lisa/util/constants.py
+++ b/lisa/util/constants.py
@@ -178,3 +178,7 @@
 SIGINT = 2
 SIGTERM = 15
 SIGKILL = 9
+
+# StorageInterfaceTypes
+STORAGE_INTERFACE_TYPE_NVME = "NVMe"
+STORAGE_INTERFACE_TYPE_SCSI = "SCSI"
diff --git a/microsoft/testsuites/core/azure_image_standard.py b/microsoft/testsuites/core/azure_image_standard.py
index 645b1cd4b9..d35ca33f3c 100644
--- a/microsoft/testsuites/core/azure_image_standard.py
+++ b/microsoft/testsuites/core/azure_image_standard.py
@@ -13,6 +13,7 @@
     TestCaseMetadata,
     TestSuite,
     TestSuiteMetadata,
+    schema,
     simple_requirement,
 )
 from lisa.features import Disk
@@ -1115,11 +1116,16 @@ def verify_no_pre_exist_users(self, node: Node) -> None:
         ),
     )
     def verify_resource_disk_readme_file(self, node: RemoteNode) -> None:
-        resource_disk_mount_point = node.features[Disk].get_resource_disk_mount_point()
+        if schema.ResourceDiskType.NVME == node.features[Disk].get_resource_disk_type():
+            raise SkippedException(
+                "Resource disk type is NVMe. NVMe disks are not formatted or mounted by"
+                " default and the readme file won't be available"
+            )
+
+        # Verify that the resource disk is mounted; raises an exception if not.
+        node.features[Disk].check_resource_disk_mounted()
 
-        # verify that resource disk is mounted
-        # function returns successfully if disk matching mount point is present
-        node.features[Disk].get_partition_with_mount_point(resource_disk_mount_point)
+        resource_disk_mount_point = node.features[Disk].get_resource_disk_mount_point()
 
         # Verify lost+found folder exists
         # Skip this step for BSD as it does not have lost+found folder
@@ -1159,13 +1165,19 @@ def verify_resource_disk_readme_file(self, node: RemoteNode) -> None:
         ),
     )
     def verify_resource_disk_file_system(self, node: RemoteNode) -> None:
-        resource_disk_mount_point = node.features[Disk].get_resource_disk_mount_point()
-        node.features[Disk].get_partition_with_mount_point(resource_disk_mount_point)
+        node_disk = node.features[Disk]
+        if schema.ResourceDiskType.NVME == node_disk.get_resource_disk_type():
+            raise SkippedException(
+                "Resource disk type is NVMe. NVMe disks are not formatted or mounted by default"  # noqa: E501
+            )
+        # Verify that the resource disk is mounted; raises an exception if not.
+        node_disk.check_resource_disk_mounted()
+        resource_disk_mount_point = node_disk.get_resource_disk_mount_point()
         disk_info = node.tools[Lsblk].find_disk_by_mountpoint(resource_disk_mount_point)
         for partition in disk_info.partitions:
             # by default, resource disk comes with ntfs type
-            # waagent or cloud-init will format it unless there are some commands hung
-            # or interrupt
+            # waagent or cloud-init will format it unless some commands hang
+            # or are interrupted
             assert_that(
                 partition.fstype,
                 "Resource disk file system type should not equal to ntfs",
diff --git a/microsoft/testsuites/core/storage.py b/microsoft/testsuites/core/storage.py
index b5482863e4..1d12e71fe4 100644
--- a/microsoft/testsuites/core/storage.py
+++ b/microsoft/testsuites/core/storage.py
@@ -14,6 +14,7 @@
     TestCaseMetadata,
     TestSuite,
     TestSuiteMetadata,
+    schema,
     simple_requirement,
 )
 from lisa.base_tools.service import Systemctl
@@ -160,14 +161,21 @@ def verify_disks_device_timeout_setting(
         ),
     )
     def verify_resource_disk_mounted(self, node: RemoteNode) -> None:
+        if schema.ResourceDiskType.NVME == node.features[Disk].get_resource_disk_type():
+            raise SkippedException(
+                "Resource disk type is NVMe. NVMe disks are not mounted by default"
+            )
+
+        # Get the mount point for the resource disk.
         resource_disk_mount_point = node.features[Disk].get_resource_disk_mount_point()
         # os disk(root disk) is the entry with mount point `/' in the output
         # of `mount` command
-        os_disk = (
-            node.features[Disk]
-            .get_partition_with_mount_point(self.os_disk_mount_point)
-            .disk
+        os_disk_partition = node.features[Disk].get_partition_with_mount_point(
+            self.os_disk_mount_point
         )
+        assert os_disk_partition, "Failed to get os disk partition"
+        os_disk = os_disk_partition.disk
+
         if isinstance(node.os, BSD):
             partition_info = node.tools[Mount].get_partition_info()
             resource_disk_from_mtab = [
@@ -199,7 +207,7 @@ def verify_resource_disk_mounted(self, node: RemoteNode) -> None:
         priority=1,
         requirement=simple_requirement(
             supported_platform_type=[AZURE],
-            unsupported_os=[BSD, Windows]
+            unsupported_os=[BSD, Windows],
             # This test is skipped as waagent does not support freebsd fully
         ),
     )
@@ -229,11 +237,16 @@ def verify_swap(self, node: RemoteNode) -> None:
         ),
     )
     def verify_resource_disk_io(self, node: RemoteNode) -> None:
+        if schema.ResourceDiskType.NVME == node.features[Disk].get_resource_disk_type():
+            raise SkippedException(
+                "Resource disk type is NVMe. NVMe has 'verify_nvme_function' and "
+                "'verify_nvme_function_unpartitioned' testcases to validate IO operations."  # noqa: E501
+            )
+
         resource_disk_mount_point = node.features[Disk].get_resource_disk_mount_point()
 
-        # verify that resource disk is mounted
-        # function returns successfully if disk matching mount point is present
-        node.features[Disk].get_partition_with_mount_point(resource_disk_mount_point)
+        # Verify that the resource disk is mounted; raises an exception if not.
+        node.features[Disk].check_resource_disk_mounted()
 
         file_path = f"{resource_disk_mount_point}/sample.txt"
         original_text = "Writing to resource disk!!!"
@@ -302,11 +315,13 @@ def verify_nvme_disk_controller_type(self, node: RemoteNode) -> None:
     )
     def verify_os_partition_identifier(self, log: Logger, node: RemoteNode) -> None:
         # get information of root disk from blkid
-        os_partition = (
-            node.features[Disk]
-            .get_partition_with_mount_point(self.os_disk_mount_point)
-            .name
+        os_disk_partition = node.features[Disk].get_partition_with_mount_point(
+            self.os_disk_mount_point
         )
+        if not os_disk_partition:
+            raise LisaException("Failed to get os disk partition")
+
+        os_partition = os_disk_partition.name
         os_partition_info = node.tools[Blkid].get_partition_info_by_name(os_partition)
 
         # check if cvm
diff --git a/microsoft/testsuites/nvme/nvme.py b/microsoft/testsuites/nvme/nvme.py
index debc18b2f0..01235926fb 100644
--- a/microsoft/testsuites/nvme/nvme.py
+++ b/microsoft/testsuites/nvme/nvme.py
@@ -11,26 +11,34 @@
     TestCaseMetadata,
     TestSuite,
     TestSuiteMetadata,
-    constants,
     simple_requirement,
 )
 from lisa.features import Nvme, NvmeSettings, Sriov
 from lisa.sut_orchestrator.azure.platform_ import AzurePlatform
-from lisa.tools import Cat, Echo, Fdisk, Lscpu, Lspci, Mount, Nvmecli
+from lisa.tools import Cat, Df, Echo, Fdisk, Lscpu, Lspci, Mkfs, Mount, Nvmecli
 from lisa.tools.fdisk import FileSystem
+from lisa.util.constants import DEVICE_TYPE_NVME, DEVICE_TYPE_SRIOV
 
 
 def _format_mount_disk(
     node: Node,
     namespace: str,
     file_system: FileSystem,
+    use_partitions: bool = True,
 ) -> None:
     mount_point = namespace.rpartition("/")[-1]
     fdisk = node.tools[Fdisk]
     mount = node.tools[Mount]
     mount.umount(namespace, mount_point)
-    fdisk.make_partition(namespace, file_system)
-    mount.mount(f"{namespace}p1", mount_point)
+    fdisk.delete_partitions(namespace)
+    if use_partitions:
+        fdisk.make_partition(namespace, file_system)
+        mount.mount(f"{namespace}p1", mount_point)
+    else:
+        # Format and mount the whole disk without partitions.
+        format_disk = node.tools[Mkfs]
+        format_disk.mkfs(namespace, file_system)
+        mount.mount(namespace, mount_point)
 
 
 @TestSuiteMetadata(
@@ -97,75 +105,28 @@ def verify_nvme_max_disk(self, environment: Environment, node: Node) -> None:
         ),
     )
     def verify_nvme_function(self, node: Node) -> None:
-        nvme = node.features[Nvme]
-        nvme_namespaces = nvme.get_raw_nvme_disks()
-        nvme_cli = node.tools[Nvmecli]
-        cat = node.tools[Cat]
-        mount = node.tools[Mount]
-        for namespace in nvme_namespaces:
-            # 1. Get the number of errors from nvme-cli before operations.
-            error_count_before_operations = nvme_cli.get_error_count(namespace)
-
-            # 2. Create a partition, filesystem and mount it.
-            _format_mount_disk(node, namespace, FileSystem.ext4)
-
-            # 3. Create a txt file on the partition, content is 'TestContent'.
-            mount_point = namespace.rpartition("/")[-1]
-            cmd_result = node.execute(
-                f"echo TestContent > {mount_point}/testfile.txt", shell=True, sudo=True
-            )
-            cmd_result.assert_exit_code(
-                message=f"{mount_point}/testfile.txt may not exist."
-            )
+        self._verify_nvme_function(node)
 
-            # 4. Create a file 'data' on the partition, get the md5sum value.
-            cmd_result = node.execute(
-                f"dd if=/dev/zero of={mount_point}/data bs=10M count=100",
-                shell=True,
-                sudo=True,
-            )
-            cmd_result.assert_exit_code(
-                message=f"{mount_point}/data is not created successfully, "
-                "please check the disk space."
-            )
-            initial_md5 = node.execute(
-                f"md5sum {mount_point}/data", shell=True, sudo=True
-            )
-            initial_md5.assert_exit_code(
-                message=f"{mount_point}/data not exist or md5sum command enounter"
-                " unexpected error."
-            )
-
-            # 5. Umount and remount the partition.
-            mount.umount(namespace, mount_point, erase=False)
-            mount.mount(f"{namespace}p1", mount_point)
-
-            # 6. Get the txt file content, compare the value.
-            file_content = cat.run(f"{mount_point}/testfile.txt", shell=True, sudo=True)
-            assert_that(
-                file_content.stdout,
-                f"content of {mount_point}/testfile.txt should keep consistent "
-                "after umount and re-mount.",
-            ).is_equal_to("TestContent")
-
-            # 6. Get md5sum value of file 'data', compare with initial value.
-            final_md5 = node.execute(
-                f"md5sum {mount_point}/data", shell=True, sudo=True
-            )
-            assert_that(
-                initial_md5.stdout,
-                f"md5sum of {mount_point}/data should keep consistent "
-                "after umount and re-mount.",
-            ).is_equal_to(final_md5.stdout)
-
-            # 7. Compare the number of errors from nvme-cli after operations.
-            error_count_after_operations = nvme_cli.get_error_count(namespace)
-            assert_that(
-                error_count_before_operations,
-                "error-log should not increase after operations.",
-            ).is_equal_to(error_count_after_operations)
-
-            mount.umount(disk_name=namespace, point=mount_point)
+
+    @TestCaseMetadata(
+        description="""
+        This test case is the same as `verify_nvme_function`, except that it
+        uses unpartitioned disks.
+        It will do the following things for each NVMe device.
+        1. Get the number of errors from nvme-cli before operations.
+        2. Create a filesystem and mount it.
+        3. Create a txt file on the partition, content is 'TestContent'.
+        4. Create a file 'data' on the partition, get the md5sum value.
+        5. Umount and remount the partition.
+        6. Get the txt file content, compare the value.
+        7. Get md5sum value of file 'data', compare with initial value.
+        8. Compare the number of errors from nvme-cli after operations.
+        """,
+        priority=2,
+        requirement=simple_requirement(
+            supported_features=[Nvme],
+        ),
+    )
+    def verify_nvme_function_unpartitioned(self, node: Node) -> None:
+        self._verify_nvme_function(node, use_partitions=False)
 
     @TestCaseMetadata(
         description="""
@@ -187,6 +148,7 @@ def verify_nvme_fstrim(self, node: Node) -> None:
         nvme = node.features[Nvme]
         nvme_namespaces = nvme.get_raw_nvme_disks()
         mount = node.tools[Mount]
+        df = node.tools[Df]
 
         for namespace in nvme_namespaces:
             mount_point = namespace.rpartition("/")[-1]
@@ -202,12 +164,16 @@ def verify_nvme_fstrim(self, node: Node) -> None:
                 message=f"{mount_point} not exist or fstrim command enounter "
                 "unexpected error."
             )
-
-        # 3. Create a 300 gb file 'data' using dd command in the partition.
+            # Use 80% of the free space to create a test file.
+            free_space_gb = int(df.get_filesystem_available_space(mount_point) * 0.8)
+            # Limit it to 300GB to avoid a long-running operation.
+            free_space_gb = min(free_space_gb, 300)
+            # 3. Create a file 'data' using dd command in the partition.
             cmd_result = node.execute(
-                f"dd if=/dev/zero of={mount_point}/data bs=1G count=300",
+                f"dd if=/dev/zero of={mount_point}/data bs=1G count={free_space_gb}",
                 shell=True,
                 sudo=True,
+                timeout=1200,
             )
             cmd_result.assert_exit_code(
                 message=f"{mount_point}/data is not created successfully, "
@@ -350,7 +316,9 @@ def verify_nvme_manage_ns(self, node: Node) -> None:
         description="""
         This test case will
        1. Disable NVME devices.
-        2. Enable NVME device.
+        2. Enable PCI devices.
+        3. Get NVMe device slots.
+        4. Check NVMe devices are back after rescan.
         """,
         priority=2,
         requirement=simple_requirement(
@@ -360,9 +328,16 @@ def verify_nvme_manage_ns(self, node: Node) -> None:
     def verify_nvme_rescind(self, node: Node) -> None:
         lspci = node.tools[Lspci]
         # 1. Disable NVME devices.
-        lspci.disable_devices_by_type(device_type=constants.DEVICE_TYPE_NVME)
-        # 2. Enable NVME device.
+        before_pci_count = lspci.disable_devices_by_type(device_type=DEVICE_TYPE_NVME)
+        # 2. Enable PCI devices.
         lspci.enable_devices()
+        # 3. Get NVMe device slots.
+        after_devices_slots = lspci.get_device_names_by_type(DEVICE_TYPE_NVME, True)
+        # 4. Check NVMe devices are back after rescan.
+        assert_that(
+            after_devices_slots,
+            "After rescan, the disabled NVMe PCI devices should be back.",
+        ).is_length(before_pci_count)
 
     @TestCaseMetadata(
         description="""
@@ -381,7 +356,7 @@ def verify_nvme_rescind(self, node: Node) -> None:
     )
     def verify_nvme_sriov_rescind(self, node: Node) -> None:
         lspci = node.tools[Lspci]
-        device_types = [constants.DEVICE_TYPE_NVME, constants.DEVICE_TYPE_SRIOV]
+        device_types = [DEVICE_TYPE_NVME, DEVICE_TYPE_SRIOV]
         for device_type in device_types:
             # 1. Disable PCI devices.
             before_pci_count = lspci.disable_devices_by_type(device_type)
@@ -392,7 +367,7 @@ def verify_nvme_sriov_rescind(self, node: Node) -> None:
             # 4. Check PCI devices are back after rescan.
             assert_that(
                 after_devices_slots,
-                "After rescan, the disabled PCI devices should be back.",
+                f"After rescan, the disabled {device_type} PCI devices should be back.",
             ).is_length(before_pci_count)
 
     def _verify_nvme_disk(self, environment: Environment, node: Node) -> None:
@@ -431,3 +406,78 @@ def _verify_nvme_disk(self, environment: Environment, node: Node) -> None:
         assert_that(nvme_namespace).described_as(
             "nvme devices count should be equal to [vCPU/8]."
         ).is_length(expected_count)
+
+    def _verify_nvme_function(self, node: Node, use_partitions: bool = True) -> None:
+        # Verify the basic function of all NVMe disks.
+        nvme = node.features[Nvme]
+        nvme_namespaces = nvme.get_raw_nvme_disks()
+        nvme_cli = node.tools[Nvmecli]
+        cat = node.tools[Cat]
+        mount = node.tools[Mount]
+        for namespace in nvme_namespaces:
+            # 1. Get the number of errors from nvme-cli before operations.
+            error_count_before_operations = nvme_cli.get_error_count(namespace)
+
+            # 2. Create a partition, filesystem and mount it.
+            _format_mount_disk(node, namespace, FileSystem.ext4, use_partitions)
+
+            # 3. Create a txt file on the partition, content is 'TestContent'.
+            mount_point = namespace.rpartition("/")[-1]
+            cmd_result = node.execute(
+                f"echo TestContent > {mount_point}/testfile.txt", shell=True, sudo=True
+            )
+            cmd_result.assert_exit_code(
+                message=f"{mount_point}/testfile.txt may not exist."
+            )
+
+            # 4. Create a file 'data' on the partition, get the md5sum value.
+            cmd_result = node.execute(
+                f"dd if=/dev/zero of={mount_point}/data bs=10M count=100",
+                shell=True,
+                sudo=True,
+            )
+            cmd_result.assert_exit_code(
+                message=f"{mount_point}/data is not created successfully, "
+                "please check the disk space."
+            )
+            initial_md5 = node.execute(
+                f"md5sum {mount_point}/data", shell=True, sudo=True
+            )
+            initial_md5.assert_exit_code(
+                message=f"{mount_point}/data not exist or md5sum command encountered"
+                " unexpected error."
+            )
+
+            # 5. Umount and remount the partition.
+            mount.umount(namespace, mount_point, erase=False)
+            if use_partitions:
+                mount.mount(f"{namespace}p1", mount_point)
+            else:
+                mount.mount(namespace, mount_point)
+
+            # 6. Get the txt file content, compare the value.
+            file_content = cat.run(f"{mount_point}/testfile.txt", shell=True, sudo=True)
+            assert_that(
+                file_content.stdout,
+                f"content of {mount_point}/testfile.txt should keep consistent "
+                "after umount and re-mount.",
+            ).is_equal_to("TestContent")
+
+            # 7. Get md5sum value of file 'data', compare with initial value.
+            final_md5 = node.execute(
+                f"md5sum {mount_point}/data", shell=True, sudo=True
+            )
+            assert_that(
+                initial_md5.stdout,
+                f"md5sum of {mount_point}/data should keep consistent "
+                "after umount and re-mount.",
+            ).is_equal_to(final_md5.stdout)
+
+            # 8. Compare the number of errors from nvme-cli after operations.
+            error_count_after_operations = nvme_cli.get_error_count(namespace)
+            assert_that(
+                error_count_before_operations,
+                "error-log should not increase after operations.",
+            ).is_equal_to(error_count_after_operations)
+
+            mount.umount(disk_name=namespace, point=mount_point)
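
Note: a minimal, self-contained sketch of the OS-disk filtering that _remove_nvme_os_disk in lisa/features/nvme.py performs. The device names below are hypothetical; only the regex and the remove-first-match logic mirror the patch.

import re
from typing import List

# Mirrors NVME_DEVICE_PATTERN above: /dev/nvme0n1p15 -> /dev/nvme0
NVME_DEVICE_PATTERN = re.compile(r"/dev/nvme[0-9]+")


def remove_nvme_os_disk(disk_list: List[str], os_disk_namespace: str) -> List[str]:
    # Derive the controller device of the OS disk, then drop the first
    # namespace in the list that belongs to the same controller.
    matched = NVME_DEVICE_PATTERN.search(os_disk_namespace)
    if matched:
        os_disk_nvme_device = matched.group(0)
        for disk in disk_list:
            if os_disk_nvme_device in disk:
                disk_list.remove(disk)
                break
    return disk_list


# Hypothetical device list; /dev/nvme0n1p15 is the OS boot namespace.
print(remove_nvme_os_disk(["/dev/nvme0n1", "/dev/nvme1n1"], "/dev/nvme0n1p15"))
# -> ['/dev/nvme1n1']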
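
Likewise, the name-based classification in Disk.get_disk_type can be exercised outside LISA. The enum is stubbed here and the is_bsd flag stands in for the patch's isinstance(self._node.os, BSD) check, so this is a sketch of the decision logic, not the feature's API.

from enum import Enum


class StorageInterfaceType(str, Enum):
    SCSI = "SCSI"
    NVME = "NVMe"


def get_disk_type(disk: str, is_bsd: bool = False) -> StorageInterfaceType:
    # FreeBSD: /dev/da1p1 -> SCSI, /dev/nvd1p1 -> NVMe
    # Linux: /dev/sda1 -> SCSI, /dev/nvme0n1p1 -> NVMe
    if is_bsd:
        if "da" in disk:
            return StorageInterfaceType.SCSI
        if "nvd" in disk or "nvme" in disk:
            return StorageInterfaceType.NVME
    else:
        if "nvme" in disk:
            return StorageInterfaceType.NVME
        if "sd" in disk:
            return StorageInterfaceType.SCSI
    raise ValueError(f"Unknown disk type {disk}")


assert get_disk_type("/dev/sda1") is StorageInterfaceType.SCSI
assert get_disk_type("/dev/nvme0n1p1") is StorageInterfaceType.NVME
assert get_disk_type("/dev/da1p1", is_bsd=True) is StorageInterfaceType.SCSI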
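
Finally, the use_partitions=False branch of _format_mount_disk formats and mounts the bare namespace instead of a p1 partition. A rough shell-level equivalent is below (run as root; the device and mount point are hypothetical, and the LISA Mkfs/Mount tools presumably wrap similar commands).

import os
import subprocess


def format_mount_whole_disk(namespace: str, mount_point: str) -> None:
    # Format the whole disk without a partition table, then mount it.
    subprocess.run(["mkfs.ext4", "-F", namespace], check=True)
    os.makedirs(mount_point, exist_ok=True)
    subprocess.run(["mount", namespace, mount_point], check=True)


# Example (destructive, so left commented out):
# format_mount_whole_disk("/dev/nvme1n1", "/mnt/nvme1n1")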