From b29f14ebb8733c9622681ea7e15c9187e6e4f8d8 Mon Sep 17 00:00:00 2001
From: SrikanthMyakam
Date: Wed, 23 Oct 2024 08:44:27 +0530
Subject: [PATCH] NVMe and resource disk test case fixes

NVMe and resource disk test case fixes:
1. Added new test case "Nvme.verify_nvme_function_unpartitioned".
2. NVMe test case fixes for disk controller type NVMe.
3. Resource disk test case fixes for VMs with local NVMe disks.
4. "Nvme.verify_nvme_fstrim" - size the fstrim test file based on available space.
---
 lisa/features/disks.py                        |   3 +
 lisa/features/nvme.py                         |  41 +++-
 lisa/sut_orchestrator/azure/features.py       |  14 ++
 .../testsuites/core/azure_image_standard.py   | 108 ++++++---
 microsoft/testsuites/core/storage.py          | 108 +++++----
 microsoft/testsuites/nvme/nvme.py             | 210 +++++++++++-------
 6 files changed, 323 insertions(+), 161 deletions(-)

diff --git a/lisa/features/disks.py b/lisa/features/disks.py
index 59aea145be..3ec03d5a43 100644
--- a/lisa/features/disks.py
+++ b/lisa/features/disks.py
@@ -71,6 +71,9 @@ def _initialize(self, *args: Any, **kwargs: Any) -> None:
     def get_resource_disk_mount_point(self) -> str:
         raise NotImplementedError
 
+    def get_resource_disks(self) -> List[str]:
+        raise NotImplementedError
+
     def get_luns(self) -> Dict[str, int]:
         raise NotImplementedError
 
diff --git a/lisa/features/nvme.py b/lisa/features/nvme.py
index dae3ecc979..65a71b3c3d 100644
--- a/lisa/features/nvme.py
+++ b/lisa/features/nvme.py
@@ -16,6 +16,7 @@
 from lisa.tools import Ls, Lspci, Nvmecli
 from lisa.tools.lspci import PciDevice
 from lisa.util import field_metadata, get_matched_str
+from lisa.util.constants import DEVICE_TYPE_NVME
 
 
 class Nvme(Feature):
@@ -42,6 +43,9 @@ class Nvme(Feature):
     # /dev/nvme0n1p15 -> /dev/nvme0n1
     NVME_NAMESPACE_PATTERN = re.compile(r"/dev/nvme[0-9]+n[0-9]+", re.M)
 
+    # /dev/nvme0n1p15 -> /dev/nvme0
+    NVME_DEVICE_PATTERN = re.compile(r"/dev/nvme[0-9]+", re.M)
+
     _pci_device_name = "Non-Volatile memory controller"
     _ls_devices: str = ""
 
@@ -63,6 +67,17 @@ def get_devices(self) -> List[str]:
             matched_result = self._device_pattern.match(row)
             if matched_result:
                 devices_list.append(matched_result.group("device_name"))
+        node_disk = self._node.features[Disk]
+        # With disk controller type NVMe, all remote managed disks attached to the VM
+        # (including the OS disk) appear as NVMe devices.
+        # All the remote managed disks (including the OS disk) use the same NVMe
+        # controller in the VM.
+        # Excluding the OS NVMe device from the list of NVMe devices will remove
+        # all the remote managed disks.
+        if node_disk.get_os_disk_controller_type() == schema.DiskControllerType.NVME:
+            os_disk_nvme_device = self._get_os_disk_nvme_device()
+            # Removing OS disk/device from the list.
+            devices_list.remove(os_disk_nvme_device)
         return devices_list
 
     def get_namespaces(self) -> List[str]:
@@ -78,7 +93,17 @@ def get_namespaces(self) -> List[str]:
         return namespaces
 
     def get_namespaces_from_cli(self) -> List[str]:
-        return self._node.tools[Nvmecli].get_namespaces()
+        namespaces_list = self._node.tools[Nvmecli].get_namespaces()
+        node_disk = self._node.features[Disk]
+        # With disk controller type NVMe, the OS disk and all remote managed disks
+        # appear as NVMe namespaces.
+        # Removing the OS disk from the list of NVMe devices will remove all the
+        # remote non-NVMe disks.
+        if node_disk.get_os_disk_controller_type() == schema.DiskControllerType.NVME:
+            os_disk_nvme_namespace = self.get_os_disk_nvme_namespace()
+            # Removing OS disk/device from the list.
+            namespaces_list.remove(os_disk_nvme_namespace)
+        return namespaces_list
 
     def get_os_disk_nvme_namespace(self) -> str:
         node_disk = self._node.features[Disk]
@@ -93,10 +118,22 @@ def get_os_disk_nvme_namespace(self) -> str:
         )
         return os_partition_namespace
 
+    # This method returns the NVMe device name of the OS disk.
+    def _get_os_disk_nvme_device(self) -> str:
+        os_disk_nvme_namespace = self.get_os_disk_nvme_namespace()
+        # Sample os_boot_partition when disk controller type is NVMe:
+        # name: /dev/nvme0n1p15, disk: nvme, mount_point: /boot/efi, type: vfat
+        if os_disk_nvme_namespace:
+            os_disk_nvme_device = get_matched_str(
+                os_disk_nvme_namespace,
+                self.NVME_DEVICE_PATTERN,
+            )
+        return os_disk_nvme_device
+
     def get_devices_from_lspci(self) -> List[PciDevice]:
         devices_from_lspci = []
         lspci_tool = self._node.tools[Lspci]
-        device_list = lspci_tool.get_devices()
+        device_list = lspci_tool.get_devices_by_type(DEVICE_TYPE_NVME)
         devices_from_lspci = [
             x for x in device_list if self._pci_device_name == x.device_class
         ]
diff --git a/lisa/sut_orchestrator/azure/features.py b/lisa/sut_orchestrator/azure/features.py
index db8646edef..bdf3081d15 100644
--- a/lisa/sut_orchestrator/azure/features.py
+++ b/lisa/sut_orchestrator/azure/features.py
@@ -1897,6 +1897,20 @@ def remove_data_disk(self, names: Optional[List[str]] = None) -> None:
             self._node.capability.disk.data_disk_count -= len(names)
         self._node.close()
 
+    def get_resource_disks(self) -> List[str]:
+        resource_disk_mount_point = self.get_resource_disk_mount_point()
+        resource_disks = []
+        try:
+            resourcedisk = self._node.features[Disk].get_partition_with_mount_point(
+                resource_disk_mount_point
+            )
+            if resourcedisk:
+                resource_disks = [resourcedisk.name]
+        except AssertionError:
+            nvme = self._node.features[Nvme]
+            resource_disks = nvme.get_raw_nvme_disks()
+        return resource_disks
+
     def get_resource_disk_mount_point(self) -> str:
         # get customize mount point from cloud-init configuration file from /etc/cloud/
         # if not found, use default mount point /mnt for cloud-init
diff --git a/microsoft/testsuites/core/azure_image_standard.py b/microsoft/testsuites/core/azure_image_standard.py
index 645b1cd4b9..e7745df5a2 100644
--- a/microsoft/testsuites/core/azure_image_standard.py
+++ b/microsoft/testsuites/core/azure_image_standard.py
@@ -1115,32 +1115,51 @@ def verify_no_pre_exist_users(self, node: Node) -> None:
         ),
     )
     def verify_resource_disk_readme_file(self, node: RemoteNode) -> None:
-        resource_disk_mount_point = node.features[Disk].get_resource_disk_mount_point()
-
-        # verify that resource disk is mounted
-        # function returns successfully if disk matching mount point is present
-        node.features[Disk].get_partition_with_mount_point(resource_disk_mount_point)
-
-        # Verify lost+found folder exists
-        # Skip this step for BSD as it does not have lost+found folder
-        # since it uses UFS file system
-        if not isinstance(node.os, BSD):
-            fold_path = f"{resource_disk_mount_point}/lost+found"
-            folder_exists = node.tools[Ls].path_exists(fold_path, sudo=True)
-            assert_that(folder_exists, f"{fold_path} should be present").is_true()
-
-        # verify DATALOSS_WARNING_README.txt file exists
-        file_path = f"{resource_disk_mount_point}/DATALOSS_WARNING_README.txt"
-        file_exists = node.tools[Ls].path_exists(file_path, sudo=True)
-        assert_that(file_exists, f"{file_path} should be present").is_true()
-
-        # verify 'WARNING: THIS IS A TEMPORARY DISK' contained in
-        # DATALOSS_WARNING_README.txt file.
-        read_text = node.tools[Cat].read(file_path, force_run=True, sudo=True)
-        assert_that(
-            read_text,
-            f"'WARNING: THIS IS A TEMPORARY DISK' should be present in {file_path}",
-        ).contains("WARNING: THIS IS A TEMPORARY DISK")
+        node_disc = node.features[Disk]
+        resource_disks = node_disc.get_resource_disks()
+        if not resource_disks:
+            raise LisaException("Resource disk not found")
+        if "nvme" in resource_disks[0]:
+            raise SkippedException(
+                f"Resource disk type is NVMe and the VM has {len(resource_disks)} NVMe disks"  # noqa: E501
+            )
+        else:
+            resource_disk_mount_point = node.features[
+                Disk
+            ].get_resource_disk_mount_point()
+
+            # verify that resource disk is mounted
+            # function returns successfully if disk matching mount point is present
+            node.features[Disk].get_partition_with_mount_point(
+                resource_disk_mount_point
+            )
+            # Verify lost+found folder exists
+            # Skip this step for BSD as it does not have lost+found folder
+            # since it uses UFS file system
+            if not isinstance(node.os, BSD):
+                fold_path = f"{resource_disk_mount_point}/lost+found"
+                folder_exists = node.tools[Ls].path_exists(fold_path, sudo=True)
+                assert_that(folder_exists, f"{fold_path} should be present").is_true()
+
+            # verify DATALOSS_WARNING_README.txt file exists
+            file_path = f"{resource_disk_mount_point}/DATALOSS_WARNING_README.txt"
+            file_exists = node.tools[Ls].path_exists(file_path, sudo=True)
+            assert_that(file_exists, f"{file_path} should be present").is_true()
+            # verify 'WARNING: THIS IS A TEMPORARY DISK' contained in
+            # DATALOSS_WARNING_README.txt file.
+ read_text = node.tools[Cat].read(file_path, force_run=True, sudo=True) + assert_that( + read_text, + f"'WARNING: THIS IS A TEMPORARY DISK' should be present in {file_path}", + ).contains("WARNING: THIS IS A TEMPORARY DISK") @TestCaseMetadata( description=""" @@ -1159,14 +1178,29 @@ def verify_resource_disk_readme_file(self, node: RemoteNode) -> None: ), ) def verify_resource_disk_file_system(self, node: RemoteNode) -> None: - resource_disk_mount_point = node.features[Disk].get_resource_disk_mount_point() - node.features[Disk].get_partition_with_mount_point(resource_disk_mount_point) - disk_info = node.tools[Lsblk].find_disk_by_mountpoint(resource_disk_mount_point) - for partition in disk_info.partitions: - # by default, resource disk comes with ntfs type - # waagent or cloud-init will format it unless there are some commands hung - # or interrupt - assert_that( - partition.fstype, - "Resource disk file system type should not equal to ntfs", - ).is_not_equal_to("ntfs") + node_disc = node.features[Disk] + resource_disks = node_disc.get_resource_disks() + if not resource_disks: + raise LisaException("Resource disk not found") + if "nvme" in resource_disks[0]: + raise SkippedException( + f"Resource disk type is NVMe and the VM has {len(resource_disks)} NVMe disks" # noqa: E501 + ) + else: + resource_disk_mount_point = node.features[ + Disk + ].get_resource_disk_mount_point() + node.features[Disk].get_partition_with_mount_point( + resource_disk_mount_point + ) + disk_info = node.tools[Lsblk].find_disk_by_mountpoint( + resource_disk_mount_point + ) + for partition in disk_info.partitions: + # by default, resource disk comes with ntfs type + # waagent or cloud-init will format it unless there are some commands + # hung or interrupt + assert_that( + partition.fstype, + "Resource disk file system type should not equal to ntfs", + ).is_not_equal_to("ntfs") diff --git a/microsoft/testsuites/core/storage.py b/microsoft/testsuites/core/storage.py index b5482863e4..032707c6d3 100644 --- a/microsoft/testsuites/core/storage.py +++ b/microsoft/testsuites/core/storage.py @@ -160,32 +160,43 @@ def verify_disks_device_timeout_setting( ), ) def verify_resource_disk_mounted(self, node: RemoteNode) -> None: - resource_disk_mount_point = node.features[Disk].get_resource_disk_mount_point() - # os disk(root disk) is the entry with mount point `/' in the output - # of `mount` command - os_disk = ( - node.features[Disk] - .get_partition_with_mount_point(self.os_disk_mount_point) - .disk - ) - if isinstance(node.os, BSD): - partition_info = node.tools[Mount].get_partition_info() - resource_disk_from_mtab = [ - entry - for entry in partition_info - if entry.mount_point == resource_disk_mount_point - ][0].mount_point + node_disc = node.features[Disk] + resource_disks = node_disc.get_resource_disks() + if not resource_disks: + raise LisaException("Resource disk not found") + if "nvme" in resource_disks[0]: + raise SkippedException( + f"Resource disk type is NVMe and the VM has {len(resource_disks)} NVMe disks" # noqa: E501 + ) else: - mtab = node.tools[Cat].run("/etc/mtab").stdout - resource_disk_from_mtab = get_matched_str( - mtab, self._get_mtab_mount_point_regex(resource_disk_mount_point) + resource_disk_mount_point = node.features[ + Disk + ].get_resource_disk_mount_point() + # os disk(root disk) is the entry with mount point `/' in the output + # of `mount` command + os_disk = ( + node.features[Disk] + .get_partition_with_mount_point(self.os_disk_mount_point) + .disk ) - assert ( - resource_disk_from_mtab - ), 
f"resource disk mountpoint not found {resource_disk_mount_point}" - assert_that( - resource_disk_from_mtab, "Resource disk should not be equal to os disk" - ).is_not_equal_to(os_disk) + if isinstance(node.os, BSD): + partition_info = node.tools[Mount].get_partition_info() + resource_disk_from_mtab = [ + entry + for entry in partition_info + if entry.mount_point == resource_disk_mount_point + ][0].mount_point + else: + mtab = node.tools[Cat].run("/etc/mtab").stdout + resource_disk_from_mtab = get_matched_str( + mtab, self._get_mtab_mount_point_regex(resource_disk_mount_point) + ) + assert ( + resource_disk_from_mtab + ), f"resource disk mountpoint not found {resource_disk_mount_point}" + assert_that( + resource_disk_from_mtab, "Resource disk should not be equal to os disk" + ).is_not_equal_to(os_disk) @TestCaseMetadata( description=""" @@ -199,7 +210,7 @@ def verify_resource_disk_mounted(self, node: RemoteNode) -> None: priority=1, requirement=simple_requirement( supported_platform_type=[AZURE], - unsupported_os=[BSD, Windows] + unsupported_os=[BSD, Windows], # This test is skipped as waagent does not support freebsd fully ), ) @@ -229,27 +240,40 @@ def verify_swap(self, node: RemoteNode) -> None: ), ) def verify_resource_disk_io(self, node: RemoteNode) -> None: - resource_disk_mount_point = node.features[Disk].get_resource_disk_mount_point() - - # verify that resource disk is mounted - # function returns successfully if disk matching mount point is present - node.features[Disk].get_partition_with_mount_point(resource_disk_mount_point) + node_disc = node.features[Disk] + resource_disks = node_disc.get_resource_disks() + if not resource_disks: + raise LisaException("Resource disk not found") + if "nvme" in resource_disks[0]: + raise SkippedException( + f"Resource disk type is NVMe and the VM has {len(resource_disks)} NVMe disks" # noqa: E501 + ) + else: + resource_disk_mount_point = node.features[ + Disk + ].get_resource_disk_mount_point() + + # verify that resource disk is mounted + # function returns successfully if disk matching mount point is present + node.features[Disk].get_partition_with_mount_point( + resource_disk_mount_point + ) - file_path = f"{resource_disk_mount_point}/sample.txt" - original_text = "Writing to resource disk!!!" + file_path = f"{resource_disk_mount_point}/sample.txt" + original_text = "Writing to resource disk!!!" 
- # write content to the file - node.tools[Echo].write_to_file( - original_text, node.get_pure_path(file_path), sudo=True - ) + # write content to the file + node.tools[Echo].write_to_file( + original_text, node.get_pure_path(file_path), sudo=True + ) - # read content from the file - read_text = node.tools[Cat].read(file_path, force_run=True, sudo=True) + # read content from the file + read_text = node.tools[Cat].read(file_path, force_run=True, sudo=True) - assert_that( - read_text, - "content read from file should be equal to content written to file", - ).is_equal_to(original_text) + assert_that( + read_text, + "content read from file should be equal to content written to file", + ).is_equal_to(original_text) @TestCaseMetadata( description=""" diff --git a/microsoft/testsuites/nvme/nvme.py b/microsoft/testsuites/nvme/nvme.py index debc18b2f0..01235926fb 100644 --- a/microsoft/testsuites/nvme/nvme.py +++ b/microsoft/testsuites/nvme/nvme.py @@ -11,26 +11,34 @@ TestCaseMetadata, TestSuite, TestSuiteMetadata, - constants, simple_requirement, ) from lisa.features import Nvme, NvmeSettings, Sriov from lisa.sut_orchestrator.azure.platform_ import AzurePlatform -from lisa.tools import Cat, Echo, Fdisk, Lscpu, Lspci, Mount, Nvmecli +from lisa.tools import Cat, Df, Echo, Fdisk, Lscpu, Lspci, Mkfs, Mount, Nvmecli from lisa.tools.fdisk import FileSystem +from lisa.util.constants import DEVICE_TYPE_NVME, DEVICE_TYPE_SRIOV def _format_mount_disk( node: Node, namespace: str, file_system: FileSystem, + use_partitions: bool = True, ) -> None: mount_point = namespace.rpartition("/")[-1] fdisk = node.tools[Fdisk] mount = node.tools[Mount] mount.umount(namespace, mount_point) - fdisk.make_partition(namespace, file_system) - mount.mount(f"{namespace}p1", mount_point) + fdisk.delete_partitions(namespace) + if use_partitions: + fdisk.make_partition(namespace, file_system) + mount.mount(f"{namespace}p1", mount_point) + else: + # Format and mount the whole disk without partitions. + format_disk = node.tools[Mkfs] + format_disk.mkfs(f"{namespace}", file_system) + mount.mount(f"{namespace}", mount_point) @TestSuiteMetadata( @@ -97,75 +105,28 @@ def verify_nvme_max_disk(self, environment: Environment, node: Node) -> None: ), ) def verify_nvme_function(self, node: Node) -> None: - nvme = node.features[Nvme] - nvme_namespaces = nvme.get_raw_nvme_disks() - nvme_cli = node.tools[Nvmecli] - cat = node.tools[Cat] - mount = node.tools[Mount] - for namespace in nvme_namespaces: - # 1. Get the number of errors from nvme-cli before operations. - error_count_before_operations = nvme_cli.get_error_count(namespace) - - # 2. Create a partition, filesystem and mount it. - _format_mount_disk(node, namespace, FileSystem.ext4) - - # 3. Create a txt file on the partition, content is 'TestContent'. - mount_point = namespace.rpartition("/")[-1] - cmd_result = node.execute( - f"echo TestContent > {mount_point}/testfile.txt", shell=True, sudo=True - ) - cmd_result.assert_exit_code( - message=f"{mount_point}/testfile.txt may not exist." - ) + self._verify_nvme_function(node) - # 4. Create a file 'data' on the partition, get the md5sum value. - cmd_result = node.execute( - f"dd if=/dev/zero of={mount_point}/data bs=10M count=100", - shell=True, - sudo=True, - ) - cmd_result.assert_exit_code( - message=f"{mount_point}/data is not created successfully, " - "please check the disk space." 
-            )
-            initial_md5 = node.execute(
-                f"md5sum {mount_point}/data", shell=True, sudo=True
-            )
-            initial_md5.assert_exit_code(
-                message=f"{mount_point}/data not exist or md5sum command enounter"
-                " unexpected error."
-            )
-
-            # 5. Umount and remount the partition.
-            mount.umount(namespace, mount_point, erase=False)
-            mount.mount(f"{namespace}p1", mount_point)
-
-            # 6. Get the txt file content, compare the value.
-            file_content = cat.run(f"{mount_point}/testfile.txt", shell=True, sudo=True)
-            assert_that(
-                file_content.stdout,
-                f"content of {mount_point}/testfile.txt should keep consistent "
-                "after umount and re-mount.",
-            ).is_equal_to("TestContent")
-
-            # 6. Get md5sum value of file 'data', compare with initial value.
-            final_md5 = node.execute(
-                f"md5sum {mount_point}/data", shell=True, sudo=True
-            )
-            assert_that(
-                initial_md5.stdout,
-                f"md5sum of {mount_point}/data should keep consistent "
-                "after umount and re-mount.",
-            ).is_equal_to(final_md5.stdout)
-
-            # 7. Compare the number of errors from nvme-cli after operations.
-            error_count_after_operations = nvme_cli.get_error_count(namespace)
-            assert_that(
-                error_count_before_operations,
-                "error-log should not increase after operations.",
-            ).is_equal_to(error_count_after_operations)
-
-            mount.umount(disk_name=namespace, point=mount_point)
+    @TestCaseMetadata(
+        description="""
+        This test case is the same as `verify_nvme_function`, except that it uses
+        unpartitioned disks.
+        This test case will do the following things for each NVMe device.
+        1. Get the number of errors from nvme-cli before operations.
+        2. Create a filesystem and mount it.
+        3. Create a txt file on the partition, content is 'TestContent'.
+        4. Create a file 'data' on the partition, get the md5sum value.
+        5. Umount and remount the partition.
+        6. Get the txt file content, compare the value.
+        7. Compare the number of errors from nvme-cli after operations.
+        """,
+        priority=2,
+        requirement=simple_requirement(
+            supported_features=[Nvme],
+        ),
+    )
+    def verify_nvme_function_unpartitioned(self, node: Node) -> None:
+        self._verify_nvme_function(node, use_partitions=False)
 
     @TestCaseMetadata(
         description="""
@@ -187,6 +148,7 @@ def verify_nvme_fstrim(self, node: Node) -> None:
         nvme = node.features[Nvme]
         nvme_namespaces = nvme.get_raw_nvme_disks()
         mount = node.tools[Mount]
+        df = node.tools[Df]
 
         for namespace in nvme_namespaces:
             mount_point = namespace.rpartition("/")[-1]
@@ -202,12 +164,16 @@ def verify_nvme_fstrim(self, node: Node) -> None:
                 message=f"{mount_point} not exist or fstrim command enounter "
                 "unexpected error."
             )
-
-            # 3. Create a 300 gb file 'data' using dd command in the partition.
+            # Use 80% of free space to create a test file.
+            free_space_gb = int(df.get_filesystem_available_space(mount_point) * 0.8)
+            # Limit the test file size to 300GB to avoid a long-running operation.
+            free_space_gb = min(free_space_gb, 300)
+            # 3. Create a file 'data' using dd command in the partition.
             cmd_result = node.execute(
-                f"dd if=/dev/zero of={mount_point}/data bs=1G count=300",
+                f"dd if=/dev/zero of={mount_point}/data bs=1G count={free_space_gb}",
                 shell=True,
                 sudo=True,
+                timeout=1200,
             )
             cmd_result.assert_exit_code(
                 message=f"{mount_point}/data is not created successfully, "
                 "please check the disk space."
@@ -350,7 +316,9 @@ def verify_nvme_manage_ns(self, node: Node) -> None:
         description="""
         This test case will
         1. Disable NVME devices.
-        2. Enable NVME device.
+        2. Enable PCI devices.
+        3. Get NVMe device slots.
+        4. Check NVMe devices are back after rescan.
""", priority=2, requirement=simple_requirement( @@ -360,9 +328,16 @@ def verify_nvme_manage_ns(self, node: Node) -> None: def verify_nvme_rescind(self, node: Node) -> None: lspci = node.tools[Lspci] # 1. Disable NVME devices. - lspci.disable_devices_by_type(device_type=constants.DEVICE_TYPE_NVME) - # 2. Enable NVME device. + before_pci_count = lspci.disable_devices_by_type(device_type=DEVICE_TYPE_NVME) + # 2. Enable PCI devices. lspci.enable_devices() + # 3. Get PCI devices slots. + after_devices_slots = lspci.get_device_names_by_type(DEVICE_TYPE_NVME, True) + # 4. Check PCI devices are back after rescan. + assert_that( + after_devices_slots, + "After rescan, the disabled NVMe PCI devices should be back.", + ).is_length(before_pci_count) @TestCaseMetadata( description=""" @@ -381,7 +356,7 @@ def verify_nvme_rescind(self, node: Node) -> None: ) def verify_nvme_sriov_rescind(self, node: Node) -> None: lspci = node.tools[Lspci] - device_types = [constants.DEVICE_TYPE_NVME, constants.DEVICE_TYPE_SRIOV] + device_types = [DEVICE_TYPE_NVME, DEVICE_TYPE_SRIOV] for device_type in device_types: # 1. Disable PCI devices. before_pci_count = lspci.disable_devices_by_type(device_type) @@ -392,7 +367,7 @@ def verify_nvme_sriov_rescind(self, node: Node) -> None: # 4. Check PCI devices are back after rescan. assert_that( after_devices_slots, - "After rescan, the disabled PCI devices should be back.", + f"After rescan, the disabled {device_type} PCI devices should be back.", ).is_length(before_pci_count) def _verify_nvme_disk(self, environment: Environment, node: Node) -> None: @@ -431,3 +406,78 @@ def _verify_nvme_disk(self, environment: Environment, node: Node) -> None: assert_that(nvme_namespace).described_as( "nvme devices count should be equal to [vCPU/8]." ).is_length(expected_count) + + def _verify_nvme_function(self, node: Node, use_partitions: bool = True) -> None: + # Verify the basic function of all NVMe disks + nvme = node.features[Nvme] + nvme_namespaces = nvme.get_raw_nvme_disks() + nvme_cli = node.tools[Nvmecli] + cat = node.tools[Cat] + mount = node.tools[Mount] + for namespace in nvme_namespaces: + # 1. Get the number of errors from nvme-cli before operations. + error_count_before_operations = nvme_cli.get_error_count(namespace) + + # 2. Create a partition, filesystem and mount it. + _format_mount_disk(node, namespace, FileSystem.ext4, use_partitions) + + # 3. Create a txt file on the partition, content is 'TestContent'. + mount_point = namespace.rpartition("/")[-1] + cmd_result = node.execute( + f"echo TestContent > {mount_point}/testfile.txt", shell=True, sudo=True + ) + cmd_result.assert_exit_code( + message=f"{mount_point}/testfile.txt may not exist." + ) + + # 4. Create a file 'data' on the partition, get the md5sum value. + cmd_result = node.execute( + f"dd if=/dev/zero of={mount_point}/data bs=10M count=100", + shell=True, + sudo=True, + ) + cmd_result.assert_exit_code( + message=f"{mount_point}/data is not created successfully, " + "please check the disk space." + ) + initial_md5 = node.execute( + f"md5sum {mount_point}/data", shell=True, sudo=True + ) + initial_md5.assert_exit_code( + message=f"{mount_point}/data not exist or md5sum command encountered" + " unexpected error." + ) + + # 5. Umount and remount the partition. + mount.umount(namespace, mount_point, erase=False) + if use_partitions: + mount.mount(f"{namespace}p1", mount_point) + else: + mount.mount(f"{namespace}", mount_point) + + # 6. Get the txt file content, compare the value. 
+ file_content = cat.run(f"{mount_point}/testfile.txt", shell=True, sudo=True) + assert_that( + file_content.stdout, + f"content of {mount_point}/testfile.txt should keep consistent " + "after umount and re-mount.", + ).is_equal_to("TestContent") + + # 6. Get md5sum value of file 'data', compare with initial value. + final_md5 = node.execute( + f"md5sum {mount_point}/data", shell=True, sudo=True + ) + assert_that( + initial_md5.stdout, + f"md5sum of {mount_point}/data should keep consistent " + "after umount and re-mount.", + ).is_equal_to(final_md5.stdout) + + # 7. Compare the number of errors from nvme-cli after operations. + error_count_after_operations = nvme_cli.get_error_count(namespace) + assert_that( + error_count_before_operations, + "error-log should not increase after operations.", + ).is_equal_to(error_count_after_operations) + + mount.umount(disk_name=namespace, point=mount_point)
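
A minimal standalone sketch (illustrative only) of what the NVME_DEVICE_PATTERN added in lisa/features/nvme.py is meant to do: it reduces an OS boot partition path such as /dev/nvme0n1p15 to its controller device /dev/nvme0, so the whole OS NVMe controller can be excluded from the device list when the disk controller type is NVMe. The sample path below is an assumed value.

import re

# Same pattern the patch adds as Nvme.NVME_DEVICE_PATTERN.
NVME_DEVICE_PATTERN = re.compile(r"/dev/nvme[0-9]+", re.M)

# Assumed OS boot partition path reported by the node (e.g. mounted at /boot/efi).
os_boot_partition = "/dev/nvme0n1p15"
match = NVME_DEVICE_PATTERN.match(os_boot_partition)
os_disk_nvme_device = match.group(0) if match else ""
print(os_disk_nvme_device)  # -> /dev/nvme0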