NVMe and resource disk test case fixes
1. Added new test case "Nvme.verify_nvme_function_unpartitioned".
2. NVMe test case fixes for disk controller type NVMe.
3. Resource disk test case fixes for VMs with local NVMe disks.
4. "Nvme.verify_nvme_fstrim" - size the fstrim test file based on available space.
SRIKKANTH committed Nov 12, 2024
1 parent 681f5bd commit 7bb2e16
Showing 5 changed files with 320 additions and 161 deletions.
41 changes: 39 additions & 2 deletions lisa/features/nvme.py
@@ -16,6 +16,7 @@
from lisa.tools import Ls, Lspci, Nvmecli
from lisa.tools.lspci import PciDevice
from lisa.util import field_metadata, get_matched_str
from lisa.util.constants import DEVICE_TYPE_NVME


class Nvme(Feature):
@@ -42,6 +43,9 @@ class Nvme(Feature):
# /dev/nvme0n1p15 -> /dev/nvme0n1
NVME_NAMESPACE_PATTERN = re.compile(r"/dev/nvme[0-9]+n[0-9]+", re.M)

# /dev/nvme0n1p15 -> /dev/nvme0
NVME_DEVICE_PATTERN = re.compile(r"/dev/nvme[0-9]+", re.M)

_pci_device_name = "Non-Volatile memory controller"
_ls_devices: str = ""

@@ -63,6 +67,17 @@ def get_devices(self) -> List[str]:
matched_result = self._device_pattern.match(row)
if matched_result:
devices_list.append(matched_result.group("device_name"))
node_disk = self._node.features[Disk]
# With disk controller type NVMe, all remote managed disks attached to the VM
# (including the OS disk) appear as NVMe devices.
# All the remote managed disks (including the OS disk) use the same NVMe
# controller in the VM.
# Excluding the OS NVMe device from the list of NVMe devices will remove
# all the remote managed disks.
if node_disk.get_os_disk_controller_type() == schema.DiskControllerType.NVME:
os_disk_nvme_device = self._get_os_disk_nvme_device()
# Removing OS disk/device from the list.
devices_list.remove(os_disk_nvme_device)
return devices_list

def get_namespaces(self) -> List[str]:
@@ -78,7 +93,17 @@ def get_namespaces(self) -> List[str]:
return namespaces

def get_namespaces_from_cli(self) -> List[str]:
return self._node.tools[Nvmecli].get_namespaces()
namespaces_list = self._node.tools[Nvmecli].get_namespaces()
node_disk = self._node.features[Disk]
# With disk controller type NVMe, the OS disk along with all remote disks
# appears as NVMe.
# Removing the OS disk from the list of NVMe namespaces will remove all the
# remote non-NVMe disks.
if node_disk.get_os_disk_controller_type() == schema.DiskControllerType.NVME:
os_disk_nvme_namespace = self.get_os_disk_nvme_namespace()
# Removing OS disk/device from the list.
namespaces_list.remove(os_disk_nvme_namespace)
return namespaces_list

def get_os_disk_nvme_namespace(self) -> str:
node_disk = self._node.features[Disk]
@@ -93,10 +118,22 @@ def get_os_disk_nvme_namespace(self) -> str:
)
return os_partition_namespace

# This method returns the NVMe device name of the OS disk.
def _get_os_disk_nvme_device(self) -> str:
# Default to an empty string so the method is safe when no namespace is found.
os_disk_nvme_device = ""
os_disk_nvme_namespace = self.get_os_disk_nvme_namespace()
# Sample os_boot_partition when disk controller type is NVMe:
# name: /dev/nvme0n1p15, disk: nvme, mount_point: /boot/efi, type: vfat
if os_disk_nvme_namespace:
os_disk_nvme_device = get_matched_str(
os_disk_nvme_namespace,
self.NVME_DEVICE_PATTERN,
)
return os_disk_nvme_device

def get_devices_from_lspci(self) -> List[PciDevice]:
devices_from_lspci = []
lspci_tool = self._node.tools[Lspci]
device_list = lspci_tool.get_devices()
device_list = lspci_tool.get_devices_by_type(DEVICE_TYPE_NVME)
devices_from_lspci = [
x for x in device_list if self._pci_device_name == x.device_class
]
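
A minimal standalone sketch (not part of this commit) of how the two patterns above turn a boot-partition path into the OS controller device that get_devices() drops; the device paths are illustrative:

import re

# Same patterns as in lisa/features/nvme.py above.
NVME_NAMESPACE_PATTERN = re.compile(r"/dev/nvme[0-9]+n[0-9]+", re.M)
NVME_DEVICE_PATTERN = re.compile(r"/dev/nvme[0-9]+", re.M)

# Hypothetical boot partition on a VM with disk controller type NVMe.
os_boot_partition = "/dev/nvme0n1p15"

# /dev/nvme0n1p15 -> /dev/nvme0n1 (namespace) -> /dev/nvme0 (controller)
os_namespace = NVME_NAMESPACE_PATTERN.search(os_boot_partition).group()
os_device = NVME_DEVICE_PATTERN.search(os_namespace).group()

# All remote managed disks share the OS controller, so removing that one
# device leaves only the local NVMe controllers.
devices = ["/dev/nvme0", "/dev/nvme1"]
devices.remove(os_device)
assert devices == ["/dev/nvme1"]
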
14 changes: 14 additions & 0 deletions lisa/sut_orchestrator/azure/features.py
@@ -1897,6 +1897,20 @@ def remove_data_disk(self, names: Optional[List[str]] = None) -> None:
self._node.capability.disk.data_disk_count -= len(names)
self._node.close()

def get_resource_disks(self) -> List[str]:
resource_disk_mount_point = self.get_resource_disk_mount_point()
resource_disks = []
try:
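# get_partition_with_mount_point raises an AssertionError when no
# partition is mounted at the resource disk mount point.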
resourcedisk = self._node.features[Disk].get_partition_with_mount_point(
resource_disk_mount_point
)
if resourcedisk:
resource_disks = [resourcedisk.name]
except AssertionError:
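# No mounted resource disk found; fall back to the local (raw) NVMe disks.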
nvme = self._node.features[Nvme]
resource_disks = nvme.get_raw_nvme_disks()
return resource_disks

def get_resource_disk_mount_point(self) -> str:
# Get the customized mount point from the cloud-init configuration file
# under /etc/cloud/; if not found, use cloud-init's default mount point /mnt.
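
A hedged usage sketch of the new get_resource_disks() (the node object and the returned values are illustrative; on SCSI-based sizes it returns the mounted partition, on sizes with local NVMe it falls back to the raw NVMe disks):

# Sketch only: `node` is assumed to be a connected LISA RemoteNode.
resource_disks = node.features[Disk].get_resource_disks()
if resource_disks and "nvme" in resource_disks[0]:
    # e.g. ["/dev/nvme0n1", "/dev/nvme1n1"] on sizes with local NVMe
    print(f"local NVMe resource disks: {resource_disks}")
else:
    # e.g. ["sdb1"], the partition mounted at the resource disk mount point
    print(f"resource disk partition: {resource_disks}")
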
108 changes: 71 additions & 37 deletions microsoft/testsuites/core/azure_image_standard.py
@@ -1115,32 +1115,51 @@ def verify_no_pre_exist_users(self, node: Node) -> None:
),
)
def verify_resource_disk_readme_file(self, node: RemoteNode) -> None:
resource_disk_mount_point = node.features[Disk].get_resource_disk_mount_point()

# verify that resource disk is mounted
# function returns successfully if disk matching mount point is present
node.features[Disk].get_partition_with_mount_point(resource_disk_mount_point)

# Verify lost+found folder exists
# Skip this step for BSD as it does not have lost+found folder
# since it uses UFS file system
if not isinstance(node.os, BSD):
fold_path = f"{resource_disk_mount_point}/lost+found"
folder_exists = node.tools[Ls].path_exists(fold_path, sudo=True)
assert_that(folder_exists, f"{fold_path} should be present").is_true()

# verify DATALOSS_WARNING_README.txt file exists
file_path = f"{resource_disk_mount_point}/DATALOSS_WARNING_README.txt"
file_exists = node.tools[Ls].path_exists(file_path, sudo=True)
assert_that(file_exists, f"{file_path} should be present").is_true()

# verify 'WARNING: THIS IS A TEMPORARY DISK' contained in
# DATALOSS_WARNING_README.txt file.
read_text = node.tools[Cat].read(file_path, force_run=True, sudo=True)
assert_that(
read_text,
f"'WARNING: THIS IS A TEMPORARY DISK' should be present in {file_path}",
).contains("WARNING: THIS IS A TEMPORARY DISK")
node_disk = node.features[Disk]
resource_disks = node_disk.get_resource_disks()
if not resource_disks:
raise LisaException("Resource disk not found")
if "nvme" in resource_disks[0]:
raise SkippedException(
f"Resource disk type is NVMe and the VM has {len(resource_disks)} NVMe disks" # noqa: E501
)
else:
resource_disk_mount_point = node.features[
Disk
].get_resource_disk_mount_point()

# verify that resource disk is mounted
# function returns successfully if disk matching mount point is present
node.features[Disk].get_partition_with_mount_point(
resource_disk_mount_point
)
# Verify lost+found folder exists
# Skip this step for BSD as it does not have lost+found folder
# since it uses UFS file system
if not isinstance(node.os, BSD):
fold_path = f"{resource_disk_mount_point}/lost+found"
folder_exists = node.tools[Ls].path_exists(fold_path, sudo=True)
assert_that(folder_exists, f"{fold_path} should be present").is_true()

# verify DATALOSS_WARNING_README.txt file exists
file_path = f"{resource_disk_mount_point}/DATALOSS_WARNING_README.txt"
file_exists = node.tools[Ls].path_exists(file_path, sudo=True)
assert_that(file_exists, f"{file_path} should be present").is_true()
# verify 'WARNING: THIS IS A TEMPORARY DISK' contained in
# DATALOSS_WARNING_README.txt file.
read_text = node.tools[Cat].read(file_path, force_run=True, sudo=True)
assert_that(
read_text,
f"'WARNING: THIS IS A TEMPORARY DISK' should be present in {file_path}",
).contains("WARNING: THIS IS A TEMPORARY DISK")

@TestCaseMetadata(
description="""
@@ -1159,14 +1178,29 @@ def verify_resource_disk_readme_file(self, node: RemoteNode) -> None:
),
)
def verify_resource_disk_file_system(self, node: RemoteNode) -> None:
resource_disk_mount_point = node.features[Disk].get_resource_disk_mount_point()
node.features[Disk].get_partition_with_mount_point(resource_disk_mount_point)
disk_info = node.tools[Lsblk].find_disk_by_mountpoint(resource_disk_mount_point)
for partition in disk_info.partitions:
# by default, the resource disk comes formatted as ntfs
# waagent or cloud-init will reformat it unless some command hangs
# or is interrupted
assert_that(
partition.fstype,
"Resource disk file system type should not equal to ntfs",
).is_not_equal_to("ntfs")
node_disk = node.features[Disk]
resource_disks = node_disk.get_resource_disks()
if not resource_disks:
raise LisaException("Resource disk not found")
if "nvme" in resource_disks[0]:
raise SkippedException(
f"Resource disk type is NVMe and the VM has {len(resource_disks)} NVMe disks" # noqa: E501
)
else:
resource_disk_mount_point = node.features[
Disk
].get_resource_disk_mount_point()
node.features[Disk].get_partition_with_mount_point(
resource_disk_mount_point
)
disk_info = node.tools[Lsblk].find_disk_by_mountpoint(
resource_disk_mount_point
)
for partition in disk_info.partitions:
# by default, the resource disk comes formatted as ntfs
# waagent or cloud-init will reformat it unless some command hangs
# or is interrupted
assert_that(
partition.fstype,
"Resource disk file system type should not equal to ntfs",
).is_not_equal_to("ntfs")
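
The resource-disk tests in this file and in storage.py now open with the same guard: collect the resource disks, fail if none are found, and skip when they are local NVMe. If the pattern keeps spreading, it could be factored into a helper along these lines (a sketch, not part of this commit; assumes LisaException and SkippedException from lisa.util):

from typing import List

from lisa.util import LisaException, SkippedException


def get_scsi_resource_disks_or_skip(node_disk) -> List[str]:
    # Shared guard for resource disk tests: return the mounted resource
    # disks, or skip the test on VM sizes with local NVMe resource disks.
    resource_disks = node_disk.get_resource_disks()
    if not resource_disks:
        raise LisaException("Resource disk not found")
    if "nvme" in resource_disks[0]:
        raise SkippedException(
            "Resource disk type is NVMe and the VM has "
            f"{len(resource_disks)} NVMe disks"
        )
    return resource_disks
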
108 changes: 66 additions & 42 deletions microsoft/testsuites/core/storage.py
@@ -160,32 +160,43 @@ def verify_disks_device_timeout_setting(
),
)
def verify_resource_disk_mounted(self, node: RemoteNode) -> None:
resource_disk_mount_point = node.features[Disk].get_resource_disk_mount_point()
# os disk (root disk) is the entry with mount point `/` in the output
# of the `mount` command
os_disk = (
node.features[Disk]
.get_partition_with_mount_point(self.os_disk_mount_point)
.disk
)
if isinstance(node.os, BSD):
partition_info = node.tools[Mount].get_partition_info()
resource_disk_from_mtab = [
entry
for entry in partition_info
if entry.mount_point == resource_disk_mount_point
][0].mount_point
node_disk = node.features[Disk]
resource_disks = node_disk.get_resource_disks()
if not resource_disks:
raise LisaException("Resource disk not found")
if "nvme" in resource_disks[0]:
raise SkippedException(
f"Resource disk type is NVMe and the VM has {len(resource_disks)} NVMe disks" # noqa: E501
)
else:
mtab = node.tools[Cat].run("/etc/mtab").stdout
resource_disk_from_mtab = get_matched_str(
mtab, self._get_mtab_mount_point_regex(resource_disk_mount_point)
resource_disk_mount_point = node.features[
Disk
].get_resource_disk_mount_point()
# os disk (root disk) is the entry with mount point `/` in the output
# of the `mount` command
os_disk = (
node.features[Disk]
.get_partition_with_mount_point(self.os_disk_mount_point)
.disk
)
assert (
resource_disk_from_mtab
), f"resource disk mountpoint not found {resource_disk_mount_point}"
assert_that(
resource_disk_from_mtab, "Resource disk should not be equal to os disk"
).is_not_equal_to(os_disk)
if isinstance(node.os, BSD):
partition_info = node.tools[Mount].get_partition_info()
resource_disk_from_mtab = [
entry
for entry in partition_info
if entry.mount_point == resource_disk_mount_point
][0].mount_point
else:
mtab = node.tools[Cat].run("/etc/mtab").stdout
resource_disk_from_mtab = get_matched_str(
mtab, self._get_mtab_mount_point_regex(resource_disk_mount_point)
)
assert (
resource_disk_from_mtab
), f"resource disk mountpoint not found {resource_disk_mount_point}"
assert_that(
resource_disk_from_mtab, "Resource disk should not be equal to os disk"
).is_not_equal_to(os_disk)

@TestCaseMetadata(
description="""
@@ -199,7 +210,7 @@ def verify_resource_disk_mounted(self, node: RemoteNode) -> None:
priority=1,
requirement=simple_requirement(
supported_platform_type=[AZURE],
unsupported_os=[BSD, Windows]
unsupported_os=[BSD, Windows],
# This test is skipped as waagent does not support freebsd fully
),
)
@@ -229,27 +240,40 @@ def verify_swap(self, node: RemoteNode) -> None:
),
)
def verify_resource_disk_io(self, node: RemoteNode) -> None:
resource_disk_mount_point = node.features[Disk].get_resource_disk_mount_point()

# verify that resource disk is mounted
# function returns successfully if disk matching mount point is present
node.features[Disk].get_partition_with_mount_point(resource_disk_mount_point)
node_disk = node.features[Disk]
resource_disks = node_disk.get_resource_disks()
if not resource_disks:
raise LisaException("Resource disk not found")
if "nvme" in resource_disks[0]:
raise SkippedException(
f"Resource disk type is NVMe and the VM has {len(resource_disks)} NVMe disks" # noqa: E501
)
else:
resource_disk_mount_point = node.features[
Disk
].get_resource_disk_mount_point()

# verify that resource disk is mounted
# function returns successfully if disk matching mount point is present
node.features[Disk].get_partition_with_mount_point(
resource_disk_mount_point
)

file_path = f"{resource_disk_mount_point}/sample.txt"
original_text = "Writing to resource disk!!!"
file_path = f"{resource_disk_mount_point}/sample.txt"
original_text = "Writing to resource disk!!!"

# write content to the file
node.tools[Echo].write_to_file(
original_text, node.get_pure_path(file_path), sudo=True
)
# write content to the file
node.tools[Echo].write_to_file(
original_text, node.get_pure_path(file_path), sudo=True
)

# read content from the file
read_text = node.tools[Cat].read(file_path, force_run=True, sudo=True)
# read content from the file
read_text = node.tools[Cat].read(file_path, force_run=True, sudo=True)

assert_that(
read_text,
"content read from file should be equal to content written to file",
).is_equal_to(original_text)
assert_that(
read_text,
"content read from file should be equal to content written to file",
).is_equal_to(original_text)

@TestCaseMetadata(
description="""
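
The fifth changed file — presumably the NVMe test suite carrying the new verify_nvme_function_unpartitioned case and the verify_nvme_fstrim sizing fix from item 4 of the commit message — is not rendered above. The sizing idea, as a hypothetical sketch only (the helper name and the 80% ratio are illustrative, not taken from the diff):

import os


def fstrim_test_file_size_gb(mount_point: str, ratio: float = 0.8) -> int:
    # Size the fstrim test file from the space actually available on the
    # target mount point instead of assuming a fixed size.
    stat = os.statvfs(mount_point)
    free_gb = stat.f_bavail * stat.f_frsize / (1024**3)
    return max(1, int(free_gb * ratio))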