
Commit

Increase smoke timeout to 9000 temp
kanchansenlaskar committed Jun 6, 2024
1 parent cc6168c commit e79e0df
Showing 7 changed files with 47 additions and 69 deletions.
3 changes: 1 addition & 2 deletions lisa/features/nvme.py
@@ -88,8 +88,7 @@ def get_os_disk_nvme_namespace(self) -> str:
         # name: /dev/nvme0n1p15, disk: nvme, mount_point: /boot/efi, type: vfat
         if os_boot_partition:
             os_partition_namespace = get_matched_str(
-                os_boot_partition.name,
-                self.NVME_NAMESPACE_PATTERN,
+                os_boot_partition.name, self.NVME_NAMESPACE_PATTERN
             )
         return os_partition_namespace

2 changes: 1 addition & 1 deletion lisa/node.py
@@ -238,7 +238,7 @@ def create(
         )
         return node
 
-    def reboot(self, time_out: int = 300) -> None:
+    def reboot(self, time_out: int = 7200) -> None:
         self.tools[Reboot].reboot(time_out)
 
     def execute(
91 changes: 35 additions & 56 deletions lisa/sut_orchestrator/azure/features.py
@@ -1616,8 +1616,41 @@ def get_hardware_disk_controller_type(self) -> Any:
         vm = get_vm(azure_platform, self._node)
         return vm.storage_profile.disk_controller_type
 
-    def _get_scsi_data_disks(self) -> List[str]:
-        # This method restuns azure data disks attached to you given VM.
+    def get_raw_data_disks(self) -> List[str]:
+        # Handle BSD case
+        if isinstance(self._node.os, BSD):
+            return self._get_raw_data_disks_bsd()
+
+        # disk_controller_type == NVME
+        node_disk = self._node.features[Disk]
+        if node_disk.get_os_disk_controller_type() == schema.DiskControllerType.NVME:
+            # Getting OS disk nvme namespace and disk controller used by OS disk.
+            # Sample os_boot_partition:
+            # name: /dev/nvme0n1p15, disk: nvme, mount_point: /boot/efi, type: vfat
+            os_boot_partition = node_disk.get_os_boot_partition()
+            if os_boot_partition:
+                os_disk_namespace = get_matched_str(
+                    os_boot_partition.name, self.NVME_NAMESPACE_PATTERN
+                )
+                os_disk_controller = get_matched_str(
+                    os_boot_partition.name, self.NVME_CONTROLLER_PATTERN
+                )
+
+            # With NVMe disk controller type, all remote SCSI disks are connected to
+            # same NVMe controller. The same controller is used by OS disk.
+            # This loop collects all the SCSI remote disks except OS disk.
+            nvme = self._node.features[Nvme]
+            nvme_namespaces = nvme.get_namespaces()
+            disk_array = []
+            for name_space in nvme_namespaces:
+                if (
+                    name_space.startswith(os_disk_controller)
+                    and name_space != os_disk_namespace
+                ):
+                    disk_array.append(name_space)
+            return disk_array
+
+        # disk_controller_type == SCSI
         # refer here to get data disks from folder /dev/disk/azure/scsi1
         # Example: /dev/disk/azure/scsi1/lun0
         # https://docs.microsoft.com/en-us/troubleshoot/azure/virtual-machines/troubleshoot-device-names-problems#identify-disk-luns  # noqa: E501
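
Note: a minimal sketch of the namespace filtering added above (and of the get_matched_str call touched in lisa/features/nvme.py), using simplified stand-in regexes and hypothetical device names; the real NVME_NAMESPACE_PATTERN and NVME_CONTROLLER_PATTERN are defined elsewhere in the LISA source.

import re

# Simplified stand-ins for the class attributes used above; these are
# assumptions for illustration, not the patterns shipped in LISA.
NVME_NAMESPACE_PATTERN = re.compile(r"(/dev/nvme\d+n\d+)")
NVME_CONTROLLER_PATTERN = re.compile(r"(/dev/nvme\d+)")

boot_partition_name = "/dev/nvme0n1p15"  # sample value from the comment above
os_disk_namespace = NVME_NAMESPACE_PATTERN.search(boot_partition_name).group(1)
os_disk_controller = NVME_CONTROLLER_PATTERN.search(boot_partition_name).group(1)

# Hypothetical namespace list: remote data disks hang off the OS disk's controller.
namespaces = ["/dev/nvme0n1", "/dev/nvme0n2", "/dev/nvme0n3", "/dev/nvme1n1"]
data_disks = [
    ns
    for ns in namespaces
    if ns.startswith(os_disk_controller) and ns != os_disk_namespace
]
print(data_disks)  # ['/dev/nvme0n2', '/dev/nvme0n3']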
@@ -1684,60 +1717,6 @@ def get_luns(self) -> Dict[str, int]:
             device_luns.update({cmd_result.stdout: device_lun})
         return device_luns
 
-    def get_raw_data_disks(self) -> List[str]:
-        # Handle BSD case
-        if isinstance(self._node.os, BSD):
-            return self._get_raw_data_disks_bsd()
-
-        # disk_controller_type == NVME
-        node_disk = self._node.features[Disk]
-        if node_disk.get_os_disk_controller_type() == schema.DiskControllerType.NVME:
-            # Getting OS disk nvme namespace and disk controller used by OS disk.
-            # Sample os_boot_partition:
-            # name: /dev/nvme0n1p15, disk: nvme, mount_point: /boot/efi, type: vfat
-            os_boot_partition = node_disk.get_os_boot_partition()
-            if os_boot_partition:
-                os_disk_namespace = get_matched_str(
-                    os_boot_partition.name,
-                    self.NVME_NAMESPACE_PATTERN,
-                )
-                os_disk_controller = get_matched_str(
-                    os_boot_partition.name,
-                    self.NVME_CONTROLLER_PATTERN,
-                )
-
-            # With NVMe disk controller type, all remote SCSI disks are connected to
-            # same NVMe controller. The same controller is used by OS disk.
-            # This loop collects all the SCSI remote disks except OS disk.
-            nvme = self._node.features[Nvme]
-            nvme_namespaces = nvme.get_namespaces()
-            disk_array = []
-            for name_space in nvme_namespaces:
-                if (
-                    name_space.startswith(os_disk_controller)
-                    and name_space != os_disk_namespace
-                ):
-                    disk_array.append(name_space)
-            return disk_array
-
-        # disk_controller_type == SCSI
-
-        # get azure scsi attached disks
-        azure_scsi_disks = self._get_scsi_data_disks()
-        assert_that(len(azure_scsi_disks)).described_as(
-            "no data disks info found under /dev/disk/azure/scsi1"
-        ).is_greater_than(0)
-        assert azure_scsi_disks, "not find data disks"
-        disk_array = [""] * len(azure_scsi_disks)
-        for disk in azure_scsi_disks:
-            # readlink -f /dev/disk/azure/scsi1/lun0
-            # /dev/sdc
-            cmd_result = self._node.execute(
-                f"readlink -f {disk}", shell=True, sudo=True
-            )
-            disk_array[int(disk.split("/")[-1].replace("lun", ""))] = cmd_result.stdout
-        return disk_array
 
     def get_all_disks(self) -> List[str]:
         if isinstance(self._node.os, BSD):
             disk_label_pattern = self.DISK_LABEL_PATTERN_BSD
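Note: the SCSI branch removed above (and re-added near line 1616) indexes disks by LUN rather than by discovery order. A runnable sketch of that indexing, with hypothetical readlink -f results standing in for a real VM:

# Hypothetical symlink-to-device mapping under /dev/disk/azure/scsi1.
resolved = {
    "/dev/disk/azure/scsi1/lun1": "/dev/sdd",
    "/dev/disk/azure/scsi1/lun0": "/dev/sdc",
}

disk_array = [""] * len(resolved)
for link, device in resolved.items():
    # "/dev/disk/azure/scsi1/lun0" -> "lun0" -> 0: the LUN is the array index.
    lun = int(link.split("/")[-1].replace("lun", ""))
    disk_array[lun] = device

print(disk_array)  # ['/dev/sdc', '/dev/sdd'], ordered by LUN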
2 changes: 1 addition & 1 deletion lisa/testsuite.py
@@ -481,7 +481,7 @@ def __init__(
         self,
         description: str,
         priority: int = 2,
-        timeout: int = 3600,
+        timeout: int = 9000,
         use_new_environment: bool = False,
         owner: str = "",
         requirement: Optional[TestCaseRequirement] = None,
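Note: with this default raised from 3600 to 9000 seconds, any test case that omits timeout in its metadata inherits the new value. A hypothetical suite (assuming LISA's top-level exports; not part of this commit):

from lisa import TestCaseMetadata, TestSuite, TestSuiteMetadata


@TestSuiteMetadata(
    area="demo",
    category="functional",
    description="""
    Hypothetical suite illustrating the raised default timeout.
    """,
)
class TimeoutDemo(TestSuite):
    @TestCaseMetadata(
        description="""
        Omits `timeout`, so it now gets the 9000-second default.
        """,
        priority=3,
    )
    def verify_timeout_default(self) -> None:
        pass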
4 changes: 2 additions & 2 deletions microsoft/testsuites/core/provisioning.py
@@ -49,8 +49,8 @@
""",
)
class Provisioning(TestSuite):
TIME_OUT = 7200
PLATFORM_TIME_OUT = 7200
TIME_OUT = 9000
PLATFORM_TIME_OUT = 9000

@TestCaseMetadata(
description="""
12 changes: 6 additions & 6 deletions microsoft/testsuites/core/storage.py
@@ -633,9 +633,9 @@ def _hot_add_disk_serial(
         lsblk = node.tools[Lsblk]
 
         # get max data disk count for the node
-        #assert node.capability.disk
-        #assert isinstance(node.capability.disk.max_data_disk_count, int)
-        #max_data_disk_count = node.capability.disk.max_data_disk_count
+        # assert node.capability.disk
+        # assert isinstance(node.capability.disk.max_data_disk_count, int)
+        # max_data_disk_count = node.capability.disk.max_data_disk_count
         max_data_disk_count = 64
         log.debug(f"max_data_disk_count: {max_data_disk_count}")
 
@@ -721,9 +721,9 @@ def _hot_add_disk_parallel(
         lsblk = node.tools[Lsblk]
 
         # get max data disk count for the node
-        #assert node.capability.disk
-        #assert isinstance(node.capability.disk.max_data_disk_count, int)
-        #max_data_disk_count = node.capability.disk.max_data_disk_count
+        # assert node.capability.disk
+        # assert isinstance(node.capability.disk.max_data_disk_count, int)
+        # max_data_disk_count = node.capability.disk.max_data_disk_count
         max_data_disk_count = 64
         log.debug(f"max_data_disk_count: {max_data_disk_count}")
 
2 changes: 1 addition & 1 deletion microsoft/testsuites/network/stress.py
@@ -125,7 +125,7 @@ def stress_sriov_iperf(self, environment: Environment) -> None:
         5. Do step 2 ~ step 4 for 25 times.
         """,
         priority=3,
-        timeout=4500,
+        timeout=9000,
         requirement=simple_requirement(
             min_core_count=4,
             network_interface=features.Sriov(),
