Skip to content

Commit d74dbbf

Browse files
committed
[VIRT] Move fixtures used exclusively by tests/virt
Move fixtures that are used exclusively by tests/virt from the main tests/conftest.py to tests/virt/conftest.py for better organization and proximity to their usage.
1 parent c6f0da0 commit d74dbbf

File tree

4 files changed

+58
-83
lines changed

4 files changed

+58
-83
lines changed

tests/conftest.py

Lines changed: 0 additions & 74 deletions
Original file line numberDiff line numberDiff line change
@@ -76,7 +76,6 @@
7676
from utilities.bitwarden import get_cnv_tests_secret_by_name
7777
from utilities.constants import (
7878
AAQ_NAMESPACE_LABEL,
79-
AMD,
8079
ARM_64,
8180
ARQ_QUOTA_HARD_SPEC,
8281
AUDIT_LOGS_PATH,
@@ -90,7 +89,6 @@
9089
HCO_SUBSCRIPTION,
9190
HOTFIX_STR,
9291
INSTANCE_TYPE_STR,
93-
INTEL,
9492
KMP_ENABLED_LABEL,
9593
KMP_VM_ASSIGNMENT_LABEL,
9694
KUBECONFIG,
@@ -105,15 +103,12 @@
105103
OVS_BRIDGE,
106104
POD_SECURITY_NAMESPACE_LABELS,
107105
PREFERENCE_STR,
108-
RHEL9_PREFERENCE,
109106
RHEL9_STR,
110-
RHEL_WITH_INSTANCETYPE_AND_PREFERENCE,
111107
RHSM_SECRET_NAME,
112108
SSP_CR_COMMON_TEMPLATES_LIST_KEY_NAME,
113109
TIMEOUT_3MIN,
114110
TIMEOUT_4MIN,
115111
TIMEOUT_5MIN,
116-
U1_SMALL,
117112
UNPRIVILEGED_PASSWORD,
118113
UNPRIVILEGED_USER,
119114
UTILITY,
@@ -196,9 +191,7 @@
196191
verify_boot_sources_reimported,
197192
)
198193
from utilities.virt import (
199-
VirtualMachineForCloning,
200194
VirtualMachineForTests,
201-
create_vm_cloning_job,
202195
fedora_vm_body,
203196
get_all_virt_pods_with_running_status,
204197
get_base_templates_list,
@@ -208,7 +201,6 @@
208201
kubernetes_taint_exists,
209202
running_vm,
210203
start_and_fetch_processid_on_linux_vm,
211-
target_vm_from_cloning_job,
212204
vm_instance_from_template,
213205
wait_for_kv_stabilize,
214206
wait_for_windows_vm,
@@ -742,11 +734,6 @@ def workers_type(workers_utility_pods, installing_cnv):
742734
return virtual
743735

744736

745-
@pytest.fixture(scope="session")
746-
def is_psi_cluster():
747-
return Infrastructure(name="cluster").instance.status.platform == "OpenStack"
748-
749-
750737
@pytest.fixture()
751738
def data_volume_multi_storage_scope_function(
752739
request,
@@ -1097,16 +1084,6 @@ def skip_access_mode_rwo_scope_class(storage_class_matrix__class__):
10971084
_skip_access_mode_rwo(storage_class_matrix=storage_class_matrix__class__)
10981085

10991086

1100-
@pytest.fixture(scope="session")
1101-
def nodes_cpu_vendor(schedulable_nodes):
1102-
if schedulable_nodes[0].labels.get(f"cpu-vendor.node.kubevirt.io/{AMD}"):
1103-
return AMD
1104-
elif schedulable_nodes[0].labels.get(f"cpu-vendor.node.kubevirt.io/{INTEL}"):
1105-
return INTEL
1106-
else:
1107-
return None
1108-
1109-
11101087
@pytest.fixture(scope="session")
11111088
def nodes_cpu_architecture(nodes):
11121089
return get_nodes_cpu_architecture(nodes=nodes)
@@ -2340,11 +2317,6 @@ def migration_policy_with_bandwidth_scope_class():
23402317
yield mp
23412318

23422319

2343-
@pytest.fixture(scope="session")
2344-
def gpu_nodes(nodes):
2345-
return get_nodes_with_label(nodes=nodes, label="nvidia.com/gpu.present")
2346-
2347-
23482320
@pytest.fixture(scope="session")
23492321
def worker_machine1(worker_node1):
23502322
machine = Machine(
@@ -2400,21 +2372,6 @@ def vm_for_test(request, namespace, unprivileged_client):
24002372
yield vm
24012373

24022374

2403-
@pytest.fixture(scope="class")
2404-
def rhel_vm_with_instancetype_and_preference_for_cloning(namespace, unprivileged_client):
2405-
with VirtualMachineForCloning(
2406-
name=RHEL_WITH_INSTANCETYPE_AND_PREFERENCE,
2407-
image=Images.Rhel.RHEL9_REGISTRY_GUEST_IMG,
2408-
namespace=namespace.name,
2409-
client=unprivileged_client,
2410-
vm_instance_type=VirtualMachineClusterInstancetype(name=U1_SMALL),
2411-
vm_preference=VirtualMachineClusterPreference(name=RHEL9_PREFERENCE),
2412-
os_flavor=OS_FLAVOR_RHEL,
2413-
) as vm:
2414-
running_vm(vm=vm)
2415-
yield vm
2416-
2417-
24182375
@pytest.fixture(scope="class")
24192376
def migrated_vm_multiple_times(request, vm_for_migration_test):
24202377
vmim = []
@@ -2468,25 +2425,6 @@ def hyperconverged_status_templates_scope_class(
24682425
return hyperconverged_resource_scope_class.instance.status.dataImportCronTemplates
24692426

24702427

2471-
@pytest.fixture()
2472-
def cloning_job_scope_function(request, unprivileged_client, namespace):
2473-
with create_vm_cloning_job(
2474-
name=f"clone-job-{request.param['source_name']}",
2475-
client=unprivileged_client,
2476-
namespace=namespace.name,
2477-
source_name=request.param["source_name"],
2478-
label_filters=request.param.get("label_filters"),
2479-
annotation_filters=request.param.get("annotation_filters"),
2480-
) as vmc:
2481-
yield vmc
2482-
2483-
2484-
@pytest.fixture()
2485-
def target_vm_scope_function(unprivileged_client, cloning_job_scope_function):
2486-
with target_vm_from_cloning_job(client=unprivileged_client, cloning_job=cloning_job_scope_function) as target_vm:
2487-
yield target_vm
2488-
2489-
24902428
@pytest.fixture(scope="module")
24912429
def snapshot_storage_class_name_scope_module(
24922430
storage_class_matrix_snapshot_matrix__module__,
@@ -2623,18 +2561,6 @@ def ssp_resource_scope_class(admin_client, hco_namespace):
26232561
return get_ssp_resource(admin_client=admin_client, namespace=hco_namespace)
26242562

26252563

2626-
@pytest.fixture(scope="session")
2627-
def skip_test_if_no_odf_cephfs_sc(cluster_storage_classes_names):
2628-
"""
2629-
Skip test if no odf cephfs storage class available
2630-
"""
2631-
if StorageClassNames.CEPHFS not in cluster_storage_classes_names:
2632-
pytest.skip(
2633-
f"Skipping test, {StorageClassNames.CEPHFS} storage class is not deployed,"
2634-
f"deployed storage classes: {cluster_storage_classes_names}"
2635-
)
2636-
2637-
26382564
@pytest.fixture(scope="session")
26392565
def sriov_unused_ifaces(sriov_ifaces):
26402566
"""

tests/virt/cluster/vm_cloning/conftest.py

Lines changed: 35 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,19 @@
11
import pytest
2+
from ocp_resources.virtual_machine_cluster_instancetype import (
3+
VirtualMachineClusterInstancetype,
4+
)
5+
from ocp_resources.virtual_machine_cluster_preference import (
6+
VirtualMachineClusterPreference,
7+
)
28

3-
from utilities.virt import VirtualMachineForCloning, fedora_vm_body, running_vm
9+
from utilities.constants import (
10+
OS_FLAVOR_RHEL,
11+
RHEL9_PREFERENCE,
12+
RHEL_WITH_INSTANCETYPE_AND_PREFERENCE,
13+
U1_SMALL,
14+
Images,
15+
)
16+
from utilities.virt import VirtualMachineForCloning, fedora_vm_body, running_vm, target_vm_from_cloning_job
417

518

619
@pytest.fixture(scope="class")
@@ -18,3 +31,24 @@ def fedora_vm_for_cloning(request, unprivileged_client, namespace, cpu_for_migra
1831
) as vm:
1932
running_vm(vm=vm, wait_for_cloud_init=True)
2033
yield vm
34+
35+
36+
@pytest.fixture(scope="class")
37+
def rhel_vm_with_instancetype_and_preference_for_cloning(namespace, unprivileged_client):
38+
with VirtualMachineForCloning(
39+
name=RHEL_WITH_INSTANCETYPE_AND_PREFERENCE,
40+
image=Images.Rhel.RHEL9_REGISTRY_GUEST_IMG,
41+
namespace=namespace.name,
42+
client=unprivileged_client,
43+
vm_instance_type=VirtualMachineClusterInstancetype(name=U1_SMALL),
44+
vm_preference=VirtualMachineClusterPreference(name=RHEL9_PREFERENCE),
45+
os_flavor=OS_FLAVOR_RHEL,
46+
) as vm:
47+
running_vm(vm=vm)
48+
yield vm
49+
50+
51+
@pytest.fixture()
52+
def target_vm_scope_function(unprivileged_client, cloning_job_scope_function):
53+
with target_vm_from_cloning_job(client=unprivileged_client, cloning_job=cloning_job_scope_function) as target_vm:
54+
yield target_vm

tests/virt/conftest.py

Lines changed: 10 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@
77
from bitmath import parse_string_unsafe
88
from ocp_resources.datavolume import DataVolume
99
from ocp_resources.deployment import Deployment
10+
from ocp_resources.infrastructure import Infrastructure
1011
from ocp_resources.performance_profile import PerformanceProfile
1112
from ocp_resources.storage_profile import StorageProfile
1213
from pytest_testconfig import py_config
@@ -30,7 +31,7 @@
3031
)
3132
from utilities.constants import AMD, INTEL, TIMEOUT_1MIN, TIMEOUT_5SEC, NamespacesNames
3233
from utilities.exceptions import UnsupportedGPUDeviceError
33-
from utilities.infra import ExecCommandOnPod, label_nodes
34+
from utilities.infra import ExecCommandOnPod, get_nodes_with_label, label_nodes
3435
from utilities.pytest_utils import exit_pytest_execution
3536
from utilities.virt import get_nodes_gpu_info, vm_instance_from_template
3637

@@ -42,7 +43,6 @@ def virt_special_infra_sanity(
4243
request,
4344
admin_client,
4445
junitxml_plugin,
45-
is_psi_cluster,
4646
schedulable_nodes,
4747
gpu_nodes,
4848
nodes_with_supported_gpus,
@@ -54,9 +54,9 @@ def virt_special_infra_sanity(
5454
):
5555
"""Performs verification that cluster has all required capabilities based on collected tests."""
5656

57-
def _verify_not_psi_cluster(_is_psi_cluster):
57+
def _verify_not_psi_cluster():
5858
LOGGER.info("Verifying tests run on BM cluster")
59-
if _is_psi_cluster:
59+
if Infrastructure(name="cluster").instance.status.platform == "OpenStack":
6060
failed_verifications_list.append("Cluster should be BM and not PSI")
6161

6262
def _verify_cpumanager_workers(_schedulable_nodes):
@@ -143,7 +143,7 @@ def _verify_if_1tb_memory_or_more_node(_memory_per_node):
143143
if not request.session.config.getoption(skip_virt_sanity_check):
144144
LOGGER.info("Verifying that cluster has all required capabilities for special_infra marked tests")
145145
if any(item.get_closest_marker("high_resource_vm") for item in request.session.items):
146-
_verify_not_psi_cluster(_is_psi_cluster=is_psi_cluster)
146+
_verify_not_psi_cluster()
147147
_verify_hw_virtualization(
148148
_schedulable_nodes=schedulable_nodes, _nodes_cpu_virt_extension=nodes_cpu_virt_extension
149149
)
@@ -389,3 +389,8 @@ def vm_for_test_from_template_scope_class(
389389
@pytest.fixture(scope="class")
390390
def hco_memory_overcommit_increased(hyperconverged_resource_scope_class):
391391
yield from update_hco_memory_overcommit(hco=hyperconverged_resource_scope_class, percentage=200)
392+
393+
394+
@pytest.fixture(scope="session")
395+
def gpu_nodes(nodes):
396+
return get_nodes_with_label(nodes=nodes, label="nvidia.com/gpu.present")

tests/virt/node/migration_and_maintenance/test_odf_vm_migration.py

Lines changed: 13 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,18 @@ def vm_with_common_cpu_model_scope_function(
2323
yield vm_from_template
2424

2525

26+
@pytest.fixture(scope="session")
27+
def xfail_if_no_odf_cephfs_sc(cluster_storage_classes_names):
28+
"""
29+
Xfail the test if no ODF CephFS storage class is available
30+
"""
31+
if StorageClassNames.CEPHFS not in cluster_storage_classes_names:
32+
pytest.xfail(
33+
f"Cannot execute test, {StorageClassNames.CEPHFS} storage class is not deployed,"
34+
f"deployed storage classes: {cluster_storage_classes_names}"
35+
)
36+
37+
2638
@pytest.mark.parametrize(
2739
"golden_image_data_source_for_test_scope_function,"
2840
"golden_image_data_volume_template_for_test_scope_function,"
@@ -37,7 +49,5 @@ def vm_with_common_cpu_model_scope_function(
3749
],
3850
indirect=True,
3951
)
40-
def test_vm_with_odf_cephfs_storage_class_migrates(
41-
skip_test_if_no_odf_cephfs_sc, vm_with_common_cpu_model_scope_function
42-
):
52+
def test_vm_with_odf_cephfs_storage_class_migrates(xfail_if_no_odf_cephfs_sc, vm_with_common_cpu_model_scope_function):
4353
migrate_vm_and_verify(vm=vm_with_common_cpu_model_scope_function)

0 commit comments

Comments
 (0)