
Commit ae35029

[VIRT] Move fixtures used exclusively by tests/virt
Move fixtures that are used exclusively by tests/virt from the main tests/conftest.py to tests/virt/conftest.py for better organization and proximity to their usage.
1 parent c6f0da0 commit ae35029
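
The reorganization relies on pytest's standard conftest.py scoping: fixtures defined in a conftest.py are only visible to tests collected in that directory and below, so anything moved into tests/virt/conftest.py can no longer be requested by tests outside tests/virt. A minimal sketch of that behavior, with hypothetical file and fixture names that are not part of this commit:

# tests/virt/conftest.py (illustrative only)
import pytest


@pytest.fixture(scope="session")
def virt_only_fixture():
    # Collected for tests under tests/virt/ only; a test in, say, tests/storage/
    # requesting this fixture would fail with a "fixture not found" error.
    return "virt"


# tests/virt/test_scoping_example.py (hypothetical)
def test_can_use_virt_only_fixture(virt_only_fixture):
    assert virt_only_fixture == "virt"

Keeping such fixtures next to their only consumers also shortens the import list of the top-level tests/conftest.py, as the first diff below shows.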

5 files changed: +124 -150 lines changed


tests/conftest.py

Lines changed: 1 addition & 144 deletions
@@ -71,12 +71,11 @@
 from timeout_sampler import TimeoutSampler
 
 import utilities.hco
-from tests.utils import download_and_extract_tar, update_cluster_cpu_model
+from tests.utils import download_and_extract_tar
 from utilities.artifactory import get_artifactory_header, get_http_image_url, get_test_artifact_server_url
 from utilities.bitwarden import get_cnv_tests_secret_by_name
 from utilities.constants import (
     AAQ_NAMESPACE_LABEL,
-    AMD,
     ARM_64,
     ARQ_QUOTA_HARD_SPEC,
     AUDIT_LOGS_PATH,
@@ -90,7 +89,6 @@
     HCO_SUBSCRIPTION,
     HOTFIX_STR,
     INSTANCE_TYPE_STR,
-    INTEL,
     KMP_ENABLED_LABEL,
     KMP_VM_ASSIGNMENT_LABEL,
     KUBECONFIG,
@@ -105,15 +103,12 @@
     OVS_BRIDGE,
     POD_SECURITY_NAMESPACE_LABELS,
     PREFERENCE_STR,
-    RHEL9_PREFERENCE,
     RHEL9_STR,
-    RHEL_WITH_INSTANCETYPE_AND_PREFERENCE,
     RHSM_SECRET_NAME,
     SSP_CR_COMMON_TEMPLATES_LIST_KEY_NAME,
     TIMEOUT_3MIN,
     TIMEOUT_4MIN,
     TIMEOUT_5MIN,
-    U1_SMALL,
     UNPRIVILEGED_PASSWORD,
     UNPRIVILEGED_USER,
     UTILITY,
@@ -196,9 +191,7 @@
     verify_boot_sources_reimported,
 )
 from utilities.virt import (
-    VirtualMachineForCloning,
     VirtualMachineForTests,
-    create_vm_cloning_job,
     fedora_vm_body,
     get_all_virt_pods_with_running_status,
     get_base_templates_list,
@@ -208,9 +201,7 @@
     kubernetes_taint_exists,
     running_vm,
     start_and_fetch_processid_on_linux_vm,
-    target_vm_from_cloning_job,
     vm_instance_from_template,
-    wait_for_kv_stabilize,
     wait_for_windows_vm,
 )
 
@@ -742,11 +733,6 @@ def workers_type(workers_utility_pods, installing_cnv):
     return virtual
 
 
-@pytest.fixture(scope="session")
-def is_psi_cluster():
-    return Infrastructure(name="cluster").instance.status.platform == "OpenStack"
-
-
 @pytest.fixture()
 def data_volume_multi_storage_scope_function(
     request,
@@ -1097,16 +1083,6 @@ def skip_access_mode_rwo_scope_class(storage_class_matrix__class__):
     _skip_access_mode_rwo(storage_class_matrix=storage_class_matrix__class__)
 
 
-@pytest.fixture(scope="session")
-def nodes_cpu_vendor(schedulable_nodes):
-    if schedulable_nodes[0].labels.get(f"cpu-vendor.node.kubevirt.io/{AMD}"):
-        return AMD
-    elif schedulable_nodes[0].labels.get(f"cpu-vendor.node.kubevirt.io/{INTEL}"):
-        return INTEL
-    else:
-        return None
-
-
 @pytest.fixture(scope="session")
 def nodes_cpu_architecture(nodes):
     return get_nodes_cpu_architecture(nodes=nodes)
@@ -2340,11 +2316,6 @@ def migration_policy_with_bandwidth_scope_class():
         yield mp
 
 
-@pytest.fixture(scope="session")
-def gpu_nodes(nodes):
-    return get_nodes_with_label(nodes=nodes, label="nvidia.com/gpu.present")
-
-
 @pytest.fixture(scope="session")
 def worker_machine1(worker_node1):
     machine = Machine(
@@ -2400,21 +2371,6 @@ def vm_for_test(request, namespace, unprivileged_client):
         yield vm
 
 
-@pytest.fixture(scope="class")
-def rhel_vm_with_instancetype_and_preference_for_cloning(namespace, unprivileged_client):
-    with VirtualMachineForCloning(
-        name=RHEL_WITH_INSTANCETYPE_AND_PREFERENCE,
-        image=Images.Rhel.RHEL9_REGISTRY_GUEST_IMG,
-        namespace=namespace.name,
-        client=unprivileged_client,
-        vm_instance_type=VirtualMachineClusterInstancetype(name=U1_SMALL),
-        vm_preference=VirtualMachineClusterPreference(name=RHEL9_PREFERENCE),
-        os_flavor=OS_FLAVOR_RHEL,
-    ) as vm:
-        running_vm(vm=vm)
-        yield vm
-
-
 @pytest.fixture(scope="class")
 def migrated_vm_multiple_times(request, vm_for_migration_test):
     vmim = []
@@ -2468,25 +2424,6 @@ def hyperconverged_status_templates_scope_class(
     return hyperconverged_resource_scope_class.instance.status.dataImportCronTemplates
 
 
-@pytest.fixture()
-def cloning_job_scope_function(request, unprivileged_client, namespace):
-    with create_vm_cloning_job(
-        name=f"clone-job-{request.param['source_name']}",
-        client=unprivileged_client,
-        namespace=namespace.name,
-        source_name=request.param["source_name"],
-        label_filters=request.param.get("label_filters"),
-        annotation_filters=request.param.get("annotation_filters"),
-    ) as vmc:
-        yield vmc
-
-
-@pytest.fixture()
-def target_vm_scope_function(unprivileged_client, cloning_job_scope_function):
-    with target_vm_from_cloning_job(client=unprivileged_client, cloning_job=cloning_job_scope_function) as target_vm:
-        yield target_vm
-
-
 @pytest.fixture(scope="module")
 def snapshot_storage_class_name_scope_module(
     storage_class_matrix_snapshot_matrix__module__,
@@ -2623,18 +2560,6 @@ def ssp_resource_scope_class(admin_client, hco_namespace):
     return get_ssp_resource(admin_client=admin_client, namespace=hco_namespace)
 
 
-@pytest.fixture(scope="session")
-def skip_test_if_no_odf_cephfs_sc(cluster_storage_classes_names):
-    """
-    Skip test if no odf cephfs storage class available
-    """
-    if StorageClassNames.CEPHFS not in cluster_storage_classes_names:
-        pytest.skip(
-            f"Skipping test, {StorageClassNames.CEPHFS} storage class is not deployed,"
-            f"deployed storage classes: {cluster_storage_classes_names}"
-        )
-
-
 @pytest.fixture(scope="session")
 def sriov_unused_ifaces(sriov_ifaces):
     """
@@ -2665,74 +2590,6 @@ def skip_on_aws_cluster(is_aws_cluster):
         pytest.skip("This test is skipped on an AWS cluster")
 
 
-@pytest.fixture()
-def cluster_cpu_model_scope_function(
-    admin_client,
-    hco_namespace,
-    hyperconverged_resource_scope_function,
-    cluster_common_node_cpu,
-):
-    with update_cluster_cpu_model(
-        admin_client=admin_client,
-        hco_namespace=hco_namespace,
-        hco_resource=hyperconverged_resource_scope_function,
-        cpu_model=cluster_common_node_cpu,
-    ):
-        yield
-    wait_for_kv_stabilize(admin_client=admin_client, hco_namespace=hco_namespace)
-
-
-@pytest.fixture(scope="module")
-def cluster_cpu_model_scope_module(
-    admin_client,
-    hco_namespace,
-    hyperconverged_resource_scope_module,
-    cluster_common_node_cpu,
-):
-    with update_cluster_cpu_model(
-        admin_client=admin_client,
-        hco_namespace=hco_namespace,
-        hco_resource=hyperconverged_resource_scope_module,
-        cpu_model=cluster_common_node_cpu,
-    ):
-        yield
-    wait_for_kv_stabilize(admin_client=admin_client, hco_namespace=hco_namespace)
-
-
-@pytest.fixture(scope="class")
-def cluster_cpu_model_scope_class(
-    admin_client,
-    hco_namespace,
-    hyperconverged_resource_scope_class,
-    cluster_common_node_cpu,
-):
-    with update_cluster_cpu_model(
-        admin_client=admin_client,
-        hco_namespace=hco_namespace,
-        hco_resource=hyperconverged_resource_scope_class,
-        cpu_model=cluster_common_node_cpu,
-    ):
-        yield
-    wait_for_kv_stabilize(admin_client=admin_client, hco_namespace=hco_namespace)
-
-
-@pytest.fixture(scope="class")
-def cluster_modern_cpu_model_scope_class(
-    admin_client,
-    hco_namespace,
-    hyperconverged_resource_scope_class,
-    cluster_common_modern_node_cpu,
-):
-    with update_cluster_cpu_model(
-        admin_client=admin_client,
-        hco_namespace=hco_namespace,
-        hco_resource=hyperconverged_resource_scope_class,
-        cpu_model=cluster_common_modern_node_cpu,
-    ):
-        yield
-    wait_for_kv_stabilize(admin_client=admin_client, hco_namespace=hco_namespace)
-
-
 @pytest.fixture(scope="module")
 def machine_type_from_kubevirt_config(kubevirt_config_scope_module, nodes_cpu_architecture):
     """Extract machine type default from kubevirt CR."""

tests/virt/cluster/vm_cloning/conftest.py

Lines changed: 35 additions & 1 deletion
@@ -1,6 +1,19 @@
 import pytest
+from ocp_resources.virtual_machine_cluster_instancetype import (
+    VirtualMachineClusterInstancetype,
+)
+from ocp_resources.virtual_machine_cluster_preference import (
+    VirtualMachineClusterPreference,
+)
 
-from utilities.virt import VirtualMachineForCloning, fedora_vm_body, running_vm
+from utilities.constants import (
+    OS_FLAVOR_RHEL,
+    RHEL9_PREFERENCE,
+    RHEL_WITH_INSTANCETYPE_AND_PREFERENCE,
+    U1_SMALL,
+    Images,
+)
+from utilities.virt import VirtualMachineForCloning, fedora_vm_body, running_vm, target_vm_from_cloning_job
 
 
 @pytest.fixture(scope="class")
@@ -18,3 +31,24 @@ def fedora_vm_for_cloning(request, unprivileged_client, namespace, cpu_for_migra
     ) as vm:
         running_vm(vm=vm, wait_for_cloud_init=True)
         yield vm
+
+
+@pytest.fixture(scope="class")
+def rhel_vm_with_instancetype_and_preference_for_cloning(namespace, unprivileged_client):
+    with VirtualMachineForCloning(
+        name=RHEL_WITH_INSTANCETYPE_AND_PREFERENCE,
+        image=Images.Rhel.RHEL9_REGISTRY_GUEST_IMG,
+        namespace=namespace.name,
+        client=unprivileged_client,
+        vm_instance_type=VirtualMachineClusterInstancetype(name=U1_SMALL),
+        vm_preference=VirtualMachineClusterPreference(name=RHEL9_PREFERENCE),
+        os_flavor=OS_FLAVOR_RHEL,
+    ) as vm:
+        running_vm(vm=vm)
+        yield vm
+
+
+@pytest.fixture()
+def target_vm_scope_function(unprivileged_client, cloning_job_scope_function):
+    with target_vm_from_cloning_job(client=unprivileged_client, cloning_job=cloning_job_scope_function) as target_vm:
+        yield target_vm
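
With the cloning fixtures defined beside the tests that use them, a test module under tests/virt/cluster/vm_cloning/ can request them directly from this conftest.py. A hypothetical usage sketch (the test file, class, and assertion below are illustrative and not part of this commit):

# tests/virt/cluster/vm_cloning/test_usage_example.py (hypothetical)
class TestRhelCloningSource:
    def test_source_vm_is_created(self, rhel_vm_with_instancetype_and_preference_for_cloning):
        # The class-scoped fixture yields a running RHEL VM built from the
        # U1_SMALL cluster instance type and the RHEL9 cluster preference.
        assert rhel_vm_with_instancetype_and_preference_for_cloning.name

Note that target_vm_scope_function still depends on cloning_job_scope_function, which was removed from tests/conftest.py but is not visible in the hunks shown here; presumably it now lives in one of the two changed files not displayed above.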

tests/virt/conftest.py

Lines changed: 48 additions & 2 deletions
@@ -7,11 +7,13 @@
 from bitmath import parse_string_unsafe
 from ocp_resources.datavolume import DataVolume
 from ocp_resources.deployment import Deployment
+from ocp_resources.infrastructure import Infrastructure
 from ocp_resources.performance_profile import PerformanceProfile
 from ocp_resources.storage_profile import StorageProfile
 from pytest_testconfig import py_config
 from timeout_sampler import TimeoutExpiredError, TimeoutSampler
 
+from tests.utils import update_cluster_cpu_model
 from tests.virt.node.gpu.constants import (
     GPU_CARDS_MAP,
     NVIDIA_VGPU_MANAGER_DS,
@@ -30,9 +32,9 @@
 )
 from utilities.constants import AMD, INTEL, TIMEOUT_1MIN, TIMEOUT_5SEC, NamespacesNames
 from utilities.exceptions import UnsupportedGPUDeviceError
-from utilities.infra import ExecCommandOnPod, label_nodes
+from utilities.infra import ExecCommandOnPod, get_nodes_with_label, label_nodes
 from utilities.pytest_utils import exit_pytest_execution
-from utilities.virt import get_nodes_gpu_info, vm_instance_from_template
+from utilities.virt import get_nodes_gpu_info, vm_instance_from_template, wait_for_kv_stabilize
 
 LOGGER = logging.getLogger(__name__)
 
@@ -389,3 +391,47 @@ def vm_for_test_from_template_scope_class(
 @pytest.fixture(scope="class")
 def hco_memory_overcommit_increased(hyperconverged_resource_scope_class):
     yield from update_hco_memory_overcommit(hco=hyperconverged_resource_scope_class, percentage=200)
+
+
+@pytest.fixture(scope="class")
+def cluster_cpu_model_scope_class(
+    admin_client,
+    hco_namespace,
+    hyperconverged_resource_scope_class,
+    cluster_common_node_cpu,
+):
+    with update_cluster_cpu_model(
+        admin_client=admin_client,
+        hco_namespace=hco_namespace,
+        hco_resource=hyperconverged_resource_scope_class,
+        cpu_model=cluster_common_node_cpu,
+    ):
+        yield
+    wait_for_kv_stabilize(admin_client=admin_client, hco_namespace=hco_namespace)
+
+
+@pytest.fixture(scope="module")
+def cluster_cpu_model_scope_module(
+    admin_client,
+    hco_namespace,
+    hyperconverged_resource_scope_module,
+    cluster_common_node_cpu,
+):
+    with update_cluster_cpu_model(
+        admin_client=admin_client,
+        hco_namespace=hco_namespace,
+        hco_resource=hyperconverged_resource_scope_module,
+        cpu_model=cluster_common_node_cpu,
+    ):
+        yield
+    wait_for_kv_stabilize(admin_client=admin_client, hco_namespace=hco_namespace)
+
+
+@pytest.fixture(scope="session")
+def gpu_nodes(nodes):
+    return get_nodes_with_label(nodes=nodes, label="nvidia.com/gpu.present")
+
+
+@pytest.fixture(scope="session")
+def is_psi_cluster():
+    return Infrastructure(name="cluster").instance.status.platform == "OpenStack"
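
The relocated cluster_cpu_model fixtures are the usual opt-in kind: a test class under tests/virt/ pulls one in with a usefixtures marker, runs with a common cluster CPU model applied to the HCO resource, and (since update_cluster_cpu_model is used as a context manager) presumably has the change reverted afterwards, with wait_for_kv_stabilize letting KubeVirt settle. A hypothetical sketch, assuming a test class that is not part of this commit:

# tests/virt/cluster/test_cpu_model_example.py (hypothetical)
import pytest


@pytest.mark.usefixtures("cluster_cpu_model_scope_class")
class TestWithCommonClusterCpuModel:
    def test_placeholder(self):
        # While this class runs, update_cluster_cpu_model has patched the HCO
        # resource with cluster_common_node_cpu; on teardown the fixture exits
        # the context manager and then calls wait_for_kv_stabilize.
        assert True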
