From 124ffc0c2c8363463d9059397c17e7cc0d4efdc2 Mon Sep 17 00:00:00 2001 From: Vitaly Kruglikov Date: Fri, 6 Sep 2024 16:18:11 -0400 Subject: [PATCH 01/52] Introducing contributed code for isoscope scheduling. --- src/xdist/iso_scheduling_plugin.py | 596 +++++++++++++ src/xdist/iso_scheduling_utils.py | 310 +++++++ src/xdist/scheduler/isoscope.py | 1338 ++++++++++++++++++++++++++++ 3 files changed, 2244 insertions(+) create mode 100644 src/xdist/iso_scheduling_plugin.py create mode 100644 src/xdist/iso_scheduling_utils.py create mode 100644 src/xdist/scheduler/isoscope.py diff --git a/src/xdist/iso_scheduling_plugin.py b/src/xdist/iso_scheduling_plugin.py new file mode 100644 index 00000000..8243199b --- /dev/null +++ b/src/xdist/iso_scheduling_plugin.py @@ -0,0 +1,596 @@ +# This code was contributed to pytest-xdist by Akamai Technologies Inc. +# Copyright 2024 Akamai Technologies, Inc. +# Developed by Vitaly Kruglikov at Akamai Technologies, Inc. +# +# MIT License +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +"""Pytest Fixtures for supporting the PARALLEL_MONO_SCOPE Test Distribution Mode. + +NOTE: These fixtures are NOT compatible with any other Test Distribution Modes. + +NOTE: DO NOT IMPORT this module. It needs to be loaded via pytest's +`conftest.pytest_plugins` mechanism. Pytest doc discourages importing fixtures +directly from other modules - see +https://docs.pytest.org/en/7.1.x/how-to/fixtures.html: +> "Sometimes users will import fixtures from other projects for use, however this +is not recommended: importing fixtures into a module will register them in +pytest as defined in that module". +""" +from __future__ import annotations + +import contextlib +import functools +import logging +import json +import pathlib +from typing import TYPE_CHECKING + +import filelock +import pytest + +from utils.common.parallel_mono_scope_utils import ( + ParallelMonoScopeFixture, + DistributedSetupCoordinator, + DistributedSetupContext, + DistributedTeardownContext, + CoordinationTimeoutError +) + +if TYPE_CHECKING: + from collections.abc import Callable, Generator + from typing import Optional + + +_LOGGER = logging.getLogger(__name__) + + +@pytest.fixture(scope='session') +def parallel_mono_scope( + tmp_path_factory: pytest.TempPathFactory, + testrun_uid: str, + worker_id: str + ) -> ParallelMonoScopeFixture: + """A session-scoped pytest fixture for coordinating setup/teardown of test + scope/class which is executing in the parallel_mono_scope Test Distribution + Mode. 
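+
+    A minimal sketch of making this fixture available by loading the plugin
+    from a `conftest.py` (the exact module path is an assumption based on
+    where this file lives in the source tree):
+
+    ```
+    # conftest.py at the test-run root
+    pytest_plugins = ['xdist.iso_scheduling_plugin']
+    ```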
+ + NOTE: Each XDist remote worker is running its own Pytest Session. + + USAGE EXAMPLE: + + ``` + from __future__ import annotations + from typing import TYPE_CHECKING + import pytest + + if TYPE_CHECKING: + from utils.common.parallel_mono_scope_utils import ( + ParallelMonoScopeFixture, + DistributedSetupContext, + DistributedTeardownContext + ) + + @pytest.mark.parallel_mono_scope + class TestDeng12345ParallelMonoScope: + + @classmethod + @pytest.fixture(scope='class', autouse=True) + def distributed_setup_and_teardown( + cls, + parallel_mono_scope: ParallelMonoScopeFixture: + request: pytest.FixtureRequest): + + # Distributed Setup and Teardown + with parallel_mono_scope.coordinate_setup_teardown( + setup_request=request) as coordinator: + # Distributed Setup + coordinator.maybe_call_setup(cls.patch_system_under_test) + + try: + # Yield control back to the XDist Worker to allow the + # test cases to run + yield + finally: + # Distributed Teardown + coordinator.maybe_call_teardown(cls.revert_system_under_test) + + @classmethod + def patch_system_under_test( + cls, + setup_context: DistributedSetupContext) -> None: + # Initialize the System Under Test for all the test cases in + # this test class and store state in `setup_context.client_dir`. + + @classmethod + def revert_system_under_test( + cls, + teardown_context: DistributedTeardownContext) + # Fetch state from `teardown_context.client_dir` and revert + # changes made by `patch_system_under_test()`. + + perms, tc_ids = generate_tests( + os.path.realpath(__file__), + TestDistributionModeEnum.PARALLEL_MONO_SCOPE) + + @pytest.mark.parametrize('test_data', perms, ids=tc_ids) + def test_case(self, test_data: dict[str, dict]) + ... + ``` + + :param tmp_path_factory: (pytest fixture) interface for temporary + directories and files. + :param testrun_uid: (pytest-xdist fixture) Unique id of the current test + run. This value is common to all XDist worker Pytest Sessions in the + current test run. + :param worker_id: (pytest-xdist fixture) Remote XDist worker ID which is + executing this Pytest Session. + :return: A callable that takes no args and returns a context manager which + yields an instance of `DistributedSetupCoordinator` for the current + Pytest Session. + """ + return _ParallelMonoScopeFixtureImpl(tmp_path_factory=tmp_path_factory, + testrun_uid=testrun_uid, + worker_id=worker_id) + + +class _ParallelMonoScopeFixtureImpl(ParallelMonoScopeFixture): + """Context manager yielding a new instance of the implementation of the + `DistributedSetupCoordinator` interface. + + An instance of _ParallelMonoScopeFixtureImpl is returned by our pytest + fixture `parallel_mono_scope`. + """ + # pylint: disable=too-few-public-methods + + def __init__(self, + tmp_path_factory: pytest.TempPathFactory, + testrun_uid: str, + worker_id: str): + """ + :param tmp_path_factory: pytest interface for temporary directories. + :param testrun_uid: Unique id of the current test run. This value is + common to all XDist worker Pytest Sessions in the current test run. + :param worker_id: Remote XDist worker ID which is executing this Pytest + Session. NOTE: Each XDist remote worker is running its own Pytest + Session for the subset of test cases assigned to it. 
+ """ + self._tmp_path_factory = tmp_path_factory + self._testrun_uid = testrun_uid + self._worker_id = worker_id + + @contextlib.contextmanager + def coordinate_setup_teardown( + self, + setup_request: pytest.FixtureRequest + ) -> Generator[DistributedSetupCoordinator, None, None]: + """Context manager that yields an instance of + `DistributedSetupCoordinator` for distributed coordination of Setup + and Teardown. + + NOTE: In python3.9 and later, a more appropriate return type would be + `contextlib.AbstractContextManager[DistributedSetupCoordinator]`. + + :param setup_request: Value of the pytest `request` fixture obtained + directly by the calling setup-teardown fixture. + """ + # __enter__ + coordinator = _DistributedSetupCoordinatorImpl( + setup_request=setup_request, + tmp_path_factory=self._tmp_path_factory, + testrun_uid=self._testrun_uid, + worker_id=self._worker_id) + + # Yield control to the managed code block + yield coordinator + + # __exit__ + # We can do some cleanup or validation here, but nothing for now + + +class _DistributedSetupCoordinatorImpl(DistributedSetupCoordinator): + """Distributed scope/class setup/teardown coordination for the + `parallel_mono_scope` Test Distribution Mode. + + NOTE: do not instantiate this class directly. Use the + `parallel_mono_scope` fixture instead! + + """ + _DISTRIBUTED_SETUP_ROOT_DIR_LINK_NAME = 'distributed_setup' + + def __init__(self, + setup_request: pytest.FixtureRequest, + tmp_path_factory: pytest.TempPathFactory, + testrun_uid: str, + worker_id: str): + """ + :param setup_request: Value of the pytest `request` fixture obtained + directly by the calling setup-teardown fixture. + :param tmp_path_factory: Pytest interface for temporary directories and + files. + :param testrun_uid: Unique id of the current test run. + This is common to all XDist worker Pytest Sessions in the + current test run. NOTE: each XDist worker is running its own Pytest + Session. + :param worker_id: Remote XDist worker ID which is executing this Pytest + Session. + """ + self._setup_request: pytest.FixtureRequest = setup_request + + # NOTE: `tmp_path_factory.getbasetemp()` returns worker-specific temp + # directory. `tmp_path_factory.getbasetemp().parent` is common to all + # workers in the current PyTest test run. + self._root_context_base_dir: pathlib.Path = ( + tmp_path_factory.getbasetemp().parent + / self._DISTRIBUTED_SETUP_ROOT_DIR_LINK_NAME + / testrun_uid) + + self._worker_id: str = worker_id + + self._setup_context: Optional[DistributedSetupContext] = None + self._teardown_context: Optional[DistributedTeardownContext] = None + + def maybe_call_setup( + self, + setup_callback: Callable[[DistributedSetupContext], None], + timeout: float = DistributedSetupCoordinator.DEFAULT_TIMEOUT_SEC + ) -> None: + """Invoke the Setup callback only if distributed setup has not been + performed yet from any other XDist worker for your test scope. + Process-safe. + + Call `maybe_call_setup` from the pytest setup-teardown fixture of your + `PARALLEL_MONO_SCOPE` test (typically test class) if it needs to + initialize a resource which is common to all of its test cases which may + be executing in different XDist worker processes (such as a subnet in + `subnet.xml`). + + `maybe_call_setup` MUST ALWAYS be called in conjunction with + `maybe_call_teardown`. + + :param setup_callback: Callback for performing Setup that is common to + the pytest scope from which `maybe_call_setup` is invoked. 
+ :param timeout: Lock acquisition timeout in seconds + + :return: An instance of `DistributedSetupContext` which MUST be passed + in the corresponding call to `maybe_call_teardown`. + + :raise parallel_mono_scope.CoordinationTimeoutError: If attempt to + acquire the lock times out. + """ + # `maybe_call_setup()` may be called only once per instance of + # `_SetupCoordinator` + assert self._setup_context is None, \ + f'maybe_call_setup()` already called {self._setup_context=}' + + node_path = self._setup_request.node.path + + root_context_dir: pathlib.Path = ( + self._root_context_base_dir + / node_path.relative_to(node_path.root) + / self._setup_request.node.name + ) + + with _DistributedSetupCoordinationImpl.acquire_distributed_setup( + root_context_dir=root_context_dir, + worker_id=self._worker_id, + setup_request=self._setup_request, + timeout=timeout) as setup_context: + self._setup_context = setup_context + if self._setup_context.distributed_setup_allowed: + setup_callback(self._setup_context) + + def maybe_call_teardown( + self, + teardown_callback: Callable[[DistributedTeardownContext], None], + timeout: float = DistributedSetupCoordinator.DEFAULT_TIMEOUT_SEC + ) -> None: + """Invoke the Teardown callback only in when called in the context of + the final XDist Worker process to have finished the execution of the + tests for your test scope. Process-safe. + + Call `maybe_call_teardown` from the pytest setup-teardown fixture of + your `PARALLEL_MONO_SCOPE` test (typically test class) if it needs to + initialize a resource which is common to all of its test cases which may + be executing in different XDist worker processes (such as a subnet in + `subnet.xml`). + + NOTE: `maybe_call_teardown` MUST ALWAYS be called in conjunction with + `maybe_call_setup`. + + :param teardown_callback: Callback for performing Teardown that is + common to the pytest scope from which `maybe_call_teardown` is + invoked. + :param timeout: Lock acquisition timeout in seconds + + :raise parallel_mono_scope.CoordinationTimeoutError: If attempt to + acquire the lock times out. + """ + # Make sure `maybe_call_setup()` was already called on this instance + # of `_SetupCoordinator` + assert self._setup_context is not None, \ + f'maybe_call_setup() not called yet {self._setup_context=}' + + # Make sure `maybe_call_teardown()` hasn't been called on this instance + # of `_SetupCoordinator` yet + assert self._teardown_context is None, \ + f'maybe_call_teardown() already called {self._teardown_context=}' + + with _DistributedSetupCoordinationImpl.acquire_distributed_teardown( + setup_context=self._setup_context, + timeout=timeout) as teardown_context: + self._teardown_context = teardown_context + if self._teardown_context.distributed_teardown_allowed: + teardown_callback(self._teardown_context) + + +def _map_file_lock_exception(f: Callable): + """Decorator: map `FileLock` exceptions of interest to our own exceptions. + """ + @functools.wraps(f) + def wrapper(*args, **kwargs): + try: + return f(*args, **kwargs) + except filelock.Timeout as err: + raise CoordinationTimeoutError( + f'Another instance of this test scope/class is holding the ' + f'lock too long or timeout value is too short: {err}') \ + from err + + return wrapper + + +class _DistributedSetupCoordinationImpl: + """Low-level implementation of Context Managers for Coordinating + Distributed Setup and Teardown for the `parallel_mono_scope` + Test Distribution Mode. 
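+
+    Coordination state lives on a filesystem shared by all XDist workers and
+    is guarded by a `filelock.FileLock`. A rough sketch of the per-scope
+    layout and the state round-trip implemented below (paths are
+    illustrative; `state_file_path` stands for any `pathlib.Path`):
+
+    ```
+    # <root_context_dir>/lock             <- lock file serializing access
+    # <root_context_dir>/root_state.json  <- {"setup_count": N, "teardown_count": M}
+    state = _DistributedSetupCoordinationImpl.DistributedState(
+        setup_count=1, teardown_count=0)
+    state.save_to_file_path(state_file_path)
+    assert state.as_json_kwargs_dict == {'setup_count': 1,
+                                         'teardown_count': 0}
+    ```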
+ """ + _ROOT_STATE_FILE_NAME = 'root_state.json' + _ROOT_LOCK_FILE_NAME = 'lock' + + class DistributedState: + """State of the Distributed Setup-Teardown Coordination. + """ + def __init__(self, setup_count, teardown_count): + self.setup_count = setup_count + self.teardown_count = teardown_count + + def __repr__(self): + return f'<{self.__class__.__qualname__}: ' \ + f'setup_count={self.setup_count}; ' \ + f'teardown_count={self.teardown_count}>' + + @classmethod + def load_from_file_path( + cls, + state_file_path: pathlib.Path + ) -> _DistributedSetupCoordinationImpl.DistributedState: + """Load the state instance from the given file path. + + :param state_file_path: + :return: Instance of the state constructed from the contents of the + given file. + """ + return cls(**json.loads(state_file_path.read_text())) + + @property + def as_json_kwargs_dict(self) -> dict: + """ + :return: JSON-compatible representation of the instance that is also + suitable for constructing the instance after fetching from file. + as in the following example: + + ``` + state_kwargs = json.load(open(state_file_path)) + DistributedState(**state_kwargs) + ``` + """ + return { + 'setup_count': self.setup_count, + 'teardown_count': self.teardown_count + } + + def save_to_file_path(self, state_file_path: pathlib.Path): + """Save this state instance to the given file path. + + :param state_file_path: + :return: + """ + state_file_path.write_text(json.dumps(self.as_json_kwargs_dict)) + + @classmethod + @contextlib.contextmanager + @_map_file_lock_exception + def acquire_distributed_setup( + cls, + root_context_dir: pathlib.Path, + worker_id: str, + setup_request: pytest.FixtureRequest, + timeout: float + ) -> Generator[DistributedSetupContext, None, None]: + """Low-level implementation of Context Manager for Coordinating + Distributed Setup for the `parallel_mono_scope` Test Distribution Mode. + + :param root_context_dir: Scope/class-specific root directory for + saving this context manager's state. This directory is common to + all xdist workers for the given test scope/class. + :param worker_id: XDist worker ID for logging. + :param setup_request: Value of the pytest `request` fixture obtained + directly by the calling setup-teardown fixture. + :param timeout: Lock acquisition timeout in seconds + + :raise parallel_mono_scope.CoordinationTimeoutError: If attempt to + acquire the lock times out. 
+ """ + # + # Before control passes to the managed code block + # + setup_context = DistributedSetupContext( + setup_allowed=False, + root_context_dir=root_context_dir, + worker_id=worker_id, + setup_request=setup_request) + + state_file_path = cls._get_root_state_file_path(root_context_dir) + + # Acquire resource + with filelock.FileLock( + str(cls._get_root_lock_file_path(root_context_dir)), + timeout=timeout): + if state_file_path.is_file(): + state = cls.DistributedState.load_from_file_path( + state_file_path) + # We never save state with setup_count <= 0 + assert state.setup_count > 0, \ + f'acquire_distributed_setup: non-positive setup ' \ + f'count read from state file - {state_file_path=}; ' \ + f'{worker_id=}; {state}' + # No Teardowns should be executing before all Setups + # complete + assert state.teardown_count == 0, \ + f'acquire_distributed_setup: non-zero teardown ' \ + f'count read from state file - {state_file_path=}; ' \ + f'{worker_id=}; {state}' + else: + # State file not created yet + state = cls.DistributedState(setup_count=0, + teardown_count=0) + + state.setup_count += 1 + + setup_context.distributed_setup_allowed = state.setup_count == 1 + + # + # Yield control to the managed code block + # + _LOGGER.info( # pylint: disable=logging-fstring-interpolation + f'acquire_distributed_setup: yielding control to ' + f'managed block - {worker_id=}; {setup_context=}') + yield setup_context + + # + # Control returns from the managed code block, unless control + # left managed code with an exception + # + + # Save state to file + state.save_to_file_path(state_file_path) + + @classmethod + @contextlib.contextmanager + @_map_file_lock_exception + def acquire_distributed_teardown( + cls, + setup_context: DistributedSetupContext, + timeout: float + ) -> Generator[DistributedTeardownContext, None, None]: + """Low-level implementation of Context Manager for Coordinating + Distributed Teardown for the `parallel_mono_scope` Test Distribution + Mode. + + :param setup_context: The instance of `DistributedSetupContext` that was + yielded by the corresponding use of the + `_distributed_setup_permission` context manager. + :param timeout: Lock acquisition timeout in seconds + + :raise parallel_mono_scope.CoordinationTimeoutError: If attempt to + acquire the lock times out. 
+ """ + # + # Before control passes to the managed code block + # + teardown_context = DistributedTeardownContext( + teardown_allowed=False, + setup_context=setup_context) + + # NOTE: Friend-of-class protected member access + root_context_dir = teardown_context._root_context_dir # pylint: disable=protected-access + + worker_id = teardown_context.worker_id + + state_file_path = cls._get_root_state_file_path(root_context_dir) + + # Acquire resource + with filelock.FileLock( + str(cls._get_root_lock_file_path(root_context_dir)), + timeout=timeout): + if state_file_path.is_file(): + state = cls.DistributedState.load_from_file_path( + state_file_path) + assert state.setup_count > 0, ( + f'acquire_distributed_teardown: non-positive ' + f'setup_count read from state file - {state_file_path=}; ' + f'{worker_id=}; {state.setup_count=} <= 0; {state}') + assert state.teardown_count < state.setup_count, ( + f'acquire_distributed_teardown: teardown_count ' + f'already >= setup_count read from state file - ' + f'{state_file_path=}; {worker_id=}; ' + f'{state.teardown_count=} >= {state.setup_count=}') + else: + raise RuntimeError( + f'acquire_distributed_teardown: state file not found: ' + f'{state_file_path=}; {worker_id=}') + + state.teardown_count += 1 + + teardown_context.distributed_teardown_allowed = ( + state.teardown_count == state.setup_count) + + # + # Yield control to the managed code block + # + _LOGGER.info( # pylint: disable=logging-fstring-interpolation + f'acquire_distributed_teardown: yielding control to ' + f'managed block - {worker_id=}; {teardown_context=}') + yield teardown_context + + # + # Control returns from the managed code block, unless control left + # managed code with an exception + # + + # Save state to file + state.save_to_file_path(state_file_path) + + @classmethod + def _get_root_state_file_path( + cls, + root_state_dir: pathlib.Path) -> pathlib.Path: + """Return the path of the file for storing the root state, creating all + parent directories if they don't exist yet. + + :param root_state_dir: Directory where root state should be stored. + :return: The file path of the root state. + """ + root_state_dir.mkdir(parents=True, exist_ok=True) + return root_state_dir / cls._ROOT_STATE_FILE_NAME + + @classmethod + def _get_root_lock_file_path( + cls, + root_lock_dir: pathlib.Path) -> pathlib.Path: + """Return the path of the lock file, creating all parent directories if + they don't exist yet. + + :param root_lock_dir: Directory where lock file should be stored. + :return: The file path of the lock file. + """ + root_lock_dir.mkdir(parents=True, exist_ok=True) + return root_lock_dir / cls._ROOT_LOCK_FILE_NAME diff --git a/src/xdist/iso_scheduling_utils.py b/src/xdist/iso_scheduling_utils.py new file mode 100644 index 00000000..86c65d79 --- /dev/null +++ b/src/xdist/iso_scheduling_utils.py @@ -0,0 +1,310 @@ +# This code was contributed to pytest-xdist by Akamai Technologies Inc. +# Copyright 2024 Akamai Technologies, Inc. +# Developed by Vitaly Kruglikov at Akamai Technologies, Inc. 
+# +# MIT License +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +"""Utilities for supporting the `PARALLEL_MONO_SCOPE` Test Distribution Mode. + +NOTE: These utilities are NOT compatible with any other Test Distribution Modes. + +See also `plugins_common/parallel_mono_scope_plugin.py` for fixtures specific to +the `PARALLEL_MONO_SCOPE` Test Distribution Mode. +""" +from __future__ import annotations + +import abc +import pathlib +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from collections.abc import Callable, Generator + import pytest + + +class CoordinationTimeoutError(Exception): + """When attempt to acquire the distributed lock times out.""" + + +class ParallelMonoScopeFixture(abc.ABC): + """Interface of the context manager which is returned by our pytest fixture + `parallel_mono_scope`. + + An instance of the implementation of this interface is a context manager + which yields an instance of the implementation of the + `DistributedSetupCoordinator` interface. + """ + # pylint: disable=too-few-public-methods + + @abc.abstractmethod + def coordinate_setup_teardown( + self, + setup_request: pytest.FixtureRequest + ) -> Generator[DistributedSetupCoordinator, None, None]: + """Context manager that yields an instance of + `DistributedSetupCoordinator` for distributed coordination of Setup + and Teardown. + + NOTE: In python3.9 and later, a more appropriate return type would be + `contextlib.AbstractContextManager[DistributedSetupCoordinator]`. + + :param setup_request: Value of the pytest `request` fixture obtained + directly by the calling setup-teardown fixture. + """ + + +class DistributedSetupCoordinator(abc.ABC): + """Interface for use with the `parallel_mono_scope` fixture for + distributed coordination of Setup and Teardown workflows. For example, + inserting a subnet into `subnet.xml` and reverting it upon Teardown. + + The `parallel_mono_scope` fixture returns an implementation of this + interface. See the `parallel_mono_scope` fixture in + `plugins_common/parallel_mono_scope_plugin.py` for additional information. 
+ + USAGE EXAMPLE: + ``` + from __future__ import annotations + from typing import TYPE_CHECKING + import pytest + + if TYPE_CHECKING: + from utils.common.parallel_mono_scope_utils import ( + ParallelMonoScopeFixture, + DistributedSetupContext, + DistributedTeardownContext + ) + + @pytest.mark.parallel_mono_scope + class TestDeng12345ParallelMonoScope: + + @classmethod + @pytest.fixture(scope='class', autouse=True) + def distributed_setup_and_teardown( + cls, + parallel_mono_scope: ParallelMonoScopeFixture: + request: pytest.FixtureRequest): + + # Distributed Setup and Teardown + with parallel_mono_scope.coordinate_setup_teardown( + setup_request=request) as coordinator: + # Distributed Setup + coordinator.maybe_call_setup(cls.patch_system_under_test) + + try: + # Yield control back to the XDist Worker to allow the + # test cases to run + yield + finally: + # Distributed Teardown + coordinator.maybe_call_teardown(cls.revert_system_under_test) + + @classmethod + def patch_system_under_test( + cls, + setup_context: DistributedSetupContext) -> None: + # Initialize the System Under Test for all the test cases in + # this test class and store state in `setup_context.client_dir`. + + @classmethod + def revert_system_under_test( + cls, + teardown_context: DistributedTeardownContext) + # Fetch state from `teardown_context.client_dir` and revert + # changes made by `patch_system_under_test()`. + + perms, tc_ids = generate_tests( + os.path.realpath(__file__), + TestDistributionModeEnum.PARALLEL_MONO_SCOPE) + + @pytest.mark.parametrize('test_data', perms, ids=tc_ids) + def test_case(self, test_data: dict[str, dict]) + ... + ``` + """ + + # Default lock acquisition timeout in seconds + DEFAULT_TIMEOUT_SEC = 90 + + @abc.abstractmethod + def maybe_call_setup( + self, + setup_callback: Callable[[DistributedSetupContext], None], + timeout: float = DEFAULT_TIMEOUT_SEC) -> None: + """Invoke the Setup callback only if distributed setup has not been + performed yet from any other XDist worker for your test scope. + Process-safe. + + Call `maybe_call_setup` from the pytest setup-teardown fixture of your + `PARALLEL_MONO_SCOPE` test (typically test class) if it needs to + initialize a resource which is common to all of its test cases which may + be executing in different XDist worker processes (such as a subnet in + `subnet.xml`). + + `maybe_call_setup` MUST ALWAYS be called in conjunction with + `maybe_call_teardown`. + + :param setup_callback: Callback for performing Setup that is common to + the pytest scope from which `maybe_call_setup` is invoked. + :param timeout: Lock acquisition timeout in seconds + + :return: An instance of `DistributedSetupContext` which MUST be passed + in the corresponding call to `maybe_call_teardown`. + + :raise CoordinationTimeoutError: If attempt to acquire the lock times + out. + """ + + @abc.abstractmethod + def maybe_call_teardown( + self, + teardown_callback: Callable[[DistributedTeardownContext], None], + timeout: float = DEFAULT_TIMEOUT_SEC + ) -> None: + """Invoke the Teardown callback only in when called in the context of + the final XDist Worker process to have finished the execution of the + tests for your test scope. Process-safe. + + Call `maybe_call_teardown` from the pytest setup-teardown fixture of + your `PARALLEL_MONO_SCOPE` test (typically test class) if it needs to + initialize a resource which is common to all of its test cases which may + be executing in different XDist worker processes (such as a subnet in + `subnet.xml`). 
+ + NOTE: `maybe_call_teardown` MUST ALWAYS be called in conjunction with + `maybe_call_setup`. + + :param teardown_callback: Callback for performing Teardown that is + common to the pytest scope from which `maybe_call_teardown` is + invoked. + :param timeout: Lock acquisition timeout in seconds + + :raise CoordinationTimeoutError: If attempt to acquire the lock times + out. + """ + + +class _DistributedSetupTeardownContextMixin: # pylint: disable=too-few-public-methods + """Mixin for `DistributedSetupContext` and DistributedTeardownContext`. + """ + # Expected instance members in derived class + _root_context_dir: pathlib.Path + _setup_node_name: str + + _CLIENT_SUBDIRECTORY_LINK = 'client-workspace' + + @property + def client_dir(self) -> pathlib.Path: + """ + :return: The directory where client should save/retrieve + client-specific state, creating the directory if not already + created. + """ + client_dir_path = (self._root_context_dir + / self._CLIENT_SUBDIRECTORY_LINK) + client_dir_path.mkdir(parents=True, exist_ok=True) + + return client_dir_path + + +class DistributedSetupContext(_DistributedSetupTeardownContextMixin): + """Setup context provided by the `acquire_distributed_setup` context + manager. + """ + + def __init__(self, + setup_allowed: bool, + root_context_dir: pathlib.Path, + worker_id: str, + setup_request: pytest.FixtureRequest): + """ + :param setup_allowed: Whether distributed setup may be performed by the + current process. + :param root_context_dir: Scope/class-specific root directory for + saving this context manager's state. This directory is common to + all xdist workers for the given test scope/class. + :param worker_id: XDist worker ID which is executing tests in the + current process. + :param setup_request: Value of the pytest `request` fixture obtained + directly by the calling setup-teardown fixture. + """ + self._root_context_dir = root_context_dir + + # XDist worker ID which is executing tests in the current process + self.worker_id = worker_id + + # Pytest setup node name (e.g., name of test class being setup) + self._setup_node_name = setup_request.node.name + + # Managed code MUST obey the value of `distributed_setup_allowed`! + # + # If True, the client is designated for performing the distributed Setup + # actions. + # If False, the client MUST NOT perform the distributed Setup actions, + # in which case someone else has already performed them + self.distributed_setup_allowed = setup_allowed + + def __repr__(self) -> str: + return ( + f'< {self.__class__.__name__}: ' + f'node_name={self._setup_node_name}; ' + f'setup_allowed={self.distributed_setup_allowed}; ' + f'worker_id={self.worker_id}; ' + f'client_dir={self.client_dir} >') + + +class DistributedTeardownContext(_DistributedSetupTeardownContextMixin): + """Teardown context provided by the `acquire_distributed_teardown` context + manager. + """ + + def __init__(self, + teardown_allowed: bool, + setup_context: DistributedSetupContext): + """ + :param teardown_allowed: Whether Distributed Teardown may be performed + by the current process. + :param setup_context: Setup Context from the Setup phase. + """ + # Managed code MUST obey the value of `distributed_teardown_allowed`! + # + # If True, the client is designated for performing the distributed + # Teardown actions. + # If False, the client MUST NOT perform the distributed Teardown + # actions, in which case someone else will perform them. 
+ self.distributed_teardown_allowed = teardown_allowed + + # NOTE: Friend-of-class protected member access + self._root_context_dir = setup_context._root_context_dir + + # XDist worker ID which is executing tests in the current process + self.worker_id = setup_context.worker_id + + # NOTE: Friend-of-class protected member access + self._setup_node_name = setup_context._setup_node_name + + def __repr__(self) -> str: + return ( + f'< {self.__class__.__name__}: ' + f'node_name={self._setup_node_name}; ' + f'teardown_allowed={self.distributed_teardown_allowed}; ' + f'worker_id={self.worker_id}; ' + f'client_dir={self.client_dir} >') diff --git a/src/xdist/scheduler/isoscope.py b/src/xdist/scheduler/isoscope.py new file mode 100644 index 00000000..83ae10a4 --- /dev/null +++ b/src/xdist/scheduler/isoscope.py @@ -0,0 +1,1338 @@ +# This code was contributed to pytest-xdist by Akamai Technologies Inc. +# Copyright 2024 Akamai Technologies, Inc. +# Developed by Vitaly Kruglikov at Akamai Technologies, Inc. +# +# MIT License +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +""" +Implementation of the Distributed Scope Isolation scheduler for pytest-xdist. + +Properties of this scheduler: + 1. Executes one test scope/class at a time. + 2. Distributes tests of the executing scope/class to the configured XDist + Workers. + 3. Guarantees that the Setup of the executing scope/class completes in all + XDist Workers BEFORE any of those Workers start processing the + Teardown of that test scope/class. + 4. Guarantees that the Teardown phase of the executing test scope/class + completes in all XDist Workers before the Setup phase begins for the + next test scope/class. 
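+
+These guarantees are achieved by activating one scope at a time and "fencing"
+the final pending tests of the active scope with tests borrowed from
+subsequent scopes, driven by the small state machine in
+`DistScopeIsoScheduling._State` below.
+
+The scheduler is expected to be selected through xdist's usual `--dist`
+option; the exact mode name is wired up elsewhere in this patch series and is
+assumed here to mirror this module's name:
+
+    pytest -n 4 --dist isoscope
+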
+ +Credits: +* Implementation of `_split_scope()` and public method documentation in + DistScopeIsoScheduling: + - borrowed from the builtin `loadscope` scheduler +""" # pylint: disable=too-many-lines +from __future__ import annotations + +from collections import OrderedDict +import enum +from math import ceil +import random +from typing import TYPE_CHECKING + +import pytest +from _pytest.runner import CollectReport +from xdist.report import report_collection_diff +from xdist.workermanage import parse_spec_config + + +if TYPE_CHECKING: + from typing import Optional + from collections.abc import Generator, Iterable, ValuesView + import xdist.remote + from xdist.workermanage import WorkerController + + +class DistScopeIsoScheduling: # pylint: disable=too-many-instance-attributes + """Distributed Scope Isolation Scheduling: Implement scheduling across + remote workers, distributing and executing one scope at a time, such that + each scope is executed in isolation from tests in other scopes. + + Ensures that all tests in a given scope complete execution before starting + execution of the tests in the subsequent scope. This way, scoped + setup/teardown fixtures can be orchestrated to execute global setup/teardown + once per scope (vs. per worker) using `FileLock` or similar for + coordination. + """ + class _State(str, enum.Enum): + # Waiting for scheduler to be ready to distribute the next Scope. When + # the Workset Queue is NOT empty AND all workers which are shutting down + # reach zero pending tests AND all other workers have no more than one + # pending tests AND at least one worker is available for the distribution + # of the next scope, then transition to `ACTIVATE_SCOPE` + WAIT_READY_TO_ACTIVATE_SCOPE = 'WAIT-READY-TO-ACTIVATE-SCOPE' + + # Activate (i.e., distribute) tests from the next Scope, if any. If a + # scope was distributed, then transition to `WAIT_READY_TO_FENCE`. + # Workers that are available for distribution are those that already + # contain a fence test belonging to this scope as well as empty workers + # which are not shutting down. Workers with matching fence tests have + # priority over empty workers (to satisfy that "at least two + # active-Scope tests per worker" Rule) + ACTIVATE_SCOPE = 'ACTIVATE-SCOPE' + + # Waiting for scheduler to be ready to fence the active (i.e., + # distributed) scope. Wait until each non-empty worker has only one + # pending test remaining. Then, if at least one of those non-empty + # and non-shutting-down workers contains a pending test belonging to the + # current active Scope, transition to the `FENCE` state. If none of + # these workers contains a pending test belonging to the current active + # Scope, then reset current active scope and transition to + # `WAIT-READY-TO-ACTIVATE-SCOPE` (this means that all workers containing + # active-Scope tests crashed) + WAIT_READY_TO_FENCE = 'WAIT-READY-TO-FENCE' + + # Fence the workers containing the final active-Scope tests in + # order to allow those final pending tests to complete execution. Fence + # tests are dequeued from subsequent scopes, making sure that those + # scopes will be able to satisfy the "at least two active-Scope tests + # per worker" Rule when they are activated. When subsequent scopes run + # out of tests for fencing, then send "shutdown" to the balance of those + # workers instead of a fence test. Finally, transition to + # `WAIT_READY_TO_ACTIVATE_SCOPE`. 
+ FENCE = 'FENCE' + + def __init__(self, config: pytest.Config, + log: xdist.remote.Producer): + self._config = config + self._log: xdist.remote.Producer = log.distscopeisosched + + # Current scheduling state + self._state: DistScopeIsoScheduling._State = \ + self._State.WAIT_READY_TO_ACTIVATE_SCOPE + + # Scope ID of tests that are currently executing; `None` prior to the + # initial distribution + self._active_scope_id: Optional[str] = None + + # The initial expected number of remote workers taking part. + # The actual number of workers will vary during the scheduler's + # lifetime as nodes are added by the DSession as they are brought up and + # removed either because of a dead node or normal shutdown. This number + # is primarily used to know when the initial collection is completed. + self._expected_num_workers = len(parse_spec_config(config)) + + # The final list of test IDs collected by all nodes once + # it's validated to be identical between all the nodes. The validation + # is performed once the number of registered node collections reaches + # `_expected_num_workers`. It is initialized to None and then updated + # after validation succeeds. + self._official_test_collection: Optional[tuple[str]] = None + # Remote worker node having `_official_test_collection` as its test + # collection (for reporting failed collection validations) + self._official_test_collection_node: Optional[WorkerController] = None + + # Ordered collection of Scope Worksets. Each Scope Workset is an ordered + # collection of tests belonging to the given scope. Initially empty, + # it will be populated once we establish the final test collection + # (see `_official_test_collection`). + self._workset_queue = _WorksetQueue() + + # Workers available for test distribution (aka, active workers). It's + # the mapping of `WorkerController` nodes to the corresponding + # `_WorkerProxy` instances. Initially empty, it will be populated by + # our `add_node_collection` implementation as it's called by xdist's + # `DSession` and the corresponding test collection passes validation. + self._worker_by_node: \ + OrderedDict[WorkerController, _WorkerProxy] = OrderedDict() + + # Workers pending validation of their Test collections that have not + # been admitted to `_worker_by_node` yet. + # + # A worker is added to `_pending_worker_by_node` with its collection + # value initialized to `None` when xdist Controller invokes + # `add_node()`. + # + # The worker's test collection value is updated when + # `add_node_collection` receives the corresponding test collection. + # + # A worker is moved from `_pending_worker_by_node` to `_worker_by_node` + # when its collection is validated. + # + # A worker is removed from `_pending_worker_by_node` when the xdist + # controller invokes `remove_node()` with the corresponding node. + self._pending_worker_by_node: \ + OrderedDict[WorkerController, _WorkerProxy] = OrderedDict() + + @property + def nodes(self) -> list[WorkerController]: + """A new list of all active `WorkerController` nodes. + + Called by xdist `DSession`. + """ + return (list(self._worker_by_node.keys()) + + list(self._pending_worker_by_node.keys())) + + @property + def collection_is_completed(self) -> bool: + """Indication whether initial test collection is completed. + + Indicates that all initial participating remote workers have finished + test collection. + + Called by xdist `DSession`: + - when it gets notified that a remote worker completed collection as + a prelude to calling our `schedule()` method. 
+ """ + return self._official_test_collection is not None + + @property + def tests_finished(self) -> bool: + """True if all tests have completed execution. + + Called by xdist `DSession`: + - periodically as a prelude to triggering shutdown + """ + if not self.collection_is_completed: + return False + + if self.has_pending: + return False + + return True + + @property + def has_pending(self) -> bool: + """True if there are pending test items. + + This indicates that collection has finished and nodes are still + processing test items, so this can be thought of as + "the scheduler is active". + + Called by xdist `DSession`. + """ + if not self._workset_queue.empty: + return True + + for worker in self._workers: + if not worker.empty: + return True + + return False + + def add_node(self, node: WorkerController): + """Add a new node to the scheduler's pending worker collection. + + The node will be activated and assigned tests to be executed only after + its test collection is received by `add_node_collection()` and + validated. + + Called by the ``DSession.worker_workerready`` hook + - when it successfully bootstraps a new remote worker. + """ + self._log(f'Registering remote worker node {node}') + + assert node not in self._pending_worker_by_node, \ + f'{node=} already in pending workers' + + self._pending_worker_by_node[node] = _WorkerProxy(node) + + def remove_node(self, node: WorkerController) -> Optional[str]: + """Remove a Remote Worker node from the scheduler. + + This should be called either when the node crashed or at node shutdown + time. + + NOTE: If the worker still has pending tests assigned to it, this + method will return those pending tests back to the Workset Queue for + later execution. + + IMPORTANT: If the remote worker experienced an ungraceful shutdown, it + may create an imbalance between the execution of the setup and teardown + fixture(s). THIS MAY LEAVE THE SYSTEM UNDER TEST IN AN UNEXPECTED STATE, + COMPROMISING EXECUTION OF ALL SUBSEQUENT TESTS IN CURRENT AND FUTURE + SESSIONS. + + Called by the hooks: + + - ``DSession.worker_workerfinished``. + - ``DSession.worker_errordown``. + + :return: the test ID being executed while the node crashed or None if + the node has no more pending items. + + :raise KeyError: if the Remote Worker node has not been registered with + the scheduler. (NOTE: xdist's `DSession` expects this behavior) + """ + self._log(f'Removing remote worker node {node}') + + if node in self._pending_worker_by_node: + # Worker was not admitted to active workers yet, remove it from the + # pending worker collection. + self._pending_worker_by_node.pop(node) + assert node not in self._worker_by_node, \ + f'{node=} in both pending and active workers' + return None + + # Worker was admitted to active workers already + + worker = self._worker_by_node.pop(node) + + if worker.empty: + return None + + # The remote worker node crashed; identify the test that crashed + # + # IMPORTANT: The remote worker might have experienced an ungraceful + # shutdown, possibly creating an imbalance between the execution of + # the setup and teardown fixture(s). THIS MAY LEAVE THE + # SYSTEM UNDER TEST IN AN UNEXPECTED STATE, COMPROMISING EXECUTION OF + # ALL SUBSEQUENT TESTS IN CURRENT AND FUTURE SESSIONS. + + first_pending_test = worker.head_pending_test + crashed_test_id = first_pending_test.test_id + + self._log(f'Remote Worker {repr(worker)} shut down ungracefully. It ' + f'may have crashed while executing the pending test ' + f'{first_pending_test}. 
' + f'NOTE: The ungraceful shutdown may create an imbalance ' + f'between the execution of the setup and teardown ' + f'fixture(s). THIS MAY LEAVE THE SYSTEM UNDER TEST IN AN ' + f'UNEXPECTED STATE, COMPROMISING EXECUTION OF ALL SUBSEQUENT ' + f'TESTS IN CURRENT AND FUTURE SESSIONS.') + + # Return the pending tests back to the workset queue + for test in worker.release_pending_tests(): + self._workset_queue.add_test(test) + + return crashed_test_id + + def add_node_collection(self, node: WorkerController, + collection: list[str]): + """Register the collected test items from a Remote Worker node. + + If the official test collection has been established already, validate + the given worker's test collection against the official node; if valid, + then activate the worker, making it available for scheduling. + + If the official test collection has not been established yet, and we + now have at least the expected number of pending workers with a test + collection, and all these test collections are identical, then: + 1. Record the reference node and collection for subsequent + validations of future worker collections + 2. Activate all these workers, making them available for scheduling. + 2. Organize tests into a queue of worksets grouped by test scope ID + + Called by the hook: + + - ``DSession.worker_collectionfinish``. + """ + self._log(f'Adding collection for node {node}: {len(collection)=}') + + # Check that add_node() was called on the node before + assert node in self._pending_worker_by_node, \ + f'Received test collection for {node=} which is not in pending ' \ + f'workers' + + worker = self._pending_worker_by_node[node] + + collection = worker.collection = tuple(collection) + + if self.collection_is_completed: + # A new node has been added after final collection establishment, + # perhaps an original one died. + + # Check that the new collection matches the official collection + if self._do_two_nodes_have_same_collection( + reference_node=self._official_test_collection_node, + reference_collection=self._official_test_collection, + node=node, + collection=collection): + # The worker's collection is valid, so activate the new worker + self._pending_worker_by_node.pop(node) + self._worker_by_node[node] = worker + + return + + # + # The final collection has not been established yet + # + + # Check if we now have enough collections to establish a final one + + # Get all pending workers with registered test collection + w: _WorkerProxy + workers_with_collection = [ + w for w in self._pending_worker_by_node.values() + if w.collection is not None] + + if len(workers_with_collection) < self._expected_num_workers: + # Not enough test collections registered yet + return + + # Check that all nodes collected the same tests + same_collection = True + reference_worker = workers_with_collection[0] + for pending_worker in workers_with_collection[1:]: + if not self._do_two_nodes_have_same_collection( + reference_node=reference_worker.node, + reference_collection=reference_worker.collection, + node=pending_worker.node, + collection=pending_worker.collection): + same_collection = False + + if not same_collection: + self._log( + '**Different tests collected, aborting worker activation**') + return + + # Collections are identical! 
+ + # Record the reference node and collection for subsequent validations of + # future worker collections + self._official_test_collection_node = reference_worker.node + self._official_test_collection = reference_worker.collection + + # Activate these workers + for worker in workers_with_collection: + # Activate the worker + self._pending_worker_by_node.pop(worker.node) + self._worker_by_node[worker.node] = worker + + # Shuffle the tests to break any inherent ordering relationships for + # distribution across workers (e.g., a sub-sequence of tests that are + # particularly slow) + all_tests = [ + _TestProxy(test_id=test_id, test_index=test_index) + for test_index, test_id + in enumerate(self._official_test_collection)] + shuffled_test_collection = random.sample(all_tests, k=len(all_tests)) + + # Organize tests into a queue of worksets grouped by test scope ID + for test in shuffled_test_collection: + self._workset_queue.add_test(test) + + def mark_test_complete(self, node: WorkerController, item_index: int, + duration): + """Mark test item as completed by node and remove from pending tests + in the worker and reschedule. + + Called by the hook: + + - ``DSession.worker_runtest_protocol_complete``. + """ + # Suppress "unused parameter" warning + assert duration is duration # pylint: disable=comparison-with-itself + + worker = self._worker_by_node[node] + + if self._log.enabled: + self._log(f'Marking test complete: ' + f'test_id={self._official_test_collection[item_index]}; ' + f'{item_index=}; {worker}') + + worker.handle_test_completion(test_index=item_index) + + self._reschedule_workers() + + def mark_test_pending(self, item): + """Not supported""" + raise NotImplementedError() + + def schedule(self): + """Initiate distribution of the test collection. + + Initiate scheduling of the items across the nodes. If this gets called + again later it behaves the same as calling ``._reschedule()`` on all + nodes so that newly added nodes will start to be used. + + If ``.collection_is_completed`` is True, this is called by the hook: + + - ``DSession.worker_collectionfinish``. + """ + assert self.collection_is_completed, \ + 'schedule() called before test collection completed' + + # Test collection has been completed, so reschedule if needed + self._reschedule_workers() + + @staticmethod + def split_scope(test_id: str) -> str: + """Determine the scope (grouping) of a test ID (aka, "nodeid"). + + There are usually 3 cases for a nodeid:: + + example/loadsuite/test/test_beta.py::test_beta0 + example/loadsuite/test/test_delta.py::Delta1::test_delta0 + example/loadsuite/epsilon/__init__.py::epsilon.epsilon + + #. Function in a test module. + #. Method of a class in a test module. + #. Doctest in a function in a package. + + This function will group tests with the scope determined by splitting + the first ``::`` from the right. That is, classes will be grouped in a + single work unit, and functions from a test module will be grouped by + their module. In the above example, scopes will be:: + + example/loadsuite/test/test_beta.py + example/loadsuite/test/test_delta.py::Delta1 + example/loadsuite/epsilon/__init__.py + """ + return test_id.rsplit('::', 1)[0] + + @property + def _workers(self) -> Iterable[_WorkerProxy]: + """An iterable of all active worker proxies in this scheduler, + including those that have initiated, but not yet completed shutdown. + """ + return self._worker_by_node.values() + + def _reschedule_workers(self): + """Distribute work to workers if needed at this time. 
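+
+        Runs the scheduling state machine until the state stops changing:
+
+            WAIT_READY_TO_ACTIVATE_SCOPE -> ACTIVATE_SCOPE
+                -> WAIT_READY_TO_FENCE -> FENCE
+                -> WAIT_READY_TO_ACTIVATE_SCOPE -> ...
+
+        Each `_handle_state_*()` method either performs its transition and
+        updates `self._state`, or leaves the state unchanged, which ends the
+        loop until the next test-completion or worker-availability event.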
+ """ + assert self._state is not None + + traversed_states = [] + previous_state = None + while self._state != previous_state: + # NOTE: This loop will terminate because completion of tests and + # worker availability are reported outside the scope of this + # function, and our state transitions are limited by those factors + assert len(traversed_states) <= len(self._State), \ + f'Too many traversed states - {len(traversed_states)}: ' \ + f'{traversed_states}' + traversed_states.append(self._state) + + previous_state = self._state + + if self._state is self._State.WAIT_READY_TO_ACTIVATE_SCOPE: + self._handle_state_wait_ready_to_activate_scope() + elif self._state is self._State.ACTIVATE_SCOPE: + self._handle_state_activate_scope() + elif self._state is self._State.WAIT_READY_TO_FENCE: + self._handle_state_wait_ready_to_fence() + elif self._state is self._State.FENCE: + self._handle_state_fence() + else: + raise RuntimeError(f'Unhandled state: {self._state}') + + def _handle_state_wait_ready_to_activate_scope(self): + """Handle the `WAIT_READY_TO_ACTIVATE_SCOPE` state. + + Waiting for scheduler to be ready to distribute the next Scope. When + the Workset Queue is NOT empty AND all workers which are shutting down + reach zero pending tests AND all other workers have no more than one + pending tests AND at least one worker is available for the distribution + of the next scope, then transition to `ACTIVATE_SCOPE` + """ + assert self._state is self._State.WAIT_READY_TO_ACTIVATE_SCOPE, \ + f'{self._state=} != {self._State.WAIT_READY_TO_ACTIVATE_SCOPE}' + + if self._workset_queue.empty: + # No more scopes are available + return + + # First check if all workers satisfy the pending test thresholds + for worker in self._workers: + if worker.num_pending_tests > 1: + # A worker has too many pending tests + return + if worker.shutting_down and worker.num_pending_tests != 0: + # A worker is shutting down, but is not empty yet + return + + # Check whether at least one worker is available for the next Scope. + # + # In the event none are available, we'll have to wait for crashed + # worker(s) to be restarted. + # + # NOTE: xdist will either replace crashed workers or terminate the + # session. + + next_scope_id = self._workset_queue.head_workset.scope_id + if not self._get_workers_available_for_distribution( + scope_id=next_scope_id): + # No workers are available for distribution of the next scope. + # It appears that some workers have crashed. xdist will either + # replace crashed workers or terminate the session. + if self._log.enabled: + self._log(f'No workers are available for {next_scope_id=}, ' + f'they likely crashed; staying in {self._state=}') + return + + # Conditions are satisfied for transition to next state + previous_state = self._state + self._state = self._State.ACTIVATE_SCOPE + self._log(f'Transitioned from {str(previous_state)} to ' + f'{str(self._state)}') + + def _handle_state_activate_scope(self): + """Handle the `ACTIVATE_SCOPE` state. + + Activate (i.e., distribute) tests from the next Scope, if any. If we + distributed a scope, then transition to `WAIT_READY_TO_FENCE`. + Workers that are available for distribution are those that already + contain fence tests belonging to this scope as well as empty workers + which are not shutting down. 
Workers with matching fence tests have + priority over empty workers (to satisfy the "at least two + active-Scope tests per worker" Rule) + """ + assert self._state is self._State.ACTIVATE_SCOPE, \ + f'{self._state=} != {self._State.ACTIVATE_SCOPE}' + + # The previous state is responsible for ensuring that the workset queue + # is not empty + assert not self._workset_queue.empty, f'Empty {self._workset_queue}' + + workset = self._workset_queue.dequeue_workset() + + # Get workers that are available for distribution: those that already + # contain a fence test belonging to this scope as well as empty workers + # which are not shutting down + available_workers = self._get_workers_available_for_distribution( + scope_id=workset.scope_id) + + # The previous state is responsible for ensuring that workers are + # available for this Scope + assert available_workers, \ + f'No workers available for {workset.scope_id=} in {self._state=}' + + # Distribute the workset to the available workers + self._distribute_workset(workset=workset, workers=available_workers) + + # Update Active Scope ID + self._active_scope_id = workset.scope_id + + # Conditions are satisfied for transition to next state + previous_state = self._state + self._state = self._State.WAIT_READY_TO_FENCE + self._log(f'Transitioned from {str(previous_state)} to ' + f'{str(self._state)}. ' + f'Activated scope={self._active_scope_id}') + + def _handle_state_wait_ready_to_fence(self): + """Handle the `WAIT_READY_TO_FENCE` state. + + Waiting for scheduler to be ready to fence the active (i.e., + distributed) scope. Wait until each non-empty worker has only one + pending test remaining. Then, if at least one of those non-empty + and non-shutting-down workers contains a pending test belonging to the + current active Scope, transition to the `FENCE` state. If none of + these workers contains a pending test belonging to the current active + Scope, then reset current active scope and transition to + `WAIT-READY-TO-ACTIVATE-SCOPE` (this means that all workers containing + active-Scope tests crashed) + """ + assert self._state is self._State.WAIT_READY_TO_FENCE, \ + f'{self._state=} != {self._State.WAIT_READY_TO_FENCE}' + + assert self._active_scope_id is not None, \ + f'{self._active_scope_id=} is None' + + for worker in self._workers: + if worker.num_pending_tests > 1: + # A worker has too many pending tests + return + + workers_to_fence = self._get_workers_ready_for_fencing( + scope_id=self._active_scope_id) + + # Conditions are satisfied for transition to next state + previous_state = self._state + + if workers_to_fence: + # There are pending active-Scope tests that need to be fenced + self._state = self._State.FENCE + else: + # No active-Scope tests pending, so nothing to fence. Their + # worker(s) must have crashed? + self._state = self._State.WAIT_READY_TO_ACTIVATE_SCOPE + self._log(f'Nothing to fence! No active-scope tests pending - ' + f'workers crashed? {self._active_scope_id=}') + + self._log(f'Transitioned from {str(previous_state)} to ' + f'{str(self._state)}') + + def _handle_state_fence(self): + """Handle the `FENCE` state. + + Fence the workers containing the final active-Scope tests in + order to allow those final pending tests to complete execution. Fence + tests are dequeued from subsequent scopes, making sure that those + scopes will be able to satisfy the "at least two active-Scope tests + per worker" Rule when they are activated. 
When subsequent scopes run + out of tests for fencing, then send "shutdown" to the balance of those + workers instead of a fence test. Finally, transition to + `WAIT_READY_TO_ACTIVATE_SCOPE`. + """ + assert self._state is self._State.FENCE, \ + f'{self._state=} is not {self._State.FENCE}' + + workers_to_fence = self._get_workers_ready_for_fencing( + scope_id=self._active_scope_id) + + # The prior state should have ensured that there is at least one worker + # that needs to be fenced + assert workers_to_fence, \ + f'No workers ready to fence {self._active_scope_id=} ' \ + f'in {self._state=}; ' \ + f'active workers: {[w.verbose_repr() for w in self._workers]}' + + # We will take Fence tests from subsequent worksets. + # NOTE: A given workset may be used to fence multiple preceding active + # Scopes + fence_item_generator = self._generate_fence_items( + source_worksets=self._workset_queue.worksets) + + # Start fencing + for worker in workers_to_fence: + fence_item = next(fence_item_generator) + if fence_item is not None: + worker.run_some_tests([fence_item]) + self._log(f'Fenced {worker} with {fence_item}. ' + f'Active scope={self._active_scope_id}') + else: + # No more fence items, so send the "shutdown" message to + # the worker to force it to execute its final pending test and + # shut down. We won't need this worker any more - the remaining + # fence items are already occupying the necessary number of + # workers + worker.shutdown() + + # Transition to next state + previous_state = self._state + self._state = self._State.WAIT_READY_TO_ACTIVATE_SCOPE + self._log(f'Transitioned from {str(previous_state)} to ' + f'{str(self._state)}') + + def _distribute_workset(self, workset: _ScopeWorkset, + workers: list[_WorkerProxy]): + """Distribute the tests in the given workset to the given workers. + + Adhere to the "at least two active-Scope tests per worker" Rule. + + Note that each of the non-empty workers, if any, contains exactly one + Fence test that belongs to the scope of the given workset. + + :param workset: The workset to distribute. NOTE that some of its tests + may have already been dequeued and applied as fences for a prior + scope. + :param workers: Workers to receive the distribution of tests from the + given workset. NOTE that some of the workers may be non-empty, in + which case they contain exactly one Fence test that belongs to the + scope of the given workset. 
+ """ + # Workers with matching fence tests have priority over empty workers (to + # satisfy the "at least two active-Scope tests per worker" Rule) + # + # Sort workers such that non-empty ones (those containing Fence items) + # are at the beginning to make sure each receive at least one additional + # test item from the workset + workers = list(sorted(workers, key=lambda w: w.empty)) + + num_workers_with_fences = sum(1 for w in workers if not w.empty) + + # Remaining tests in the workset plus the number borrowed as fences + # must add up to the original total tests in the workset + assert (workset.num_tests + num_workers_with_fences + == workset.high_water), \ + f'{workset}.num_tests + {num_workers_with_fences=} ' \ + f'!= {workset.high_water=}; {workers=}' + + # Determine the number of workers we will use for this distribution + num_workers_to_use = min( + self._get_max_workers_for_num_tests(workset.high_water), + len(workers)) + + # At minimum, all workers fenced from the given Scope Workset must be + # included in the distribution + assert num_workers_to_use >= num_workers_with_fences, \ + f'{num_workers_to_use=} < {num_workers_with_fences=} ' \ + f'for {workset} and available {len(workers)=}' + # We should only be called when there is work to be done + assert num_workers_to_use > 0, f'{num_workers_to_use=} <= 0' + # Our workset's footprint should not exceed available workers + assert num_workers_to_use <= len(workers), \ + f'{num_workers_to_use=} > {len(workers)=} for {workset}' + + # Distribute the tests to the selected workers + self._log(f'Distributing {workset} to {num_workers_to_use=}: ' + f'{workers[:num_workers_to_use]}') + + num_tests_remaining = workset.high_water + for (worker, num_available_workers) in zip( + workers, + range(num_workers_to_use, 0, -1)): + worker: _WorkerProxy + num_available_workers: int + + # Workers ready for distribution must have no more than one pending + # test + assert worker.num_pending_tests <= 1, \ + f'{worker.verbose_repr()} num_pending_tests > 1' + + if not worker.empty: + # The single pending test in the worker must be a Fence test + # borrowed from the given workset + assert worker.head_pending_test.scope_id == workset.scope_id, \ + f'Scope IDs of {worker.verbose_repr()} and {workset} differ' + + # Determine the target number of tests for this worker (including + # a matching Fence test, if any) + target_num_tests = ceil(num_tests_remaining / num_available_workers) + num_tests_remaining -= target_num_tests + + # Number of tests we'll be dequeuing from the workset and adding to + # the worker + num_tests_to_add = target_num_tests - worker.num_pending_tests + + # Send tests to the worker + if num_tests_to_add: + tests_to_add = workset.dequeue_tests(num_tests=num_tests_to_add) + worker.run_some_tests(tests_to_add) + self._log(f'Distributed {len(tests_to_add)} tests to {worker} ' + f'from {workset}') + else: + # NOTE: A Workset with a high watermark of just one item becomes + # empty if a Fence item was withdrawn from it + assert workset.high_water == 1, \ + f'Attempted to distribute 0 tests to {worker} ' \ + f'from {workset}' + self._log(f'No more tests to distribute from {workset} ' + f'to {worker}') + + # Workset should be empty now + assert workset.empty, \ + f'{workset} is not empty after distribution to {num_workers_to_use} ' \ + f'workers: {workers[:num_workers_to_use]}.' 
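The per-worker targets computed above follow a simple "ceil of remaining tests over workers left" rule, so worker loads differ by at most one test. A standalone sketch of that split (illustrative only, not part of the scheduler):

```python
# Standalone illustration of the proportional split used in
# _distribute_workset; not part of the scheduler itself.
from __future__ import annotations

from math import ceil


def split_tests(num_tests: int, num_workers: int) -> list[int]:
    counts = []
    remaining = num_tests
    for workers_left in range(num_workers, 0, -1):
        target = ceil(remaining / workers_left)
        counts.append(target)
        remaining -= target
    return counts


assert split_tests(7, 3) == [3, 2, 2]
assert split_tests(8, 4) == [2, 2, 2, 2]
```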
+ + @classmethod + def _generate_fence_items(cls, source_worksets: Iterable[_ScopeWorkset] + ) -> Generator[Optional[_TestProxy], None, None]: + """Generator that withdraws (i.e., dequeues) Fence test items from the + given ordered Scope Worksets and yields them until it runs out of the + fence items per limits described below, and will thereafter yield + `None`. + + Details: + Withdraws (i.e., dequeues) Fence test items - one test per yield - from + the given ordered Scope Worksets and yields these + test items, while making sure not to exceed each Workset's withdrawal + limit to be in compliance with the "at least two active-Scope tests per + worker" Rule when these Worksets are activated. + + The withdrawals are made from the first Workset until it reaches its + Fence limit, then the next Workset, and so on. + + If all Fence items available in the given Source Worksets become + exhausted, the generator yields `None` indefinitely. + + NOTE: The Worksets may have been used to fence multiple preceding active + Scopes, so they may not have their full capacity of Fence items. + + NOTE: ASSUME that all previously withdrawn items were used for Fencing. + + NOTE: Worksets with high watermark of just one item become empty when + a Fence item is withdrawn. + + NOTE: Worksets with original capacity of more than one Test item will + not be completely emptied out by Fencing in order to adhere with the + "at least two active-Scope tests per worker" Rule when these Worksets + are eventually activated. + + :param source_worksets: A (possibly-empty) ordered Iterable of Scope + Worksets from which to withdraw Fence test items. + + :return: this generator. + """ + for workset in source_worksets: + # Determine the maximum number of items we can withdraw from this + # Workset for Fencing. + # + # ASSUME that all previously withdrawn items were used for Fencing + num_fence_items = cls._get_fence_capacity_of_workset(workset) + for _ in range(num_fence_items): + yield workset.dequeue_tests(num_tests=1)[0] + + # The given Worksets ran out of Fence items, so yield `None` from now on + while True: + yield None + + @classmethod + def _get_fence_capacity_of_workset(cls, workset: _ScopeWorkset) -> int: + """Determine the maximum number of items we can withdraw from this + Workset for Fencing. + + NOTE: The Worksets may have been used to fence multiple preceding active + Scopes, so they may not have their full capacity of Fence items. + + NOTE: ASSUME that all previously withdrawn items were used for Fencing. + + NOTE: Worksets with high watermark of just one item become empty when + a Fence item is withdrawn. + + :param workset: The given Scope Workset + :return: + """ + num_fence_items = ( + cls._get_max_workers_for_num_tests(num_tests=workset.high_water) + - (workset.high_water - workset.num_tests) + ) + + assert num_fence_items >= 0, f'Number of fences below zero ' \ + f'({num_fence_items}) in {workset}' + + return num_fence_items + + @staticmethod + def _get_max_workers_for_num_tests(num_tests: int) -> int: + """Determine the maximum number of workers to which the given number of + tests can be distributed, adhering to the "at least two active-Scope + tests per worker" Rule. + + f(0) = 0 + f(1) = 1 + f(2) = 1 + f(3) = 1 + f(4) = 2 + f(5) = 2 + f(6) = 3 + f(7) = 3 + f(8) = 4 + + :param num_tests: Number of tests. + :return: The maximum number of workers to which the given number of + tests can be distributed, adhering to the "at least two active-Scope + tests per worker" Rule. 
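The fence-capacity formula above can be sanity-checked in isolation. In the sketch below, `max_workers` reproduces the f(n) table above and `fence_capacity` mirrors the expression in `_get_fence_capacity_of_workset`; both are illustrative helpers, not scheduler code.

```python
# Quick standalone check of the fence-capacity formula; illustrative only.
def max_workers(num_tests: int) -> int:
    # mirrors the f(n) table above: at least two tests per worker,
    # except that a single test still occupies one worker
    return 1 if num_tests == 1 else num_tests // 2


def fence_capacity(high_water: int, num_tests_left: int) -> int:
    # items already withdrawn for fencing count against the budget
    return max_workers(high_water) - (high_water - num_tests_left)


assert [max_workers(n) for n in range(9)] == [0, 1, 1, 1, 2, 2, 3, 3, 4]
# A workset that started with 6 tests and has already lent 2 of them as
# fences can still provide one more fence item: f(6) = 3, minus 2 withdrawn.
assert fence_capacity(6, 4) == 1
```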
+ """ + if num_tests == 1: + return 1 + + return num_tests // 2 + + def _get_workers_available_for_distribution( + self, scope_id: str) -> list[_WorkerProxy]: + """Return workers available for distribution of the given Scope. + + Available workers are non-shutting-down workers that either + * contain a single pending test which is a fence + test belonging to the given scope + * or are empty workers (no pending tests) + + ASSUMPTION: the caller is responsible for making sure that no worker + contains more than one pending test before calling this method. + + :param scope_id: The scope ID of the test Scope being distributed. + + :return: A (possibly empty) list of workers available for distribution. + """ + return [ + worker for worker in self._workers + if (not worker.shutting_down + and (worker.empty + or worker.tail_pending_test.scope_id == scope_id)) + ] + + def _get_workers_ready_for_fencing(self, scope_id: str + ) -> list[_WorkerProxy]: + """Return workers that are ready to be Fenced for the given test Scope. + + A worker that needs to be Fenced satisfies all the following conditions: + * is not shutting down + * contains exactly one pending test + * this test belongs to the given Scope. + + :param scope_id: Scope ID of the test Scope that needs to be Fenced + + :return: A (possibly empty) list of workers to Fence. + """ + return [ + worker for worker in self._workers + if (not worker.shutting_down + and worker.num_pending_tests == 1 + and worker.head_pending_test.scope_id == scope_id) + ] + + def _do_two_nodes_have_same_collection( + self, + reference_node: WorkerController, + reference_collection: tuple[str], + node: WorkerController, + collection: tuple[str]) -> bool: + """ + If collections differ, this method returns False while logging + the collection differences and posting collection errors to + pytest_collectreport hook. + + :param reference_node: Node of test collection believed to be correct. + :param reference_collection: Test collection believed to be correct. + :param node: Node of the other collection. + :param collection: The other collection to be compared with + `reference_collection` + :return: True if both nodes have collected the same test items. False + otherwise. + """ + msg = report_collection_diff( + reference_collection, collection, reference_node.gateway.id, + node.gateway.id) + if not msg: + return True + + self._log(msg) + + if self._config is not None: + # NOTE: Not sure why/when `_config` would be `None`. Copied check + # from the `loadscope` scheduler. + + report = CollectReport(node.gateway.id, 'failed', longrepr=msg, + result=[]) + self._config.hook.pytest_collectreport(report=report) + + return False + + +class _WorkerProxy: + """Our proxy of a xdist Remote Worker. + + NOTE: tests are added to the pending queue and sent to the remote worker. + NOTE: a test is removed from the pending queue when pytest-xdist controller + reports that the test has completed + """ + + def __init__(self, node: WorkerController): + """ + :param node: The corresponding xdist worker node. + """ + # node: node instance for communication with remote worker, + # provided by pytest-xdist controller + self._node: WorkerController = node + + # An ordered collection of test IDs collected by the remote worker. 
+ # Initially None, until assigned by the Scheduler + self._collection: Optional[tuple[str]] = None + + self._pending_test_by_index: \ + OrderedDict[int, _TestProxy] = OrderedDict() + + def __repr__(self): + return self.verbose_repr(verbose=False) + + @property + def node(self) -> WorkerController: + """ + :return: The corresponding xdist worker node. + """ + return self._node + + @property + def collection(self) -> Optional[tuple[str]]: + """ + :return: An ordered collection of test IDs collected by the remote + worker; `None` if the collection is not available yet. + """ + return self._collection + + @collection.setter + def collection(self, collection: tuple[str]): + """ + :param collection: An ordered collection of test IDs collected by the + remote worker. Must not be `None`. Also, MUST NOT be set already. + """ + assert collection is not None, f'None test collection passed to {self}' + + assert self._collection is None, \ + f'Test collection passed when one already exists to {self}' + + self._collection = collection + + @property + def pending_tests(self) -> ValuesView[_TestProxy]: + """Pending tests""" + return self._pending_test_by_index.values() + + @property + def head_pending_test(self) -> _TestProxy: + """ + :return: The head pending test + + :raise StopIteration: If there are no pending tests + """ + return next(iter(self.pending_tests)) + + @property + def tail_pending_test(self) -> _TestProxy: + """ + :return: The tail pending test + + :raise StopIteration: If there are no pending tests + """ + return next(reversed(self.pending_tests)) + + @property + def empty(self) -> bool: + """ + `True` if no tests have been enqueued for this worker + `False` is at least one Test remains on the pending queue + """ + return not self._pending_test_by_index + + @property + def num_pending_tests(self) -> int: + """Count of tests in the pending queue + """ + return len(self._pending_test_by_index) + + @property + def shutting_down(self) -> bool: + """ + :return: `True` if the worker is already down or shutdown was sent to + the remote worker; `False` otherwise. + """ + return self._node.shutting_down + + def verbose_repr(self, verbose: bool = True) -> str: + """Return a possibly verbose `repr` of the instance. + + :param verbose: `True` to return verbose `repr`; `False` for terse + `repr` content. Defaults to `True`. + + :return: `repr` of the instance. + """ + items = [ + '<', + f'{self.__class__.__name__}:', + f'{self._node}', + f'shutting_down={self.shutting_down}', + f'num_pending={self.num_pending_tests}' + ] + + if verbose: + if self.num_pending_tests == 1: + items.append( + f'head_scope_id={self.head_pending_test.scope_id}') + if self.num_pending_tests > 1: + items.append( + f'tail_scope_id={self.tail_pending_test.scope_id}') + + items.append('>') + + return ' '.join(items) + + def run_some_tests(self, tests: Iterable[_TestProxy]): + """ + Add given tests to the pending queue and + send their indexes to the remote worker + """ + self._node.send_runtest_some([test.test_index for test in tests]) + self._pending_test_by_index.update((t.test_index, t) for t in tests) + + def handle_test_completion(self, test_index: int): + """Remove completed test from the worker's pending tests. + + :param test_index: The stable index of the corresponding test. 
+ """ + # Test assumption: tests should be completed in the order they are sent + # to the remote worker + head_pending_test_index = next(iter(self._pending_test_by_index.keys())) + + # Completion should be reported in same order the tests were sent to + # the remote worker + assert head_pending_test_index == test_index, \ + f'{head_pending_test_index=} != {test_index}' + + # Remove the test from the worker's pending queue + self._pending_test_by_index.pop(test_index) + + def release_pending_tests(self) -> list[_TestProxy]: + """Reset the worker's pending tests, returning those pending tests. + + :return: A (possibly empty) list of pending tests. + """ + pending_tests = list(self.pending_tests) + self._pending_test_by_index.clear() + return pending_tests + + def shutdown(self): + """ + Send the "shutdown" message to the remote worker. This + will cause the remote worker to shut down after executing + any remaining pending tests assigned to it. + """ + self._node.shutdown() + + +class _TestProxy: + """ + Represents a single test from the overall test + collection to be executed + """ + + # There can be a large number of tests, so economize memory by declaring + # `__slots__` (see https://wiki.python.org/moin/UsingSlots) + __slots__ = ('test_id', 'test_index',) + + def __init__(self, test_id: str, test_index: int): + """ + :param test_id: Test ID of this test; + :param test_index: The stable index of the corresponding test + for assigning to remote worker. + + """ + self.test_id: str = test_id + self.test_index: int = test_index + + def __repr__(self): + return f'<{self.__class__.__name__}: test_index={self.test_index} ' \ + f'scope_id={self.scope_id} test_id={self.test_id}>' + + @property + def scope_id(self) -> str: + """Scope ID to which this test belongs. + """ + return DistScopeIsoScheduling.split_scope(self.test_id) + + +class _ScopeWorkset: + """ + Ordered collection of Tests for the given scope + """ + + __slots__ = ('scope_id', '_high_water', '_test_by_index',) + + def __init__(self, scope_id: str): + """ + :param scope_id: Test Scope to which the tests in this workset belong; + """ + self.scope_id = scope_id + + # High watermark for number of tests in the workset + self._high_water: int = 0 + + self._test_by_index: OrderedDict[int, _TestProxy] = OrderedDict() + + def __repr__(self): + return f'<{self.__class__.__name__}: scope_id={self.scope_id} ' \ + f'num_tests={self.num_tests} high_water={self.high_water}>' + + @property + def empty(self) -> bool: + """`True` if workset is empty; `False` otherwise.""" + return not self._test_by_index + + @property + def high_water(self) -> int: + """ + :return: High Watermark of the number of tests in the workset. + """ + return self._high_water + + @property + def num_tests(self) -> int: + """Number of tests in this workset""" + return len(self._test_by_index) + + def enqueue_test(self, test: _TestProxy): + """Append given test to ordered test collection""" + assert test.scope_id == self.scope_id, \ + f'Wrong {test.scope_id=} for {self}' + + assert test.test_index not in self._test_by_index, \ + f'{test.test_index=} was already assigned to {self}' + + self._test_by_index[test.test_index] = test + + # Update high watermark + new_num_tests = len(self._test_by_index) + if new_num_tests > self._high_water: + self._high_water = new_num_tests + + def dequeue_tests(self, num_tests: int) -> list[_TestProxy]: + """ + Remove and return the given number of tests from the head of the + collection. 
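Since `_TestProxy.scope_id` above defers to the scheduler's `split_scope()` (borrowed from `loadscope`), the grouping can be pictured roughly as below. This is a hedged approximation for illustration only; the actual splitting logic lives in the scheduler.

```python
# Rough illustration of how a scope ID relates to a pytest nodeid under the
# "class first, else module" grouping described for this scheduler. This is
# an approximation, not the scheduler's actual split_scope() implementation.
def split_scope_example(nodeid: str) -> str:
    parts = nodeid.split("::")
    if len(parts) >= 3:
        # module::class::test -> group by module::class
        return "::".join(parts[:2])
    # module::test -> group by module
    return parts[0]


assert (split_scope_example("tests/test_api.py::TestLogin::test_ok")
        == "tests/test_api.py::TestLogin")
assert split_scope_example("tests/test_api.py::test_standalone") == "tests/test_api.py"
```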
+ + :param num_tests: a positive number of tests to dequeue; must not exceed + available tests. + @raise IndexError: If `num_tests` exceeds available tests. + """ + assert num_tests > 0, f'Non-positive {num_tests=} requested.' + + if num_tests > len(self._test_by_index): + raise IndexError( + f'{num_tests=} exceeds {len(self._test_by_index)=}') + + key_iter = iter(self._test_by_index.keys()) + test_indexes_to_dequeue = [next(key_iter) for _ in range(num_tests)] + + return [self._test_by_index.pop(test_index) + for test_index in test_indexes_to_dequeue] + + +class _WorksetQueue: + """Ordered collection of Scope Worksets grouped by scope id.""" + + def __init__(self): + self._workset_by_scope: OrderedDict[str, _ScopeWorkset] = OrderedDict() + + def __repr__(self): + return f'<{self.__class__.__name__}: ' \ + f'num_worksets={len(self._workset_by_scope)}>' + + @property + def empty(self) -> bool: + """`True` if work queue is empty; `False` otherwise.""" + return not self._workset_by_scope + + @property + def head_workset(self) -> _ScopeWorkset: + """ + :return: The head workset + + :raise StopIteration: If the Workset Queue is empty + """ + return next(iter(self.worksets)) + + @property + def worksets(self) -> ValuesView[_ScopeWorkset]: + """ + :return: An iterable of this queue's ordered collection of + `_ScopeWorkset` instances. + """ + return self._workset_by_scope.values() + + def add_test(self, test: _TestProxy): + """Adds given test to its Scope Workset, creating the corresponding + workset as needed. Newly-created Worksets are always added at + the end of the Workset Queue(appended). + """ + scope_id = test.scope_id + + if (workset := self._workset_by_scope.get(scope_id)) is not None: + # Add to an existing Scope Workset + workset.enqueue_test(test) + else: + # Create a new Scope Workset + new_workset = _ScopeWorkset(scope_id=scope_id) + new_workset.enqueue_test(test) + self._workset_by_scope[scope_id] = new_workset + + def dequeue_workset(self) -> _ScopeWorkset: + """Dequeue and return the scope workset at the head of the queue. + + @raise IndexError: If queue is empty. + """ + if self.empty: + raise IndexError('Attempted dequeue from empty Workset Queue.') + + return self._workset_by_scope.pop( + next(iter(self._workset_by_scope.keys()))) From ed63fe4e78d037d9f87de1d289831d5ff9c4d889 Mon Sep 17 00:00:00 2001 From: Vitaly Kruglikov Date: Fri, 6 Sep 2024 18:34:13 -0400 Subject: [PATCH 02/52] Integrate isoscope scheduling and distributed sccope isolation into xdist. Not tested yet. --- CHANGELOG.rst | 7 ++ docs/distribution.rst | 13 +++ pyproject.toml | 2 + src/xdist/dsession.py | 3 + src/xdist/iso_scheduling_plugin.py | 156 +++++++++++++++-------------- src/xdist/iso_scheduling_utils.py | 78 ++++++++------- src/xdist/plugin.py | 9 ++ src/xdist/scheduler/__init__.py | 1 + src/xdist/scheduler/isoscope.py | 99 +++++++++++------- 9 files changed, 220 insertions(+), 148 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 2f29201a..53b482a0 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,10 @@ +pytest-xdist 3.ZZZ.ZZZ (2024-zz-zz) +=============================== + +Features +-------- +- `#1126 `_: New ``isoscope`` scheduler. + pytest-xdist 3.6.1 (2024-04-28) =============================== diff --git a/docs/distribution.rst b/docs/distribution.rst index ebbfa088..b00b44a0 100644 --- a/docs/distribution.rst +++ b/docs/distribution.rst @@ -49,6 +49,19 @@ The test distribution algorithm is configured with the ``--dist`` command-line o .. 
_distribution modes: +* ``--dist isoscope``: Scope Isolation Scheduler. Tests are grouped by module for + test functions and by class for test methods. Tests are executed one group at a + time, distributed across available workers. This groupwise isolation guarantees + that all tests in one group complete execution before running another group of + tests. This can be useful when module-level or class-level fixtures of one group + could create undesirable side-effects for tests in other test groups, while + taking advantage of distributed execution of tests within each group. Grouping + by class takes priority over grouping by module. NOTE: the use of this scheduler + requires distributed coordination for setup and teardown such as provided by + the ``iso_scheduling`` fixture or an alternate implementation of distributed + coordination - see the ``iso_scheduling.coordinate_setup_teardown`` usage example + in iso_scheduling_plugin.py. + * ``--dist load`` **(default)**: Sends pending tests to any worker that is available, without any guaranteed order. Scheduling can be fine-tuned with the `--maxschedchunk` option, see output of `pytest --help`. diff --git a/pyproject.toml b/pyproject.toml index 70cb2b00..c6f703d7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -35,6 +35,7 @@ classifiers = [ requires-python = ">=3.8" dependencies = [ "execnet>=2.1", + "filelock>=3.13.1", "pytest>=7.0.0", ] dynamic = ["version"] @@ -48,6 +49,7 @@ Tracker = "https://github.com/pytest-dev/pytest-xdist/issues" [project.entry-points.pytest11] xdist = "xdist.plugin" +"xdist.iso_scheduling_plugin" = "xdist.iso_scheduling_plugin" "xdist.looponfail" = "xdist.looponfail" [project.optional-dependencies] diff --git a/src/xdist/dsession.py b/src/xdist/dsession.py index 62079a28..c27390bd 100644 --- a/src/xdist/dsession.py +++ b/src/xdist/dsession.py @@ -15,6 +15,7 @@ from xdist.remote import Producer from xdist.remote import WorkerInfo from xdist.scheduler import EachScheduling +from xdist.scheduler import IsoScopeScheduling from xdist.scheduler import LoadFileScheduling from xdist.scheduler import LoadGroupScheduling from xdist.scheduler import LoadScheduling @@ -113,6 +114,8 @@ def pytest_xdist_make_scheduler( dist = config.getvalue("dist") if dist == "each": return EachScheduling(config, log) + if dist == "isoscope": + return IsoScopeScheduling(config, log) if dist == "load": return LoadScheduling(config, log) if dist == "loadscope": diff --git a/src/xdist/iso_scheduling_plugin.py b/src/xdist/iso_scheduling_plugin.py index 8243199b..8567d7c5 100644 --- a/src/xdist/iso_scheduling_plugin.py +++ b/src/xdist/iso_scheduling_plugin.py @@ -22,9 +22,9 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. -"""Pytest Fixtures for supporting the PARALLEL_MONO_SCOPE Test Distribution Mode. +"""Pytest Fixtures for supporting users of isoscope scheduling. -NOTE: These fixtures are NOT compatible with any other Test Distribution Modes. +NOTE: These fixtures are NOT compatible with any other xdist schedulers. NOTE: DO NOT IMPORT this module. It needs to be loaded via pytest's `conftest.pytest_plugins` mechanism. 
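As a usage note, a suite would typically opt in by loading the coordination plugin and selecting the scheduler on the command line. The patch also registers the plugin via an entry point, which may make explicit loading unnecessary; the following project-level conftest is therefore only a hedged sketch.

```python
# conftest.py -- illustrative sketch; the module path matches the plugin added
# by this patch, but explicit loading may be redundant given the entry-point
# registration in pyproject.toml.
pytest_plugins = ["xdist.iso_scheduling_plugin"]

# Tests would then be run with, for example:
#   pytest -n 4 --dist isoscope
```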
Pytest doc discourages importing fixtures @@ -46,8 +46,8 @@ import filelock import pytest -from utils.common.parallel_mono_scope_utils import ( - ParallelMonoScopeFixture, +from xdist.iso_scheduling_utils import ( + IsoSchedulingFixture, DistributedSetupCoordinator, DistributedSetupContext, DistributedTeardownContext, @@ -63,16 +63,24 @@ @pytest.fixture(scope='session') -def parallel_mono_scope( - tmp_path_factory: pytest.TempPathFactory, - testrun_uid: str, - worker_id: str - ) -> ParallelMonoScopeFixture: +def iso_scheduling( + tmp_path_factory: pytest.TempPathFactory, + testrun_uid: str, + worker_id: str +) -> IsoSchedulingFixture: """A session-scoped pytest fixture for coordinating setup/teardown of test - scope/class which is executing in the parallel_mono_scope Test Distribution - Mode. + scope/class which is executing under isoscope scheduling. - NOTE: Each XDist remote worker is running its own Pytest Session. + Based on the filelock idea described in section + "Making session-scoped fixtures execute only once" of + https://pytest-xdist.readthedocs.io/en/stable/how-to.html. + + NOTE: Each XDist remote worker is running its own Pytest Session, so we want + only the worker that starts its session first to execute the setup logic and + only the worker that finishes its session last to execute the teardown logic + using a form of distributed coordination. This way, setup is executed exactly + once before any worker executes any of the scope's tests, and teardown is + executed only after the last worker finishes test execution. USAGE EXAMPLE: @@ -82,24 +90,23 @@ def parallel_mono_scope( import pytest if TYPE_CHECKING: - from utils.common.parallel_mono_scope_utils import ( - ParallelMonoScopeFixture, + from xdist.iso_scheduling_utils import ( + IsoSchedulingFixture, DistributedSetupContext, DistributedTeardownContext ) - @pytest.mark.parallel_mono_scope - class TestDeng12345ParallelMonoScope: + class TestSomething: @classmethod @pytest.fixture(scope='class', autouse=True) def distributed_setup_and_teardown( cls, - parallel_mono_scope: ParallelMonoScopeFixture: + iso_scheduling: IsoSchedulingFixture: request: pytest.FixtureRequest): # Distributed Setup and Teardown - with parallel_mono_scope.coordinate_setup_teardown( + with iso_scheduling.coordinate_setup_teardown( setup_request=request) as coordinator: # Distributed Setup coordinator.maybe_call_setup(cls.patch_system_under_test) @@ -126,12 +133,13 @@ def revert_system_under_test( # Fetch state from `teardown_context.client_dir` and revert # changes made by `patch_system_under_test()`. - perms, tc_ids = generate_tests( - os.path.realpath(__file__), - TestDistributionModeEnum.PARALLEL_MONO_SCOPE) + def test_case1(self) + ... - @pytest.mark.parametrize('test_data', perms, ids=tc_ids) - def test_case(self, test_data: dict[str, dict]) + def test_case2(self) + ... + + def test_case3(self) ... ``` @@ -146,17 +154,17 @@ def test_case(self, test_data: dict[str, dict]) yields an instance of `DistributedSetupCoordinator` for the current Pytest Session. """ - return _ParallelMonoScopeFixtureImpl(tmp_path_factory=tmp_path_factory, - testrun_uid=testrun_uid, - worker_id=worker_id) + return _IsoSchedulingFixtureImpl(tmp_path_factory=tmp_path_factory, + testrun_uid=testrun_uid, + worker_id=worker_id) -class _ParallelMonoScopeFixtureImpl(ParallelMonoScopeFixture): +class _IsoSchedulingFixtureImpl(IsoSchedulingFixture): """Context manager yielding a new instance of the implementation of the `DistributedSetupCoordinator` interface. 
- An instance of _ParallelMonoScopeFixtureImpl is returned by our pytest - fixture `parallel_mono_scope`. + An instance of _IsoSchedulingFixtureImpl is returned by our pytest + fixture `iso_scheduling`. """ # pylint: disable=too-few-public-methods @@ -178,9 +186,9 @@ def __init__(self, @contextlib.contextmanager def coordinate_setup_teardown( - self, - setup_request: pytest.FixtureRequest - ) -> Generator[DistributedSetupCoordinator, None, None]: + self, + setup_request: pytest.FixtureRequest + ) -> Generator[DistributedSetupCoordinator, None, None]: """Context manager that yields an instance of `DistributedSetupCoordinator` for distributed coordination of Setup and Teardown. @@ -206,11 +214,11 @@ def coordinate_setup_teardown( class _DistributedSetupCoordinatorImpl(DistributedSetupCoordinator): - """Distributed scope/class setup/teardown coordination for the - `parallel_mono_scope` Test Distribution Mode. + """Distributed scope/class setup/teardown coordination for isoscope + scheduling. NOTE: do not instantiate this class directly. Use the - `parallel_mono_scope` fixture instead! + `iso_scheduling` fixture instead! """ _DISTRIBUTED_SETUP_ROOT_DIR_LINK_NAME = 'distributed_setup' @@ -248,16 +256,16 @@ def __init__(self, self._teardown_context: Optional[DistributedTeardownContext] = None def maybe_call_setup( - self, - setup_callback: Callable[[DistributedSetupContext], None], - timeout: float = DistributedSetupCoordinator.DEFAULT_TIMEOUT_SEC - ) -> None: + self, + setup_callback: Callable[[DistributedSetupContext], None], + timeout: float = DistributedSetupCoordinator.DEFAULT_TIMEOUT_SEC + ) -> None: """Invoke the Setup callback only if distributed setup has not been performed yet from any other XDist worker for your test scope. Process-safe. Call `maybe_call_setup` from the pytest setup-teardown fixture of your - `PARALLEL_MONO_SCOPE` test (typically test class) if it needs to + isoscope-scheduled test (typically test class) if it needs to initialize a resource which is common to all of its test cases which may be executing in different XDist worker processes (such as a subnet in `subnet.xml`). @@ -272,8 +280,7 @@ def maybe_call_setup( :return: An instance of `DistributedSetupContext` which MUST be passed in the corresponding call to `maybe_call_teardown`. - :raise parallel_mono_scope.CoordinationTimeoutError: If attempt to - acquire the lock times out. + :raise CoordinationTimeoutError: If attempt to acquire the lock times out. """ # `maybe_call_setup()` may be called only once per instance of # `_SetupCoordinator` @@ -298,16 +305,16 @@ def maybe_call_setup( setup_callback(self._setup_context) def maybe_call_teardown( - self, - teardown_callback: Callable[[DistributedTeardownContext], None], - timeout: float = DistributedSetupCoordinator.DEFAULT_TIMEOUT_SEC - ) -> None: + self, + teardown_callback: Callable[[DistributedTeardownContext], None], + timeout: float = DistributedSetupCoordinator.DEFAULT_TIMEOUT_SEC + ) -> None: """Invoke the Teardown callback only in when called in the context of the final XDist Worker process to have finished the execution of the tests for your test scope. Process-safe. Call `maybe_call_teardown` from the pytest setup-teardown fixture of - your `PARALLEL_MONO_SCOPE` test (typically test class) if it needs to + your isoscope-scheduled test (typically test class) if it needs to initialize a resource which is common to all of its test cases which may be executing in different XDist worker processes (such as a subnet in `subnet.xml`). 
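The setup side of this coordination builds on the filelock pattern from the pytest-xdist how-to ("Making session-scoped fixtures execute only once"). A minimal self-contained sketch of that pattern, with hypothetical file names and callback, is:

```python
# Minimal sketch of the "first worker performs the setup" pattern; file names
# and the do_expensive_setup callback are hypothetical, not from this patch.
import json
from pathlib import Path

from filelock import FileLock


def maybe_run_setup_once(shared_dir: Path, do_expensive_setup) -> dict:
    state_file = shared_dir / "state.json"
    with FileLock(str(shared_dir / "state.lock"), timeout=300):
        if state_file.is_file():
            # Another worker already performed the setup; reuse its result.
            return json.loads(state_file.read_text())
        data = do_expensive_setup()
        state_file.write_text(json.dumps(data))
        return data
```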
@@ -320,8 +327,7 @@ def maybe_call_teardown( invoked. :param timeout: Lock acquisition timeout in seconds - :raise parallel_mono_scope.CoordinationTimeoutError: If attempt to - acquire the lock times out. + :raise CoordinationTimeoutError: If attempt to acquire the lock times out. """ # Make sure `maybe_call_setup()` was already called on this instance # of `_SetupCoordinator` @@ -359,8 +365,7 @@ def wrapper(*args, **kwargs): class _DistributedSetupCoordinationImpl: """Low-level implementation of Context Managers for Coordinating - Distributed Setup and Teardown for the `parallel_mono_scope` - Test Distribution Mode. + Distributed Setup and Teardown for users of isoscope scheduling. """ _ROOT_STATE_FILE_NAME = 'root_state.json' _ROOT_LOCK_FILE_NAME = 'lock' @@ -379,9 +384,9 @@ def __repr__(self): @classmethod def load_from_file_path( - cls, - state_file_path: pathlib.Path - ) -> _DistributedSetupCoordinationImpl.DistributedState: + cls, + state_file_path: pathlib.Path + ) -> _DistributedSetupCoordinationImpl.DistributedState: """Load the state instance from the given file path. :param state_file_path: @@ -407,7 +412,7 @@ def as_json_kwargs_dict(self) -> dict: 'teardown_count': self.teardown_count } - def save_to_file_path(self, state_file_path: pathlib.Path): + def save_to_file_path(self, state_file_path: pathlib.Path) -> None: """Save this state instance to the given file path. :param state_file_path: @@ -419,14 +424,14 @@ def save_to_file_path(self, state_file_path: pathlib.Path): @contextlib.contextmanager @_map_file_lock_exception def acquire_distributed_setup( - cls, - root_context_dir: pathlib.Path, - worker_id: str, - setup_request: pytest.FixtureRequest, - timeout: float - ) -> Generator[DistributedSetupContext, None, None]: + cls, + root_context_dir: pathlib.Path, + worker_id: str, + setup_request: pytest.FixtureRequest, + timeout: float + ) -> Generator[DistributedSetupContext, None, None]: """Low-level implementation of Context Manager for Coordinating - Distributed Setup for the `parallel_mono_scope` Test Distribution Mode. + Distributed Setup for isoscope scheduling. :param root_context_dir: Scope/class-specific root directory for saving this context manager's state. This directory is common to @@ -436,8 +441,7 @@ def acquire_distributed_setup( directly by the calling setup-teardown fixture. :param timeout: Lock acquisition timeout in seconds - :raise parallel_mono_scope.CoordinationTimeoutError: If attempt to - acquire the lock times out. + :raise CoordinationTimeoutError: If attempt to acquire the lock times out. """ # # Before control passes to the managed code block @@ -497,21 +501,19 @@ def acquire_distributed_setup( @contextlib.contextmanager @_map_file_lock_exception def acquire_distributed_teardown( - cls, - setup_context: DistributedSetupContext, - timeout: float - ) -> Generator[DistributedTeardownContext, None, None]: + cls, + setup_context: DistributedSetupContext, + timeout: float + ) -> Generator[DistributedTeardownContext, None, None]: """Low-level implementation of Context Manager for Coordinating - Distributed Teardown for the `parallel_mono_scope` Test Distribution - Mode. + Distributed Teardown for the isoscope scheduling. :param setup_context: The instance of `DistributedSetupContext` that was yielded by the corresponding use of the `_distributed_setup_permission` context manager. :param timeout: Lock acquisition timeout in seconds - :raise parallel_mono_scope.CoordinationTimeoutError: If attempt to - acquire the lock times out. 
+ :raise CoordinationTimeoutError: If attempt to acquire the lock times out. """ # # Before control passes to the managed code block @@ -571,8 +573,9 @@ def acquire_distributed_teardown( @classmethod def _get_root_state_file_path( - cls, - root_state_dir: pathlib.Path) -> pathlib.Path: + cls, + root_state_dir: pathlib.Path + ) -> pathlib.Path: """Return the path of the file for storing the root state, creating all parent directories if they don't exist yet. @@ -584,8 +587,9 @@ def _get_root_state_file_path( @classmethod def _get_root_lock_file_path( - cls, - root_lock_dir: pathlib.Path) -> pathlib.Path: + cls, + root_lock_dir: pathlib.Path + ) -> pathlib.Path: """Return the path of the lock file, creating all parent directories if they don't exist yet. diff --git a/src/xdist/iso_scheduling_utils.py b/src/xdist/iso_scheduling_utils.py index 86c65d79..3d61883d 100644 --- a/src/xdist/iso_scheduling_utils.py +++ b/src/xdist/iso_scheduling_utils.py @@ -22,12 +22,11 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. -"""Utilities for supporting the `PARALLEL_MONO_SCOPE` Test Distribution Mode. +"""Utilities for supporting isoscope scheduling. -NOTE: These utilities are NOT compatible with any other Test Distribution Modes. +NOTE: These utilities are NOT compatible with any other xdist schedulers. -See also `plugins_common/parallel_mono_scope_plugin.py` for fixtures specific to -the `PARALLEL_MONO_SCOPE` Test Distribution Mode. +See also `iso_scheduling_plugin.py` for fixtures specific to isoscope scheduling. """ from __future__ import annotations @@ -44,9 +43,9 @@ class CoordinationTimeoutError(Exception): """When attempt to acquire the distributed lock times out.""" -class ParallelMonoScopeFixture(abc.ABC): +class IsoSchedulingFixture(abc.ABC): """Interface of the context manager which is returned by our pytest fixture - `parallel_mono_scope`. + `iso_scheduling`. An instance of the implementation of this interface is a context manager which yields an instance of the implementation of the @@ -56,9 +55,9 @@ class ParallelMonoScopeFixture(abc.ABC): @abc.abstractmethod def coordinate_setup_teardown( - self, - setup_request: pytest.FixtureRequest - ) -> Generator[DistributedSetupCoordinator, None, None]: + self, + setup_request: pytest.FixtureRequest + ) -> Generator[DistributedSetupCoordinator, None, None]: """Context manager that yields an instance of `DistributedSetupCoordinator` for distributed coordination of Setup and Teardown. @@ -72,39 +71,46 @@ def coordinate_setup_teardown( class DistributedSetupCoordinator(abc.ABC): - """Interface for use with the `parallel_mono_scope` fixture for + """Interface for use with the `iso_scheduling` fixture for distributed coordination of Setup and Teardown workflows. For example, inserting a subnet into `subnet.xml` and reverting it upon Teardown. - The `parallel_mono_scope` fixture returns an implementation of this - interface. See the `parallel_mono_scope` fixture in - `plugins_common/parallel_mono_scope_plugin.py` for additional information. + The `iso_scheduling` fixture returns an implementation of this + interface. See the `iso_scheduling` fixture in + `iso_scheduling_plugin.py` for additional information. + + NOTE: Each XDist remote worker is running its own Pytest Session, so we want + only the worker that starts its session first to execute the setup logic and + only the worker that finishes its session last to execute the teardown logic + using a form of distributed coordination. 
This way, setup is executed exactly + once before any worker executes any of the scope's tests, and teardown is + executed only after the last worker finishes test execution. USAGE EXAMPLE: + ``` from __future__ import annotations from typing import TYPE_CHECKING import pytest if TYPE_CHECKING: - from utils.common.parallel_mono_scope_utils import ( - ParallelMonoScopeFixture, + from xdist.iso_scheduling_utils import ( + IsoSchedulingFixture, DistributedSetupContext, DistributedTeardownContext ) - @pytest.mark.parallel_mono_scope - class TestDeng12345ParallelMonoScope: + class TestSomething: @classmethod @pytest.fixture(scope='class', autouse=True) def distributed_setup_and_teardown( cls, - parallel_mono_scope: ParallelMonoScopeFixture: + iso_scheduling: IsoSchedulingFixture: request: pytest.FixtureRequest): # Distributed Setup and Teardown - with parallel_mono_scope.coordinate_setup_teardown( + with iso_scheduling.coordinate_setup_teardown( setup_request=request) as coordinator: # Distributed Setup coordinator.maybe_call_setup(cls.patch_system_under_test) @@ -131,12 +137,13 @@ def revert_system_under_test( # Fetch state from `teardown_context.client_dir` and revert # changes made by `patch_system_under_test()`. - perms, tc_ids = generate_tests( - os.path.realpath(__file__), - TestDistributionModeEnum.PARALLEL_MONO_SCOPE) + def test_case1(self) + ... + + def test_case2(self) + ... - @pytest.mark.parametrize('test_data', perms, ids=tc_ids) - def test_case(self, test_data: dict[str, dict]) + def test_case3(self) ... ``` """ @@ -146,15 +153,16 @@ def test_case(self, test_data: dict[str, dict]) @abc.abstractmethod def maybe_call_setup( - self, - setup_callback: Callable[[DistributedSetupContext], None], - timeout: float = DEFAULT_TIMEOUT_SEC) -> None: + self, + setup_callback: Callable[[DistributedSetupContext], None], + timeout: float = DEFAULT_TIMEOUT_SEC + ) -> None: """Invoke the Setup callback only if distributed setup has not been performed yet from any other XDist worker for your test scope. Process-safe. Call `maybe_call_setup` from the pytest setup-teardown fixture of your - `PARALLEL_MONO_SCOPE` test (typically test class) if it needs to + isoscope-scheduled test (typically test class) if it needs to initialize a resource which is common to all of its test cases which may be executing in different XDist worker processes (such as a subnet in `subnet.xml`). @@ -175,16 +183,16 @@ def maybe_call_setup( @abc.abstractmethod def maybe_call_teardown( - self, - teardown_callback: Callable[[DistributedTeardownContext], None], - timeout: float = DEFAULT_TIMEOUT_SEC - ) -> None: + self, + teardown_callback: Callable[[DistributedTeardownContext], None], + timeout: float = DEFAULT_TIMEOUT_SEC + ) -> None: """Invoke the Teardown callback only in when called in the context of the final XDist Worker process to have finished the execution of the tests for your test scope. Process-safe. Call `maybe_call_teardown` from the pytest setup-teardown fixture of - your `PARALLEL_MONO_SCOPE` test (typically test class) if it needs to + your isoscope-scheduled test (typically test class) if it needs to initialize a resource which is common to all of its test cases which may be executing in different XDist worker processes (such as a subnet in `subnet.xml`). 
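On the teardown side, the coordination amounts to "the last worker out runs the teardown". A hedged sketch of that counting idea follows; field and file names are illustrative, not the plugin's actual on-disk format.

```python
# Illustrative "last one out tears down" bookkeeping; not the plugin's actual
# state format. Every worker increments the counter under the lock; only the
# worker whose increment reaches the expected total runs the teardown.
import json
from pathlib import Path

from filelock import FileLock


def maybe_run_teardown_last(shared_dir: Path, expected_workers: int,
                            do_teardown) -> None:
    state_file = shared_dir / "teardown_state.json"
    with FileLock(str(shared_dir / "teardown_state.lock"), timeout=300):
        count = 0
        if state_file.is_file():
            count = json.loads(state_file.read_text())["count"]
        count += 1
        state_file.write_text(json.dumps({"count": count}))
        if count == expected_workers:
            do_teardown()  # this worker is the last to finish for the scope
```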
@@ -293,13 +301,13 @@ def __init__(self, self.distributed_teardown_allowed = teardown_allowed # NOTE: Friend-of-class protected member access - self._root_context_dir = setup_context._root_context_dir + self._root_context_dir = setup_context._root_context_dir # pylint: disable=protected-access # XDist worker ID which is executing tests in the current process self.worker_id = setup_context.worker_id # NOTE: Friend-of-class protected member access - self._setup_node_name = setup_context._setup_node_name + self._setup_node_name = setup_context._setup_node_name # pylint: disable=protected-access def __repr__(self) -> str: return ( diff --git a/src/xdist/plugin.py b/src/xdist/plugin.py index f670d9de..b68b7811 100644 --- a/src/xdist/plugin.py +++ b/src/xdist/plugin.py @@ -103,6 +103,7 @@ def pytest_addoption(parser: pytest.Parser) -> None: action="store", choices=[ "each", + "isoscope", "load", "loadscope", "loadfile", @@ -115,6 +116,14 @@ def pytest_addoption(parser: pytest.Parser) -> None: help=( "Set mode for distributing tests to exec environments.\n\n" "each: Send each test to all available environments.\n\n" + "isoscope: Scope Isolation Scheduler." + " Tests are grouped by module for test functions and by class for test methods." + " Tests are executed one group at a time, distributed across available workers. This " + " groupwise isolation guarantees that all tests in one group complete execution before" + " running another group of tests. This can be useful when module-level or class-level" + " fixtures of one group could create undesirable side-effects for tests in other test groups," + " while taking advantage of distributed execution of tests within each group. Grouping by" + " class takes priority over grouping by module.\n\n" "load: Load balance by sending any pending test to any" " available environment.\n\n" "loadscope: Load balance by sending pending groups of tests in" diff --git a/src/xdist/scheduler/__init__.py b/src/xdist/scheduler/__init__.py index b4894732..f11ef5ca 100644 --- a/src/xdist/scheduler/__init__.py +++ b/src/xdist/scheduler/__init__.py @@ -1,4 +1,5 @@ from xdist.scheduler.each import EachScheduling as EachScheduling +from xdist.scheduler.isoscope import IsoScopeScheduling as IsoScopeScheduling from xdist.scheduler.load import LoadScheduling as LoadScheduling from xdist.scheduler.loadfile import LoadFileScheduling as LoadFileScheduling from xdist.scheduler.loadgroup import LoadGroupScheduling as LoadGroupScheduling diff --git a/src/xdist/scheduler/isoscope.py b/src/xdist/scheduler/isoscope.py index 83ae10a4..4f132423 100644 --- a/src/xdist/scheduler/isoscope.py +++ b/src/xdist/scheduler/isoscope.py @@ -37,8 +37,7 @@ next test scope/class. 
Credits: -* Implementation of `_split_scope()` and public method documentation in - DistScopeIsoScheduling: +* Implementation of `_split_scope()` and public method documentation: - borrowed from the builtin `loadscope` scheduler """ # pylint: disable=too-many-lines from __future__ import annotations @@ -56,13 +55,13 @@ if TYPE_CHECKING: - from typing import Optional + from typing import NoReturn, Optional, Sequence from collections.abc import Generator, Iterable, ValuesView import xdist.remote from xdist.workermanage import WorkerController -class DistScopeIsoScheduling: # pylint: disable=too-many-instance-attributes +class IsoScopeScheduling: # pylint: disable=too-many-instance-attributes """Distributed Scope Isolation Scheduling: Implement scheduling across remote workers, distributing and executing one scope at a time, such that each scope is executed in isolation from tests in other scopes. @@ -111,13 +110,15 @@ class _State(str, enum.Enum): # `WAIT_READY_TO_ACTIVATE_SCOPE`. FENCE = 'FENCE' - def __init__(self, config: pytest.Config, - log: xdist.remote.Producer): + def __init__( + self, + config: pytest.Config, + log: xdist.remote.Producer): self._config = config self._log: xdist.remote.Producer = log.distscopeisosched # Current scheduling state - self._state: DistScopeIsoScheduling._State = \ + self._state: IsoScopeScheduling._State = \ self._State.WAIT_READY_TO_ACTIVATE_SCOPE # Scope ID of tests that are currently executing; `None` prior to the @@ -229,7 +230,7 @@ def has_pending(self) -> bool: return False - def add_node(self, node: WorkerController): + def add_node(self, node: WorkerController) -> None: """Add a new node to the scheduler's pending worker collection. The node will be activated and assigned tests to be executed only after @@ -316,8 +317,11 @@ def remove_node(self, node: WorkerController) -> Optional[str]: return crashed_test_id - def add_node_collection(self, node: WorkerController, - collection: list[str]): + def add_node_collection( + self, + node: WorkerController, + collection: Sequence[str] + ) -> None: """Register the collected test items from a Remote Worker node. If the official test collection has been established already, validate @@ -421,8 +425,12 @@ def add_node_collection(self, node: WorkerController, for test in shuffled_test_collection: self._workset_queue.add_test(test) - def mark_test_complete(self, node: WorkerController, item_index: int, - duration): + def mark_test_complete( + self, + node: WorkerController, + item_index: int, + duration: float + ) -> None: """Mark test item as completed by node and remove from pending tests in the worker and reschedule. @@ -444,11 +452,18 @@ def mark_test_complete(self, node: WorkerController, item_index: int, self._reschedule_workers() - def mark_test_pending(self, item): + def mark_test_pending(self, item: str) -> NoReturn: """Not supported""" raise NotImplementedError() - def schedule(self): + def remove_pending_tests_from_node( + self, + node: WorkerController, + indices: Sequence[int], + ) -> NoReturn: + raise NotImplementedError() + + def schedule(self) -> None: """Initiate distribution of the test collection. Initiate scheduling of the items across the nodes. If this gets called @@ -497,7 +512,7 @@ def _workers(self) -> Iterable[_WorkerProxy]: """ return self._worker_by_node.values() - def _reschedule_workers(self): + def _reschedule_workers(self) -> None: """Distribute work to workers if needed at this time. 
""" assert self._state is not None @@ -526,7 +541,7 @@ def _reschedule_workers(self): else: raise RuntimeError(f'Unhandled state: {self._state}') - def _handle_state_wait_ready_to_activate_scope(self): + def _handle_state_wait_ready_to_activate_scope(self) -> None: """Handle the `WAIT_READY_TO_ACTIVATE_SCOPE` state. Waiting for scheduler to be ready to distribute the next Scope. When @@ -576,7 +591,7 @@ def _handle_state_wait_ready_to_activate_scope(self): self._log(f'Transitioned from {str(previous_state)} to ' f'{str(self._state)}') - def _handle_state_activate_scope(self): + def _handle_state_activate_scope(self) -> None: """Handle the `ACTIVATE_SCOPE` state. Activate (i.e., distribute) tests from the next Scope, if any. If we @@ -620,7 +635,7 @@ def _handle_state_activate_scope(self): f'{str(self._state)}. ' f'Activated scope={self._active_scope_id}') - def _handle_state_wait_ready_to_fence(self): + def _handle_state_wait_ready_to_fence(self) -> None: """Handle the `WAIT_READY_TO_FENCE` state. Waiting for scheduler to be ready to fence the active (i.e., @@ -663,7 +678,7 @@ def _handle_state_wait_ready_to_fence(self): self._log(f'Transitioned from {str(previous_state)} to ' f'{str(self._state)}') - def _handle_state_fence(self): + def _handle_state_fence(self) -> None: """Handle the `FENCE` state. Fence the workers containing the final active-Scope tests in @@ -715,8 +730,11 @@ def _handle_state_fence(self): self._log(f'Transitioned from {str(previous_state)} to ' f'{str(self._state)}') - def _distribute_workset(self, workset: _ScopeWorkset, - workers: list[_WorkerProxy]): + def _distribute_workset( + self, + workset: _ScopeWorkset, + workers: list[_WorkerProxy] + ) -> None: """Distribute the tests in the given workset to the given workers. Adhere to the "at least two active-Scope tests per worker" Rule. @@ -817,8 +835,10 @@ def _distribute_workset(self, workset: _ScopeWorkset, f'workers: {workers[:num_workers_to_use]}.' @classmethod - def _generate_fence_items(cls, source_worksets: Iterable[_ScopeWorkset] - ) -> Generator[Optional[_TestProxy], None, None]: + def _generate_fence_items( + cls, + source_worksets: Iterable[_ScopeWorkset] + ) -> Generator[Optional[_TestProxy], None, None]: """Generator that withdraws (i.e., dequeues) Fence test items from the given ordered Scope Worksets and yields them until it runs out of the fence items per limits described below, and will thereafter yield @@ -921,7 +941,9 @@ def _get_max_workers_for_num_tests(num_tests: int) -> int: return num_tests // 2 def _get_workers_available_for_distribution( - self, scope_id: str) -> list[_WorkerProxy]: + self, + scope_id: str + ) -> list[_WorkerProxy]: """Return workers available for distribution of the given Scope. Available workers are non-shutting-down workers that either @@ -943,8 +965,10 @@ def _get_workers_available_for_distribution( or worker.tail_pending_test.scope_id == scope_id)) ] - def _get_workers_ready_for_fencing(self, scope_id: str - ) -> list[_WorkerProxy]: + def _get_workers_ready_for_fencing( + self, + scope_id: str + ) -> list[_WorkerProxy]: """Return workers that are ready to be Fenced for the given test Scope. 
A worker that needs to be Fenced satisfies all the following conditions: @@ -964,11 +988,12 @@ def _get_workers_ready_for_fencing(self, scope_id: str ] def _do_two_nodes_have_same_collection( - self, - reference_node: WorkerController, - reference_collection: tuple[str], - node: WorkerController, - collection: tuple[str]) -> bool: + self, + reference_node: WorkerController, + reference_collection: tuple[str], + node: WorkerController, + collection: tuple[str, ...] + ) -> bool: """ If collections differ, this method returns False while logging the collection differences and posting collection errors to @@ -1128,7 +1153,7 @@ def verbose_repr(self, verbose: bool = True) -> str: return ' '.join(items) - def run_some_tests(self, tests: Iterable[_TestProxy]): + def run_some_tests(self, tests: Iterable[_TestProxy]) -> None: """ Add given tests to the pending queue and send their indexes to the remote worker @@ -1136,7 +1161,7 @@ def run_some_tests(self, tests: Iterable[_TestProxy]): self._node.send_runtest_some([test.test_index for test in tests]) self._pending_test_by_index.update((t.test_index, t) for t in tests) - def handle_test_completion(self, test_index: int): + def handle_test_completion(self, test_index: int) -> None: """Remove completed test from the worker's pending tests. :param test_index: The stable index of the corresponding test. @@ -1162,7 +1187,7 @@ def release_pending_tests(self) -> list[_TestProxy]: self._pending_test_by_index.clear() return pending_tests - def shutdown(self): + def shutdown(self) -> None: """ Send the "shutdown" message to the remote worker. This will cause the remote worker to shut down after executing @@ -1199,7 +1224,7 @@ def __repr__(self): def scope_id(self) -> str: """Scope ID to which this test belongs. """ - return DistScopeIsoScheduling.split_scope(self.test_id) + return IsoScopeScheduling.split_scope(self.test_id) class _ScopeWorkset: @@ -1241,7 +1266,7 @@ def num_tests(self) -> int: """Number of tests in this workset""" return len(self._test_by_index) - def enqueue_test(self, test: _TestProxy): + def enqueue_test(self, test: _TestProxy) -> None: """Append given test to ordered test collection""" assert test.scope_id == self.scope_id, \ f'Wrong {test.scope_id=} for {self}' @@ -1310,7 +1335,7 @@ def worksets(self) -> ValuesView[_ScopeWorkset]: """ return self._workset_by_scope.values() - def add_test(self, test: _TestProxy): + def add_test(self, test: _TestProxy) -> None: """Adds given test to its Scope Workset, creating the corresponding workset as needed. Newly-created Worksets are always added at the end of the Workset Queue(appended). From 329f2182c67ad27a6a6e5c1c0b588874432bf006 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Fri, 6 Sep 2024 22:58:52 +0000 Subject: [PATCH 03/52] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/xdist/iso_scheduling_plugin.py | 225 +++++++------- src/xdist/iso_scheduling_utils.py | 61 ++-- src/xdist/scheduler/isoscope.py | 466 ++++++++++++++++------------- 3 files changed, 402 insertions(+), 350 deletions(-) diff --git a/src/xdist/iso_scheduling_plugin.py b/src/xdist/iso_scheduling_plugin.py index 8567d7c5..1da796ac 100644 --- a/src/xdist/iso_scheduling_plugin.py +++ b/src/xdist/iso_scheduling_plugin.py @@ -34,39 +34,38 @@ is not recommended: importing fixtures into a module will register them in pytest as defined in that module". 
""" + from __future__ import annotations import contextlib import functools -import logging import json +import logging import pathlib from typing import TYPE_CHECKING import filelock import pytest -from xdist.iso_scheduling_utils import ( - IsoSchedulingFixture, - DistributedSetupCoordinator, - DistributedSetupContext, - DistributedTeardownContext, - CoordinationTimeoutError -) +from xdist.iso_scheduling_utils import CoordinationTimeoutError +from xdist.iso_scheduling_utils import DistributedSetupContext +from xdist.iso_scheduling_utils import DistributedSetupCoordinator +from xdist.iso_scheduling_utils import DistributedTeardownContext +from xdist.iso_scheduling_utils import IsoSchedulingFixture + if TYPE_CHECKING: - from collections.abc import Callable, Generator + from collections.abc import Callable + from collections.abc import Generator from typing import Optional _LOGGER = logging.getLogger(__name__) -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def iso_scheduling( - tmp_path_factory: pytest.TempPathFactory, - testrun_uid: str, - worker_id: str + tmp_path_factory: pytest.TempPathFactory, testrun_uid: str, worker_id: str ) -> IsoSchedulingFixture: """A session-scoped pytest fixture for coordinating setup/teardown of test scope/class which is executing under isoscope scheduling. @@ -154,9 +153,9 @@ def test_case3(self) yields an instance of `DistributedSetupCoordinator` for the current Pytest Session. """ - return _IsoSchedulingFixtureImpl(tmp_path_factory=tmp_path_factory, - testrun_uid=testrun_uid, - worker_id=worker_id) + return _IsoSchedulingFixtureImpl( + tmp_path_factory=tmp_path_factory, testrun_uid=testrun_uid, worker_id=worker_id + ) class _IsoSchedulingFixtureImpl(IsoSchedulingFixture): @@ -166,12 +165,12 @@ class _IsoSchedulingFixtureImpl(IsoSchedulingFixture): An instance of _IsoSchedulingFixtureImpl is returned by our pytest fixture `iso_scheduling`. """ + # pylint: disable=too-few-public-methods - def __init__(self, - tmp_path_factory: pytest.TempPathFactory, - testrun_uid: str, - worker_id: str): + def __init__( + self, tmp_path_factory: pytest.TempPathFactory, testrun_uid: str, worker_id: str + ): """ :param tmp_path_factory: pytest interface for temporary directories. :param testrun_uid: Unique id of the current test run. This value is @@ -186,8 +185,7 @@ def __init__(self, @contextlib.contextmanager def coordinate_setup_teardown( - self, - setup_request: pytest.FixtureRequest + self, setup_request: pytest.FixtureRequest ) -> Generator[DistributedSetupCoordinator, None, None]: """Context manager that yields an instance of `DistributedSetupCoordinator` for distributed coordination of Setup @@ -204,7 +202,8 @@ def coordinate_setup_teardown( setup_request=setup_request, tmp_path_factory=self._tmp_path_factory, testrun_uid=self._testrun_uid, - worker_id=self._worker_id) + worker_id=self._worker_id, + ) # Yield control to the managed code block yield coordinator @@ -221,13 +220,16 @@ class _DistributedSetupCoordinatorImpl(DistributedSetupCoordinator): `iso_scheduling` fixture instead! 
""" - _DISTRIBUTED_SETUP_ROOT_DIR_LINK_NAME = 'distributed_setup' - def __init__(self, - setup_request: pytest.FixtureRequest, - tmp_path_factory: pytest.TempPathFactory, - testrun_uid: str, - worker_id: str): + _DISTRIBUTED_SETUP_ROOT_DIR_LINK_NAME = "distributed_setup" + + def __init__( + self, + setup_request: pytest.FixtureRequest, + tmp_path_factory: pytest.TempPathFactory, + testrun_uid: str, + worker_id: str, + ): """ :param setup_request: Value of the pytest `request` fixture obtained directly by the calling setup-teardown fixture. @@ -246,9 +248,10 @@ def __init__(self, # directory. `tmp_path_factory.getbasetemp().parent` is common to all # workers in the current PyTest test run. self._root_context_base_dir: pathlib.Path = ( - tmp_path_factory.getbasetemp().parent - / self._DISTRIBUTED_SETUP_ROOT_DIR_LINK_NAME - / testrun_uid) + tmp_path_factory.getbasetemp().parent + / self._DISTRIBUTED_SETUP_ROOT_DIR_LINK_NAME + / testrun_uid + ) self._worker_id: str = worker_id @@ -258,7 +261,7 @@ def __init__(self, def maybe_call_setup( self, setup_callback: Callable[[DistributedSetupContext], None], - timeout: float = DistributedSetupCoordinator.DEFAULT_TIMEOUT_SEC + timeout: float = DistributedSetupCoordinator.DEFAULT_TIMEOUT_SEC, ) -> None: """Invoke the Setup callback only if distributed setup has not been performed yet from any other XDist worker for your test scope. @@ -284,8 +287,9 @@ def maybe_call_setup( """ # `maybe_call_setup()` may be called only once per instance of # `_SetupCoordinator` - assert self._setup_context is None, \ - f'maybe_call_setup()` already called {self._setup_context=}' + assert ( + self._setup_context is None + ), f"maybe_call_setup()` already called {self._setup_context=}" node_path = self._setup_request.node.path @@ -296,10 +300,11 @@ def maybe_call_setup( ) with _DistributedSetupCoordinationImpl.acquire_distributed_setup( - root_context_dir=root_context_dir, - worker_id=self._worker_id, - setup_request=self._setup_request, - timeout=timeout) as setup_context: + root_context_dir=root_context_dir, + worker_id=self._worker_id, + setup_request=self._setup_request, + timeout=timeout, + ) as setup_context: self._setup_context = setup_context if self._setup_context.distributed_setup_allowed: setup_callback(self._setup_context) @@ -307,7 +312,7 @@ def maybe_call_setup( def maybe_call_teardown( self, teardown_callback: Callable[[DistributedTeardownContext], None], - timeout: float = DistributedSetupCoordinator.DEFAULT_TIMEOUT_SEC + timeout: float = DistributedSetupCoordinator.DEFAULT_TIMEOUT_SEC, ) -> None: """Invoke the Teardown callback only in when called in the context of the final XDist Worker process to have finished the execution of the @@ -331,34 +336,36 @@ def maybe_call_teardown( """ # Make sure `maybe_call_setup()` was already called on this instance # of `_SetupCoordinator` - assert self._setup_context is not None, \ - f'maybe_call_setup() not called yet {self._setup_context=}' + assert ( + self._setup_context is not None + ), f"maybe_call_setup() not called yet {self._setup_context=}" # Make sure `maybe_call_teardown()` hasn't been called on this instance # of `_SetupCoordinator` yet - assert self._teardown_context is None, \ - f'maybe_call_teardown() already called {self._teardown_context=}' + assert ( + self._teardown_context is None + ), f"maybe_call_teardown() already called {self._teardown_context=}" with _DistributedSetupCoordinationImpl.acquire_distributed_teardown( - setup_context=self._setup_context, - timeout=timeout) as teardown_context: + 
setup_context=self._setup_context, timeout=timeout + ) as teardown_context: self._teardown_context = teardown_context if self._teardown_context.distributed_teardown_allowed: teardown_callback(self._teardown_context) def _map_file_lock_exception(f: Callable): - """Decorator: map `FileLock` exceptions of interest to our own exceptions. - """ + """Decorator: map `FileLock` exceptions of interest to our own exceptions.""" + @functools.wraps(f) def wrapper(*args, **kwargs): try: return f(*args, **kwargs) except filelock.Timeout as err: raise CoordinationTimeoutError( - f'Another instance of this test scope/class is holding the ' - f'lock too long or timeout value is too short: {err}') \ - from err + f"Another instance of this test scope/class is holding the " + f"lock too long or timeout value is too short: {err}" + ) from err return wrapper @@ -367,25 +374,27 @@ class _DistributedSetupCoordinationImpl: """Low-level implementation of Context Managers for Coordinating Distributed Setup and Teardown for users of isoscope scheduling. """ - _ROOT_STATE_FILE_NAME = 'root_state.json' - _ROOT_LOCK_FILE_NAME = 'lock' + + _ROOT_STATE_FILE_NAME = "root_state.json" + _ROOT_LOCK_FILE_NAME = "lock" class DistributedState: - """State of the Distributed Setup-Teardown Coordination. - """ + """State of the Distributed Setup-Teardown Coordination.""" + def __init__(self, setup_count, teardown_count): self.setup_count = setup_count self.teardown_count = teardown_count def __repr__(self): - return f'<{self.__class__.__qualname__}: ' \ - f'setup_count={self.setup_count}; ' \ - f'teardown_count={self.teardown_count}>' + return ( + f"<{self.__class__.__qualname__}: " + f"setup_count={self.setup_count}; " + f"teardown_count={self.teardown_count}>" + ) @classmethod def load_from_file_path( - cls, - state_file_path: pathlib.Path + cls, state_file_path: pathlib.Path ) -> _DistributedSetupCoordinationImpl.DistributedState: """Load the state instance from the given file path. @@ -408,8 +417,8 @@ def as_json_kwargs_dict(self) -> dict: ``` """ return { - 'setup_count': self.setup_count, - 'teardown_count': self.teardown_count + "setup_count": self.setup_count, + "teardown_count": self.teardown_count, } def save_to_file_path(self, state_file_path: pathlib.Path) -> None: @@ -428,7 +437,7 @@ def acquire_distributed_setup( root_context_dir: pathlib.Path, worker_id: str, setup_request: pytest.FixtureRequest, - timeout: float + timeout: float, ) -> Generator[DistributedSetupContext, None, None]: """Low-level implementation of Context Manager for Coordinating Distributed Setup for isoscope scheduling. 
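For orientation, the coordination pattern used by `acquire_distributed_setup()` boils down to a `filelock.FileLock` guarding a small JSON state file: every worker increments `setup_count` under the lock, and only the first one to do so is allowed to run the real setup. A minimal standalone sketch, with illustrative file names and timeout (not code from this patch):

```
from __future__ import annotations

import json
import pathlib
from typing import Callable

import filelock


def maybe_call_setup(root: pathlib.Path, setup_callback: Callable[[], None]) -> None:
    """Run setup_callback() in exactly one process of the group."""
    root.mkdir(parents=True, exist_ok=True)
    state_path = root / "root_state.json"
    # The lock file serializes access to the shared state across processes.
    with filelock.FileLock(str(root / "lock"), timeout=30):
        if state_path.is_file():
            state = json.loads(state_path.read_text())
        else:
            state = {"setup_count": 0, "teardown_count": 0}
        state["setup_count"] += 1
        if state["setup_count"] == 1:
            # The first worker to arrive performs the distributed setup.
            setup_callback()
        state_path.write_text(json.dumps(state))
```

The implementation above additionally maps lock timeouts to `CoordinationTimeoutError` through the `_map_file_lock_exception` decorator.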
@@ -450,32 +459,33 @@ def acquire_distributed_setup( setup_allowed=False, root_context_dir=root_context_dir, worker_id=worker_id, - setup_request=setup_request) + setup_request=setup_request, + ) state_file_path = cls._get_root_state_file_path(root_context_dir) # Acquire resource with filelock.FileLock( - str(cls._get_root_lock_file_path(root_context_dir)), - timeout=timeout): + str(cls._get_root_lock_file_path(root_context_dir)), timeout=timeout + ): if state_file_path.is_file(): - state = cls.DistributedState.load_from_file_path( - state_file_path) + state = cls.DistributedState.load_from_file_path(state_file_path) # We never save state with setup_count <= 0 - assert state.setup_count > 0, \ - f'acquire_distributed_setup: non-positive setup ' \ - f'count read from state file - {state_file_path=}; ' \ - f'{worker_id=}; {state}' + assert state.setup_count > 0, ( + f"acquire_distributed_setup: non-positive setup " + f"count read from state file - {state_file_path=}; " + f"{worker_id=}; {state}" + ) # No Teardowns should be executing before all Setups # complete - assert state.teardown_count == 0, \ - f'acquire_distributed_setup: non-zero teardown ' \ - f'count read from state file - {state_file_path=}; ' \ - f'{worker_id=}; {state}' + assert state.teardown_count == 0, ( + f"acquire_distributed_setup: non-zero teardown " + f"count read from state file - {state_file_path=}; " + f"{worker_id=}; {state}" + ) else: # State file not created yet - state = cls.DistributedState(setup_count=0, - teardown_count=0) + state = cls.DistributedState(setup_count=0, teardown_count=0) state.setup_count += 1 @@ -485,8 +495,9 @@ def acquire_distributed_setup( # Yield control to the managed code block # _LOGGER.info( # pylint: disable=logging-fstring-interpolation - f'acquire_distributed_setup: yielding control to ' - f'managed block - {worker_id=}; {setup_context=}') + f"acquire_distributed_setup: yielding control to " + f"managed block - {worker_id=}; {setup_context=}" + ) yield setup_context # @@ -501,9 +512,7 @@ def acquire_distributed_setup( @contextlib.contextmanager @_map_file_lock_exception def acquire_distributed_teardown( - cls, - setup_context: DistributedSetupContext, - timeout: float + cls, setup_context: DistributedSetupContext, timeout: float ) -> Generator[DistributedTeardownContext, None, None]: """Low-level implementation of Context Manager for Coordinating Distributed Teardown for the isoscope scheduling. 
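The teardown side, implemented in `acquire_distributed_teardown()` below, mirrors the same counting scheme: each worker increments `teardown_count` under the lock, and only the worker that brings it level with `setup_count` runs the real teardown. A condensed standalone sketch using the same illustrative state file as above:

```
from __future__ import annotations

import json
import pathlib
from typing import Callable

import filelock


def maybe_call_teardown(root: pathlib.Path, teardown_callback: Callable[[], None]) -> None:
    """Run teardown_callback() only in the last process to check in."""
    state_path = root / "root_state.json"
    with filelock.FileLock(str(root / "lock"), timeout=30):
        # The setup sketch above has already created the state file.
        state = json.loads(state_path.read_text())
        state["teardown_count"] += 1
        if state["teardown_count"] == state["setup_count"]:
            # The last worker to finish performs the distributed teardown.
            teardown_callback()
        state_path.write_text(json.dumps(state))
```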
@@ -519,8 +528,8 @@ def acquire_distributed_teardown( # Before control passes to the managed code block # teardown_context = DistributedTeardownContext( - teardown_allowed=False, - setup_context=setup_context) + teardown_allowed=False, setup_context=setup_context + ) # NOTE: Friend-of-class protected member access root_context_dir = teardown_context._root_context_dir # pylint: disable=protected-access @@ -531,36 +540,40 @@ def acquire_distributed_teardown( # Acquire resource with filelock.FileLock( - str(cls._get_root_lock_file_path(root_context_dir)), - timeout=timeout): + str(cls._get_root_lock_file_path(root_context_dir)), timeout=timeout + ): if state_file_path.is_file(): - state = cls.DistributedState.load_from_file_path( - state_file_path) + state = cls.DistributedState.load_from_file_path(state_file_path) assert state.setup_count > 0, ( - f'acquire_distributed_teardown: non-positive ' - f'setup_count read from state file - {state_file_path=}; ' - f'{worker_id=}; {state.setup_count=} <= 0; {state}') + f"acquire_distributed_teardown: non-positive " + f"setup_count read from state file - {state_file_path=}; " + f"{worker_id=}; {state.setup_count=} <= 0; {state}" + ) assert state.teardown_count < state.setup_count, ( - f'acquire_distributed_teardown: teardown_count ' - f'already >= setup_count read from state file - ' - f'{state_file_path=}; {worker_id=}; ' - f'{state.teardown_count=} >= {state.setup_count=}') + f"acquire_distributed_teardown: teardown_count " + f"already >= setup_count read from state file - " + f"{state_file_path=}; {worker_id=}; " + f"{state.teardown_count=} >= {state.setup_count=}" + ) else: raise RuntimeError( - f'acquire_distributed_teardown: state file not found: ' - f'{state_file_path=}; {worker_id=}') + f"acquire_distributed_teardown: state file not found: " + f"{state_file_path=}; {worker_id=}" + ) state.teardown_count += 1 teardown_context.distributed_teardown_allowed = ( - state.teardown_count == state.setup_count) + state.teardown_count == state.setup_count + ) # # Yield control to the managed code block # _LOGGER.info( # pylint: disable=logging-fstring-interpolation - f'acquire_distributed_teardown: yielding control to ' - f'managed block - {worker_id=}; {teardown_context=}') + f"acquire_distributed_teardown: yielding control to " + f"managed block - {worker_id=}; {teardown_context=}" + ) yield teardown_context # @@ -572,10 +585,7 @@ def acquire_distributed_teardown( state.save_to_file_path(state_file_path) @classmethod - def _get_root_state_file_path( - cls, - root_state_dir: pathlib.Path - ) -> pathlib.Path: + def _get_root_state_file_path(cls, root_state_dir: pathlib.Path) -> pathlib.Path: """Return the path of the file for storing the root state, creating all parent directories if they don't exist yet. @@ -586,10 +596,7 @@ def _get_root_state_file_path( return root_state_dir / cls._ROOT_STATE_FILE_NAME @classmethod - def _get_root_lock_file_path( - cls, - root_lock_dir: pathlib.Path - ) -> pathlib.Path: + def _get_root_lock_file_path(cls, root_lock_dir: pathlib.Path) -> pathlib.Path: """Return the path of the lock file, creating all parent directories if they don't exist yet. diff --git a/src/xdist/iso_scheduling_utils.py b/src/xdist/iso_scheduling_utils.py index 3d61883d..5f0a3ead 100644 --- a/src/xdist/iso_scheduling_utils.py +++ b/src/xdist/iso_scheduling_utils.py @@ -28,14 +28,18 @@ See also `iso_scheduling_plugin.py` for fixtures specific to isoscope scheduling. 
""" + from __future__ import annotations import abc import pathlib from typing import TYPE_CHECKING + if TYPE_CHECKING: - from collections.abc import Callable, Generator + from collections.abc import Callable + from collections.abc import Generator + import pytest @@ -51,12 +55,12 @@ class IsoSchedulingFixture(abc.ABC): which yields an instance of the implementation of the `DistributedSetupCoordinator` interface. """ + # pylint: disable=too-few-public-methods @abc.abstractmethod def coordinate_setup_teardown( - self, - setup_request: pytest.FixtureRequest + self, setup_request: pytest.FixtureRequest ) -> Generator[DistributedSetupCoordinator, None, None]: """Context manager that yields an instance of `DistributedSetupCoordinator` for distributed coordination of Setup @@ -155,7 +159,7 @@ def test_case3(self) def maybe_call_setup( self, setup_callback: Callable[[DistributedSetupContext], None], - timeout: float = DEFAULT_TIMEOUT_SEC + timeout: float = DEFAULT_TIMEOUT_SEC, ) -> None: """Invoke the Setup callback only if distributed setup has not been performed yet from any other XDist worker for your test scope. @@ -185,7 +189,7 @@ def maybe_call_setup( def maybe_call_teardown( self, teardown_callback: Callable[[DistributedTeardownContext], None], - timeout: float = DEFAULT_TIMEOUT_SEC + timeout: float = DEFAULT_TIMEOUT_SEC, ) -> None: """Invoke the Teardown callback only in when called in the context of the final XDist Worker process to have finished the execution of the @@ -211,13 +215,13 @@ def maybe_call_teardown( class _DistributedSetupTeardownContextMixin: # pylint: disable=too-few-public-methods - """Mixin for `DistributedSetupContext` and DistributedTeardownContext`. - """ + """Mixin for `DistributedSetupContext` and DistributedTeardownContext`.""" + # Expected instance members in derived class _root_context_dir: pathlib.Path _setup_node_name: str - _CLIENT_SUBDIRECTORY_LINK = 'client-workspace' + _CLIENT_SUBDIRECTORY_LINK = "client-workspace" @property def client_dir(self) -> pathlib.Path: @@ -226,8 +230,7 @@ def client_dir(self) -> pathlib.Path: client-specific state, creating the directory if not already created. """ - client_dir_path = (self._root_context_dir - / self._CLIENT_SUBDIRECTORY_LINK) + client_dir_path = self._root_context_dir / self._CLIENT_SUBDIRECTORY_LINK client_dir_path.mkdir(parents=True, exist_ok=True) return client_dir_path @@ -238,11 +241,13 @@ class DistributedSetupContext(_DistributedSetupTeardownContextMixin): manager. """ - def __init__(self, - setup_allowed: bool, - root_context_dir: pathlib.Path, - worker_id: str, - setup_request: pytest.FixtureRequest): + def __init__( + self, + setup_allowed: bool, + root_context_dir: pathlib.Path, + worker_id: str, + setup_request: pytest.FixtureRequest, + ): """ :param setup_allowed: Whether distributed setup may be performed by the current process. @@ -272,11 +277,12 @@ def __init__(self, def __repr__(self) -> str: return ( - f'< {self.__class__.__name__}: ' - f'node_name={self._setup_node_name}; ' - f'setup_allowed={self.distributed_setup_allowed}; ' - f'worker_id={self.worker_id}; ' - f'client_dir={self.client_dir} >') + f"< {self.__class__.__name__}: " + f"node_name={self._setup_node_name}; " + f"setup_allowed={self.distributed_setup_allowed}; " + f"worker_id={self.worker_id}; " + f"client_dir={self.client_dir} >" + ) class DistributedTeardownContext(_DistributedSetupTeardownContextMixin): @@ -284,9 +290,7 @@ class DistributedTeardownContext(_DistributedSetupTeardownContextMixin): manager. 
""" - def __init__(self, - teardown_allowed: bool, - setup_context: DistributedSetupContext): + def __init__(self, teardown_allowed: bool, setup_context: DistributedSetupContext): """ :param teardown_allowed: Whether Distributed Teardown may be performed by the current process. @@ -311,8 +315,9 @@ def __init__(self, def __repr__(self) -> str: return ( - f'< {self.__class__.__name__}: ' - f'node_name={self._setup_node_name}; ' - f'teardown_allowed={self.distributed_teardown_allowed}; ' - f'worker_id={self.worker_id}; ' - f'client_dir={self.client_dir} >') + f"< {self.__class__.__name__}: " + f"node_name={self._setup_node_name}; " + f"teardown_allowed={self.distributed_teardown_allowed}; " + f"worker_id={self.worker_id}; " + f"client_dir={self.client_dir} >" + ) diff --git a/src/xdist/scheduler/isoscope.py b/src/xdist/scheduler/isoscope.py index 4f132423..b365415e 100644 --- a/src/xdist/scheduler/isoscope.py +++ b/src/xdist/scheduler/isoscope.py @@ -27,10 +27,10 @@ Properties of this scheduler: 1. Executes one test scope/class at a time. - 2. Distributes tests of the executing scope/class to the configured XDist + 2. Distributes tests of the executing scope/class to the configured XDist Workers. 3. Guarantees that the Setup of the executing scope/class completes in all - XDist Workers BEFORE any of those Workers start processing the + XDist Workers BEFORE any of those Workers start processing the Teardown of that test scope/class. 4. Guarantees that the Teardown phase of the executing test scope/class completes in all XDist Workers before the Setup phase begins for the @@ -40,6 +40,7 @@ * Implementation of `_split_scope()` and public method documentation: - borrowed from the builtin `loadscope` scheduler """ # pylint: disable=too-many-lines + from __future__ import annotations from collections import OrderedDict @@ -48,15 +49,21 @@ import random from typing import TYPE_CHECKING -import pytest from _pytest.runner import CollectReport +import pytest + from xdist.report import report_collection_diff from xdist.workermanage import parse_spec_config if TYPE_CHECKING: - from typing import NoReturn, Optional, Sequence - from collections.abc import Generator, Iterable, ValuesView + from collections.abc import Generator + from collections.abc import Iterable + from collections.abc import ValuesView + from typing import NoReturn + from typing import Optional + from typing import Sequence + import xdist.remote from xdist.workermanage import WorkerController @@ -72,13 +79,14 @@ class IsoScopeScheduling: # pylint: disable=too-many-instance-attributes once per scope (vs. per worker) using `FileLock` or similar for coordination. """ + class _State(str, enum.Enum): # Waiting for scheduler to be ready to distribute the next Scope. When # the Workset Queue is NOT empty AND all workers which are shutting down # reach zero pending tests AND all other workers have no more than one # pending tests AND at least one worker is available for the distribution # of the next scope, then transition to `ACTIVATE_SCOPE` - WAIT_READY_TO_ACTIVATE_SCOPE = 'WAIT-READY-TO-ACTIVATE-SCOPE' + WAIT_READY_TO_ACTIVATE_SCOPE = "WAIT-READY-TO-ACTIVATE-SCOPE" # Activate (i.e., distribute) tests from the next Scope, if any. If a # scope was distributed, then transition to `WAIT_READY_TO_FENCE`. @@ -87,7 +95,7 @@ class _State(str, enum.Enum): # which are not shutting down. 
Workers with matching fence tests have # priority over empty workers (to satisfy that "at least two # active-Scope tests per worker" Rule) - ACTIVATE_SCOPE = 'ACTIVATE-SCOPE' + ACTIVATE_SCOPE = "ACTIVATE-SCOPE" # Waiting for scheduler to be ready to fence the active (i.e., # distributed) scope. Wait until each non-empty worker has only one @@ -98,7 +106,7 @@ class _State(str, enum.Enum): # Scope, then reset current active scope and transition to # `WAIT-READY-TO-ACTIVATE-SCOPE` (this means that all workers containing # active-Scope tests crashed) - WAIT_READY_TO_FENCE = 'WAIT-READY-TO-FENCE' + WAIT_READY_TO_FENCE = "WAIT-READY-TO-FENCE" # Fence the workers containing the final active-Scope tests in # order to allow those final pending tests to complete execution. Fence @@ -108,18 +116,16 @@ class _State(str, enum.Enum): # out of tests for fencing, then send "shutdown" to the balance of those # workers instead of a fence test. Finally, transition to # `WAIT_READY_TO_ACTIVATE_SCOPE`. - FENCE = 'FENCE' + FENCE = "FENCE" - def __init__( - self, - config: pytest.Config, - log: xdist.remote.Producer): + def __init__(self, config: pytest.Config, log: xdist.remote.Producer): self._config = config self._log: xdist.remote.Producer = log.distscopeisosched # Current scheduling state - self._state: IsoScopeScheduling._State = \ + self._state: IsoScopeScheduling._State = ( self._State.WAIT_READY_TO_ACTIVATE_SCOPE + ) # Scope ID of tests that are currently executing; `None` prior to the # initial distribution @@ -153,8 +159,9 @@ def __init__( # `_WorkerProxy` instances. Initially empty, it will be populated by # our `add_node_collection` implementation as it's called by xdist's # `DSession` and the corresponding test collection passes validation. - self._worker_by_node: \ - OrderedDict[WorkerController, _WorkerProxy] = OrderedDict() + self._worker_by_node: OrderedDict[WorkerController, _WorkerProxy] = ( + OrderedDict() + ) # Workers pending validation of their Test collections that have not # been admitted to `_worker_by_node` yet. @@ -171,8 +178,9 @@ def __init__( # # A worker is removed from `_pending_worker_by_node` when the xdist # controller invokes `remove_node()` with the corresponding node. - self._pending_worker_by_node: \ - OrderedDict[WorkerController, _WorkerProxy] = OrderedDict() + self._pending_worker_by_node: OrderedDict[WorkerController, _WorkerProxy] = ( + OrderedDict() + ) @property def nodes(self) -> list[WorkerController]: @@ -180,8 +188,9 @@ def nodes(self) -> list[WorkerController]: Called by xdist `DSession`. """ - return (list(self._worker_by_node.keys()) - + list(self._pending_worker_by_node.keys())) + return list(self._worker_by_node.keys()) + list( + self._pending_worker_by_node.keys() + ) @property def collection_is_completed(self) -> bool: @@ -240,10 +249,11 @@ def add_node(self, node: WorkerController) -> None: Called by the ``DSession.worker_workerready`` hook - when it successfully bootstraps a new remote worker. """ - self._log(f'Registering remote worker node {node}') + self._log(f"Registering remote worker node {node}") - assert node not in self._pending_worker_by_node, \ - f'{node=} already in pending workers' + assert ( + node not in self._pending_worker_by_node + ), f"{node=} already in pending workers" self._pending_worker_by_node[node] = _WorkerProxy(node) @@ -274,14 +284,15 @@ def remove_node(self, node: WorkerController) -> Optional[str]: :raise KeyError: if the Remote Worker node has not been registered with the scheduler. 
(NOTE: xdist's `DSession` expects this behavior) """ - self._log(f'Removing remote worker node {node}') + self._log(f"Removing remote worker node {node}") if node in self._pending_worker_by_node: # Worker was not admitted to active workers yet, remove it from the # pending worker collection. self._pending_worker_by_node.pop(node) - assert node not in self._worker_by_node, \ - f'{node=} in both pending and active workers' + assert ( + node not in self._worker_by_node + ), f"{node=} in both pending and active workers" return None # Worker was admitted to active workers already @@ -302,14 +313,16 @@ def remove_node(self, node: WorkerController) -> Optional[str]: first_pending_test = worker.head_pending_test crashed_test_id = first_pending_test.test_id - self._log(f'Remote Worker {repr(worker)} shut down ungracefully. It ' - f'may have crashed while executing the pending test ' - f'{first_pending_test}. ' - f'NOTE: The ungraceful shutdown may create an imbalance ' - f'between the execution of the setup and teardown ' - f'fixture(s). THIS MAY LEAVE THE SYSTEM UNDER TEST IN AN ' - f'UNEXPECTED STATE, COMPROMISING EXECUTION OF ALL SUBSEQUENT ' - f'TESTS IN CURRENT AND FUTURE SESSIONS.') + self._log( + f"Remote Worker {worker!r} shut down ungracefully. It " + f"may have crashed while executing the pending test " + f"{first_pending_test}. " + f"NOTE: The ungraceful shutdown may create an imbalance " + f"between the execution of the setup and teardown " + f"fixture(s). THIS MAY LEAVE THE SYSTEM UNDER TEST IN AN " + f"UNEXPECTED STATE, COMPROMISING EXECUTION OF ALL SUBSEQUENT " + f"TESTS IN CURRENT AND FUTURE SESSIONS." + ) # Return the pending tests back to the workset queue for test in worker.release_pending_tests(): @@ -318,9 +331,7 @@ def remove_node(self, node: WorkerController) -> Optional[str]: return crashed_test_id def add_node_collection( - self, - node: WorkerController, - collection: Sequence[str] + self, node: WorkerController, collection: Sequence[str] ) -> None: """Register the collected test items from a Remote Worker node. @@ -340,12 +351,12 @@ def add_node_collection( - ``DSession.worker_collectionfinish``. 
""" - self._log(f'Adding collection for node {node}: {len(collection)=}') + self._log(f"Adding collection for node {node}: {len(collection)=}") # Check that add_node() was called on the node before - assert node in self._pending_worker_by_node, \ - f'Received test collection for {node=} which is not in pending ' \ - f'workers' + assert node in self._pending_worker_by_node, ( + f"Received test collection for {node=} which is not in pending " f"workers" + ) worker = self._pending_worker_by_node[node] @@ -357,10 +368,11 @@ def add_node_collection( # Check that the new collection matches the official collection if self._do_two_nodes_have_same_collection( - reference_node=self._official_test_collection_node, - reference_collection=self._official_test_collection, - node=node, - collection=collection): + reference_node=self._official_test_collection_node, + reference_collection=self._official_test_collection, + node=node, + collection=collection, + ): # The worker's collection is valid, so activate the new worker self._pending_worker_by_node.pop(node) self._worker_by_node[node] = worker @@ -376,8 +388,8 @@ def add_node_collection( # Get all pending workers with registered test collection w: _WorkerProxy workers_with_collection = [ - w for w in self._pending_worker_by_node.values() - if w.collection is not None] + w for w in self._pending_worker_by_node.values() if w.collection is not None + ] if len(workers_with_collection) < self._expected_num_workers: # Not enough test collections registered yet @@ -388,15 +400,15 @@ def add_node_collection( reference_worker = workers_with_collection[0] for pending_worker in workers_with_collection[1:]: if not self._do_two_nodes_have_same_collection( - reference_node=reference_worker.node, - reference_collection=reference_worker.collection, - node=pending_worker.node, - collection=pending_worker.collection): + reference_node=reference_worker.node, + reference_collection=reference_worker.collection, + node=pending_worker.node, + collection=pending_worker.collection, + ): same_collection = False if not same_collection: - self._log( - '**Different tests collected, aborting worker activation**') + self._log("**Different tests collected, aborting worker activation**") return # Collections are identical! @@ -417,8 +429,8 @@ def add_node_collection( # particularly slow) all_tests = [ _TestProxy(test_id=test_id, test_index=test_index) - for test_index, test_id - in enumerate(self._official_test_collection)] + for test_index, test_id in enumerate(self._official_test_collection) + ] shuffled_test_collection = random.sample(all_tests, k=len(all_tests)) # Organize tests into a queue of worksets grouped by test scope ID @@ -426,10 +438,7 @@ def add_node_collection( self._workset_queue.add_test(test) def mark_test_complete( - self, - node: WorkerController, - item_index: int, - duration: float + self, node: WorkerController, item_index: int, duration: float ) -> None: """Mark test item as completed by node and remove from pending tests in the worker and reschedule. @@ -444,9 +453,11 @@ def mark_test_complete( worker = self._worker_by_node[node] if self._log.enabled: - self._log(f'Marking test complete: ' - f'test_id={self._official_test_collection[item_index]}; ' - f'{item_index=}; {worker}') + self._log( + f"Marking test complete: " + f"test_id={self._official_test_collection[item_index]}; " + f"{item_index=}; {worker}" + ) worker.handle_test_completion(test_index=item_index) @@ -474,8 +485,9 @@ def schedule(self) -> None: - ``DSession.worker_collectionfinish``. 
""" - assert self.collection_is_completed, \ - 'schedule() called before test collection completed' + assert ( + self.collection_is_completed + ), "schedule() called before test collection completed" # Test collection has been completed, so reschedule if needed self._reschedule_workers() @@ -503,7 +515,7 @@ def split_scope(test_id: str) -> str: example/loadsuite/test/test_delta.py::Delta1 example/loadsuite/epsilon/__init__.py """ - return test_id.rsplit('::', 1)[0] + return test_id.rsplit("::", 1)[0] @property def _workers(self) -> Iterable[_WorkerProxy]: @@ -513,8 +525,7 @@ def _workers(self) -> Iterable[_WorkerProxy]: return self._worker_by_node.values() def _reschedule_workers(self) -> None: - """Distribute work to workers if needed at this time. - """ + """Distribute work to workers if needed at this time.""" assert self._state is not None traversed_states = [] @@ -523,9 +534,10 @@ def _reschedule_workers(self) -> None: # NOTE: This loop will terminate because completion of tests and # worker availability are reported outside the scope of this # function, and our state transitions are limited by those factors - assert len(traversed_states) <= len(self._State), \ - f'Too many traversed states - {len(traversed_states)}: ' \ - f'{traversed_states}' + assert len(traversed_states) <= len(self._State), ( + f"Too many traversed states - {len(traversed_states)}: " + f"{traversed_states}" + ) traversed_states.append(self._state) previous_state = self._state @@ -539,7 +551,7 @@ def _reschedule_workers(self) -> None: elif self._state is self._State.FENCE: self._handle_state_fence() else: - raise RuntimeError(f'Unhandled state: {self._state}') + raise RuntimeError(f"Unhandled state: {self._state}") def _handle_state_wait_ready_to_activate_scope(self) -> None: """Handle the `WAIT_READY_TO_ACTIVATE_SCOPE` state. @@ -550,8 +562,9 @@ def _handle_state_wait_ready_to_activate_scope(self) -> None: pending tests AND at least one worker is available for the distribution of the next scope, then transition to `ACTIVATE_SCOPE` """ - assert self._state is self._State.WAIT_READY_TO_ACTIVATE_SCOPE, \ - f'{self._state=} != {self._State.WAIT_READY_TO_ACTIVATE_SCOPE}' + assert ( + self._state is self._State.WAIT_READY_TO_ACTIVATE_SCOPE + ), f"{self._state=} != {self._State.WAIT_READY_TO_ACTIVATE_SCOPE}" if self._workset_queue.empty: # No more scopes are available @@ -575,21 +588,21 @@ def _handle_state_wait_ready_to_activate_scope(self) -> None: # session. next_scope_id = self._workset_queue.head_workset.scope_id - if not self._get_workers_available_for_distribution( - scope_id=next_scope_id): + if not self._get_workers_available_for_distribution(scope_id=next_scope_id): # No workers are available for distribution of the next scope. # It appears that some workers have crashed. xdist will either # replace crashed workers or terminate the session. if self._log.enabled: - self._log(f'No workers are available for {next_scope_id=}, ' - f'they likely crashed; staying in {self._state=}') + self._log( + f"No workers are available for {next_scope_id=}, " + f"they likely crashed; staying in {self._state=}" + ) return # Conditions are satisfied for transition to next state previous_state = self._state self._state = self._State.ACTIVATE_SCOPE - self._log(f'Transitioned from {str(previous_state)} to ' - f'{str(self._state)}') + self._log(f"Transitioned from {previous_state!s} to " f"{self._state!s}") def _handle_state_activate_scope(self) -> None: """Handle the `ACTIVATE_SCOPE` state. 
@@ -602,12 +615,13 @@ def _handle_state_activate_scope(self) -> None: priority over empty workers (to satisfy the "at least two active-Scope tests per worker" Rule) """ - assert self._state is self._State.ACTIVATE_SCOPE, \ - f'{self._state=} != {self._State.ACTIVATE_SCOPE}' + assert ( + self._state is self._State.ACTIVATE_SCOPE + ), f"{self._state=} != {self._State.ACTIVATE_SCOPE}" # The previous state is responsible for ensuring that the workset queue # is not empty - assert not self._workset_queue.empty, f'Empty {self._workset_queue}' + assert not self._workset_queue.empty, f"Empty {self._workset_queue}" workset = self._workset_queue.dequeue_workset() @@ -615,12 +629,14 @@ def _handle_state_activate_scope(self) -> None: # contain a fence test belonging to this scope as well as empty workers # which are not shutting down available_workers = self._get_workers_available_for_distribution( - scope_id=workset.scope_id) + scope_id=workset.scope_id + ) # The previous state is responsible for ensuring that workers are # available for this Scope - assert available_workers, \ - f'No workers available for {workset.scope_id=} in {self._state=}' + assert ( + available_workers + ), f"No workers available for {workset.scope_id=} in {self._state=}" # Distribute the workset to the available workers self._distribute_workset(workset=workset, workers=available_workers) @@ -631,9 +647,11 @@ def _handle_state_activate_scope(self) -> None: # Conditions are satisfied for transition to next state previous_state = self._state self._state = self._State.WAIT_READY_TO_FENCE - self._log(f'Transitioned from {str(previous_state)} to ' - f'{str(self._state)}. ' - f'Activated scope={self._active_scope_id}') + self._log( + f"Transitioned from {previous_state!s} to " + f"{self._state!s}. " + f"Activated scope={self._active_scope_id}" + ) def _handle_state_wait_ready_to_fence(self) -> None: """Handle the `WAIT_READY_TO_FENCE` state. @@ -648,11 +666,11 @@ def _handle_state_wait_ready_to_fence(self) -> None: `WAIT-READY-TO-ACTIVATE-SCOPE` (this means that all workers containing active-Scope tests crashed) """ - assert self._state is self._State.WAIT_READY_TO_FENCE, \ - f'{self._state=} != {self._State.WAIT_READY_TO_FENCE}' + assert ( + self._state is self._State.WAIT_READY_TO_FENCE + ), f"{self._state=} != {self._State.WAIT_READY_TO_FENCE}" - assert self._active_scope_id is not None, \ - f'{self._active_scope_id=} is None' + assert self._active_scope_id is not None, f"{self._active_scope_id=} is None" for worker in self._workers: if worker.num_pending_tests > 1: @@ -660,7 +678,8 @@ def _handle_state_wait_ready_to_fence(self) -> None: return workers_to_fence = self._get_workers_ready_for_fencing( - scope_id=self._active_scope_id) + scope_id=self._active_scope_id + ) # Conditions are satisfied for transition to next state previous_state = self._state @@ -672,11 +691,12 @@ def _handle_state_wait_ready_to_fence(self) -> None: # No active-Scope tests pending, so nothing to fence. Their # worker(s) must have crashed? self._state = self._State.WAIT_READY_TO_ACTIVATE_SCOPE - self._log(f'Nothing to fence! No active-scope tests pending - ' - f'workers crashed? {self._active_scope_id=}') + self._log( + f"Nothing to fence! No active-scope tests pending - " + f"workers crashed? {self._active_scope_id=}" + ) - self._log(f'Transitioned from {str(previous_state)} to ' - f'{str(self._state)}') + self._log(f"Transitioned from {previous_state!s} to " f"{self._state!s}") def _handle_state_fence(self) -> None: """Handle the `FENCE` state. 
@@ -690,32 +710,38 @@ def _handle_state_fence(self) -> None: workers instead of a fence test. Finally, transition to `WAIT_READY_TO_ACTIVATE_SCOPE`. """ - assert self._state is self._State.FENCE, \ - f'{self._state=} is not {self._State.FENCE}' + assert ( + self._state is self._State.FENCE + ), f"{self._state=} is not {self._State.FENCE}" workers_to_fence = self._get_workers_ready_for_fencing( - scope_id=self._active_scope_id) + scope_id=self._active_scope_id + ) # The prior state should have ensured that there is at least one worker # that needs to be fenced - assert workers_to_fence, \ - f'No workers ready to fence {self._active_scope_id=} ' \ - f'in {self._state=}; ' \ - f'active workers: {[w.verbose_repr() for w in self._workers]}' + assert workers_to_fence, ( + f"No workers ready to fence {self._active_scope_id=} " + f"in {self._state=}; " + f"active workers: {[w.verbose_repr() for w in self._workers]}" + ) # We will take Fence tests from subsequent worksets. # NOTE: A given workset may be used to fence multiple preceding active # Scopes fence_item_generator = self._generate_fence_items( - source_worksets=self._workset_queue.worksets) + source_worksets=self._workset_queue.worksets + ) # Start fencing for worker in workers_to_fence: fence_item = next(fence_item_generator) if fence_item is not None: worker.run_some_tests([fence_item]) - self._log(f'Fenced {worker} with {fence_item}. ' - f'Active scope={self._active_scope_id}') + self._log( + f"Fenced {worker} with {fence_item}. " + f"Active scope={self._active_scope_id}" + ) else: # No more fence items, so send the "shutdown" message to # the worker to force it to execute its final pending test and @@ -727,13 +753,10 @@ def _handle_state_fence(self) -> None: # Transition to next state previous_state = self._state self._state = self._State.WAIT_READY_TO_ACTIVATE_SCOPE - self._log(f'Transitioned from {str(previous_state)} to ' - f'{str(self._state)}') + self._log(f"Transitioned from {previous_state!s} to " f"{self._state!s}") def _distribute_workset( - self, - workset: _ScopeWorkset, - workers: list[_WorkerProxy] + self, workset: _ScopeWorkset, workers: list[_WorkerProxy] ) -> None: """Distribute the tests in the given workset to the given workers. 
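The arithmetic behind fencing and worker fan-out, implemented later in this file by `_get_max_workers_for_num_tests()` and `_get_fence_capacity_of_workset()`, can be illustrated with a small standalone sketch (sample numbers only):

```
def max_workers_for_num_tests(num_tests: int) -> int:
    # The scheduler's rule of at least two active-Scope tests per worker.
    return num_tests // 2


def fence_capacity(high_water: int, num_tests_remaining: int) -> int:
    # Tests already lent out as fences == high_water - num_tests_remaining.
    return max_workers_for_num_tests(high_water) - (high_water - num_tests_remaining)


assert max_workers_for_num_tests(5) == 2  # five tests fan out to at most two workers
assert fence_capacity(6, 6) == 3          # an untouched workset of six can fence three workers
assert fence_capacity(6, 4) == 1          # after lending two fences, one fence remains
```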
@@ -762,48 +785,54 @@ def _distribute_workset( # Remaining tests in the workset plus the number borrowed as fences # must add up to the original total tests in the workset - assert (workset.num_tests + num_workers_with_fences - == workset.high_water), \ - f'{workset}.num_tests + {num_workers_with_fences=} ' \ - f'!= {workset.high_water=}; {workers=}' + assert workset.num_tests + num_workers_with_fences == workset.high_water, ( + f"{workset}.num_tests + {num_workers_with_fences=} " + f"!= {workset.high_water=}; {workers=}" + ) # Determine the number of workers we will use for this distribution num_workers_to_use = min( - self._get_max_workers_for_num_tests(workset.high_water), - len(workers)) + self._get_max_workers_for_num_tests(workset.high_water), len(workers) + ) # At minimum, all workers fenced from the given Scope Workset must be # included in the distribution - assert num_workers_to_use >= num_workers_with_fences, \ - f'{num_workers_to_use=} < {num_workers_with_fences=} ' \ - f'for {workset} and available {len(workers)=}' + assert num_workers_to_use >= num_workers_with_fences, ( + f"{num_workers_to_use=} < {num_workers_with_fences=} " + f"for {workset} and available {len(workers)=}" + ) # We should only be called when there is work to be done - assert num_workers_to_use > 0, f'{num_workers_to_use=} <= 0' + assert num_workers_to_use > 0, f"{num_workers_to_use=} <= 0" # Our workset's footprint should not exceed available workers - assert num_workers_to_use <= len(workers), \ - f'{num_workers_to_use=} > {len(workers)=} for {workset}' + assert num_workers_to_use <= len( + workers + ), f"{num_workers_to_use=} > {len(workers)=} for {workset}" # Distribute the tests to the selected workers - self._log(f'Distributing {workset} to {num_workers_to_use=}: ' - f'{workers[:num_workers_to_use]}') + self._log( + f"Distributing {workset} to {num_workers_to_use=}: " + f"{workers[:num_workers_to_use]}" + ) num_tests_remaining = workset.high_water - for (worker, num_available_workers) in zip( - workers, - range(num_workers_to_use, 0, -1)): + for worker, num_available_workers in zip( + workers, range(num_workers_to_use, 0, -1) + ): worker: _WorkerProxy num_available_workers: int # Workers ready for distribution must have no more than one pending # test - assert worker.num_pending_tests <= 1, \ - f'{worker.verbose_repr()} num_pending_tests > 1' + assert ( + worker.num_pending_tests <= 1 + ), f"{worker.verbose_repr()} num_pending_tests > 1" if not worker.empty: # The single pending test in the worker must be a Fence test # borrowed from the given workset - assert worker.head_pending_test.scope_id == workset.scope_id, \ - f'Scope IDs of {worker.verbose_repr()} and {workset} differ' + assert ( + worker.head_pending_test.scope_id == workset.scope_id + ), f"Scope IDs of {worker.verbose_repr()} and {workset} differ" # Determine the target number of tests for this worker (including # a matching Fence test, if any) @@ -818,26 +847,27 @@ def _distribute_workset( if num_tests_to_add: tests_to_add = workset.dequeue_tests(num_tests=num_tests_to_add) worker.run_some_tests(tests_to_add) - self._log(f'Distributed {len(tests_to_add)} tests to {worker} ' - f'from {workset}') + self._log( + f"Distributed {len(tests_to_add)} tests to {worker} " + f"from {workset}" + ) else: # NOTE: A Workset with a high watermark of just one item becomes # empty if a Fence item was withdrawn from it - assert workset.high_water == 1, \ - f'Attempted to distribute 0 tests to {worker} ' \ - f'from {workset}' - self._log(f'No more tests to 
distribute from {workset} ' - f'to {worker}') + assert workset.high_water == 1, ( + f"Attempted to distribute 0 tests to {worker} " f"from {workset}" + ) + self._log(f"No more tests to distribute from {workset} " f"to {worker}") # Workset should be empty now - assert workset.empty, \ - f'{workset} is not empty after distribution to {num_workers_to_use} ' \ - f'workers: {workers[:num_workers_to_use]}.' + assert workset.empty, ( + f"{workset} is not empty after distribution to {num_workers_to_use} " + f"workers: {workers[:num_workers_to_use]}." + ) @classmethod def _generate_fence_items( - cls, - source_worksets: Iterable[_ScopeWorkset] + cls, source_worksets: Iterable[_ScopeWorkset] ) -> Generator[Optional[_TestProxy], None, None]: """Generator that withdraws (i.e., dequeues) Fence test items from the given ordered Scope Worksets and yields them until it runs out of the @@ -904,13 +934,13 @@ def _get_fence_capacity_of_workset(cls, workset: _ScopeWorkset) -> int: :param workset: The given Scope Workset :return: """ - num_fence_items = ( - cls._get_max_workers_for_num_tests(num_tests=workset.high_water) - - (workset.high_water - workset.num_tests) - ) + num_fence_items = cls._get_max_workers_for_num_tests( + num_tests=workset.high_water + ) - (workset.high_water - workset.num_tests) - assert num_fence_items >= 0, f'Number of fences below zero ' \ - f'({num_fence_items}) in {workset}' + assert num_fence_items >= 0, ( + f"Number of fences below zero " f"({num_fence_items}) in {workset}" + ) return num_fence_items @@ -941,8 +971,7 @@ def _get_max_workers_for_num_tests(num_tests: int) -> int: return num_tests // 2 def _get_workers_available_for_distribution( - self, - scope_id: str + self, scope_id: str ) -> list[_WorkerProxy]: """Return workers available for distribution of the given Scope. @@ -959,16 +988,15 @@ def _get_workers_available_for_distribution( :return: A (possibly empty) list of workers available for distribution. """ return [ - worker for worker in self._workers - if (not worker.shutting_down - and (worker.empty - or worker.tail_pending_test.scope_id == scope_id)) + worker + for worker in self._workers + if ( + not worker.shutting_down + and (worker.empty or worker.tail_pending_test.scope_id == scope_id) + ) ] - def _get_workers_ready_for_fencing( - self, - scope_id: str - ) -> list[_WorkerProxy]: + def _get_workers_ready_for_fencing(self, scope_id: str) -> list[_WorkerProxy]: """Return workers that are ready to be Fenced for the given test Scope. A worker that needs to be Fenced satisfies all the following conditions: @@ -981,10 +1009,13 @@ def _get_workers_ready_for_fencing( :return: A (possibly empty) list of workers to Fence. """ return [ - worker for worker in self._workers - if (not worker.shutting_down + worker + for worker in self._workers + if ( + not worker.shutting_down and worker.num_pending_tests == 1 - and worker.head_pending_test.scope_id == scope_id) + and worker.head_pending_test.scope_id == scope_id + ) ] def _do_two_nodes_have_same_collection( @@ -992,7 +1023,7 @@ def _do_two_nodes_have_same_collection( reference_node: WorkerController, reference_collection: tuple[str], node: WorkerController, - collection: tuple[str, ...] + collection: tuple[str, ...], ) -> bool: """ If collections differ, this method returns False while logging @@ -1008,8 +1039,8 @@ def _do_two_nodes_have_same_collection( otherwise. 
""" msg = report_collection_diff( - reference_collection, collection, reference_node.gateway.id, - node.gateway.id) + reference_collection, collection, reference_node.gateway.id, node.gateway.id + ) if not msg: return True @@ -1019,8 +1050,7 @@ def _do_two_nodes_have_same_collection( # NOTE: Not sure why/when `_config` would be `None`. Copied check # from the `loadscope` scheduler. - report = CollectReport(node.gateway.id, 'failed', longrepr=msg, - result=[]) + report = CollectReport(node.gateway.id, "failed", longrepr=msg, result=[]) self._config.hook.pytest_collectreport(report=report) return False @@ -1046,8 +1076,7 @@ def __init__(self, node: WorkerController): # Initially None, until assigned by the Scheduler self._collection: Optional[tuple[str]] = None - self._pending_test_by_index: \ - OrderedDict[int, _TestProxy] = OrderedDict() + self._pending_test_by_index: OrderedDict[int, _TestProxy] = OrderedDict() def __repr__(self): return self.verbose_repr(verbose=False) @@ -1073,10 +1102,11 @@ def collection(self, collection: tuple[str]): :param collection: An ordered collection of test IDs collected by the remote worker. Must not be `None`. Also, MUST NOT be set already. """ - assert collection is not None, f'None test collection passed to {self}' + assert collection is not None, f"None test collection passed to {self}" - assert self._collection is None, \ - f'Test collection passed when one already exists to {self}' + assert ( + self._collection is None + ), f"Test collection passed when one already exists to {self}" self._collection = collection @@ -1113,8 +1143,7 @@ def empty(self) -> bool: @property def num_pending_tests(self) -> int: - """Count of tests in the pending queue - """ + """Count of tests in the pending queue""" return len(self._pending_test_by_index) @property @@ -1134,24 +1163,22 @@ def verbose_repr(self, verbose: bool = True) -> str: :return: `repr` of the instance. 
""" items = [ - '<', - f'{self.__class__.__name__}:', - f'{self._node}', - f'shutting_down={self.shutting_down}', - f'num_pending={self.num_pending_tests}' + "<", + f"{self.__class__.__name__}:", + f"{self._node}", + f"shutting_down={self.shutting_down}", + f"num_pending={self.num_pending_tests}", ] if verbose: if self.num_pending_tests == 1: - items.append( - f'head_scope_id={self.head_pending_test.scope_id}') + items.append(f"head_scope_id={self.head_pending_test.scope_id}") if self.num_pending_tests > 1: - items.append( - f'tail_scope_id={self.tail_pending_test.scope_id}') + items.append(f"tail_scope_id={self.tail_pending_test.scope_id}") - items.append('>') + items.append(">") - return ' '.join(items) + return " ".join(items) def run_some_tests(self, tests: Iterable[_TestProxy]) -> None: """ @@ -1172,8 +1199,9 @@ def handle_test_completion(self, test_index: int) -> None: # Completion should be reported in same order the tests were sent to # the remote worker - assert head_pending_test_index == test_index, \ - f'{head_pending_test_index=} != {test_index}' + assert ( + head_pending_test_index == test_index + ), f"{head_pending_test_index=} != {test_index}" # Remove the test from the worker's pending queue self._pending_test_by_index.pop(test_index) @@ -1198,13 +1226,16 @@ def shutdown(self) -> None: class _TestProxy: """ - Represents a single test from the overall test - collection to be executed + Represents a single test from the overall test + collection to be executed """ # There can be a large number of tests, so economize memory by declaring # `__slots__` (see https://wiki.python.org/moin/UsingSlots) - __slots__ = ('test_id', 'test_index',) + __slots__ = ( + "test_id", + "test_index", + ) def __init__(self, test_id: str, test_index: int): """ @@ -1217,13 +1248,14 @@ def __init__(self, test_id: str, test_index: int): self.test_index: int = test_index def __repr__(self): - return f'<{self.__class__.__name__}: test_index={self.test_index} ' \ - f'scope_id={self.scope_id} test_id={self.test_id}>' + return ( + f"<{self.__class__.__name__}: test_index={self.test_index} " + f"scope_id={self.scope_id} test_id={self.test_id}>" + ) @property def scope_id(self) -> str: - """Scope ID to which this test belongs. 
- """ + """Scope ID to which this test belongs.""" return IsoScopeScheduling.split_scope(self.test_id) @@ -1232,7 +1264,11 @@ class _ScopeWorkset: Ordered collection of Tests for the given scope """ - __slots__ = ('scope_id', '_high_water', '_test_by_index',) + __slots__ = ( + "scope_id", + "_high_water", + "_test_by_index", + ) def __init__(self, scope_id: str): """ @@ -1246,8 +1282,10 @@ def __init__(self, scope_id: str): self._test_by_index: OrderedDict[int, _TestProxy] = OrderedDict() def __repr__(self): - return f'<{self.__class__.__name__}: scope_id={self.scope_id} ' \ - f'num_tests={self.num_tests} high_water={self.high_water}>' + return ( + f"<{self.__class__.__name__}: scope_id={self.scope_id} " + f"num_tests={self.num_tests} high_water={self.high_water}>" + ) @property def empty(self) -> bool: @@ -1268,11 +1306,11 @@ def num_tests(self) -> int: def enqueue_test(self, test: _TestProxy) -> None: """Append given test to ordered test collection""" - assert test.scope_id == self.scope_id, \ - f'Wrong {test.scope_id=} for {self}' + assert test.scope_id == self.scope_id, f"Wrong {test.scope_id=} for {self}" - assert test.test_index not in self._test_by_index, \ - f'{test.test_index=} was already assigned to {self}' + assert ( + test.test_index not in self._test_by_index + ), f"{test.test_index=} was already assigned to {self}" self._test_by_index[test.test_index] = test @@ -1290,17 +1328,18 @@ def dequeue_tests(self, num_tests: int) -> list[_TestProxy]: available tests. @raise IndexError: If `num_tests` exceeds available tests. """ - assert num_tests > 0, f'Non-positive {num_tests=} requested.' + assert num_tests > 0, f"Non-positive {num_tests=} requested." if num_tests > len(self._test_by_index): - raise IndexError( - f'{num_tests=} exceeds {len(self._test_by_index)=}') + raise IndexError(f"{num_tests=} exceeds {len(self._test_by_index)=}") key_iter = iter(self._test_by_index.keys()) test_indexes_to_dequeue = [next(key_iter) for _ in range(num_tests)] - return [self._test_by_index.pop(test_index) - for test_index in test_indexes_to_dequeue] + return [ + self._test_by_index.pop(test_index) + for test_index in test_indexes_to_dequeue + ] class _WorksetQueue: @@ -1310,8 +1349,10 @@ def __init__(self): self._workset_by_scope: OrderedDict[str, _ScopeWorkset] = OrderedDict() def __repr__(self): - return f'<{self.__class__.__name__}: ' \ - f'num_worksets={len(self._workset_by_scope)}>' + return ( + f"<{self.__class__.__name__}: " + f"num_worksets={len(self._workset_by_scope)}>" + ) @property def empty(self) -> bool: @@ -1357,7 +1398,6 @@ def dequeue_workset(self) -> _ScopeWorkset: @raise IndexError: If queue is empty. """ if self.empty: - raise IndexError('Attempted dequeue from empty Workset Queue.') + raise IndexError("Attempted dequeue from empty Workset Queue.") - return self._workset_by_scope.pop( - next(iter(self._workset_by_scope.keys()))) + return self._workset_by_scope.pop(next(iter(self._workset_by_scope.keys()))) From 17ad842c50a66a1463b994126f7a8d36ba36311b Mon Sep 17 00:00:00 2001 From: Vitaly Kruglikov Date: Mon, 9 Sep 2024 12:33:46 -0400 Subject: [PATCH 04/52] Addressed pre-commit findings. 
--- .pre-commit-config.yaml | 1 + src/xdist/iso_scheduling_plugin.py | 5 ++-- src/xdist/iso_scheduling_utils.py | 3 ++- src/xdist/scheduler/isoscope.py | 42 +++++++++++------------------- 4 files changed, 20 insertions(+), 31 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 74546f0b..514b0b36 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -33,3 +33,4 @@ repos: - execnet>=2.1.0 - types-psutil - setproctitle + - filelock>=3.13.1 diff --git a/src/xdist/iso_scheduling_plugin.py b/src/xdist/iso_scheduling_plugin.py index 1da796ac..53f2e29b 100644 --- a/src/xdist/iso_scheduling_plugin.py +++ b/src/xdist/iso_scheduling_plugin.py @@ -57,7 +57,6 @@ if TYPE_CHECKING: from collections.abc import Callable from collections.abc import Generator - from typing import Optional _LOGGER = logging.getLogger(__name__) @@ -255,8 +254,8 @@ def __init__( self._worker_id: str = worker_id - self._setup_context: Optional[DistributedSetupContext] = None - self._teardown_context: Optional[DistributedTeardownContext] = None + self._setup_context: DistributedSetupContext | None = None + self._teardown_context: DistributedTeardownContext | None = None def maybe_call_setup( self, diff --git a/src/xdist/iso_scheduling_utils.py b/src/xdist/iso_scheduling_utils.py index 5f0a3ead..e5df8b6d 100644 --- a/src/xdist/iso_scheduling_utils.py +++ b/src/xdist/iso_scheduling_utils.py @@ -39,6 +39,7 @@ if TYPE_CHECKING: from collections.abc import Callable from collections.abc import Generator + from contextlib import AbstractContextManager import pytest @@ -61,7 +62,7 @@ class IsoSchedulingFixture(abc.ABC): @abc.abstractmethod def coordinate_setup_teardown( self, setup_request: pytest.FixtureRequest - ) -> Generator[DistributedSetupCoordinator, None, None]: + ) -> AbstractContextManager[DistributedSetupCoordinator]: """Context manager that yields an instance of `DistributedSetupCoordinator` for distributed coordination of Setup and Teardown. diff --git a/src/xdist/scheduler/isoscope.py b/src/xdist/scheduler/isoscope.py index b365415e..96a77ff3 100644 --- a/src/xdist/scheduler/isoscope.py +++ b/src/xdist/scheduler/isoscope.py @@ -61,7 +61,6 @@ from collections.abc import Iterable from collections.abc import ValuesView from typing import NoReturn - from typing import Optional from typing import Sequence import xdist.remote @@ -129,7 +128,7 @@ def __init__(self, config: pytest.Config, log: xdist.remote.Producer): # Scope ID of tests that are currently executing; `None` prior to the # initial distribution - self._active_scope_id: Optional[str] = None + self._active_scope_id: str | None = None # The initial expected number of remote workers taking part. # The actual number of workers will vary during the scheduler's @@ -143,10 +142,10 @@ def __init__(self, config: pytest.Config, log: xdist.remote.Producer): # is performed once the number of registered node collections reaches # `_expected_num_workers`. It is initialized to None and then updated # after validation succeeds. - self._official_test_collection: Optional[tuple[str]] = None + self._official_test_collection: tuple[str] | None = None # Remote worker node having `_official_test_collection` as its test # collection (for reporting failed collection validations) - self._official_test_collection_node: Optional[WorkerController] = None + self._official_test_collection_node: WorkerController | None = None # Ordered collection of Scope Worksets. Each Scope Workset is an ordered # collection of tests belonging to the given scope. 
Initially empty, @@ -257,7 +256,7 @@ def add_node(self, node: WorkerController) -> None: self._pending_worker_by_node[node] = _WorkerProxy(node) - def remove_node(self, node: WorkerController) -> Optional[str]: + def remove_node(self, node: WorkerController) -> str | None: """Remove a Remote Worker node from the scheduler. This should be called either when the node crashed or at node shutdown @@ -386,7 +385,7 @@ def add_node_collection( # Check if we now have enough collections to establish a final one # Get all pending workers with registered test collection - w: _WorkerProxy + # ZZZ remove line w: _WorkerProxy workers_with_collection = [ w for w in self._pending_worker_by_node.values() if w.collection is not None ] @@ -472,6 +471,7 @@ def remove_pending_tests_from_node( node: WorkerController, indices: Sequence[int], ) -> NoReturn: + """Not supported""" raise NotImplementedError() def schedule(self) -> None: @@ -868,7 +868,7 @@ def _distribute_workset( @classmethod def _generate_fence_items( cls, source_worksets: Iterable[_ScopeWorkset] - ) -> Generator[Optional[_TestProxy], None, None]: + ) -> Generator[_TestProxy | None, None, None]: """Generator that withdraws (i.e., dequeues) Fence test items from the given ordered Scope Worksets and yields them until it runs out of the fence items per limits described below, and will thereafter yield @@ -1065,16 +1065,14 @@ class _WorkerProxy: """ def __init__(self, node: WorkerController): - """ - :param node: The corresponding xdist worker node. - """ + """:param node: The corresponding xdist worker node.""" # node: node instance for communication with remote worker, # provided by pytest-xdist controller self._node: WorkerController = node # An ordered collection of test IDs collected by the remote worker. # Initially None, until assigned by the Scheduler - self._collection: Optional[tuple[str]] = None + self._collection: tuple[str] | None = None self._pending_test_by_index: OrderedDict[int, _TestProxy] = OrderedDict() @@ -1083,13 +1081,11 @@ def __repr__(self): @property def node(self) -> WorkerController: - """ - :return: The corresponding xdist worker node. - """ + """:return: The corresponding xdist worker node.""" return self._node @property - def collection(self) -> Optional[tuple[str]]: + def collection(self) -> tuple[str] | None: """ :return: An ordered collection of test IDs collected by the remote worker; `None` if the collection is not available yet. @@ -1260,9 +1256,7 @@ def scope_id(self) -> str: class _ScopeWorkset: - """ - Ordered collection of Tests for the given scope - """ + """Ordered collection of Tests for the given scope.""" __slots__ = ( "scope_id", @@ -1271,9 +1265,7 @@ class _ScopeWorkset: ) def __init__(self, scope_id: str): - """ - :param scope_id: Test Scope to which the tests in this workset belong; - """ + """:param scope_id: Test Scope to which the tests in this workset belong;""" self.scope_id = scope_id # High watermark for number of tests in the workset @@ -1294,9 +1286,7 @@ def empty(self) -> bool: @property def high_water(self) -> int: - """ - :return: High Watermark of the number of tests in the workset. 
- """ + """:return: High Watermark of the number of tests in the workset.""" return self._high_water @property @@ -1315,9 +1305,7 @@ def enqueue_test(self, test: _TestProxy) -> None: self._test_by_index[test.test_index] = test # Update high watermark - new_num_tests = len(self._test_by_index) - if new_num_tests > self._high_water: - self._high_water = new_num_tests + self._high_water = max(self._high_water, len(self._test_by_index)) def dequeue_tests(self, num_tests: int) -> list[_TestProxy]: """ From be0a561987171dcd7daa21eebfa4b2ce10c57c93 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 9 Sep 2024 16:34:30 +0000 Subject: [PATCH 05/52] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/xdist/iso_scheduling_utils.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/xdist/iso_scheduling_utils.py b/src/xdist/iso_scheduling_utils.py index e5df8b6d..889b5cac 100644 --- a/src/xdist/iso_scheduling_utils.py +++ b/src/xdist/iso_scheduling_utils.py @@ -38,7 +38,6 @@ if TYPE_CHECKING: from collections.abc import Callable - from collections.abc import Generator from contextlib import AbstractContextManager import pytest From 3689139d3b3dab2bc9b2cae93a71bd1ea4bb73e8 Mon Sep 17 00:00:00 2001 From: Vitaly Kruglikov Date: Mon, 9 Sep 2024 13:19:50 -0400 Subject: [PATCH 06/52] Addressed additional pre-commit findings. --- src/xdist/iso_scheduling_plugin.py | 10 +++++----- src/xdist/scheduler/isoscope.py | 32 ++++++++++++++---------------- 2 files changed, 20 insertions(+), 22 deletions(-) diff --git a/src/xdist/iso_scheduling_plugin.py b/src/xdist/iso_scheduling_plugin.py index 53f2e29b..009b2937 100644 --- a/src/xdist/iso_scheduling_plugin.py +++ b/src/xdist/iso_scheduling_plugin.py @@ -353,11 +353,11 @@ def maybe_call_teardown( teardown_callback(self._teardown_context) -def _map_file_lock_exception(f: Callable): +def _map_file_lock_exception(f: Callable): # type: ignore[no-untyped-def, type-arg] """Decorator: map `FileLock` exceptions of interest to our own exceptions.""" @functools.wraps(f) - def wrapper(*args, **kwargs): + def wrapper(*args, **kwargs): # type: ignore[no-untyped-def] try: return f(*args, **kwargs) except filelock.Timeout as err: @@ -380,11 +380,11 @@ class _DistributedSetupCoordinationImpl: class DistributedState: """State of the Distributed Setup-Teardown Coordination.""" - def __init__(self, setup_count, teardown_count): + def __init__(self, setup_count: int, teardown_count: int) -> None: self.setup_count = setup_count self.teardown_count = teardown_count - def __repr__(self): + def __repr__(self) -> str: return ( f"<{self.__class__.__qualname__}: " f"setup_count={self.setup_count}; " @@ -404,7 +404,7 @@ def load_from_file_path( return cls(**json.loads(state_file_path.read_text())) @property - def as_json_kwargs_dict(self) -> dict: + def as_json_kwargs_dict(self) -> dict[str, int]: """ :return: JSON-compatible representation of the instance that is also suitable for constructing the instance after fetching from file. 
diff --git a/src/xdist/scheduler/isoscope.py b/src/xdist/scheduler/isoscope.py index 96a77ff3..e9178294 100644 --- a/src/xdist/scheduler/isoscope.py +++ b/src/xdist/scheduler/isoscope.py @@ -49,7 +49,6 @@ import random from typing import TYPE_CHECKING -from _pytest.runner import CollectReport import pytest from xdist.report import report_collection_diff @@ -142,7 +141,7 @@ def __init__(self, config: pytest.Config, log: xdist.remote.Producer): # is performed once the number of registered node collections reaches # `_expected_num_workers`. It is initialized to None and then updated # after validation succeeds. - self._official_test_collection: tuple[str] | None = None + self._official_test_collection: tuple[str, ...] | None = None # Remote worker node having `_official_test_collection` as its test # collection (for reporting failed collection validations) self._official_test_collection_node: WorkerController | None = None @@ -367,7 +366,7 @@ def add_node_collection( # Check that the new collection matches the official collection if self._do_two_nodes_have_same_collection( - reference_node=self._official_test_collection_node, + reference_node=self._official_test_collection_node, # type: ignore[arg-type] reference_collection=self._official_test_collection, node=node, collection=collection, @@ -528,7 +527,7 @@ def _reschedule_workers(self) -> None: """Distribute work to workers if needed at this time.""" assert self._state is not None - traversed_states = [] + traversed_states: list[IsoScopeScheduling._State] = [] previous_state = None while self._state != previous_state: # NOTE: This loop will terminate because completion of tests and @@ -815,12 +814,11 @@ def _distribute_workset( ) num_tests_remaining = workset.high_water + worker: _WorkerProxy + num_available_workers: int for worker, num_available_workers in zip( workers, range(num_workers_to_use, 0, -1) ): - worker: _WorkerProxy - num_available_workers: int - # Workers ready for distribution must have no more than one pending # test assert ( @@ -1021,7 +1019,7 @@ def _get_workers_ready_for_fencing(self, scope_id: str) -> list[_WorkerProxy]: def _do_two_nodes_have_same_collection( self, reference_node: WorkerController, - reference_collection: tuple[str], + reference_collection: tuple[str, ...], node: WorkerController, collection: tuple[str, ...], ) -> bool: @@ -1050,7 +1048,7 @@ def _do_two_nodes_have_same_collection( # NOTE: Not sure why/when `_config` would be `None`. Copied check # from the `loadscope` scheduler. - report = CollectReport(node.gateway.id, "failed", longrepr=msg, result=[]) + report = pytest.CollectReport(node.gateway.id, "failed", longrepr=msg, result=[]) self._config.hook.pytest_collectreport(report=report) return False @@ -1072,11 +1070,11 @@ def __init__(self, node: WorkerController): # An ordered collection of test IDs collected by the remote worker. # Initially None, until assigned by the Scheduler - self._collection: tuple[str] | None = None + self._collection: tuple[str, ...] | None = None self._pending_test_by_index: OrderedDict[int, _TestProxy] = OrderedDict() - def __repr__(self): + def __repr__(self) -> str: return self.verbose_repr(verbose=False) @property @@ -1085,7 +1083,7 @@ def node(self) -> WorkerController: return self._node @property - def collection(self) -> tuple[str] | None: + def collection(self) -> tuple[str, ...] | None: """ :return: An ordered collection of test IDs collected by the remote worker; `None` if the collection is not available yet. 
@@ -1093,7 +1091,7 @@ def collection(self) -> tuple[str] | None: return self._collection @collection.setter - def collection(self, collection: tuple[str]): + def collection(self, collection: tuple[str, ...]): """ :param collection: An ordered collection of test IDs collected by the remote worker. Must not be `None`. Also, MUST NOT be set already. @@ -1243,7 +1241,7 @@ def __init__(self, test_id: str, test_index: int): self.test_id: str = test_id self.test_index: int = test_index - def __repr__(self): + def __repr__(self) -> str: return ( f"<{self.__class__.__name__}: test_index={self.test_index} " f"scope_id={self.scope_id} test_id={self.test_id}>" @@ -1273,7 +1271,7 @@ def __init__(self, scope_id: str): self._test_by_index: OrderedDict[int, _TestProxy] = OrderedDict() - def __repr__(self): + def __repr__(self) -> str: return ( f"<{self.__class__.__name__}: scope_id={self.scope_id} " f"num_tests={self.num_tests} high_water={self.high_water}>" @@ -1333,10 +1331,10 @@ def dequeue_tests(self, num_tests: int) -> list[_TestProxy]: class _WorksetQueue: """Ordered collection of Scope Worksets grouped by scope id.""" - def __init__(self): + def __init__(self) -> None: self._workset_by_scope: OrderedDict[str, _ScopeWorkset] = OrderedDict() - def __repr__(self): + def __repr__(self) -> str: return ( f"<{self.__class__.__name__}: " f"num_worksets={len(self._workset_by_scope)}>" From 2671eec38ab35a441ef70a0818fc45690bcf8a44 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 9 Sep 2024 17:21:09 +0000 Subject: [PATCH 07/52] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/xdist/scheduler/isoscope.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/xdist/scheduler/isoscope.py b/src/xdist/scheduler/isoscope.py index e9178294..258e54c6 100644 --- a/src/xdist/scheduler/isoscope.py +++ b/src/xdist/scheduler/isoscope.py @@ -1048,7 +1048,9 @@ def _do_two_nodes_have_same_collection( # NOTE: Not sure why/when `_config` would be `None`. Copied check # from the `loadscope` scheduler. - report = pytest.CollectReport(node.gateway.id, "failed", longrepr=msg, result=[]) + report = pytest.CollectReport( + node.gateway.id, "failed", longrepr=msg, result=[] + ) self._config.hook.pytest_collectreport(report=report) return False From 879ff2c479eb8cace353580ef706ab14f5d781fd Mon Sep 17 00:00:00 2001 From: Vitaly Kruglikov Date: Mon, 9 Sep 2024 13:36:43 -0400 Subject: [PATCH 08/52] Addressed additional pre-commit findings. 
--- src/xdist/scheduler/isoscope.py | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/src/xdist/scheduler/isoscope.py b/src/xdist/scheduler/isoscope.py index 258e54c6..065371b7 100644 --- a/src/xdist/scheduler/isoscope.py +++ b/src/xdist/scheduler/isoscope.py @@ -47,6 +47,7 @@ import enum from math import ceil import random +from typing import cast from typing import TYPE_CHECKING import pytest @@ -366,8 +367,8 @@ def add_node_collection( # Check that the new collection matches the official collection if self._do_two_nodes_have_same_collection( - reference_node=self._official_test_collection_node, # type: ignore[arg-type] - reference_collection=self._official_test_collection, + reference_node=cast(WorkerController, self._official_test_collection_node), + reference_collection=cast(tuple[str, ...], self._official_test_collection), node=node, collection=collection, ): @@ -399,9 +400,9 @@ def add_node_collection( for pending_worker in workers_with_collection[1:]: if not self._do_two_nodes_have_same_collection( reference_node=reference_worker.node, - reference_collection=reference_worker.collection, + reference_collection=cast(tuple[str, ...], reference_worker.collection), node=pending_worker.node, - collection=pending_worker.collection, + collection=cast(tuple[str, ...],pending_worker.collection), ): same_collection = False @@ -427,7 +428,8 @@ def add_node_collection( # particularly slow) all_tests = [ _TestProxy(test_id=test_id, test_index=test_index) - for test_index, test_id in enumerate(self._official_test_collection) + for test_index, test_id in enumerate( + cast(tuple[str, ...], self._official_test_collection)) ] shuffled_test_collection = random.sample(all_tests, k=len(all_tests)) @@ -453,7 +455,7 @@ def mark_test_complete( if self._log.enabled: self._log( f"Marking test complete: " - f"test_id={self._official_test_collection[item_index]}; " + f"test_id={cast(tuple[str, ...], self._official_test_collection)[item_index]}; " f"{item_index=}; {worker}" ) @@ -714,7 +716,7 @@ def _handle_state_fence(self) -> None: ), f"{self._state=} is not {self._State.FENCE}" workers_to_fence = self._get_workers_ready_for_fencing( - scope_id=self._active_scope_id + scope_id=cast(str, self._active_scope_id) ) # The prior state should have ensured that there is at least one worker @@ -1093,7 +1095,7 @@ def collection(self) -> tuple[str, ...] | None: return self._collection @collection.setter - def collection(self, collection: tuple[str, ...]): + def collection(self, collection: tuple[str, ...]) -> None: """ :param collection: An ordered collection of test IDs collected by the remote worker. Must not be `None`. Also, MUST NOT be set already. 
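The `typing.cast` calls added in the hunks above narrow attributes that are declared as `... | None` but are guaranteed to be populated by the time they are read. A minimal, self-contained sketch of that pattern follows; it is not part of the patch series, and the `CollectionHolder` class and its method names are invented purely for illustration:

```python
from __future__ import annotations

from typing import cast


class CollectionHolder:
    """Toy stand-in for a scheduler that learns its test collection later."""

    def __init__(self) -> None:
        # Unknown until a remote worker reports its collected test IDs.
        self._collection: tuple[str, ...] | None = None

    def record_collection(self, collection: tuple[str, ...]) -> None:
        self._collection = collection

    def first_test_id(self) -> str:
        # Callers only invoke this after record_collection(), so cast() tells
        # the type checker to treat the Optional attribute as tuple[str, ...].
        return cast("tuple[str, ...]", self._collection)[0]


holder = CollectionHolder()
holder.record_collection(("test_a.py::test[0]", "test_a.py::test[1]"))
print(holder.first_test_id())  # -> test_a.py::test[0]
```

Note that `cast()` performs no runtime conversion or check; the scheduler still has to guarantee through its own state handling that the attribute is really set before such reads.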
From 94ae58cf5086c73e3fb76e72b5bed6118bddeb8b Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 9 Sep 2024 17:38:00 +0000 Subject: [PATCH 09/52] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/xdist/scheduler/isoscope.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/src/xdist/scheduler/isoscope.py b/src/xdist/scheduler/isoscope.py index 065371b7..658932d7 100644 --- a/src/xdist/scheduler/isoscope.py +++ b/src/xdist/scheduler/isoscope.py @@ -367,8 +367,12 @@ def add_node_collection( # Check that the new collection matches the official collection if self._do_two_nodes_have_same_collection( - reference_node=cast(WorkerController, self._official_test_collection_node), - reference_collection=cast(tuple[str, ...], self._official_test_collection), + reference_node=cast( + WorkerController, self._official_test_collection_node + ), + reference_collection=cast( + tuple[str, ...], self._official_test_collection + ), node=node, collection=collection, ): @@ -402,7 +406,7 @@ def add_node_collection( reference_node=reference_worker.node, reference_collection=cast(tuple[str, ...], reference_worker.collection), node=pending_worker.node, - collection=cast(tuple[str, ...],pending_worker.collection), + collection=cast(tuple[str, ...], pending_worker.collection), ): same_collection = False @@ -429,7 +433,8 @@ def add_node_collection( all_tests = [ _TestProxy(test_id=test_id, test_index=test_index) for test_index, test_id in enumerate( - cast(tuple[str, ...], self._official_test_collection)) + cast(tuple[str, ...], self._official_test_collection) + ) ] shuffled_test_collection = random.sample(all_tests, k=len(all_tests)) From 20caaeb8cc784a7a871cc8d1e152424cce6ad41a Mon Sep 17 00:00:00 2001 From: Vitaly Kruglikov Date: Mon, 9 Sep 2024 13:44:18 -0400 Subject: [PATCH 10/52] Addressed additional pre-commit finding. --- src/xdist/scheduler/isoscope.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/xdist/scheduler/isoscope.py b/src/xdist/scheduler/isoscope.py index 658932d7..bc516122 100644 --- a/src/xdist/scheduler/isoscope.py +++ b/src/xdist/scheduler/isoscope.py @@ -330,7 +330,9 @@ def remove_node(self, node: WorkerController) -> str | None: return crashed_test_id def add_node_collection( - self, node: WorkerController, collection: Sequence[str] + self, + node: WorkerController, + collection: Sequence[str] ) -> None: """Register the collected test items from a Remote Worker node. @@ -443,7 +445,10 @@ def add_node_collection( self._workset_queue.add_test(test) def mark_test_complete( - self, node: WorkerController, item_index: int, duration: float + self, + node: WorkerController, + item_index: int, + duration: float = 0 ) -> None: """Mark test item as completed by node and remove from pending tests in the worker and reschedule.
From 77b41579f6fccb0acaf94fe872f687332144b693 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 9 Sep 2024 17:45:46 +0000 Subject: [PATCH 11/52] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/xdist/scheduler/isoscope.py | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/src/xdist/scheduler/isoscope.py b/src/xdist/scheduler/isoscope.py index bc516122..28eb9823 100644 --- a/src/xdist/scheduler/isoscope.py +++ b/src/xdist/scheduler/isoscope.py @@ -330,9 +330,7 @@ def remove_node(self, node: WorkerController) -> str | None: return crashed_test_id def add_node_collection( - self, - node: WorkerController, - collection: Sequence[str] + self, node: WorkerController, collection: Sequence[str] ) -> None: """Register the collected test items from a Remote Worker node. @@ -445,10 +443,7 @@ def add_node_collection( self._workset_queue.add_test(test) def mark_test_complete( - self, - node: WorkerController, - item_index: int, - duration: float = 0 + self, node: WorkerController, item_index: int, duration: float = 0 ) -> None: """Mark test item as completed by node and remove from pending tests in the worker and reschedule. From 4486a9fd853b70000397d6eb3ae0e2988ff20df8 Mon Sep 17 00:00:00 2001 From: Vitaly Kruglikov Date: Mon, 9 Sep 2024 13:48:01 -0400 Subject: [PATCH 12/52] Addressed pre-commit finding. --- CHANGELOG.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 53b482a0..915a3a30 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,5 +1,5 @@ pytest-xdist 3.ZZZ.ZZZ (2024-zz-zz) -=============================== +=================================== Features -------- From 1b9f92d9f43195d0ffc0027154987b94e2a4a2eb Mon Sep 17 00:00:00 2001 From: Vitaly Kruglikov Date: Tue, 22 Oct 2024 18:02:12 -0400 Subject: [PATCH 13/52] Add isoscope parametrization to generic acceptance tests test_single_file and test_multi_file.
--- testing/acceptance_test.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/testing/acceptance_test.py b/testing/acceptance_test.py index 3ef10cc9..f9784cf1 100644 --- a/testing/acceptance_test.py +++ b/testing/acceptance_test.py @@ -1513,7 +1513,8 @@ def test_c(self): """ + ((_test_content * 4) % ("A", "B", "C", "D")) @pytest.mark.parametrize( - "scope", ["each", "load", "loadscope", "loadfile", "worksteal", "no"] + "scope", ["each", "isoscope", "load", "loadscope", "loadfile", + "worksteal", "no"] ) def test_single_file(self, pytester: pytest.Pytester, scope: str) -> None: pytester.makepyfile(test_a=self.test_file1) @@ -1521,7 +1522,8 @@ def test_single_file(self, pytester: pytest.Pytester, scope: str) -> None: result.assert_outcomes(passed=(12 if scope != "each" else 12 * 2)) @pytest.mark.parametrize( - "scope", ["each", "load", "loadscope", "loadfile", "worksteal", "no"] + "scope", ["each", "isoscope", "load", "loadscope", "loadfile", + "worksteal", "no"] ) def test_multi_file(self, pytester: pytest.Pytester, scope: str) -> None: pytester.makepyfile( From c5d49b17841f9cb8f3c05296b22f9e8268bde003 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 22 Oct 2024 22:06:11 +0000 Subject: [PATCH 14/52] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- testing/acceptance_test.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/testing/acceptance_test.py b/testing/acceptance_test.py index f9784cf1..637d5e66 100644 --- a/testing/acceptance_test.py +++ b/testing/acceptance_test.py @@ -1513,8 +1513,8 @@ def test_c(self): """ + ((_test_content * 4) % ("A", "B", "C", "D")) @pytest.mark.parametrize( - "scope", ["each", "isoscope", "load", "loadscope", "loadfile", - "worksteal", "no"] + "scope", + ["each", "isoscope", "load", "loadscope", "loadfile", "worksteal", "no"], ) def test_single_file(self, pytester: pytest.Pytester, scope: str) -> None: pytester.makepyfile(test_a=self.test_file1) @@ -1522,8 +1522,8 @@ def test_single_file(self, pytester: pytest.Pytester, scope: str) -> None: result.assert_outcomes(passed=(12 if scope != "each" else 12 * 2)) @pytest.mark.parametrize( - "scope", ["each", "isoscope", "load", "loadscope", "loadfile", - "worksteal", "no"] + "scope", + ["each", "isoscope", "load", "loadscope", "loadfile", "worksteal", "no"], ) def test_multi_file(self, pytester: pytest.Pytester, scope: str) -> None: pytester.makepyfile( From 9597f6fe948e5da9411a42bea00589e1696129a2 Mon Sep 17 00:00:00 2001 From: Vitaly Kruglikov Date: Tue, 22 Oct 2024 18:15:04 -0400 Subject: [PATCH 15/52] Use List and Tuple from typing when specifying these types with subscripts to work around "TypeError: 'type' object is not subscriptable" in py38 --- src/xdist/scheduler/isoscope.py | 38 ++++++++++++++++----------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/src/xdist/scheduler/isoscope.py b/src/xdist/scheduler/isoscope.py index 28eb9823..17a34e85 100644 --- a/src/xdist/scheduler/isoscope.py +++ b/src/xdist/scheduler/isoscope.py @@ -47,7 +47,7 @@ import enum from math import ceil import random -from typing import cast +from typing import cast, List, Tuple from typing import TYPE_CHECKING import pytest @@ -142,7 +142,7 @@ def __init__(self, config: pytest.Config, log: xdist.remote.Producer): # is performed once the number of registered node collections reaches # `_expected_num_workers`. 
It is initialized to None and then updated # after validation succeeds. - self._official_test_collection: tuple[str, ...] | None = None + self._official_test_collection: Tuple[str, ...] | None = None # Remote worker node having `_official_test_collection` as its test # collection (for reporting failed collection validations) self._official_test_collection_node: WorkerController | None = None @@ -182,7 +182,7 @@ def __init__(self, config: pytest.Config, log: xdist.remote.Producer): ) @property - def nodes(self) -> list[WorkerController]: + def nodes(self) -> List[WorkerController]: """A new list of all active `WorkerController` nodes. Called by xdist `DSession`. @@ -371,7 +371,7 @@ def add_node_collection( WorkerController, self._official_test_collection_node ), reference_collection=cast( - tuple[str, ...], self._official_test_collection + Tuple[str, ...], self._official_test_collection ), node=node, collection=collection, @@ -404,9 +404,9 @@ def add_node_collection( for pending_worker in workers_with_collection[1:]: if not self._do_two_nodes_have_same_collection( reference_node=reference_worker.node, - reference_collection=cast(tuple[str, ...], reference_worker.collection), + reference_collection=cast(Tuple[str, ...], reference_worker.collection), node=pending_worker.node, - collection=cast(tuple[str, ...], pending_worker.collection), + collection=cast(Tuple[str, ...], pending_worker.collection), ): same_collection = False @@ -433,7 +433,7 @@ def add_node_collection( all_tests = [ _TestProxy(test_id=test_id, test_index=test_index) for test_index, test_id in enumerate( - cast(tuple[str, ...], self._official_test_collection) + cast(Tuple[str, ...], self._official_test_collection) ) ] shuffled_test_collection = random.sample(all_tests, k=len(all_tests)) @@ -460,7 +460,7 @@ def mark_test_complete( if self._log.enabled: self._log( f"Marking test complete: " - f"test_id={cast(tuple[str, ...], self._official_test_collection)[item_index]}; " + f"test_id={cast(Tuple[str, ...], self._official_test_collection)[item_index]}; " f"{item_index=}; {worker}" ) @@ -534,7 +534,7 @@ def _reschedule_workers(self) -> None: """Distribute work to workers if needed at this time.""" assert self._state is not None - traversed_states: list[IsoScopeScheduling._State] = [] + traversed_states: List[IsoScopeScheduling._State] = [] previous_state = None while self._state != previous_state: # NOTE: This loop will terminate because completion of tests and @@ -762,7 +762,7 @@ def _handle_state_fence(self) -> None: self._log(f"Transitioned from {previous_state!s} to " f"{self._state!s}") def _distribute_workset( - self, workset: _ScopeWorkset, workers: list[_WorkerProxy] + self, workset: _ScopeWorkset, workers: List[_WorkerProxy] ) -> None: """Distribute the tests in the given workset to the given workers. @@ -977,7 +977,7 @@ def _get_max_workers_for_num_tests(num_tests: int) -> int: def _get_workers_available_for_distribution( self, scope_id: str - ) -> list[_WorkerProxy]: + ) -> List[_WorkerProxy]: """Return workers available for distribution of the given Scope. Available workers are non-shutting-down workers that either @@ -1001,7 +1001,7 @@ def _get_workers_available_for_distribution( ) ] - def _get_workers_ready_for_fencing(self, scope_id: str) -> list[_WorkerProxy]: + def _get_workers_ready_for_fencing(self, scope_id: str) -> List[_WorkerProxy]: """Return workers that are ready to be Fenced for the given test Scope. 
A worker that needs to be Fenced satisfies all the following conditions: @@ -1026,9 +1026,9 @@ def _get_workers_ready_for_fencing(self, scope_id: str) -> List[_WorkerProxy]: def _do_two_nodes_have_same_collection( self, reference_node: WorkerController, - reference_collection: tuple[str, ...], + reference_collection: Tuple[str, ...], node: WorkerController, - collection: tuple[str, ...], + collection: Tuple[str, ...], ) -> bool: """ If collections differ, this method returns False while logging @@ -1079,7 +1079,7 @@ def __init__(self, node: WorkerController): # An ordered collection of test IDs collected by the remote worker. # Initially None, until assigned by the Scheduler - self._collection: tuple[str, ...] | None = None + self._collection: Tuple[str, ...] | None = None self._pending_test_by_index: OrderedDict[int, _TestProxy] = OrderedDict() @@ -1092,7 +1092,7 @@ def node(self) -> WorkerController: return self._node @property - def collection(self) -> tuple[str, ...] | None: + def collection(self) -> Tuple[str, ...] | None: """ :return: An ordered collection of test IDs collected by the remote worker; `None` if the collection is not available yet. @@ -1100,7 +1100,7 @@ def collection(self) -> tuple[str, ...] | None: return self._collection @collection.setter - def collection(self, collection: tuple[str, ...]) -> None: + def collection(self, collection: Tuple[str, ...]) -> None: """ :param collection: An ordered collection of test IDs collected by the remote worker. Must not be `None`. Also, MUST NOT be set already. @@ -1209,7 +1209,7 @@ def handle_test_completion(self, test_index: int) -> None: # Remove the test from the worker's pending queue self._pending_test_by_index.pop(test_index) - def release_pending_tests(self) -> list[_TestProxy]: + def release_pending_tests(self) -> List[_TestProxy]: """Reset the worker's pending tests, returning those pending tests. :return: A (possibly empty) list of pending tests. @@ -1314,7 +1314,7 @@ def enqueue_test(self, test: _TestProxy) -> None: # Update high watermark self._high_water = max(self._high_water, len(self._test_by_index)) - def dequeue_tests(self, num_tests: int) -> list[_TestProxy]: + def dequeue_tests(self, num_tests: int) -> List[_TestProxy]: """ Remove and return the given number of tests from the head of the collection. From 359938d391603d1ba1635cf0c2c17623f84b314a Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 22 Oct 2024 22:18:47 +0000 Subject: [PATCH 16/52] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/xdist/scheduler/isoscope.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/xdist/scheduler/isoscope.py b/src/xdist/scheduler/isoscope.py index 17a34e85..4b819fc0 100644 --- a/src/xdist/scheduler/isoscope.py +++ b/src/xdist/scheduler/isoscope.py @@ -47,7 +47,9 @@ import enum from math import ceil import random -from typing import cast, List, Tuple +from typing import cast +from typing import List +from typing import Tuple from typing import TYPE_CHECKING import pytest From 7a29564239d5c2e1ef7dd03b59574eee82c0c78c Mon Sep 17 00:00:00 2001 From: Vitaly Kruglikov Date: Tue, 22 Oct 2024 18:27:46 -0400 Subject: [PATCH 17/52] Address pre-commit findings concerning the use of tuple vs Tuple and List vs list in type hints, favoring list and tuple.
--- src/xdist/scheduler/isoscope.py | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/src/xdist/scheduler/isoscope.py b/src/xdist/scheduler/isoscope.py index 4b819fc0..84c3764b 100644 --- a/src/xdist/scheduler/isoscope.py +++ b/src/xdist/scheduler/isoscope.py @@ -144,7 +144,7 @@ def __init__(self, config: pytest.Config, log: xdist.remote.Producer): # is performed once the number of registered node collections reaches # `_expected_num_workers`. It is initialized to None and then updated # after validation succeeds. - self._official_test_collection: Tuple[str, ...] | None = None + self._official_test_collection: tuple[str, ...] | None = None # Remote worker node having `_official_test_collection` as its test # collection (for reporting failed collection validations) self._official_test_collection_node: WorkerController | None = None @@ -184,7 +184,7 @@ def __init__(self, config: pytest.Config, log: xdist.remote.Producer): ) @property - def nodes(self) -> List[WorkerController]: + def nodes(self) -> list[WorkerController]: """A new list of all active `WorkerController` nodes. Called by xdist `DSession`. @@ -536,7 +536,7 @@ def _reschedule_workers(self) -> None: """Distribute work to workers if needed at this time.""" assert self._state is not None - traversed_states: List[IsoScopeScheduling._State] = [] + traversed_states: list[IsoScopeScheduling._State] = [] previous_state = None while self._state != previous_state: # NOTE: This loop will terminate because completion of tests and @@ -764,7 +764,7 @@ def _handle_state_fence(self) -> None: self._log(f"Transitioned from {previous_state!s} to " f"{self._state!s}") def _distribute_workset( - self, workset: _ScopeWorkset, workers: List[_WorkerProxy] + self, workset: _ScopeWorkset, workers: list[_WorkerProxy] ) -> None: """Distribute the tests in the given workset to the given workers. @@ -979,7 +979,7 @@ def _get_max_workers_for_num_tests(num_tests: int) -> int: def _get_workers_available_for_distribution( self, scope_id: str - ) -> List[_WorkerProxy]: + ) -> list[_WorkerProxy]: """Return workers available for distribution of the given Scope. Available workers are non-shutting-down workers that either @@ -1003,7 +1003,7 @@ def _get_workers_available_for_distribution( ) ] - def _get_workers_ready_for_fencing(self, scope_id: str) -> List[_WorkerProxy]: + def _get_workers_ready_for_fencing(self, scope_id: str) -> list[_WorkerProxy]: """Return workers that are ready to be Fenced for the given test Scope. A worker that needs to be Fenced satisfies all the following conditions: @@ -1028,9 +1028,9 @@ def _get_workers_ready_for_fencing(self, scope_id: str) -> List[_WorkerProxy]: def _do_two_nodes_have_same_collection( self, reference_node: WorkerController, - reference_collection: Tuple[str, ...], + reference_collection: tuple[str, ...], node: WorkerController, - collection: Tuple[str, ...], + collection: tuple[str, ...], ) -> bool: """ If collections differ, this method returns False while logging @@ -1081,7 +1081,7 @@ def __init__(self, node: WorkerController): # An ordered collection of test IDs collected by the remote worker. # Initially None, until assigned by the Scheduler - self._collection: Tuple[str, ...] | None = None + self._collection: tuple[str, ...] | None = None self._pending_test_by_index: OrderedDict[int, _TestProxy] = OrderedDict() @@ -1094,7 +1094,7 @@ def node(self) -> WorkerController: return self._node @property - def collection(self) -> Tuple[str, ...] 
| None: + def collection(self) -> tuple[str, ...] | None: """ :return: An ordered collection of test IDs collected by the remote worker; `None` if the collection is not available yet. @@ -1102,7 +1102,7 @@ def collection(self) -> Tuple[str, ...] | None: return self._collection @collection.setter - def collection(self, collection: Tuple[str, ...]) -> None: + def collection(self, collection: tuple[str, ...]) -> None: """ :param collection: An ordered collection of test IDs collected by the remote worker. Must not be `None`. Also, MUST NOT be set already. @@ -1211,7 +1211,7 @@ def handle_test_completion(self, test_index: int) -> None: # Remove the test from the worker's pending queue self._pending_test_by_index.pop(test_index) - def release_pending_tests(self) -> List[_TestProxy]: + def release_pending_tests(self) -> list[_TestProxy]: """Reset the worker's pending tests, returning those pending tests. :return: A (possibly empty) list of pending tests. @@ -1316,7 +1316,7 @@ def enqueue_test(self, test: _TestProxy) -> None: # Update high watermark self._high_water = max(self._high_water, len(self._test_by_index)) - def dequeue_tests(self, num_tests: int) -> List[_TestProxy]: + def dequeue_tests(self, num_tests: int) -> list[_TestProxy]: """ Remove and return the given number of tests from the head of the collection. From 017c1f665391411b84acbe72a7693e3ceed08ac1 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 22 Oct 2024 22:29:39 +0000 Subject: [PATCH 18/52] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/xdist/scheduler/isoscope.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/xdist/scheduler/isoscope.py b/src/xdist/scheduler/isoscope.py index 84c3764b..959999b7 100644 --- a/src/xdist/scheduler/isoscope.py +++ b/src/xdist/scheduler/isoscope.py @@ -48,7 +48,6 @@ from math import ceil import random from typing import cast -from typing import List from typing import Tuple from typing import TYPE_CHECKING import pytest From d8d97a12ab0cf078174fad8d0f5ac29901e8a207 Mon Sep 17 00:00:00 2001 From: Vitaly Kruglikov Date: Wed, 23 Oct 2024 16:56:05 -0400 Subject: [PATCH 19/52] Implemented acceptance test class TestIsoScope based on TestLoadScope.
--- testing/acceptance_test.py | 66 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) diff --git a/testing/acceptance_test.py b/testing/acceptance_test.py index 637d5e66..7d1e33b5 100644 --- a/testing/acceptance_test.py +++ b/testing/acceptance_test.py @@ -1198,6 +1198,72 @@ def pytest_collection_modifyitems(): result.stdout.fnmatch_lines(["*RuntimeError: Some runtime error*"]) +class TestIsoScope: + def test_by_module(self, pytester: pytest.Pytester) -> None: + test_file = """ + import pytest + @pytest.mark.parametrize('i', range(10)) + def test(i): + pass + """ + pytester.makepyfile(test_a=test_file, test_b=test_file) + result = pytester.runpytest("-n2", "--dist=isoscope", "-v") + assert get_workers_and_test_count_by_prefix( + "test_a.py::test", result.outlines + ) in ({"gw0": 10}, {"gw1": 10}) + assert get_workers_and_test_count_by_prefix( + "test_b.py::test", result.outlines + ) in ({"gw0": 10}, {"gw1": 10}) + + def test_by_class(self, pytester: pytest.Pytester) -> None: + pytester.makepyfile( + test_a=""" + import pytest + class TestA: + @pytest.mark.parametrize('i', range(10)) + def test(self, i): + pass + + class TestB: + @pytest.mark.parametrize('i', range(10)) + def test(self, i): + pass + """ + ) + result = pytester.runpytest("-n2", "--dist=isoscope", "-v") + assert get_workers_and_test_count_by_prefix( + "test_a.py::TestA", result.outlines + ) in ({"gw0": 10}, {"gw1": 10}) + assert get_workers_and_test_count_by_prefix( + "test_a.py::TestB", result.outlines + ) in ({"gw0": 10}, {"gw1": 10}) + + def test_module_single_start(self, pytester: pytest.Pytester) -> None: + """Ensure test suite finishing in case all workers start with a single test (#277).""" + test_file1 = """ + import pytest + def test(): + pass + """ + test_file2 = """ + import pytest + def test_1(): + pass + def test_2(): + pass + """ + pytester.makepyfile(test_a=test_file1, test_b=test_file1, test_c=test_file2) + result = pytester.runpytest("-n2", "--dist=isoscope", "-v") + a = get_workers_and_test_count_by_prefix("test_a.py::test", result.outlines) + b = get_workers_and_test_count_by_prefix("test_b.py::test", result.outlines) + c1 = get_workers_and_test_count_by_prefix("test_c.py::test_1", result.outlines) + c2 = get_workers_and_test_count_by_prefix("test_c.py::test_2", result.outlines) + assert a in ({"gw0": 1}, {"gw1": 1}) + assert b in ({"gw0": 1}, {"gw1": 1}) + assert a.items() != b.items() + assert c1 == c2 + + class TestLoadScope: def test_by_module(self, pytester: pytest.Pytester) -> None: test_file = """ From 95a16a6e8df45c779e31d984dd3384dc5fe496ab Mon Sep 17 00:00:00 2001 From: Vitaly Kruglikov Date: Wed, 23 Oct 2024 17:31:41 -0400 Subject: [PATCH 20/52] Fix expected counts in TestIsoScope test_by_module, test_by_class, and test_module_single_start. 
--- testing/acceptance_test.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/testing/acceptance_test.py b/testing/acceptance_test.py index 7d1e33b5..71c8e3f8 100644 --- a/testing/acceptance_test.py +++ b/testing/acceptance_test.py @@ -1210,10 +1210,10 @@ def test(i): result = pytester.runpytest("-n2", "--dist=isoscope", "-v") assert get_workers_and_test_count_by_prefix( "test_a.py::test", result.outlines - ) in ({"gw0": 10}, {"gw1": 10}) + ) == {"gw0": 5, "gw1": 5} assert get_workers_and_test_count_by_prefix( "test_b.py::test", result.outlines - ) in ({"gw0": 10}, {"gw1": 10}) + ) == {"gw0": 5, "gw1": 5} def test_by_class(self, pytester: pytest.Pytester) -> None: pytester.makepyfile( @@ -1233,13 +1233,13 @@ def test(self, i): result = pytester.runpytest("-n2", "--dist=isoscope", "-v") assert get_workers_and_test_count_by_prefix( "test_a.py::TestA", result.outlines - ) in ({"gw0": 10}, {"gw1": 10}) + ) == {"gw0": 5, "gw1": 5} assert get_workers_and_test_count_by_prefix( "test_a.py::TestB", result.outlines - ) in ({"gw0": 10}, {"gw1": 10}) + ) == {"gw0": 5, "gw1": 5} def test_module_single_start(self, pytester: pytest.Pytester) -> None: - """Ensure test suite finishing in case all workers start with a single test (#277).""" + """Ensure test suite is finishing in case all workers start with a single test (#277).""" test_file1 = """ import pytest def test(): @@ -1260,7 +1260,7 @@ def test_2(): c2 = get_workers_and_test_count_by_prefix("test_c.py::test_2", result.outlines) assert a in ({"gw0": 1}, {"gw1": 1}) assert b in ({"gw0": 1}, {"gw1": 1}) - assert a.items() != b.items() + assert a.items() == b.items() assert c1 == c2 From 61f5fadb513004c5c52a72c2941528229e972065 Mon Sep 17 00:00:00 2001 From: Vitaly Kruglikov Date: Wed, 23 Oct 2024 18:17:27 -0400 Subject: [PATCH 21/52] Implemented acceptance test TestIsoScope.test_single_scope_all_workers_utilized --- testing/acceptance_test.py | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/testing/acceptance_test.py b/testing/acceptance_test.py index 71c8e3f8..67f89684 100644 --- a/testing/acceptance_test.py +++ b/testing/acceptance_test.py @@ -1263,6 +1263,26 @@ def test_2(): assert a.items() == b.items() assert c1 == c2 + def test_single_scope_all_workers_utilized(self, pytester: pytest.Pytester) -> None: + """ + With single scope, there are no fence tests from another scope, so + this scheduler resorts to shutting down the workers in order to execute + the final tests in each worker. isoscope allocates at least two tests + per worker from the active scope, unless the scope has only one test. 
+ """ + test_file = """ + import pytest + @pytest.mark.parametrize('i', range(5)) + def test(i): + pass + """ + pytester.makepyfile(test_a=test_file) + result = pytester.runpytest("-n2", "--dist=isoscope", "-v") + counts_by_worker = get_workers_and_test_count_by_prefix("test_a.py::test", result.outlines) + assert counts_by_worker["gw0"] in (2, 3) + assert counts_by_worker["gw1"] in (2, 3) + assert counts_by_worker["gw0"] + counts_by_worker["gw1"] == 5 + class TestLoadScope: def test_by_module(self, pytester: pytest.Pytester) -> None: From edc3bd3ae1b959466e18e6e271500102263d199e Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 23 Oct 2024 22:23:37 +0000 Subject: [PATCH 22/52] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- testing/acceptance_test.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/testing/acceptance_test.py b/testing/acceptance_test.py index 67f89684..80d6800c 100644 --- a/testing/acceptance_test.py +++ b/testing/acceptance_test.py @@ -1278,7 +1278,9 @@ def test(i): """ pytester.makepyfile(test_a=test_file) result = pytester.runpytest("-n2", "--dist=isoscope", "-v") - counts_by_worker = get_workers_and_test_count_by_prefix("test_a.py::test", result.outlines) + counts_by_worker = get_workers_and_test_count_by_prefix( + "test_a.py::test", result.outlines + ) assert counts_by_worker["gw0"] in (2, 3) assert counts_by_worker["gw1"] in (2, 3) assert counts_by_worker["gw0"] + counts_by_worker["gw1"] == 5 From db311edc94cc4e3ae0232101d1b9bb0830ef78b8 Mon Sep 17 00:00:00 2001 From: Vitaly Kruglikov Date: Wed, 23 Oct 2024 18:32:49 -0400 Subject: [PATCH 23/52] Implemented acceptance test TestIsoScope.test_single_scope_subset_of_workers_utilized. --- testing/acceptance_test.py | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/testing/acceptance_test.py b/testing/acceptance_test.py index 80d6800c..a139024f 100644 --- a/testing/acceptance_test.py +++ b/testing/acceptance_test.py @@ -1285,6 +1285,29 @@ def test(i): assert counts_by_worker["gw1"] in (2, 3) assert counts_by_worker["gw0"] + counts_by_worker["gw1"] == 5 + @pytest.mark.parametrize('num_tests', [1, 2, 3]) + def test_single_scope_subset_of_workers_utilized(self, + num_tests: int, + pytester: pytest.Pytester) -> None: + """ + With single scope, there are no fence tests from another scope, so + this scheduler resorts to shutting down the workers in order to execute + the final tests in each worker. isoscope allocates at least two tests + per worker from the active scope, unless the scope has only one test. 
+ """ + test_file = f""" + import pytest + @pytest.mark.parametrize('i', range({num_tests})) + def test(i): + pass + """ + pytester.makepyfile(test_a=test_file) + result = pytester.runpytest("-n2", "--dist=isoscope", "-v") + counts_by_worker = get_workers_and_test_count_by_prefix("test_a.py::test", result.outlines) + assert counts_by_worker["gw0"] in (0, num_tests) + assert counts_by_worker["gw1"] in (0, num_tests) + assert counts_by_worker["gw0"] + counts_by_worker["gw1"] == num_tests + class TestLoadScope: def test_by_module(self, pytester: pytest.Pytester) -> None: From 6bb3ef268a8ed2c4d56ee909bc9e872273e0fd3d Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 23 Oct 2024 22:34:21 +0000 Subject: [PATCH 24/52] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- testing/acceptance_test.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/testing/acceptance_test.py b/testing/acceptance_test.py index a139024f..c47014ba 100644 --- a/testing/acceptance_test.py +++ b/testing/acceptance_test.py @@ -1285,10 +1285,10 @@ def test(i): assert counts_by_worker["gw1"] in (2, 3) assert counts_by_worker["gw0"] + counts_by_worker["gw1"] == 5 - @pytest.mark.parametrize('num_tests', [1, 2, 3]) - def test_single_scope_subset_of_workers_utilized(self, - num_tests: int, - pytester: pytest.Pytester) -> None: + @pytest.mark.parametrize("num_tests", [1, 2, 3]) + def test_single_scope_subset_of_workers_utilized( + self, num_tests: int, pytester: pytest.Pytester + ) -> None: """ With single scope, there are no fence tests from another scope, so this scheduler resorts to shutting down the workers in order to execute @@ -1303,7 +1303,9 @@ def test(i): """ pytester.makepyfile(test_a=test_file) result = pytester.runpytest("-n2", "--dist=isoscope", "-v") - counts_by_worker = get_workers_and_test_count_by_prefix("test_a.py::test", result.outlines) + counts_by_worker = get_workers_and_test_count_by_prefix( + "test_a.py::test", result.outlines + ) assert counts_by_worker["gw0"] in (0, num_tests) assert counts_by_worker["gw1"] in (0, num_tests) assert counts_by_worker["gw0"] + counts_by_worker["gw1"] == num_tests From 7f35d75cb19dbb1f93a76d78a4a085a5967b1272 Mon Sep 17 00:00:00 2001 From: Vitaly Kruglikov Date: Wed, 23 Oct 2024 18:44:27 -0400 Subject: [PATCH 25/52] Fixed worker name key error for unutilized worker in acceptance test TestIsoScope.test_single_scope_subset_of_workers_utilized. 
--- testing/acceptance_test.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/testing/acceptance_test.py b/testing/acceptance_test.py index c47014ba..fd98246d 100644 --- a/testing/acceptance_test.py +++ b/testing/acceptance_test.py @@ -1306,9 +1306,13 @@ def test(i): counts_by_worker = get_workers_and_test_count_by_prefix( "test_a.py::test", result.outlines ) - assert counts_by_worker["gw0"] in (0, num_tests) - assert counts_by_worker["gw1"] in (0, num_tests) - assert counts_by_worker["gw0"] + counts_by_worker["gw1"] == num_tests + assert counts_by_worker.setdefault("gw0", None) in (None, num_tests) + assert counts_by_worker.setdefault("gw1", None) in (None, num_tests) + assert counts_by_worker.setdefault( + "gw0", 0 + ) + counts_by_worker.setdefault( + "gw1", 0 + ) == num_tests class TestLoadScope: From eebc3f4ead4213de287bded185172451f9a41551 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 23 Oct 2024 22:45:11 +0000 Subject: [PATCH 26/52] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- testing/acceptance_test.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/testing/acceptance_test.py b/testing/acceptance_test.py index fd98246d..6d9a536e 100644 --- a/testing/acceptance_test.py +++ b/testing/acceptance_test.py @@ -1308,11 +1308,11 @@ def test(i): ) assert counts_by_worker.setdefault("gw0", None) in (None, num_tests) assert counts_by_worker.setdefault("gw1", None) in (None, num_tests) - assert counts_by_worker.setdefault( - "gw0", 0 - ) + counts_by_worker.setdefault( - "gw1", 0 - ) == num_tests + assert ( + counts_by_worker.setdefault("gw0", 0) + + counts_by_worker.setdefault("gw1", 0) + == num_tests + ) class TestLoadScope: From 4695e435c9d9f741725e2054e66f7984d5f38e20 Mon Sep 17 00:00:00 2001 From: Vitaly Kruglikov Date: Wed, 23 Oct 2024 18:51:14 -0400 Subject: [PATCH 27/52] Fixed default test counts in acceptance test TestIsoScope.test_single_scope_subset_of_workers_utilized. --- testing/acceptance_test.py | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/testing/acceptance_test.py b/testing/acceptance_test.py index 6d9a536e..f5269eda 100644 --- a/testing/acceptance_test.py +++ b/testing/acceptance_test.py @@ -1306,13 +1306,9 @@ def test(i): counts_by_worker = get_workers_and_test_count_by_prefix( "test_a.py::test", result.outlines ) - assert counts_by_worker.setdefault("gw0", None) in (None, num_tests) - assert counts_by_worker.setdefault("gw1", None) in (None, num_tests) - assert ( - counts_by_worker.setdefault("gw0", 0) - + counts_by_worker.setdefault("gw1", 0) - == num_tests - ) + assert counts_by_worker.setdefault("gw0", 0) in (0, num_tests) + assert counts_by_worker.setdefault("gw1", 0) in (0, num_tests) + assert counts_by_worker["gw0"] + counts_by_worker.setdefault["gw1"] == num_tests class TestLoadScope: From a4398b2da94adf21d1acfaaa7e34793e894a58f3 Mon Sep 17 00:00:00 2001 From: Vitaly Kruglikov Date: Wed, 23 Oct 2024 18:56:38 -0400 Subject: [PATCH 28/52] Removed extraneous setdefault in acceptance test TestIsoScope.test_single_scope_subset_of_workers_utilized. 
--- testing/acceptance_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testing/acceptance_test.py b/testing/acceptance_test.py index f5269eda..f003591a 100644 --- a/testing/acceptance_test.py +++ b/testing/acceptance_test.py @@ -1308,7 +1308,7 @@ def test(i): ) assert counts_by_worker.setdefault("gw0", 0) in (0, num_tests) assert counts_by_worker.setdefault("gw1", 0) in (0, num_tests) - assert counts_by_worker["gw0"] + counts_by_worker.setdefault["gw1"] == num_tests + assert counts_by_worker["gw0"] + counts_by_worker["gw1"] == num_tests class TestLoadScope: From 47fcf7a21ba9b8ae387c685036b8f6a8bcabf4d8 Mon Sep 17 00:00:00 2001 From: Vitaly Kruglikov Date: Thu, 24 Oct 2024 13:02:43 -0400 Subject: [PATCH 29/52] Implemented acceptance test TestIsoScope.test_multi_scope_with_insufficient_fence. --- testing/acceptance_test.py | 57 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 57 insertions(+) diff --git a/testing/acceptance_test.py b/testing/acceptance_test.py index f003591a..c1dd6ab9 100644 --- a/testing/acceptance_test.py +++ b/testing/acceptance_test.py @@ -1310,6 +1310,63 @@ def test(i): assert counts_by_worker.setdefault("gw1", 0) in (0, num_tests) assert counts_by_worker["gw0"] + counts_by_worker["gw1"] == num_tests + def test_multi_scope_with_insufficient_fence( + self, pytester: pytest.Pytester + ) -> None: + """ + When there are not enough fence tests from subsequent scope(s), + this scheduler resorts to shutting down the excess workers in order to + execute the final tests in each worker. isoscope allocates at least two + tests per worker from the active scope, unless the scope has only one + test. + """ + test_file1 = f""" + import pytest + # 6 tests should distribute 2 per worker for 3 workers due to the + # min-2 scope tests per worker rule. + @pytest.mark.parametrize('i', range(6)) + def test(i): + pass + """ + test_file2 = f""" + import pytest + class FenceA: + def test(self): + pass + + class FenceB: + # Two tests are only enough for one fence due to min-2 scope + # tests per worker rule + def test1(self): + pass + def test1(self): + pass + """ + pytester.makepyfile(test_a=test_file1, fence_tests=test_file2) + result = pytester.runpytest("-n3", "--dist=isoscope", "-v") + + counts_by_worker_a = get_workers_and_test_count_by_prefix( + "test_a.py::test", result.outlines + ) + # 6 tests should distribute 2 per worker for 3 workers due to the + # min-2 scope tests per worker rule. 
+ assert sum(counts_by_worker_a.values()) == 6 + for worker in ['gw0', 'gw1', 'gw2']: + assert counts_by_worker_a[worker] == 2 + + counts_by_worker_fence_a = get_workers_and_test_count_by_prefix( + "fence_tests.py::FenceA", result.outlines + ) + counts_by_worker_fence_b = get_workers_and_test_count_by_prefix( + "fence_tests.py::FenceB", result.outlines + ) + + assert len(counts_by_worker_fence_a) == 1 + assert list(counts_by_worker_fence_a.values())[0] == 1 + + assert len(counts_by_worker_fence_b) == 1 + assert list(counts_by_worker_fence_b.values())[0] == 2 + class TestLoadScope: def test_by_module(self, pytester: pytest.Pytester) -> None: From 680d855572f8ff0a7b087548afb682813d6a39e1 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Thu, 24 Oct 2024 17:04:09 +0000 Subject: [PATCH 30/52] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- testing/acceptance_test.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/testing/acceptance_test.py b/testing/acceptance_test.py index c1dd6ab9..d09abd0f 100644 --- a/testing/acceptance_test.py +++ b/testing/acceptance_test.py @@ -1311,7 +1311,7 @@ def test(i): assert counts_by_worker["gw0"] + counts_by_worker["gw1"] == num_tests def test_multi_scope_with_insufficient_fence( - self, pytester: pytest.Pytester + self, pytester: pytest.Pytester ) -> None: """ When there are not enough fence tests from subsequent scope(s), @@ -1320,7 +1320,7 @@ def test_multi_scope_with_insufficient_fence( tests per worker from the active scope, unless the scope has only one test. """ - test_file1 = f""" + test_file1 = """ import pytest # 6 tests should distribute 2 per worker for 3 workers due to the # min-2 scope tests per worker rule. @@ -1328,7 +1328,7 @@ def test_multi_scope_with_insufficient_fence( def test(i): pass """ - test_file2 = f""" + test_file2 = """ import pytest class FenceA: def test(self): @@ -1351,7 +1351,7 @@ def test1(self): # 6 tests should distribute 2 per worker for 3 workers due to the # min-2 scope tests per worker rule. assert sum(counts_by_worker_a.values()) == 6 - for worker in ['gw0', 'gw1', 'gw2']: + for worker in ["gw0", "gw1", "gw2"]: assert counts_by_worker_a[worker] == 2 counts_by_worker_fence_a = get_workers_and_test_count_by_prefix( From d2bff973bc30f13cf8c54da98e5b7608589712f4 Mon Sep 17 00:00:00 2001 From: Vitaly Kruglikov Date: Thu, 24 Oct 2024 13:11:58 -0400 Subject: [PATCH 31/52] Fixed test discoverability and pre-commit findings in acceptance test TestIsoScope.test_multi_scope_with_insufficient_fence. 
--- testing/acceptance_test.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/testing/acceptance_test.py b/testing/acceptance_test.py index d09abd0f..51e5f07b 100644 --- a/testing/acceptance_test.py +++ b/testing/acceptance_test.py @@ -1330,16 +1330,16 @@ def test(i): """ test_file2 = """ import pytest - class FenceA: + class TestFenceA: def test(self): pass - class FenceB: - # Two tests are only enough for one fence due to min-2 scope + class TestFenceB: + # Two tests are only enough for one fence due to min-2 scope # tests per worker rule def test1(self): pass - def test1(self): + def test2(self): pass """ pytester.makepyfile(test_a=test_file1, fence_tests=test_file2) result = pytester.runpytest("-n3", "--dist=isoscope", "-v") @@ -1362,10 +1362,10 @@ def test1(self): ) assert len(counts_by_worker_fence_a) == 1 - assert list(counts_by_worker_fence_a.values())[0] == 1 + assert next(iter(counts_by_worker_fence_a.values())) == 1 assert len(counts_by_worker_fence_b) == 1 - assert list(counts_by_worker_fence_b.values())[0] == 2 + assert next(iter(counts_by_worker_fence_b.values())) == 2 From 0e00ed90b73cbe374f510b2b093fa968038b9854 Mon Sep 17 00:00:00 2001 From: Vitaly Kruglikov Date: Thu, 24 Oct 2024 13:18:39 -0400 Subject: [PATCH 32/52] Fixed TestFenceA/B search pattern in acceptance test TestIsoScope.test_multi_scope_with_insufficient_fence. --- testing/acceptance_test.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/acceptance_test.py b/testing/acceptance_test.py index 51e5f07b..5acb6d70 100644 --- a/testing/acceptance_test.py +++ b/testing/acceptance_test.py @@ -1355,10 +1355,10 @@ def test2(self): assert counts_by_worker_a[worker] == 2 counts_by_worker_fence_a = get_workers_and_test_count_by_prefix( - "fence_tests.py::FenceA", result.outlines + "fence_tests.py::TestFenceA", result.outlines ) counts_by_worker_fence_b = get_workers_and_test_count_by_prefix( - "fence_tests.py::FenceB", result.outlines + "fence_tests.py::TestFenceB", result.outlines ) From fa9776234ab1b04132e46068bbcfa9a254e563fe Mon Sep 17 00:00:00 2001 From: Vitaly Kruglikov Date: Thu, 24 Oct 2024 13:22:54 -0400 Subject: [PATCH 33/52] Fix test_fence_scopes pytest filename in acceptance test TestIsoScope.test_multi_scope_with_insufficient_fence.
--- testing/acceptance_test.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/testing/acceptance_test.py b/testing/acceptance_test.py index 5acb6d70..cde5b2aa 100644 --- a/testing/acceptance_test.py +++ b/testing/acceptance_test.py @@ -1342,7 +1342,7 @@ def test1(self): def test2(self): pass """ - pytester.makepyfile(test_a=test_file1, fence_tests=test_file2) + pytester.makepyfile(test_a=test_file1, test_fence_scopes=test_file2) result = pytester.runpytest("-n3", "--dist=isoscope", "-v") counts_by_worker_a = get_workers_and_test_count_by_prefix( @@ -1355,10 +1355,10 @@ def test2(self): assert counts_by_worker_a[worker] == 2 counts_by_worker_fence_a = get_workers_and_test_count_by_prefix( - "fence_tests.py::TestFenceA", result.outlines + "test_fence_scopes.py::TestFenceA", result.outlines ) counts_by_worker_fence_b = get_workers_and_test_count_by_prefix( - "fence_tests.py::TestFenceB", result.outlines + "test_fence_scopes.py::TestFenceB", result.outlines ) assert len(counts_by_worker_fence_a) == 1 From 3b7f2de448dddffb408e117c93131b3dbc1d7180 Mon Sep 17 00:00:00 2001 From: Vitaly Kruglikov Date: Thu, 24 Oct 2024 14:19:03 -0400 Subject: [PATCH 34/52] Implemented acceptance test TestIsoScope.test_two_tests_min_per_worker_rule_with_two_workers. --- testing/acceptance_test.py | 46 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) diff --git a/testing/acceptance_test.py b/testing/acceptance_test.py index cde5b2aa..cd09b491 100644 --- a/testing/acceptance_test.py +++ b/testing/acceptance_test.py @@ -1367,6 +1367,52 @@ def test2(self): assert len(counts_by_worker_fence_b) == 1 assert next(iter(counts_by_worker_fence_b.values())) == 2 + @pytest.mark.parametrize('num_tests', [1, 2, 3, 4, 5, 7]) + def test_two_tests_min_per_worker_rule_with_two_workers( + self, num_tests: int, pytester: pytest.Pytester + ) -> None: + """ + isoscope allocates at least two tests per worker from the active scope, + unless the scope has only one test. + """ + test_file1 = f""" + import pytest + # 6 tests should distribute 2 per worker for 3 workers due to the + # min-2 scope tests per worker rule. 
+ @pytest.mark.parametrize('i', range({num_tests})) + def test(i): + pass + """ + pytester.makepyfile(test_a=test_file1) + result = pytester.runpytest("-n2", "--dist=isoscope", "-v") + + if num_tests == 1: + expected_worker_a_test_count = 1 + elif num_tests == 2: + expected_worker_a_test_count = 2 + elif num_tests == 3: + expected_worker_a_test_count = 3 + elif num_tests == 4: + expected_worker_a_test_count = 2 + elif num_tests == 5: + expected_worker_a_test_count = 3 + elif num_tests == 7: + expected_worker_a_test_count = 4 + else: + assert False, f"Unexpected {num_tests=}" + + counts_by_worker_a = get_workers_and_test_count_by_prefix( + "test_a.py::test", result.outlines + ) + + counts_by_worker_a.setdefault("gw0", 0) + counts_by_worker_a.setdefault("gw1", 0) + + assert set(counts_by_worker_a.values()) == { + expected_worker_a_test_count, + num_tests - expected_worker_a_test_count + } + class TestLoadScope: def test_by_module(self, pytester: pytest.Pytester) -> None: From da65e86fe9a93d8bd8262012a8f4423e8b33e17d Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Thu, 24 Oct 2024 18:19:45 +0000 Subject: [PATCH 35/52] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- testing/acceptance_test.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/acceptance_test.py b/testing/acceptance_test.py index cd09b491..e1dda9ee 100644 --- a/testing/acceptance_test.py +++ b/testing/acceptance_test.py @@ -1367,7 +1367,7 @@ def test2(self): assert len(counts_by_worker_fence_b) == 1 assert next(iter(counts_by_worker_fence_b.values())) == 2 - @pytest.mark.parametrize('num_tests', [1, 2, 3, 4, 5, 7]) + @pytest.mark.parametrize("num_tests", [1, 2, 3, 4, 5, 7]) def test_two_tests_min_per_worker_rule_with_two_workers( self, num_tests: int, pytester: pytest.Pytester ) -> None: @@ -1410,7 +1410,7 @@ def test(i): assert set(counts_by_worker_a.values()) == { expected_worker_a_test_count, - num_tests - expected_worker_a_test_count + num_tests - expected_worker_a_test_count, } From ad0f4af204ebe9f209071ee5e548c5b149892e9b Mon Sep 17 00:00:00 2001 From: Vitaly Kruglikov Date: Fri, 25 Oct 2024 14:22:56 -0400 Subject: [PATCH 36/52] Work in progress on acceptance test TestIsoScope.test_distributed_setup_teardown_coordination. --- testing/acceptance_test.py | 74 +++++++++++++++++++++++++++++++++++--- 1 file changed, 70 insertions(+), 4 deletions(-) diff --git a/testing/acceptance_test.py b/testing/acceptance_test.py index e1dda9ee..381b43e6 100644 --- a/testing/acceptance_test.py +++ b/testing/acceptance_test.py @@ -1,5 +1,6 @@ from __future__ import annotations +import pprint import os import re import shutil @@ -1199,6 +1200,73 @@ def pytest_collection_modifyitems(): class TestIsoScope: + def test_distributed_setup_teardown_coordination( + self, pytester: pytest.Pytester + ) -> None: + """ + The isoscope scheduler provides a distributed coordination mechanism + for Scope-level Resource Setup/Teardown with the following guarantees: + 1. Resource Setup is performed exactly once per test Scope. + 2. Resource Teardown is performed exactly once per test Scope. + 3. Resource Setup of the executing test Scope completes BEFORE + execution of the Scope's tests. + 4. Resource Teardown phase of the executing test Scope begins after + completion of all tests of the Scope. + 5. Resource Setup of the next test Scope begins after completion of + the previous test Scope's Resource Teardown. 
+ """ + test_file = """ + import pathlib + from uuid import uuid1 + import pytest + class TestScopeA: + @classmethod + @pytest.fixture(scope='class', autouse=True) + def distributed_setup_and_teardown( + cls, + iso_scheduling: + request: pytest.FixtureRequest): + with iso_scheduling.coordinate_setup_teardown( + setup_request=request) as coordinator: + # Distributed Setup + coordinator.maybe_call_setup(cls.patch_system_under_test) + try: + # Yield control back to the XDist Worker to allow the + # test cases to run + yield + finally: + # Distributed Teardown + coordinator.maybe_call_teardown(cls.revert_system_under_test) + @classmethod + def patch_system_under_test( + cls, + setup_context: DistributedSetupContext) -> None: + # Initialize the System Under Test for all the test cases in + # this test class and store state in `setup_context.client_dir`. + + @classmethod + def revert_system_under_test( + cls, + teardown_context: DistributedTeardownContext) + # Fetch state from `teardown_context.client_dir` and revert + # changes made by `patch_system_under_test()`. + + @pytest.mark.parametrize('i', range(5)) + def test(self, i): + pass + + class TestScopeB(TestScopeA): + pass + """ + pytester.makepyfile(test_a=test_file, test_b=test_file) + result = pytester.runpytest("-n2", "--dist=isoscope", "-v") + + print("ZZZ outlines") + print(pprint.pprint(result.outlines, indent=4)) + print("ZZZ errlines") + print(pprint.pprint(result.errlines, indent=4)) + assert False + def test_by_module(self, pytester: pytest.Pytester) -> None: test_file = """ import pytest @@ -1367,8 +1435,8 @@ def test2(self): assert len(counts_by_worker_fence_b) == 1 assert next(iter(counts_by_worker_fence_b.values())) == 2 - @pytest.mark.parametrize("num_tests", [1, 2, 3, 4, 5, 7]) - def test_two_tests_min_per_worker_rule_with_two_workers( + @pytest.mark.parametrize('num_tests', [1, 2, 3, 4, 5, 7]) + def test_two_tests_min_per_worker_rule( self, num_tests: int, pytester: pytest.Pytester ) -> None: """ @@ -1377,8 +1445,6 @@ def test_two_tests_min_per_worker_rule_with_two_workers( """ test_file1 = f""" import pytest - # 6 tests should distribute 2 per worker for 3 workers due to the - # min-2 scope tests per worker rule. 
@pytest.mark.parametrize('i', range({num_tests})) def test(i): pass From 4658a1c40d86019fed490e59324411dc65475a84 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Fri, 25 Oct 2024 18:29:02 +0000 Subject: [PATCH 37/52] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- testing/acceptance_test.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/acceptance_test.py b/testing/acceptance_test.py index 381b43e6..a24cf938 100644 --- a/testing/acceptance_test.py +++ b/testing/acceptance_test.py @@ -1,7 +1,7 @@ from __future__ import annotations -import pprint import os +import pprint import re import shutil from typing import cast @@ -1435,7 +1435,7 @@ def test2(self): assert len(counts_by_worker_fence_b) == 1 assert next(iter(counts_by_worker_fence_b.values())) == 2 - @pytest.mark.parametrize('num_tests', [1, 2, 3, 4, 5, 7]) + @pytest.mark.parametrize("num_tests", [1, 2, 3, 4, 5, 7]) def test_two_tests_min_per_worker_rule( self, num_tests: int, pytester: pytest.Pytester ) -> None: From 97fe2e69e4f34f1aceab8b51d9c64222951cf0f2 Mon Sep 17 00:00:00 2001 From: Vitaly Kruglikov Date: Fri, 25 Oct 2024 14:31:58 -0400 Subject: [PATCH 38/52] Work in progress on acceptance test TestIsoScope.test_distributed_setup_teardown_coordination. --- testing/acceptance_test.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/testing/acceptance_test.py b/testing/acceptance_test.py index a24cf938..08d7d232 100644 --- a/testing/acceptance_test.py +++ b/testing/acceptance_test.py @@ -1218,13 +1218,20 @@ def test_distributed_setup_teardown_coordination( test_file = """ import pathlib from uuid import uuid1 + from typing import TYPE_CHECKING import pytest + if TYPE_CHECKING: + from xdist.iso_scheduling_utils import ( + IsoSchedulingFixture, + DistributedSetupContext, + DistributedTeardownContext + ) class TestScopeA: @classmethod @pytest.fixture(scope='class', autouse=True) def distributed_setup_and_teardown( cls, - iso_scheduling: + iso_scheduling: IsoSchedulingFixture request: pytest.FixtureRequest): with iso_scheduling.coordinate_setup_teardown( setup_request=request) as coordinator: From 6aa321df9057b78c8a34d45e02d95ba8c6942c86 Mon Sep 17 00:00:00 2001 From: Vitaly Kruglikov Date: Fri, 25 Oct 2024 14:35:51 -0400 Subject: [PATCH 39/52] Work in progress on acceptance test TestIsoScope.test_distributed_setup_teardown_coordination. --- testing/acceptance_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testing/acceptance_test.py b/testing/acceptance_test.py index 08d7d232..495f0d37 100644 --- a/testing/acceptance_test.py +++ b/testing/acceptance_test.py @@ -1231,7 +1231,7 @@ class TestScopeA: @pytest.fixture(scope='class', autouse=True) def distributed_setup_and_teardown( cls, - iso_scheduling: IsoSchedulingFixture + iso_scheduling: IsoSchedulingFixture, request: pytest.FixtureRequest): with iso_scheduling.coordinate_setup_teardown( setup_request=request) as coordinator: From d11066866e9ea81522e53c70280ea56bd0ee61b3 Mon Sep 17 00:00:00 2001 From: Vitaly Kruglikov Date: Fri, 25 Oct 2024 14:45:34 -0400 Subject: [PATCH 40/52] Work in progress on acceptance test TestIsoScope.test_distributed_setup_teardown_coordination. 
--- src/xdist/iso_scheduling_plugin.py | 2 +- src/xdist/iso_scheduling_utils.py | 2 +- testing/acceptance_test.py | 6 ++++-- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/src/xdist/iso_scheduling_plugin.py b/src/xdist/iso_scheduling_plugin.py index 009b2937..9984378d 100644 --- a/src/xdist/iso_scheduling_plugin.py +++ b/src/xdist/iso_scheduling_plugin.py @@ -127,7 +127,7 @@ def patch_system_under_test( @classmethod def revert_system_under_test( cls, - teardown_context: DistributedTeardownContext) + teardown_context: DistributedTeardownContext): # Fetch state from `teardown_context.client_dir` and revert # changes made by `patch_system_under_test()`. diff --git a/src/xdist/iso_scheduling_utils.py b/src/xdist/iso_scheduling_utils.py index 889b5cac..bf44cc90 100644 --- a/src/xdist/iso_scheduling_utils.py +++ b/src/xdist/iso_scheduling_utils.py @@ -137,7 +137,7 @@ def patch_system_under_test( @classmethod def revert_system_under_test( cls, - teardown_context: DistributedTeardownContext) + teardown_context: DistributedTeardownContext): # Fetch state from `teardown_context.client_dir` and revert # changes made by `patch_system_under_test()`. diff --git a/testing/acceptance_test.py b/testing/acceptance_test.py index 495f0d37..8f0174c8 100644 --- a/testing/acceptance_test.py +++ b/testing/acceptance_test.py @@ -1250,13 +1250,15 @@ def patch_system_under_test( setup_context: DistributedSetupContext) -> None: # Initialize the System Under Test for all the test cases in # this test class and store state in `setup_context.client_dir`. - + pass + @classmethod def revert_system_under_test( cls, - teardown_context: DistributedTeardownContext) + teardown_context: DistributedTeardownContext): # Fetch state from `teardown_context.client_dir` and revert # changes made by `patch_system_under_test()`. + pass @pytest.mark.parametrize('i', range(5)) def test(self, i): From 46f33a6f363cfea1ca5bc554cb8f2da6d893c20d Mon Sep 17 00:00:00 2001 From: Vitaly Kruglikov Date: Fri, 25 Oct 2024 14:47:10 -0400 Subject: [PATCH 41/52] Work in progress on acceptance test TestIsoScope.test_distributed_setup_teardown_coordination. --- testing/acceptance_test.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/acceptance_test.py b/testing/acceptance_test.py index 8f0174c8..ea262a7a 100644 --- a/testing/acceptance_test.py +++ b/testing/acceptance_test.py @@ -1271,9 +1271,9 @@ class TestScopeB(TestScopeA): result = pytester.runpytest("-n2", "--dist=isoscope", "-v") print("ZZZ outlines") - print(pprint.pprint(result.outlines, indent=4)) + pprint.pprint(result.outlines, indent=4) print("ZZZ errlines") - print(pprint.pprint(result.errlines, indent=4)) + pprint.pprint(result.errlines, indent=4) assert False def test_by_module(self, pytester: pytest.Pytester) -> None: From 4711108db17bc15f8033acc70475c77bf380561c Mon Sep 17 00:00:00 2001 From: Vitaly Kruglikov Date: Fri, 25 Oct 2024 14:52:06 -0400 Subject: [PATCH 42/52] Work in progress on acceptance test TestIsoScope.test_distributed_setup_teardown_coordination. --- testing/acceptance_test.py | 1 + 1 file changed, 1 insertion(+) diff --git a/testing/acceptance_test.py b/testing/acceptance_test.py index ea262a7a..0c125b27 100644 --- a/testing/acceptance_test.py +++ b/testing/acceptance_test.py @@ -1216,6 +1216,7 @@ def test_distributed_setup_teardown_coordination( the previous test Scope's Resource Teardown. 
""" test_file = """ + from __future__ import annotations import pathlib from uuid import uuid1 from typing import TYPE_CHECKING From 5f988d9c36bfaa21a692a8afa5a60682c4713e92 Mon Sep 17 00:00:00 2001 From: Vitaly Kruglikov Date: Fri, 25 Oct 2024 16:23:24 -0400 Subject: [PATCH 43/52] Work in progress on acceptance test TestIsoScope.test_distributed_setup_teardown_coordination. --- testing/acceptance_test.py | 125 ++++++++++++++++++++++--------------- 1 file changed, 73 insertions(+), 52 deletions(-) diff --git a/testing/acceptance_test.py b/testing/acceptance_test.py index 0c125b27..31df71a1 100644 --- a/testing/acceptance_test.py +++ b/testing/acceptance_test.py @@ -1,10 +1,12 @@ from __future__ import annotations import os +import pathlib import pprint import re import shutil from typing import cast +from uuid import uuid1 import pytest @@ -1199,9 +1201,23 @@ def pytest_collection_modifyitems(): result.stdout.fnmatch_lines(["*RuntimeError: Some runtime error*"]) +@pytest.fixture(scope="session") +def shared_scope_setup_status_path( + tmp_path_factory: pytest.TempPathFactory, testrun_uid: str +) -> pathlib.Path: + return ( + tmp_path_factory.getbasetemp().parent + / "test_distributed_setup_teardown_coordination" + / testrun_uid + / uuid1().hex + / "scope_setup_status.txt" + ) + + class TestIsoScope: def test_distributed_setup_teardown_coordination( - self, pytester: pytest.Pytester + self, pytester: pytest.Pytester, + shared_scope_setup_status_path: pathlib.Path ) -> None: """ The isoscope scheduler provides a distributed coordination mechanism @@ -1215,58 +1231,63 @@ def test_distributed_setup_teardown_coordination( 5. Resource Setup of the next test Scope begins after completion of the previous test Scope's Resource Teardown. """ - test_file = """ - from __future__ import annotations - import pathlib - from uuid import uuid1 - from typing import TYPE_CHECKING - import pytest - if TYPE_CHECKING: - from xdist.iso_scheduling_utils import ( - IsoSchedulingFixture, - DistributedSetupContext, - DistributedTeardownContext - ) - class TestScopeA: - @classmethod - @pytest.fixture(scope='class', autouse=True) - def distributed_setup_and_teardown( - cls, - iso_scheduling: IsoSchedulingFixture, - request: pytest.FixtureRequest): - with iso_scheduling.coordinate_setup_teardown( - setup_request=request) as coordinator: - # Distributed Setup - coordinator.maybe_call_setup(cls.patch_system_under_test) - try: - # Yield control back to the XDist Worker to allow the - # test cases to run - yield - finally: - # Distributed Teardown - coordinator.maybe_call_teardown(cls.revert_system_under_test) - @classmethod - def patch_system_under_test( - cls, - setup_context: DistributedSetupContext) -> None: - # Initialize the System Under Test for all the test cases in - # this test class and store state in `setup_context.client_dir`. - pass - - @classmethod - def revert_system_under_test( - cls, - teardown_context: DistributedTeardownContext): - # Fetch state from `teardown_context.client_dir` and revert - # changes made by `patch_system_under_test()`. 
- pass - - @pytest.mark.parametrize('i', range(5)) - def test(self, i): - pass + test_file = f""" + from __future__ import annotations + import pathlib + from typing import TYPE_CHECKING + import pytest + if TYPE_CHECKING: + from xdist.iso_scheduling_utils import ( + IsoSchedulingFixture, + DistributedSetupContext, + DistributedTeardownContext + ) - class TestScopeB(TestScopeA): - pass + _SHARED_SCOPE_SETUP_STATUS_PATH = pathlib.Path( + {str(shared_scope_setup_status_path)}) + + class TestScopeA: + @classmethod + @pytest.fixture(scope='class', autouse=True) + def distributed_setup_and_teardown( + cls, + iso_scheduling: IsoSchedulingFixture, + request: pytest.FixtureRequest): + with iso_scheduling.coordinate_setup_teardown( + setup_request=request) as coordinator: + # Distributed Setup + coordinator.maybe_call_setup(cls.patch_system_under_test) + try: + # Yield control back to the XDist Worker to allow the + # test cases to run + yield + finally: + # Distributed Teardown + coordinator.maybe_call_teardown(cls.revert_system_under_test) + @classmethod + def patch_system_under_test( + cls, + setup_context: DistributedSetupContext) -> None: + # Initialize the System Under Test for all the test cases in + # this test class and store state in `setup_context.client_dir`. + assert _SHARED_SCOPE_SETUP_STATUS_PATH.read_text() == "TEARDOWN_COMPLETE" + _SHARED_SCOPE_SETUP_STATUS_PATH.write_text("SETUP_COMPLETE") + + @classmethod + def revert_system_under_test( + cls, + teardown_context: DistributedTeardownContext): + # Fetch state from `teardown_context.client_dir` and revert + # changes made by `patch_system_under_test()`. + assert _SHARED_SCOPE_SETUP_STATUS_PATH.read_text() == "SETUP_COMPLETE" + _SHARED_SCOPE_SETUP_STATUS_PATH.write_text("TEARDOWN_COMPLETE") + + @pytest.mark.parametrize('i', range(5)) + def test(self, i): + assert _SHARED_SCOPE_SETUP_STATUS_PATH.read_text() == "SETUP_COMPLETE" + + class TestScopeB(TestScopeA): + pass """ pytester.makepyfile(test_a=test_file, test_b=test_file) result = pytester.runpytest("-n2", "--dist=isoscope", "-v") From c649eb5f10b5dfc8b0ac6afb242312b19a79d9f2 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Fri, 25 Oct 2024 20:23:43 +0000 Subject: [PATCH 44/52] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- testing/acceptance_test.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/testing/acceptance_test.py b/testing/acceptance_test.py index 31df71a1..94bf0456 100644 --- a/testing/acceptance_test.py +++ b/testing/acceptance_test.py @@ -1216,8 +1216,7 @@ def shared_scope_setup_status_path( class TestIsoScope: def test_distributed_setup_teardown_coordination( - self, pytester: pytest.Pytester, - shared_scope_setup_status_path: pathlib.Path + self, pytester: pytest.Pytester, shared_scope_setup_status_path: pathlib.Path ) -> None: """ The isoscope scheduler provides a distributed coordination mechanism @@ -1244,7 +1243,7 @@ def test_distributed_setup_teardown_coordination( ) _SHARED_SCOPE_SETUP_STATUS_PATH = pathlib.Path( - {str(shared_scope_setup_status_path)}) + {shared_scope_setup_status_path!s}) class TestScopeA: @classmethod From b865e9dda6c62e0bc36aecf0a7343a8eafcf2b84 Mon Sep 17 00:00:00 2001 From: Vitaly Kruglikov Date: Fri, 25 Oct 2024 16:30:50 -0400 Subject: [PATCH 45/52] Work in progress on acceptance test TestIsoScope.test_distributed_setup_teardown_coordination. 
--- testing/acceptance_test.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/testing/acceptance_test.py b/testing/acceptance_test.py index 94bf0456..10b82c2a 100644 --- a/testing/acceptance_test.py +++ b/testing/acceptance_test.py @@ -1291,11 +1291,14 @@ class TestScopeB(TestScopeA): pytester.makepyfile(test_a=test_file, test_b=test_file) result = pytester.runpytest("-n2", "--dist=isoscope", "-v") - print("ZZZ outlines") - pprint.pprint(result.outlines, indent=4) - print("ZZZ errlines") - pprint.pprint(result.errlines, indent=4) - assert False + assert sum( + get_workers_and_test_count_by_prefix( + "test_a.py::TestScopeA", result.outlines).values() + ) == 5 + assert sum( + get_workers_and_test_count_by_prefix( + "test_a.py::TestScopeB", result.outlines).values() + ) == 5 def test_by_module(self, pytester: pytest.Pytester) -> None: test_file = """ From 4292294f5da4ad0f337148c17349f2a3215d8c9d Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Fri, 25 Oct 2024 20:31:08 +0000 Subject: [PATCH 46/52] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- testing/acceptance_test.py | 25 ++++++++++++++++--------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/testing/acceptance_test.py b/testing/acceptance_test.py index 10b82c2a..4bfab523 100644 --- a/testing/acceptance_test.py +++ b/testing/acceptance_test.py @@ -2,7 +2,6 @@ import os import pathlib -import pprint import re import shutil from typing import cast @@ -1291,14 +1290,22 @@ class TestScopeB(TestScopeA): pytester.makepyfile(test_a=test_file, test_b=test_file) result = pytester.runpytest("-n2", "--dist=isoscope", "-v") - assert sum( - get_workers_and_test_count_by_prefix( - "test_a.py::TestScopeA", result.outlines).values() - ) == 5 - assert sum( - get_workers_and_test_count_by_prefix( - "test_a.py::TestScopeB", result.outlines).values() - ) == 5 + assert ( + sum( + get_workers_and_test_count_by_prefix( + "test_a.py::TestScopeA", result.outlines + ).values() + ) + == 5 + ) + assert ( + sum( + get_workers_and_test_count_by_prefix( + "test_a.py::TestScopeB", result.outlines + ).values() + ) + == 5 + ) def test_by_module(self, pytester: pytest.Pytester) -> None: test_file = """ From f1bf9aaabc787ccebb7c7632a7828c2ace14a447 Mon Sep 17 00:00:00 2001 From: Vitaly Kruglikov Date: Fri, 25 Oct 2024 16:45:31 -0400 Subject: [PATCH 47/52] Work in progress on acceptance test TestIsoScope.test_distributed_setup_teardown_coordination. --- testing/acceptance_test.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/acceptance_test.py b/testing/acceptance_test.py index 4bfab523..7f260e1c 100644 --- a/testing/acceptance_test.py +++ b/testing/acceptance_test.py @@ -1242,7 +1242,7 @@ def test_distributed_setup_teardown_coordination( ) _SHARED_SCOPE_SETUP_STATUS_PATH = pathlib.Path( - {shared_scope_setup_status_path!s}) + "{shared_scope_setup_status_path!s}") class TestScopeA: @classmethod @@ -1287,7 +1287,7 @@ def test(self, i): class TestScopeB(TestScopeA): pass """ - pytester.makepyfile(test_a=test_file, test_b=test_file) + pytester.makepyfile(test_a=test_file) result = pytester.runpytest("-n2", "--dist=isoscope", "-v") assert ( From b71c911b856662a330208ff73f76be4622be5b13 Mon Sep 17 00:00:00 2001 From: Vitaly Kruglikov Date: Fri, 25 Oct 2024 16:53:14 -0400 Subject: [PATCH 48/52] Removed whitespace from blank lines. 
--- testing/acceptance_test.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/testing/acceptance_test.py b/testing/acceptance_test.py index 7f260e1c..2c66dc7f 100644 --- a/testing/acceptance_test.py +++ b/testing/acceptance_test.py @@ -1270,7 +1270,7 @@ def patch_system_under_test( # this test class and store state in `setup_context.client_dir`. assert _SHARED_SCOPE_SETUP_STATUS_PATH.read_text() == "TEARDOWN_COMPLETE" _SHARED_SCOPE_SETUP_STATUS_PATH.write_text("SETUP_COMPLETE") - + @classmethod def revert_system_under_test( cls, @@ -1279,11 +1279,11 @@ def revert_system_under_test( # changes made by `patch_system_under_test()`. assert _SHARED_SCOPE_SETUP_STATUS_PATH.read_text() == "SETUP_COMPLETE" _SHARED_SCOPE_SETUP_STATUS_PATH.write_text("TEARDOWN_COMPLETE") - + @pytest.mark.parametrize('i', range(5)) def test(self, i): assert _SHARED_SCOPE_SETUP_STATUS_PATH.read_text() == "SETUP_COMPLETE" - + class TestScopeB(TestScopeA): pass """ From 06fe668b560b7afc5d55a024a3913f5fe07ec254 Mon Sep 17 00:00:00 2001 From: Vitaly Kruglikov Date: Fri, 25 Oct 2024 17:00:55 -0400 Subject: [PATCH 49/52] Initialize the status file used by underlying test in TestIsoScope.test_distributed_setup_teardown_coordination. --- testing/acceptance_test.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/testing/acceptance_test.py b/testing/acceptance_test.py index 2c66dc7f..0330694b 100644 --- a/testing/acceptance_test.py +++ b/testing/acceptance_test.py @@ -1287,6 +1287,9 @@ def test(self, i): class TestScopeB(TestScopeA): pass """ + # Initialize the status file used by underlying test + shared_scope_setup_status_path.write_text("TEARDOWN_COMPLETE") + pytester.makepyfile(test_a=test_file) result = pytester.runpytest("-n2", "--dist=isoscope", "-v") From b1f032dc19b39d67dad19c6d832a635116e78d19 Mon Sep 17 00:00:00 2001 From: Vitaly Kruglikov Date: Fri, 25 Oct 2024 17:14:47 -0400 Subject: [PATCH 50/52] Create parent directories for the status file used by underlying test in TestIsoScope.test_distributed_setup_teardown_coordination. --- testing/acceptance_test.py | 1 + 1 file changed, 1 insertion(+) diff --git a/testing/acceptance_test.py b/testing/acceptance_test.py index 0330694b..8f2cf0c8 100644 --- a/testing/acceptance_test.py +++ b/testing/acceptance_test.py @@ -1288,6 +1288,7 @@ class TestScopeB(TestScopeA): pass """ # Initialize the status file used by underlying test + shared_scope_setup_status_path.parent.mkdir(parents=True, exist_ok=True) shared_scope_setup_status_path.write_text("TEARDOWN_COMPLETE") pytester.makepyfile(test_a=test_file) From 52c081550b563ffee990a695983b0bb10c414494 Mon Sep 17 00:00:00 2001 From: Vitaly Kruglikov Date: Fri, 25 Oct 2024 17:41:00 -0400 Subject: [PATCH 51/52] Fixed SyntaxError: (unicode error) 'unicodeescape' codec can't decode bytes in position 2-3: truncated \UXXXXXXXX escape in TestIsoScope.test_distributed_setup_teardown_coordination. 
--- testing/acceptance_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testing/acceptance_test.py b/testing/acceptance_test.py index 8f2cf0c8..e77d408e 100644 --- a/testing/acceptance_test.py +++ b/testing/acceptance_test.py @@ -1242,7 +1242,7 @@ def test_distributed_setup_teardown_coordination( ) _SHARED_SCOPE_SETUP_STATUS_PATH = pathlib.Path( - "{shared_scope_setup_status_path!s}") + r"{shared_scope_setup_status_path!s}") class TestScopeA: @classmethod From 95b91c832cad6dd6241a42a7a5323bc1839ea54e Mon Sep 17 00:00:00 2001 From: Vitaly Kruglikov Date: Fri, 25 Oct 2024 18:30:19 -0400 Subject: [PATCH 52/52] Fix node_path.relative_to(...) in _DistributedSetupCoordinatorImpl.maybe_call_setup to work correctly on Windows. --- src/xdist/iso_scheduling_plugin.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/xdist/iso_scheduling_plugin.py b/src/xdist/iso_scheduling_plugin.py index 9984378d..7a17f604 100644 --- a/src/xdist/iso_scheduling_plugin.py +++ b/src/xdist/iso_scheduling_plugin.py @@ -290,11 +290,11 @@ def maybe_call_setup( self._setup_context is None ), f"maybe_call_setup()` already called {self._setup_context=}" - node_path = self._setup_request.node.path + node_path: pathlib.Path = self._setup_request.node.path root_context_dir: pathlib.Path = ( self._root_context_base_dir - / node_path.relative_to(node_path.root) + / node_path.relative_to(node_path.parts[0]) / self._setup_request.node.name )