diff --git a/.github/workflows/linting.yml b/.github/workflows/linting.yml index 445cd87..1648446 100644 --- a/.github/workflows/linting.yml +++ b/.github/workflows/linting.yml @@ -9,7 +9,7 @@ on: jobs: linting: strategy: - fail-fast: false + fail-fast: true matrix: python: ["3.8", "3.9", "3.10", "3.11"] runs-on: ubuntu-latest @@ -29,11 +29,15 @@ jobs: - uses: isort/isort-action@master with: configuration: "--check-only --diff" + - name: Analysing the code style + run: | + tox -e codestyle + - name: Analysing the docstyle + run: | + tox -e docstyle - name: Analysing the code with mypy - continue-on-error: true run: | tox -e types - name: Analysing the code with pylint/flake - continue-on-error: true run: | tox -e errors diff --git a/README.md b/README.md index 1618dd2..7ae153f 100644 --- a/README.md +++ b/README.md @@ -22,7 +22,8 @@ pip install pytest-fluent ## Usage -pytest-fluent-logging forwards meta data from pytest to Fluentd for further processing. The meta data are +pytest-fluent-logging forwards meta data from pytest to Fluentd for further processing. The meta data are + * unique session ID * unique test ID * status of the session respectively test case @@ -31,7 +32,7 @@ pytest-fluent-logging forwards meta data from pytest to Fluentd for further proc * `record_property` entries * custom testcase information * custom session information - + Furthermore, the Python logging instance can be extended in order to forward test case runtime logging. ```python @@ -43,7 +44,7 @@ def test_my_runtime_log(): assert value == 1 ``` -or +or ```python from logging import getLogger @@ -104,22 +105,22 @@ def provide_more_test_information() -> dict: The pytest CLI can be called with the following arguments in order to configure fluent-logging. -| argument | description | default | -|---------------------|------------------------------------------------------------------------------------|----------| -| --session-uuid | Use a custom externally created UUID, e.g. link a CI job with the pytest session. | | -| --fluentd-host | Fluentd host address. If not provided, a local Fluentd instance will be called. | | -| --fluentd-port | Fluent host port | 24224 | -| --fluentd-tag | Set a custom Fluentd tag | 'test' | -| --fluentd-label | Set a custom Fluentd label | 'pytest' | -| --fluentd-timestamp | Specify a Fluentd timestamp | None | -| --extend-logging | Extend the Python logging with a Fluent handler | False | -| --add-docstrings | Add test docstrings to testcase call messages | | +| argument | description | default | +| ------------------- | --------------------------------------------------------------------------------- | -------- | +| --session-uuid | Use a custom externally created UUID, e.g. link a CI job with the pytest session. | | +| --fluentd-host | Fluentd host address. If not provided, a local Fluentd instance will be called. | | +| --fluentd-port | Fluent host port | 24224 | +| --fluentd-tag | Set a custom Fluentd tag | 'test' | +| --fluentd-label | Set a custom Fluentd label | 'pytest' | +| --fluentd-timestamp | Specify a Fluentd timestamp | None | +| --extend-logging | Extend the Python logging with a Fluent handler | False | +| --add-docstrings | Add test docstrings to testcase call messages | | -### Ini Configuration Support +### Ini Configuration Support Default values of the CLI arguments for a project could also be defined in one of the following ini configuration files: -1. pytest.ini: Arguments are defined under pytest section in the file. 
This file takes precedence over all other configuration files even if empty. +1. pytest.ini: Arguments are defined under pytest section in the file. This file takes precedence over all other configuration files even if empty. ```python [pytest] @@ -224,7 +225,7 @@ Timestamps are added to the information if the ``--fluentd-timestamp`` option is addopts= --session-uuid="ac2f7600-a079-46cf-a7e0-6408b166364c" --fluentd-port=24224 --fluentd-host=localhost --fluentd-tag='dummytest' --fluentd-label='pytest' --fluentd-timestamp='@timestamp' --extend-logging ``` -The timestamp is added to each message. The value is in ISO 8601 format. A sample +The timestamp is added to each message. The value is in ISO 8601 format. A sample of the data collection from `test_addoptions.py` (as above) would look as below: ```json @@ -253,4 +254,3 @@ The changelog. ## Contributing We welcome any contributions, enhancements, and bug-fixes. Open an [issue](https://github.com/Rohde-Schwarz/pytest-fluent/issues) on [Github](https://github.com) and [submit a pull request](https://github.com/Rohde-Schwarz/pytest-fluent/pulls). - diff --git a/docs/conf.py b/docs/conf.py index 5bfad10..17b9f8a 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -19,4 +19,4 @@ ] suppress_warnings = ["myst.header"] html_theme = "sphinx_rtd_theme" -html_static_path = ["images"] +# html_static_path = ["image"] diff --git a/docs/usage.md b/docs/usage.md index 28dbeda..a173413 100644 --- a/docs/usage.md +++ b/docs/usage.md @@ -4,3 +4,6 @@ :start-after: "## Usage" :end-before: "## Changelog" ``` + +```{include} ./usage/stage_settings.md +``` diff --git a/docs/usage/stage_settings.md b/docs/usage/stage_settings.md new file mode 100644 index 0000000..5612d4f --- /dev/null +++ b/docs/usage/stage_settings.md @@ -0,0 +1,239 @@ +### Custom stage settings + +Sometimes, the default settings are not enough in order to forward the test information as needed. Thus, you can set custom stage settings +in order to fit your needs. + +You can set specific values for `all` stages or specific values for any used stage. In order to do so, call your test run with the `--stage-settings=YourFileName.json` parameter. 
For example, the following stage settings JSON file content
+
+```json
+{
+    "all": {
+        "tag": "run",
+        "label": "pytest",
+        "replace": {"keys": {"status": "state", "sessionId": "id"}}
+    },
+    "pytest_sessionstart": {
+        "tag": "run",
+        "label": "test",
+        "add": {"start_info": "Pytest started"}
+    },
+    "pytest_sessionfinish": {
+        "tag": "result",
+        "label": "test",
+        "add": {"stop_info": "Pytest finished"}
+    },
+    "pytest_runtest_logstart": {
+        "tag": "run",
+        "label": "testcase",
+        "add": {"start_info": "Testcase started"}
+    },
+    "pytest_runtest_logreport": {
+        "tag": "result",
+        "label": "testcase",
+        "replace": {
+            "values": {"passed": "pass", "failed": "fail"}
+        },
+        "add": {"stop_info": "Testcase finished"}
+    },
+    "logging": {
+        "replace": {"keys": {"message": "msg", "sessionId": "id"}}
+    }
+}
+```
+
+will result in the following output
+
+```json
+[
+    {
+        "stage": "session",
+        "tag": "test",
+        "label": "pytest",
+        "state": "start",
+        "id": "3d82b514-60e2-4580-96ab-3daf5a5446c8"
+    },
+    {
+        "stage": "testcase",
+        "testId": "6b5092ad-c905-4879-a70c-cb5b2a7df90d",
+        "name": "test_data_reporter_with_patched_values.py::test_base",
+        "tag": "test",
+        "label": "pytest",
+        "state": "start",
+        "id": "3d82b514-60e2-4580-96ab-3daf5a5446c8"
+    },
+    {
+        "type": "logging",
+        "host": "hostname",
+        "where": "test_data_reporter_with_patched_values.test_base",
+        "level": "INFO",
+        "stack_trace": "None",
+        "message": "Test running",
+        "testId": "6b5092ad-c905-4879-a70c-cb5b2a7df90d",
+        "stage": "testcase",
+        "id": "3d82b514-60e2-4580-96ab-3daf5a5446c8"
+    },
+    {
+        "name": "test_data_reporter_with_patched_values.py::test_base",
+        "outcome": "pass",
+        "duration": 0.0034263000000001043,
+        "markers": {
+            "test_base": 1,
+            "test_data_reporter_with_patched_values.py": 1,
+            "test_data_reporter_with_patched_values0": 1
+        },
+        "stage": "testcase",
+        "when": "call",
+        "testId": "6b5092ad-c905-4879-a70c-cb5b2a7df90d",
+        "tag": "test",
+        "label": "pytest",
+        "id": "3d82b514-60e2-4580-96ab-3daf5a5446c8",
+        "stop_info": "Testcase finished"
+    },
+    {
+        "stage": "testcase",
+        "testId": "6b5092ad-c905-4879-a70c-cb5b2a7df90d",
+        "name": "test_data_reporter_with_patched_values.py::test_base",
+        "tag": "test",
+        "label": "pytest",
+        "state": "finish",
+        "id": "3d82b514-60e2-4580-96ab-3daf5a5446c8"
+    },
+    {
+        "duration": 1.3674933910369873,
+        "stage": "session",
+        "tag": "test",
+        "label": "pytest",
+        "state": "finish",
+        "id": "3d82b514-60e2-4580-96ab-3daf5a5446c8"
+    }
+]
+```
+
+for this test case
+
+```python
+import logging
+
+def test_base():
+    logger = logging.getLogger()
+    logger.info("Test running")
+    assert True
+```
+
+#### Stage setting file
+
+Custom settings for each supported stage can easily be set up. Create a file with
+a `.json` or `.yaml` extension and call pytest with the additional parameter `--stage-settings`.
+
+The file is validated against a schema of supported values; in case of an error, a
+`jsonschema.ValidationError` is raised.
+
+#### Stage settings
+
+##### Supported stages
+
+The following stages can be modified:
+
+* `pytest_sessionstart`
+* `pytest_runtest_logstart`
+* `pytest_runtest_logreport`
+* `pytest_runtest_logfinish`
+* `pytest_sessionfinish`
+* `logging`
+
+These values are the keys of the settings dictionary. Additionally, you can set an `all` key
+as a convenient way to patch all stages at once.
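+
+As a rough sketch (illustrative only, not taken from the plugin source), the per-stage
+resolution can be pictured as a shallow merge in which `all` supplies the defaults and the
+stage-specific entry overrides or extends them:
+
+```python
+# Illustrative only: "all" acts as the default; the stage entry wins on conflict.
+all_settings = {"tag": "run", "label": "pytest"}
+stage_settings = {"tag": "result", "add": {"stop_info": "Pytest finished"}}
+
+resolved = {**all_settings, **stage_settings}
+print(resolved)
+# {'tag': 'result', 'label': 'pytest', 'add': {'stop_info': 'Pytest finished'}}
+```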
+
+#### Patch events
+
+A stage entry in the settings file might look like this:
+
+```json
+{
+    "pytest_sessionstart": {
+        "tag": "run",
+        "label": "pytest",
+        "replace": {
+            "keys": {
+                "status": "state",
+                "sessionId": "id"
+            },
+            "values": {
+                "passed": "pass"
+            }
+        },
+        "add": {
+            "start_info": "Pytest started"
+        }
+    }
+}
+```
+
+The following keys are supported:
+
+| Key name  | Action                                                                             | Type   |
+| --------- | ---------------------------------------------------------------------------------- | ------ |
+| `tag`     | Set a specific Fluentd tag for this stage                                          | `str`  |
+| `label`   | Set a specific Fluentd label for this stage                                        | `str`  |
+| `replace` | Rename keys of the result dictionary and replace some preset pytest result values  | `dict` |
+| `add`     | Add new values to the result dictionary                                            | `dict` |
+| `drop`    | Drop specific values from the result dictionary                                    | `list` |
+
+##### Replace dictionary
+
+The `replace` patching action has two keys, `keys` and `values`, for replacing either a key name or a result value.
+See the default values below in order to get an idea about the content.
+
+At the moment, the following result values can be changed:
+
+* `passed`
+* `failed`
+* `skipped`
+* `error`
+* `start`
+* `finish`
+* `session`
+* `testcase`
+
+##### Use values from ARGV and ENV
+
+If you want to use data provided by command line arguments or directly from environment variables,
+use the following syntax for value strings.
+
+| Type | Syntax                         |
+| ---- | ------------------------------ |
+| ARGV | `"<argument-name>"`            |
+| ENV  | `"${USE_ENV}"` or `"$USE_ENV"` |
+
+Here is a simple example using both variants:
+
+```json
+{
+    "pytest_sessionstart": {
+        "tag": "run",
+        "label": "pytest",
+        "replace": {
+            "keys": {
+                "tag": "<fluentd-tag>",
+                "sessionId": "${ID}"
+            },
+            "values": {
+                "passed": "$OUTCOME_PASSED"
+            }
+        }
+    }
+}
+```
+
+The referenced values are resolved when the pytest session starts.
+
+#### Default values
+
+| stage                      | value |
+| -------------------------- | ----- |
+| `pytest_sessionstart` |
{
"status": "start",
"stage": "session",
"sessionId": "8d0d165d-5581-478c-ba0f-f7ec7d5bcbcf",
"tag": "test",
"label": "pytest"
}
| +| `pytest_runtest_logstart` |
{
"status": "start",
"stage": "testcase",
"sessionId": "8d0d165d-5581-478c-ba0f-f7ec7d5bcbcf",
"testId": "9f0363fa-ef99-49c7-8a2d-6261e90acb00",
"name": "test_data_reporter_with_patched_values.py::test_base",
"tag": "test",
"label": "pytest"
}
| +| `pytest_runtest_logreport` |
{
"name": "test_data_reporter_with_patched_values.py::test_base",
"outcome": "passed",
"duration": 0.0035069000000005346,
"markers": {
"test_base": 1,
"test_data_reporter_with_patched_values.py": 1,
"test_data_reporter_with_patched_values0": 1
},
"stage": "testcase",
"when": "call",
"sessionId": "8d0d165d-5581-478c-ba0f-f7ec7d5bcbcf",
"testId": "9f0363fa-ef99-49c7-8a2d-6261e90acb00",
"tag": "test",
"label": "pytest"
}
| +| `pytest_runtest_logfinish` |
{
"status": "finish",
"stage": "testcase",
"sessionId": "8d0d165d-5581-478c-ba0f-f7ec7d5bcbcf",
"testId": "9f0363fa-ef99-49c7-8a2d-6261e90acb00",
"name": "test_data_reporter_with_patched_values.py::test_base",
"tag": "test",
"label": "pytest"
}
| +| `pytest_sessionfinish` |
{
"status": "finish",
"duration": 1.5651893615722656,
"stage": "session",
"sessionId": "8d0d165d-5581-478c-ba0f-f7ec7d5bcbcf",
"tag": "test",
"label": "pytest"
}
| +| `logging` |
{
"type": "logging",
"host": "hostname",
"where": "test_data_reporter_with_patched_values.test_base",
"level": "INFO",
"stack_trace": "None",
"message": "Test running",
"sessionId": "8d0d165d-5581-478c-ba0f-f7ec7d5bcbcf",
"testId": "9f0363fa-ef99-49c7-8a2d-6261e90acb00",
"stage": "testcase"
}
| diff --git a/pyproject.toml b/pyproject.toml index 3359428..272f94c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -32,7 +32,9 @@ dependencies = [ "pytest>=7.0.0", "msgpack", "six", - "fluent-logger" + "fluent-logger", + "jsonschema", + "ruamel.yaml", ] dynamic = ["version"] diff --git a/setup.cfg b/setup.cfg index d5524b1..dec14f8 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,6 +1,6 @@ [metadata] copyright = Copyright © Rohde & Schwarz GmbH & Co. KG 2022 -platform = +platform = Unix Linux Windows @@ -9,10 +9,15 @@ platform = package_dir= =src packages=find: +include_package_data = True [options.entry_points] pytest11 = fluent-logging = pytest_fluent.plugin [options.packages.find] -where = src \ No newline at end of file +where = src + +[options.package_data] +pytest_fluent.data = + *.json \ No newline at end of file diff --git a/src/pytest_fluent/content_patcher.py b/src/pytest_fluent/content_patcher.py new file mode 100644 index 0000000..dbe53c2 --- /dev/null +++ b/src/pytest_fluent/content_patcher.py @@ -0,0 +1,201 @@ +"""Patch content according to settings.""" +import argparse +import enum +import inspect +import os +import re +import typing + + +class _ContentType(enum.Enum): + ENV = 0 + ARGS = 1 + + +class ContentPatcher: + """Patch the transmission content according to the user settings.""" + + def __init__( + self, + user_settings: dict, + args_settings: argparse.Namespace, + stage_names: typing.List[str], + ) -> None: + """Initialize content patcher.""" + self._args_settings: argparse.Namespace = args_settings + self._user_settings: dict = self._stage_settings(user_settings, stage_names) + + def _stage_settings( + self, user_settings: dict, stage_names: typing.List[str] + ) -> dict: + """Prepare stage settings for faster online lookup. + + Args: + user_settings (dict): User settings from JSON or YAML file. + stage_names (typing.List[str]): Used stage names by Pytest-fluent plugin. + + Returns: + dict: Returns patched user settings. + """ + patched = {} + all_settings = {} + for key, value in user_settings.get("all", {}).items(): + value = self._patch_value(key, value) + all_settings.update({key: value}) + for stage_name in stage_names: + patched.update({stage_name: all_settings.copy()}) + stage_info = user_settings.get(stage_name, {}) + for key, value in stage_info.items(): + value = self._patch_value(key, value) + if isinstance(value, dict): + value = self._merge_patched_values( + patched[stage_name].get(key, {}), value + ) + patched[stage_name].update({key: value}) + return patched + + def _patch_value(self, key: str, value: typing.Any) -> typing.Any: + if key == "replace": + value = {key: self._patch_value(key, v) for key, v in value.items()} + else: + if isinstance(value, dict): + for subkey, subvalue in value.items(): + value[subkey] = self._get_env_or_args(subvalue) + else: + value = self._get_env_or_args(value) + return value + + def _merge_patched_values(self, old: dict, new: dict) -> dict: + merged = old.copy() + for key, value in new.items(): + merged[key] = value + return merged + + @property + def user_settings(self) -> dict: + """Retrieve processed user settings. + + Returns: + dict: Dictionary of user settings. + """ + return self._user_settings + + def get_tag_and_label( + self, stage_name: typing.Optional[str] = None + ) -> typing.Tuple[str, str]: + """Return the tag for the corresponding stage. + + Args: + stage_name (typing.Optional[str], optional): Current stage. + Defaults to None. + + Returns: + typing.Tuple[str, str]: Tag string, Label string. 
+ """ + if stage_name is None: + stage_name = inspect.stack()[1][3] + stage_info = self._user_settings.get(stage_name, {}) + return stage_info["tag"], stage_info["label"] + + def patch( + self, + content: dict, + stage_name: typing.Optional[str] = None, + ignore_entries: typing.List[str] = [], + ) -> dict: + """Patch the content with the provided settings for each stage. + + Args: + content (dict): Structured data for transmission. + stage_name (typing.Optional[str], optional): Calling stage name. + Defaults to None. + + Returns: + dict: Patched dictionary with the user provided stage settings. + """ # noqa + if stage_name is None: + stage_name = inspect.stack()[1][3] + + stage_info = self._user_settings.get(stage_name, {}) + stage_info = {k: v for k, v in stage_info.items() if k not in ignore_entries} + if not stage_info: + return content + return self._patch_stage_content(content, stage_info) + + @staticmethod + def _patch_stage_content(stage_content: dict, user_settings: dict) -> dict: + stage_content_patched = stage_content.copy() + if "tag" in user_settings: + stage_content_patched["tag"] = user_settings["tag"] + if "label" in user_settings: + stage_content_patched["label"] = user_settings["label"] + if "replace" in user_settings: + replace_it = user_settings["replace"] + if "keys" in replace_it: + keys_settings = replace_it["keys"] + for key, value in keys_settings.items(): + if key in stage_content_patched: + tmp = stage_content_patched[key] + stage_content_patched[value] = tmp + del stage_content_patched[key] + if "values" in replace_it: + value_settings = replace_it["values"] + new_value_keys = value_settings.keys() + for key, value in stage_content_patched.items(): + if not isinstance(value, dict) and value in new_value_keys: + stage_content_patched[key] = value_settings[value] + to_add = user_settings.get("add", {}) + stage_content_patched.update(to_add) + to_drop = user_settings.get("drop", []) + for key in to_drop: + del user_settings[key] + return stage_content_patched + + def _get_env_or_args(self, value: str) -> str: + reference = self._is_reference_string(value) + if reference == _ContentType.ENV: + return self._get_env_content(value) + elif reference == _ContentType.ARGS: + return self._get_args_content(value) + else: + return value + + @staticmethod + def _is_reference_string(value: str) -> typing.Optional[_ContentType]: + if re.match(r"(\$)?({)([\w_]+)(})", value): + return _ContentType.ENV + elif re.match(r"(<)([\w-]+)(>)", value): + return _ContentType.ARGS + return None + + @staticmethod + def _get_env_content(value: str) -> str: + """Check if string relates to ENV variable and return that value. + + Args: + value (str): String providing ENV reference e.g. ${USE_ENV} + + Returns: + str: String with ENV content or empty string. + """ + env_match = re.findall(r"\$({)?([\w_]+)(})?", value) + if not env_match: + return "" + env_value = os.getenv(env_match[0][1], "") + return env_value + + def _get_args_content(self, value: str) -> str: + """Check if string relates to CLI argument variable and return that value. + + Args: + value (str): String providing argument reference e.g. + + Returns: + str: String with argument content or empty string. 
+ """ + args_match = re.findall(r"^<([\w-]+)>$", value) + if not args_match: + return "" + args_string = args_match[0].replace("-", "_") + args_value = getattr(self._args_settings, args_string, "") + return args_value diff --git a/src/pytest_fluent/data/default.stage.json b/src/pytest_fluent/data/default.stage.json new file mode 100644 index 0000000..1e8f941 --- /dev/null +++ b/src/pytest_fluent/data/default.stage.json @@ -0,0 +1,6 @@ +{ + "all": { + "tag": "", + "label": "" + } +} \ No newline at end of file diff --git a/src/pytest_fluent/data/schema.stage.json b/src/pytest_fluent/data/schema.stage.json new file mode 100644 index 0000000..5466faf --- /dev/null +++ b/src/pytest_fluent/data/schema.stage.json @@ -0,0 +1,85 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "patternProperties": { + "all|pytest_runtest_logstart|pytest_runtest_logreport|pytest_runtest_logfinish|logging": { + "$ref": "#/definitions/AdditionalProperties" + } + }, + "anyOf": [ + { + "type": "object", + "patternProperties": { + "all": { + "$ref": "#/definitions/AdditionalProperties", + "required": [ + "tag", + "label" + ] + } + } + }, + { + "type": "object", + "patternProperties": { + "pytest_runtest_logstart|pytest_runtest_logreport|pytest_runtest_logfinish|logging": { + "$ref": "#/definitions/AdditionalProperties" + } + } + } + ], + "additionalProperties": false, + "definitions": { + "RegexString": { + "type": "string", + "regex": "(\\$)?([<\\{])?[\\w_.-]+([>\\}])?" + }, + "AdditionalProperties": { + "type": "object", + "additionalProperties": false, + "properties": { + "tag": { + "$ref": "#/definitions/RegexString", + "default": "" + }, + "label": { + "$ref": "#/definitions/RegexString", + "default": "" + }, + "replace": { + "keys": { + "type": "object", + "patternProperties": { + "status|stage|sessionId|testId|name|when|duration": { + "$ref": "#/definitions/RegexString" + } + } + }, + "values": { + "type": "object", + "patternProperties": { + "passed|failed|skipped|error|start|finish|session|testcase": { + "type": "string" + } + } + } + }, + "add": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/RegexString" + } + }, + "drop": { + "type": "array", + "items": { + "$ref": "#/definitions/RegexString" + } + } + } + } + }, + "required": [ + "all" + ] +} \ No newline at end of file diff --git a/src/pytest_fluent/event.py b/src/pytest_fluent/event.py new file mode 100644 index 0000000..491a308 --- /dev/null +++ b/src/pytest_fluent/event.py @@ -0,0 +1,47 @@ +"""Custom Event class.""" + +import logging +import time +import typing + +from fluent.sender import FluentSender + +LOGGER = logging.getLogger(__package__) + + +class Event: + """Customized Event class for sending different tags. + + Args: + host (str): Host name of the Fluent instance. Defaults to "localhost". + port (int): Port of the Fluent instance. Defaults to 24224. + """ + + def __init__( + self, + tags: typing.List[str], + host: str = "localhost", + port: int = 24224, + **kwargs, + ) -> None: + """Initialize custom event class.""" + self.senders = { + tag: FluentSender(tag=tag, host=host, port=port, **kwargs) for tag in tags + } + + def __call__(self, tag: str, label: str, data: dict, **kwargs): + """Send a new event. + + Args: + tag (str): Fluent tag. + label (str): Fluent label. + data (dict): Data to transmit as dictionary. 
+ """ + assert isinstance(data, dict), "data must be a dict" + sender_ = self.senders.get(tag) + if sender_ is None or not isinstance(sender_, FluentSender): + LOGGER.warning(f"Could not retrieve fluent instance for tag {tag}") + return + timestamp = kwargs.get("time", int(time.time())) + if not sender_.emit_with_time(label, timestamp, data): + LOGGER.warning(f"Could not send data via fluent for tag {tag}: {data}") diff --git a/src/pytest_fluent/plugin.py b/src/pytest_fluent/plugin.py index 5175456..3595f5b 100644 --- a/src/pytest_fluent/plugin.py +++ b/src/pytest_fluent/plugin.py @@ -1,6 +1,7 @@ """pytest-fluent-logging plugin definition.""" import datetime import logging +import os import textwrap import time import typing @@ -9,13 +10,18 @@ import msgpack import pytest -from fluent import event, sender from fluent.handler import FluentHandler, FluentRecordFormatter from .additional_information import ( get_additional_session_information, get_additional_test_information, ) +from .content_patcher import ContentPatcher +from .event import Event +from .setting_file_loader_action import ( + SettingFileLoaderAction, + load_and_check_settings_file, +) from .test_report import LogReport ##################################################### @@ -40,24 +46,44 @@ def __init__(self, config): self._timestamp = config.getoption("--fluentd-timestamp") self._extend_logging = config.getoption("--extend-logging") self._add_docstrings = config.getoption("--add-docstrings") + stage_names = [method for method in dir(self) if method.startswith("pytest_")] + stage_names.append("logging") + self._content_patcher = ContentPatcher( + user_settings=config.getoption("--stage-settings"), + args_settings=config.option, + stage_names=stage_names, + ) + tags: typing.List[str] = [] + for value in self._content_patcher.user_settings.values(): + tag = value.get("tag") + if not tag: + continue + tags.append(tag) + tags = list(set(tags)) + self._event = Event( + tags, self._host, self._port, buffer_overflow_handler=overflow_handler + ) self._log_reporter = LogReport(self.config) - self._setup_fluent_sender() self._patch_logging() - def _setup_fluent_sender(self): - if self._host is None: - sender.setup(self._tag, buffer_overflow_handler=overflow_handler) - else: - sender.setup( - self._tag, - host=self._host, - port=self._port, - buffer_overflow_handler=overflow_handler, - ) - def _patch_logging(self): - if self._extend_logging: - extend_loggers(self._host, self._port, self._tag) + if not self._extend_logging: + return + tag = self._content_patcher.user_settings.get("logging", {}).get("tag") + if not tag: + raise ValueError( + "Tag for logging was not set. Please set either specific tag value for \ + key 'logging' or use the 'all' object in stage settings file." 
+ ) + label = self._content_patcher.user_settings.get("logging", {}).get("label") + if label: + tag = f"{tag}.{label}" + extend_loggers( + self._host, + self._port, + tag, + self._content_patcher, + ) def _set_session_uid( self, id: typing.Optional[typing.Union[str, uuid.UUID]] = None @@ -72,11 +98,9 @@ def _set_session_uid( else: raise ValueError("Unique identifier is not in a valid format.") - def set_timestamp_information(self, event_data: dict): + def _set_timestamp_information(self, data: dict): if self._timestamp is not None: - event_data.update( - {self._timestamp: f"{datetime.datetime.utcnow().isoformat()}"} - ) + data.update({self._timestamp: f"{datetime.datetime.utcnow().isoformat()}"}) @property def session_uid( @@ -104,9 +128,11 @@ def pytest_sessionstart(self): "stage": "session", "sessionId": self.session_uid, } + data = self._content_patcher.patch(data) data.update(get_additional_session_information()) - self.set_timestamp_information(event_data=data) - event.Event(self._label, data) + self._set_timestamp_information(data=data) + tag, label = self._content_patcher.get_tag_and_label() + self._event(tag, label, data) def pytest_runtest_logstart(self, nodeid: str, location: typing.Tuple[int, str]): """Custom hook for test start.""" @@ -120,9 +146,11 @@ def pytest_runtest_logstart(self, nodeid: str, location: typing.Tuple[int, str]) "testId": self.test_uid, "name": nodeid, } + data = self._content_patcher.patch(data) data.update(get_additional_test_information()) - self.set_timestamp_information(event_data=data) - event.Event(self._label, data) + self._set_timestamp_information(data=data) + tag, label = self._content_patcher.get_tag_and_label() + self._event(tag, label, data) def pytest_runtest_setup(self, item: pytest.Item): """Custom hook for test setup.""" @@ -132,6 +160,14 @@ def pytest_runtest_setup(self, item: pytest.Item): if not self.config.getoption("collectonly"): pass + def pytest_runtest_teardown(self, item: pytest.Item, nextitem: pytest.Item): + """Custom hook for test teardown.""" + set_stage("testcase") + docstring = get_test_docstring(item) + item.stash[DOCSTRING_STASHKEY] = docstring + if not self.config.getoption("collectonly"): + pass + def pytest_runtest_call(self, item: pytest.Item): """Custom hook for test call.""" set_stage("testcase") @@ -140,6 +176,7 @@ def pytest_runtest_call(self, item: pytest.Item): @pytest.hookimpl(hookwrapper=True) def pytest_runtest_makereport(self, item: pytest.Item, call): + """Custom hook for make report.""" report = (yield).get_result() docstring = item.stash.get(DOCSTRING_STASHKEY, None) report.stash = {DOCSTRING_KEY: docstring} @@ -148,10 +185,10 @@ def pytest_runtest_logreport(self, report: pytest.TestReport): """Custom hook for logging results.""" set_stage("testcase") if not self.config.getoption("collectonly"): - result_data = self._log_reporter(report) - if not result_data: + data = self._log_reporter(report) + if not data: return - result_data.update( + data.update( { "stage": "testcase", "when": report.when, @@ -162,9 +199,11 @@ def pytest_runtest_logreport(self, report: pytest.TestReport): if self._add_docstrings: docstring = report.stash.get(DOCSTRING_KEY, None) if docstring: - result_data.update({"docstring": docstring}) - self.set_timestamp_information(event_data=result_data) - event.Event(self._label, result_data) + data.update({"docstring": docstring}) + self._set_timestamp_information(data=data) + data = self._content_patcher.patch(data) + tag, label = self._content_patcher.get_tag_and_label() + self._event(tag, 
label, data) def pytest_runtest_logfinish( self, @@ -174,18 +213,17 @@ def pytest_runtest_logfinish( """Custom hook for test end.""" set_stage("testcase") if not self.config.getoption("collectonly"): - event_data = { + data = { "status": "finish", "stage": "testcase", "sessionId": self.session_uid, "testId": self.test_uid, "name": nodeid, } - self.set_timestamp_information(event_data=event_data) - event.Event( - self._label, - event_data, - ) + self._set_timestamp_information(data=data) + data = self._content_patcher.patch(data) + tag, label = self._content_patcher.get_tag_and_label() + self._event(tag, label, data) def pytest_sessionfinish( self, @@ -195,21 +233,27 @@ def pytest_sessionfinish( """Custom hook for session end.""" set_stage("session") if not self.config.getoption("collectonly"): - event_data = { + data = { "status": "finish", - "duration": time.time() - self._session_start_time, + "duration": ( + time.time() + - ( + 0 + if self._session_start_time is None + else self._session_start_time + ) + ), "stage": "session", "sessionId": self.session_uid, } - self.set_timestamp_information(event_data=event_data) - event.Event( - self._label, - event_data, - ) + self._set_timestamp_information(data=data) + data = self._content_patcher.patch(data) + tag, label = self._content_patcher.get_tag_and_label() + self._event(tag, label, data) -stage: str = "session" -fluent_runtime: typing.Optional[FluentLoggerRuntime] = None +STAGE: str = "session" +FLUENT_RUNTIME: typing.Optional[FluentLoggerRuntime] = None ##################################################### # Setup @@ -226,7 +270,8 @@ def pytest_addoption(parser): ) group.addoption( "--fluentd-host", - default=None, + default="localhost", + type=str, help="Fluentd remote host. Defaults to a local Fluentd session", ) group.addoption( @@ -260,24 +305,33 @@ def pytest_addoption(parser): action="store_true", help="Add test docstrings to the testcase call messages.", ) + group.addoption( + "--stage-settings", + type=str, + default=load_and_check_settings_file( + os.path.join(os.path.dirname(__file__), "data", "default.stage.json") + ), + action=SettingFileLoaderAction, + help="Stage setting description JSON or YAML file path or string object.", + ) def pytest_configure(config): """Extend pytest configuration.""" - global fluent_runtime + global FLUENT_RUNTIME config.fluent = FluentLoggerRuntime(config) config.pluginmanager.register(config.fluent, "fluent-reporter-runtime") - fluent_runtime = config.fluent + FLUENT_RUNTIME = config.fluent def pytest_unconfigure(config): """Unregister runtime from pytest.""" - global fluent_runtime + global FLUENT_RUNTIME fluent = getattr(config, "fluent", None) if fluent: del config.fluent config.pluginmanager.unregister(fluent) - fluent_runtime = None + FLUENT_RUNTIME = None ##################################################### @@ -293,14 +347,14 @@ def get_logger(request): port = config.getoption("--fluentd-port") tag = config.getoption("--fluentd-tag") - def get_logger(name=None): + def get_logger_wrapper(name=None): logger = logging.getLogger(name) if name is None: return logger add_handler(host, port, tag, logger) return logger - return get_logger + return get_logger_wrapper @pytest.fixture @@ -326,9 +380,10 @@ def test_uid() -> typing.Optional[str]: class RecordFormatter(FluentRecordFormatter): """Extension of FluentRecordFormatter in order to add unique ID's""" - def __init__(self, *args, **kwargs): + def __init__(self, patcher: typing.Optional[ContentPatcher], *args, **kwargs): """Specific 
initilization.""" super(RecordFormatter, self).__init__(*args, **kwargs) + self.content_patcher = patcher def format(self, record): """Extend formatting for Fluentd handler.""" @@ -338,34 +393,41 @@ def format(self, record): data["sessionId"] = get_session_uid() data["testId"] = get_test_uid() data["stage"] = get_stage() + if self.content_patcher: + data = self.content_patcher.patch(data, "logging", ["tag", "label"]) return data -def extend_loggers(host, port, tag) -> None: +def extend_loggers(host, port, tag, patcher: ContentPatcher) -> None: """Extend Python logging with a Fluentd handler.""" - modify_logger(host, port, tag, None) - modify_logger(host, port, tag, "fluent") + modify_logger(host, port, tag, None, patcher) + modify_logger(host, port, tag, "fluent", patcher) -def modify_logger(host, port, tag, name=None) -> None: +def modify_logger( + host, port, tag, name=None, patcher: typing.Optional[ContentPatcher] = None +) -> None: """Extend Python logging with a Fluentd handler.""" logger = logging.getLogger(name) - add_handler(host, port, tag, logger) + add_handler(host, port, tag, logger, patcher) -def add_handler(host, port, tag, logger): +def add_handler( + host, port, tag, logger, patcher: typing.Optional[ContentPatcher] = None +): """Add handler to a specific logger.""" handler = FluentHandler( tag, host=host, port=port, buffer_overflow_handler=overflow_handler ) formatter = RecordFormatter( + patcher, { "type": "logging", "host": "%(hostname)s", "where": "%(module)s.%(funcName)s", "level": "%(levelname)s", "stack_trace": "%(exc_text)s", - } + }, ) handler.setFormatter(formatter) logger.addHandler(handler) @@ -380,13 +442,13 @@ def overflow_handler(pendings): def set_stage(val: str) -> None: """Set the current execution stage.""" - global stage - stage = val + global STAGE + STAGE = val def get_stage() -> str: """Get the current execution stage.""" - return stage + return STAGE # Unique identifiers @@ -399,16 +461,16 @@ def create_unique_identifier(): def get_session_uid() -> typing.Optional[str]: """Get current session UID.""" - if fluent_runtime is None: + if FLUENT_RUNTIME is None: return None - return typing.cast(FluentLoggerRuntime, fluent_runtime).session_uid + return typing.cast(FluentLoggerRuntime, FLUENT_RUNTIME).session_uid def get_test_uid() -> typing.Optional[str]: """Get current test UID.""" - if fluent_runtime is None: + if FLUENT_RUNTIME is None: return None - return typing.cast(FluentLoggerRuntime, fluent_runtime).test_uid + return typing.cast(FluentLoggerRuntime, FLUENT_RUNTIME).test_uid # Docstrings diff --git a/src/pytest_fluent/setting_file_loader_action.py b/src/pytest_fluent/setting_file_loader_action.py new file mode 100644 index 0000000..47c971e --- /dev/null +++ b/src/pytest_fluent/setting_file_loader_action.py @@ -0,0 +1,53 @@ +"""Load and schema check settings file.""" +import argparse +import json +import os + +import jsonschema +from ruamel.yaml import YAML + + +class SettingFileLoaderAction(argparse.Action): + """Custom action for loading JSON/YAML configuration.""" + + def __call__(self, parser, args, values, option_string=None): + """Implementing call.""" + parameter = load_and_check_settings_file(values) + setattr(args, self.dest, parameter) + + +def load_and_check_settings_file(file_name: str): + """Load settings file and check content against schema. + + Args: + file_name (str): Path to settings file. + + Raises: + ValueError: File type not supported. + + Returns: + _type_: User settings dictionary. 
+ """ + if file_name.endswith(".json"): + pickle = json + elif file_name.endswith(".yaml"): + pickle = YAML() + + def loads(file_pointer): + return pickle.load(file_pointer.read()) + + setattr(pickle, "loads", loads) + else: + raise ValueError("File type not supported.") + if os.path.exists(file_name): + with open(file_name, encoding="utf-8") as fid: + content = pickle.load(fid) + else: + content = pickle.loads(file_name) + with open( + os.path.join(os.path.dirname(__file__), "data", "schema.stage.json"), + encoding="utf-8", + ) as fid: + schema = json.load(fid) + jsonschema.validate(instance=content, schema=schema) + return content diff --git a/tests/conftest.py b/tests/conftest.py index c9a9cb2..c172d4d 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,9 +1,23 @@ import uuid -from unittest.mock import patch +from unittest.mock import MagicMock, patch import pytest +from fluent import handler + +import pytest_fluent.event plugin_name = "pytest_fluent" +SESSION_UUID = uuid.uuid4() + + +def isinstance_patch( + __obj: object, + __class_or_tuple, +) -> bool: + """Patch for isinstance.""" + if isinstance(__obj, MagicMock): + return True + return isinstance(__obj, __class_or_tuple) @pytest.fixture(scope="session") @@ -13,7 +27,7 @@ def logging_content(): @pytest.fixture(scope="session") def session_uuid(): - return uuid.uuid4() + return SESSION_UUID @pytest.fixture() @@ -53,14 +67,17 @@ def test_base(): @pytest.fixture() -def run_mocked_pytest(runpytest): - """create a temporary pytest environment with FluentSender mock.""" - - with patch("fluent.sender.FluentSender") as sender: - yield runpytest, sender +def fluentd_sender(monkeypatch): + """Get FluentSender mock.""" + with patch("pytest_fluent.event.FluentSender") as sender, patch.object( + pytest_fluent.event, "isinstance", isinstance_patch + ): + monkeypatch.setattr(handler.sender, "FluentSender", sender) + yield sender.return_value @pytest.fixture() -def fluentd_sender(): - with patch("fluent.sender.FluentSender") as sender: - yield sender.return_value +def run_mocked_pytest(runpytest, fluentd_sender): + """Create a temporary pytest environment with FluentSender mock.""" + + return runpytest, fluentd_sender diff --git a/tests/data/default.json b/tests/data/default.json new file mode 100644 index 0000000..1e8f941 --- /dev/null +++ b/tests/data/default.json @@ -0,0 +1,6 @@ +{ + "all": { + "tag": "", + "label": "" + } +} \ No newline at end of file diff --git a/tests/data/default.yaml b/tests/data/default.yaml new file mode 100644 index 0000000..1700368 --- /dev/null +++ b/tests/data/default.yaml @@ -0,0 +1,3 @@ +all: + tag: "" + label: "" diff --git a/tests/test_additional_information.py b/tests/test_additional_information.py index 81b4096..d87b5ff 100644 --- a/tests/test_additional_information.py +++ b/tests/test_additional_information.py @@ -15,9 +15,8 @@ def test_info() -> dict: def test_additional_information(run_mocked_pytest, session_uuid): - runpytest, sender = run_mocked_pytest + runpytest, fluent_sender = run_mocked_pytest runpytest(f"--session-uuid={session_uuid}") - fluent_sender = sender.return_value call_args = fluent_sender.emit_with_time.call_args_list for idx, call_arg in enumerate(call_args): data = call_arg.args[2] diff --git a/tests/test_addoptions.py b/tests/test_addoptions.py index 08c7a41..9fca1aa 100644 --- a/tests/test_addoptions.py +++ b/tests/test_addoptions.py @@ -11,28 +11,36 @@ FAKE_TEST_UUID = "6d653fee-0c6a-4923-9216-dfc949bd05a0" -def get_data_from_call_args(call_args, fields: typing.List[str]) -> 
typing.Dict: - return {field: call_args.args[2].get(field) for field in fields} +@pytest.fixture +def pyfile_testcase(logging_content): + return f""" +import logging +def test_base(): + LOGGER = logging.getLogger() + LOGGER.info('{logging_content}') + LOGGER.warning('{logging_content}') + assert True +""" -@pytest.fixture(name="monkeypatched_uuid4") -def monkeypatched_uuid4_fixture(monkeypatch): - def myuuid4(): - return uuid.UUID(FAKE_TEST_UUID) - monkeypatch.setattr(uuid, "uuid4", myuuid4) +def get_data_from_call_args(call_args, fields: typing.List[str]) -> typing.Dict: + return {field: call_args.args[2].get(field) for field in fields} def test_fluentd_logged_parameters( - monkeypatched_uuid4, runpytest, fluentd_sender, session_uuid, logging_content + monkeypatch, run_mocked_pytest, session_uuid, logging_content, pyfile_testcase ): + runpytest, fluent_sender = run_mocked_pytest + monkeypatch.setattr(uuid, "uuid4", lambda: uuid.UUID(FAKE_TEST_UUID)) result = runpytest( f"--session-uuid={session_uuid}", f"--fluentd-tag={FLUENTD_TAG}", f"--fluentd-label={FLUENTD_LABEL}", "--extend-logging", + pyfile=pyfile_testcase, ) - call_args = fluentd_sender.emit_with_time.call_args_list + call_args = fluent_sender.emit_with_time.call_args_list result.assert_outcomes(passed=1) assert len(call_args) == 7 @@ -102,14 +110,16 @@ def is_pytest_message(args): def test_fluentd_with_options_and_timestamp_enabled_shows_timestamp_field_in_output( - runpytest, fluentd_sender, session_uuid + run_mocked_pytest, fluentd_sender, session_uuid, pyfile_testcase ): + runpytest, fluentd_sender = run_mocked_pytest result = runpytest( f"--session-uuid={session_uuid}", f"--fluentd-tag={FLUENTD_TAG}", f"--fluentd-label={FLUENTD_LABEL}", - f"--fluentd-timestamp=@timestamp", + "--fluentd-timestamp=@timestamp", "--extend-logging", + pyfile=pyfile_testcase, ) result.assert_outcomes(passed=1) call_args = fluentd_sender.emit_with_time.call_args_list @@ -120,11 +130,13 @@ def test_fluentd_with_options_and_timestamp_enabled_shows_timestamp_field_in_out def test_fluentd_with_timestamp_enabled_shows_timestamp_field_in_output( - runpytest, fluentd_sender, session_uuid + run_mocked_pytest, session_uuid, pyfile_testcase ): + runpytest, fluentd_sender = run_mocked_pytest result = runpytest( f"--session-uuid={session_uuid}", - f"--fluentd-timestamp=@timestamp", + "--fluentd-timestamp=@timestamp", + pyfile=pyfile_testcase, ) result.assert_outcomes(passed=1) call_args = fluentd_sender.emit_with_time.call_args_list diff --git a/tests/test_content_patcher.py b/tests/test_content_patcher.py new file mode 100644 index 0000000..ee1568f --- /dev/null +++ b/tests/test_content_patcher.py @@ -0,0 +1,335 @@ +"""Tests for ContentPatcher.""" +# pylint: disable=W0212, C0116, W0621 +import argparse +import typing +import uuid + +import pytest + +from pytest_fluent.content_patcher import ContentPatcher, _ContentType +from pytest_fluent.plugin import FluentLoggerRuntime + +UNIQUE_IDENTIFIER = str(uuid.uuid4()) + + +@pytest.fixture +def stage_names() -> typing.List[str]: + names = [ + method for method in dir(FluentLoggerRuntime) if method.startswith("pytest_") + ] + names.append("logging") + return names + + +@pytest.fixture +def user_settings() -> dict: + return { + "all": { + "tag": "run", + "label": "pytest", + "replace": {"keys": {"status": "state", "sessionId": "id"}}, + }, + "pytest_sessionstart": { + "tag": "run", + "label": "test", + "add": {"start_info": "Pytest started"}, + }, + "pytest_sessionfinish": { + "tag": "result", + "label": "test", + 
"add": {"stop_info": "Pytest finished"}, + }, + "pytest_runtest_logstart": { + "tag": "run", + "label": "testcase", + "add": {"start_info": "Testcase started"}, + }, + "pytest_runtest_logreport": { + "tag": "result", + "label": "testcase", + "replace": { + "values": {"passed": "pass", "failed": "fail"}, + }, + "add": {"stop_info": "Testcase finished"}, + }, + "logging": { + "replace": {"keys": {"message": "msg", "sessionId": "id"}}, + }, + } + + +@pytest.fixture +def user_settings_patched() -> dict: + return { + "pytest_runtest_call": { + "tag": "run", + "label": "pytest", + "replace": {"keys": {"status": "state", "sessionId": "id"}}, + }, + "pytest_runtest_logfinish": { + "tag": "run", + "label": "pytest", + "replace": {"keys": {"status": "state", "sessionId": "id"}}, + }, + "pytest_runtest_logreport": { + "tag": "result", + "label": "testcase", + "replace": { + "keys": {"status": "state", "sessionId": "id"}, + "values": {"passed": "pass", "failed": "fail"}, + }, + "add": {"stop_info": "Testcase finished"}, + }, + "pytest_runtest_logstart": { + "tag": "run", + "label": "testcase", + "replace": {"keys": {"status": "state", "sessionId": "id"}}, + "add": {"start_info": "Testcase started"}, + }, + "pytest_runtest_makereport": { + "tag": "run", + "label": "pytest", + "replace": {"keys": {"status": "state", "sessionId": "id"}}, + }, + "pytest_runtest_setup": { + "tag": "run", + "label": "pytest", + "replace": {"keys": {"status": "state", "sessionId": "id"}}, + }, + "pytest_runtest_teardown": { + "tag": "run", + "label": "pytest", + "replace": {"keys": {"status": "state", "sessionId": "id"}}, + }, + "pytest_sessionfinish": { + "tag": "result", + "label": "test", + "replace": {"keys": {"status": "state", "sessionId": "id"}}, + "add": {"stop_info": "Pytest finished"}, + }, + "pytest_sessionstart": { + "tag": "run", + "label": "test", + "replace": {"keys": {"status": "state", "sessionId": "id"}}, + "add": {"start_info": "Pytest started"}, + }, + "logging": { + "tag": "run", + "label": "pytest", + "replace": {"keys": {"message": "msg", "sessionId": "id"}}, + }, + } + + +@pytest.fixture +def stage_content() -> dict: + return {} + + +@pytest.fixture +def stage_content_patched() -> dict: + return {} + + +@pytest.fixture +def namespace() -> argparse.Namespace: + return argparse.Namespace(**{"fluentd_tag": "pytest"}) + + +def test_is_reference_string(): + assert ContentPatcher._is_reference_string("${USE_ENV}") == _ContentType.ENV + assert ContentPatcher._is_reference_string("") == _ContentType.ARGS + + +def test_get_env_content__no_env_string(): + assert ContentPatcher._get_env_content("test") == "" + + +def test_get_env_content__env_string(monkeypatch): + result = "test" + monkeypatch.setenv("USE_ENV", result) + assert ContentPatcher._get_env_content("$USE_ENV") == result + assert ContentPatcher._get_env_content("${USE_ENV}") == result + + +def test_get_env_content__env_string_no_content(): + assert ContentPatcher._get_env_content("$USE_ENV") == "" + + +def test_get_args_content__retrieve_content(stage_content, namespace, stage_names): + patcher = ContentPatcher( + user_settings=stage_content, args_settings=namespace, stage_names=stage_names + ) + assert patcher._get_args_content("") == "pytest" + + +def test_get_args_content__retrieve_no_content(stage_content, namespace, stage_names): + patcher = ContentPatcher( + user_settings=stage_content, args_settings=namespace, stage_names=stage_names + ) + assert patcher._get_args_content("") == "" + + +def test_stage_settings(user_settings, 
user_settings_patched, stage_names): + patched = ContentPatcher( + user_settings=user_settings, args_settings=namespace, stage_names=stage_names + ) + assert patched._user_settings == user_settings_patched + + +@pytest.mark.parametrize( + "to_patch,expected,stage", + [ + ( + { + "status": "start", + "stage": "session", + "sessionId": UNIQUE_IDENTIFIER, + }, + { + "tag": "run", + "label": "test", + "state": "start", + "stage": "session", + "id": UNIQUE_IDENTIFIER, + "start_info": "Pytest started", + }, + "pytest_sessionstart", + ), + ( + { + "status": "finish", + "stage": "session", + "sessionId": UNIQUE_IDENTIFIER, + }, + { + "tag": "result", + "label": "test", + "state": "finish", + "stage": "session", + "id": UNIQUE_IDENTIFIER, + "stop_info": "Pytest finished", + }, + "pytest_sessionfinish", + ), + ( + { + "status": "start", + "stage": "testcase", + "sessionId": UNIQUE_IDENTIFIER, + "testId": UNIQUE_IDENTIFIER, + "name": "testcase", + }, + { + "tag": "run", + "label": "testcase", + "state": "start", + "stage": "testcase", + "id": UNIQUE_IDENTIFIER, + "testId": UNIQUE_IDENTIFIER, + "name": "testcase", + "start_info": "Testcase started", + }, + "pytest_runtest_logstart", + ), + ( + { + "status": "finish", + "stage": "testcase", + "sessionId": UNIQUE_IDENTIFIER, + "testId": UNIQUE_IDENTIFIER, + "name": "testcase", + }, + { + "tag": "result", + "label": "testcase", + "state": "finish", + "stage": "testcase", + "id": UNIQUE_IDENTIFIER, + "testId": UNIQUE_IDENTIFIER, + "name": "testcase", + "stop_info": "Testcase finished", + }, + "pytest_runtest_logreport", + ), + ], +) +def test_patch_content(to_patch, expected, stage, user_settings_patched): + patched = ContentPatcher._patch_stage_content( + to_patch, user_settings_patched[stage] + ) + assert patched == expected + + +@pytest.mark.parametrize( + "to_patch,expected,stage,ignore", + [ + ( + { + "status": "start", + "stage": "session", + "sessionId": UNIQUE_IDENTIFIER, + }, + { + "tag": "run", + "label": "test", + "state": "start", + "stage": "session", + "id": UNIQUE_IDENTIFIER, + "start_info": "Pytest started", + }, + "pytest_sessionstart", + [], + ), + ( + { + "type": "logging", + "stage": "testcase", + "message": "Logged from test_base", + "sessionId": UNIQUE_IDENTIFIER, + }, + { + "type": "logging", + "stage": "testcase", + "id": UNIQUE_IDENTIFIER, + "msg": "Logged from test_base", + }, + "logging", + ["tag", "label"], + ), + ], +) +def test_patch( + to_patch, + expected, + stage, + ignore, + user_settings, + namespace, + stage_names, +): + patched = ContentPatcher(user_settings, namespace, stage_names).patch( + to_patch, stage, ignore + ) + assert patched == expected + + +def test_get_tag_and_label( + user_settings, namespace, stage_names, user_settings_patched +): + stage = "pytest_runtest_logstart" + patcher = ContentPatcher( + user_settings=user_settings, args_settings=namespace, stage_names=stage_names + ) + assert patcher.get_tag_and_label(stage) == ( + user_settings_patched[stage]["tag"], + user_settings_patched[stage]["label"], + ) + + def pytest_runtest_logstart(): + return patcher.get_tag_and_label() + + assert pytest_runtest_logstart() == ( + user_settings_patched[stage]["tag"], + user_settings_patched[stage]["label"], + ) diff --git a/tests/test_docstrings.py b/tests/test_docstrings.py index 0a3d8c6..6546a42 100644 --- a/tests/test_docstrings.py +++ b/tests/test_docstrings.py @@ -2,7 +2,7 @@ def test_add_docstrings(run_mocked_pytest, session_uuid): - runpytest, sender = run_mocked_pytest + runpytest, fluent_sender = 
run_mocked_pytest
     result = runpytest(
         f"--session-uuid={session_uuid}",
         "--add-docstrings",
@@ -14,7 +14,6 @@ def test_base():
         assert True
     """,
     )
-    fluent_sender = sender.return_value
     call_args = fluent_sender.emit_with_time.call_args_list
     result.assert_outcomes(passed=1)
     assert len(call_args) > 0
@@ -24,7 +23,7 @@ def test_base():
 
 
 def test_docstrings_disabled(run_mocked_pytest, session_uuid):
-    runpytest, sender = run_mocked_pytest
+    runpytest, fluent_sender = run_mocked_pytest
     result = runpytest(
         f"--session-uuid={session_uuid}",
         pyfile=f"""
@@ -35,7 +34,6 @@ def test_base():
         assert True
     """,
     )
-    fluent_sender = sender.return_value
     call_args = fluent_sender.emit_with_time.call_args_list
     result.assert_outcomes(passed=1)
     assert len(call_args) > 0
@@ -44,7 +42,7 @@ def test_base():
 
 
 def test_missing_docstring(run_mocked_pytest, session_uuid):
-    runpytest, sender = run_mocked_pytest
+    runpytest, fluent_sender = run_mocked_pytest
     result = runpytest(
         f"--session-uuid={session_uuid}",
         "--add-docstrings",
@@ -53,7 +51,6 @@ def test_base():
         assert True
     """,
     )
-    fluent_sender = sender.return_value
     call_args = fluent_sender.emit_with_time.call_args_list
     result.assert_outcomes(passed=1)
     assert len(call_args) > 0
diff --git a/tests/test_fixtures.py b/tests/test_fixtures.py
index c9acdc3..dff3c06 100644
--- a/tests/test_fixtures.py
+++ b/tests/test_fixtures.py
@@ -1,5 +1,5 @@
 def test_get_logger(run_mocked_pytest, session_uuid, logging_content):
-    runpytest, sender = run_mocked_pytest
+    runpytest, fluent_sender = run_mocked_pytest
     result = runpytest(
         f"--session-uuid={session_uuid}",
         "--extend-logging",
@@ -10,7 +10,6 @@ def test_base(get_logger):
         assert True
     """,
     )
-    fluent_sender = sender.return_value
     call_args = fluent_sender.emit_with_time.call_args_list
     result.assert_outcomes(passed=1)
     assert len(call_args) > 0
diff --git a/tests/test_ini_configuration.py b/tests/test_ini_configuration.py
index b51ec46..d7968aa 100644
--- a/tests/test_ini_configuration.py
+++ b/tests/test_ini_configuration.py
@@ -3,7 +3,8 @@
 
 import pytest
 
-# set log_level="DEBUG" and log_cli = true in pyproject.toml configuration when debug info is needed
+# set log_level="DEBUG" and log_cli = true in pyproject.toml configuration when
+# debug info is needed
 logger = logging.getLogger("debug-log")
 
 TAG = "unittest"
@@ -15,14 +16,18 @@
 @pytest.fixture
 def tox_ini(pytester, session_uuid):
     return pytester.makeini(
-        f"[pytest]\naddopts = --session-uuid='{session_uuid}' --fluentd-port={PORT} --fluentd-host='{HOSTNAME}' --fluentd-tag='{TAG}' --fluentd-label='{LABEL}' --extend-logging"
+        f'[pytest]\naddopts = --session-uuid="{session_uuid}" --fluentd-port={PORT} '
+        f'--fluentd-host="{HOSTNAME}" --fluentd-tag="{TAG}" --fluentd-label="{LABEL}" '
+        f"--extend-logging"
     )
 
 
 @pytest.fixture
 def pyprojtoml_ini(pytester, session_uuid):
     return pytester.makepyprojecttoml(
-        f"[tool.pytest.ini_options]\naddopts = \"--session-uuid='{session_uuid}' --fluentd-port={PORT} --fluentd-host='{HOSTNAME}' --fluentd-tag='{TAG}' --extend-logging\""
+        f'[tool.pytest.ini_options]\naddopts = "--session-uuid={session_uuid} '
+        f"--fluentd-port={PORT} --fluentd-host={HOSTNAME} --fluentd-tag={TAG} "
+        f'--extend-logging"'
     )
 
 
@@ -30,7 +35,9 @@ def pyprojtoml_ini(pytester, session_uuid):
 def pytest_ini(pytester, session_uuid):
     return pytester.makefile(
         ".ini",
-        pytest=f"[pytest]\naddopts = --session-uuid='{session_uuid}' --fluentd-port={PORT} --fluentd-host='{HOSTNAME}' --fluentd-tag='{TAG}' --fluentd-label='{LABEL}' --extend-logging",
+        pytest=f'[pytest]\naddopts = --session-uuid="{session_uuid}" '
+        f'--fluentd-port={PORT} --fluentd-host="{HOSTNAME}" --fluentd-tag="{TAG}" '
+        f'--fluentd-label="{LABEL}" --extend-logging',
     )
 
 
diff --git a/tests/test_parser.py b/tests/test_parser.py
new file mode 100644
index 0000000..84238aa
--- /dev/null
+++ b/tests/test_parser.py
@@ -0,0 +1,51 @@
+import argparse
+import os
+
+import pytest
+
+from pytest_fluent.setting_file_loader_action import SettingFileLoaderAction
+
+parser = argparse.ArgumentParser()
+parser.add_argument(
+    "--stage-settings",
+    type=str,
+    dest="settings",
+    default=os.path.join(os.path.dirname(__file__), "data", "default.stage.json"),
+    action=SettingFileLoaderAction,
+    help="Stage setting description JSON or YAML file path or string object.",
+)
+
+
+@pytest.fixture
+def default() -> dict:
+    return {"all": {"tag": "", "label": ""}}
+
+
+def test_json_file(default):
+    args = parser.parse_args(
+        [
+            "--stage-settings",
+            os.path.join(os.path.dirname(__file__), "data", "default.json"),
+        ]
+    )
+    assert args.settings == default
+
+
+def test_yaml_file(default):
+    args = parser.parse_args(
+        [
+            "--stage-settings",
+            os.path.join(os.path.dirname(__file__), "data", "default.yaml"),
+        ]
+    )
+    assert args.settings == default
+
+
+def test_xml_error():
+    with pytest.raises(ValueError):
+        parser.parse_args(
+            [
+                "--stage-settings",
+                os.path.join(os.path.dirname(__file__), "data", "default.xml"),
+            ]
+        )
diff --git a/tests/test_reporting.py b/tests/test_reporting.py
index 2231b98..d84f05a 100644
--- a/tests/test_reporting.py
+++ b/tests/test_reporting.py
@@ -1,5 +1,5 @@
 def test_data_reporter_base_with_passed(run_mocked_pytest, session_uuid):
-    runpytest, sender = run_mocked_pytest
+    runpytest, fluent_sender = run_mocked_pytest
     result = runpytest(
         f"--session-uuid={session_uuid}",
         pyfile="""
@@ -7,7 +7,6 @@ def test_base():
         assert True
     """,
     )
-    fluent_sender = sender.return_value
     call_args = fluent_sender.emit_with_time.call_args_list
     result.assert_outcomes(passed=1)
     assert len(call_args) > 0
@@ -24,7 +23,7 @@ def test_base():
 
 
 def test_data_reporter_xdist_passed(run_mocked_pytest, session_uuid):
-    runpytest, sender = run_mocked_pytest
+    runpytest, fluent_sender = run_mocked_pytest
     result = runpytest(
         "-n 2",
         f"--session-uuid={session_uuid}",
@@ -48,7 +47,6 @@ def test_base_group_six():
         assert True
     """,
     )
-    fluent_sender = sender.return_value
     call_args = fluent_sender.emit_with_time.call_args_list
     result.assert_outcomes(passed=6)
     assert len(call_args) > 0
@@ -76,7 +74,7 @@ def check_for_verdict(session_uuid, report: dict):
 
 
 def test_data_reporter_base_with_xfail(run_mocked_pytest, session_uuid):
-    runpytest, sender = run_mocked_pytest
+    runpytest, fluent_sender = run_mocked_pytest
     _ = runpytest(
         f"--session-uuid={session_uuid}",
         pyfile="""
@@ -87,7 +85,6 @@ def test_base():
         assert False
     """,
     )
-    fluent_sender = sender.return_value
     call_args = fluent_sender.emit_with_time.call_args_list
     assert len(call_args) > 0
     args = call_args[2].args[2]
@@ -97,7 +94,7 @@ def test_base():
 
 
 def test_data_reporter_base_with_exception(run_mocked_pytest, session_uuid):
-    runpytest, sender = run_mocked_pytest
+    runpytest, fluent_sender = run_mocked_pytest
     _ = runpytest(
         f"--session-uuid={session_uuid}",
         pyfile="""
@@ -106,7 +103,6 @@ def test_base():
         assert True
     """,
     )
-    fluent_sender = sender.return_value
     call_args = fluent_sender.emit_with_time.call_args_list
     assert len(call_args) > 0
     args = call_args[2].args[2]
@@ -116,7 +112,7 @@ def test_base():
 
 
 def test_data_reporter_base_with_setup_exception(run_mocked_pytest, session_uuid):
-    runpytest, sender = run_mocked_pytest
+    runpytest, fluent_sender = run_mocked_pytest
     _ = runpytest(
         f"--session-uuid={session_uuid}",
         pyfile="""
@@ -133,7 +129,6 @@ def test_base(my_value):
         assert True
     """,
     )
-    fluent_sender = sender.return_value
     call_args = fluent_sender.emit_with_time.call_args_list
     assert len(call_args) > 0
     args = call_args[2].args[2]
diff --git a/tests/test_reporting_patching.py b/tests/test_reporting_patching.py
new file mode 100644
index 0000000..7eea9f2
--- /dev/null
+++ b/tests/test_reporting_patching.py
@@ -0,0 +1,160 @@
+import json
+
+import pytest
+
+from .conftest import SESSION_UUID
+
+
+@pytest.mark.parametrize(
+    "patch_file_content,expected_result",
+    [
+        (
+            {"all": {"tag": "", "label": ""}},
+            [
+                {
+                    "status": "start",
+                    "stage": "session",
+                    "sessionId": str(SESSION_UUID),
+                    "tag": "test",
+                    "label": "pytest",
+                },
+                {
+                    "status": "start",
+                    "stage": "testcase",
+                    "sessionId": str(SESSION_UUID),
+                    "name": "test_data_reporter_with_patched_values.py::test_base",
+                    "tag": "test",
+                    "label": "pytest",
+                },
+                {
+                    "type": "logging",
+                    "where": "test_data_reporter_with_patched_values.test_base",
+                    "level": "INFO",
+                    "stack_trace": "None",
+                    "message": "Test running",
+                    "sessionId": str(SESSION_UUID),
+                    "stage": "testcase",
+                },
+                {
+                    "name": "test_data_reporter_with_patched_values.py::test_base",
+                    "outcome": "passed",
+                    "stage": "testcase",
+                    "when": "call",
+                    "sessionId": str(SESSION_UUID),
+                    "tag": "test",
+                },
+                {
+                    "status": "finish",
+                    "stage": "testcase",
+                    "sessionId": str(SESSION_UUID),
+                    "name": "test_data_reporter_with_patched_values.py::test_base",
+                    "tag": "test",
+                    "label": "pytest",
+                },
+                {
+                    "status": "finish",
+                    "stage": "session",
+                    "sessionId": str(SESSION_UUID),
+                    "tag": "test",
+                    "label": "pytest",
+                },
+            ],
+        ),
+        (
+            {
+                "all": {
+                    "tag": "",
+                    "label": "",
+                    "replace": {
+                        "keys": {"status": "state", "sessionId": "id"},
+                    },
+                },
+                "pytest_runtest_logreport": {
+                    "replace": {
+                        "values": {"passed": "pass", "failed": "fail"},
+                    },
+                    "add": {"stop_info": "Testcase finished"},
+                },
+            },
+            [
+                {
+                    "state": "start",
+                    "stage": "session",
+                    "id": str(SESSION_UUID),
+                    "tag": "test",
+                    "label": "pytest",
+                },
+                {
+                    "state": "start",
+                    "stage": "testcase",
+                    "id": str(SESSION_UUID),
+                    "name": "test_data_reporter_with_patched_values.py::test_base",
+                    "tag": "test",
+                    "label": "pytest",
+                },
+                {
+                    "type": "logging",
+                    "where": "test_data_reporter_with_patched_values.test_base",
+                    "level": "INFO",
+                    "stack_trace": "None",
+                    "message": "Test running",
+                    "id": str(SESSION_UUID),
+                    "stage": "testcase",
+                },
+                {
+                    "name": "test_data_reporter_with_patched_values.py::test_base",
+                    "outcome": "pass",
+                    "stage": "testcase",
+                    "when": "call",
+                    "id": str(SESSION_UUID),
+                    "tag": "test",
+                    "stop_info": "Testcase finished",
+                },
+                {
+                    "state": "finish",
+                    "stage": "testcase",
+                    "id": str(SESSION_UUID),
+                    "name": "test_data_reporter_with_patched_values.py::test_base",
+                    "tag": "test",
+                    "label": "pytest",
+                },
+                {
+                    "state": "finish",
+                    "stage": "session",
+                    "id": str(SESSION_UUID),
+                    "tag": "test",
+                    "label": "pytest",
+                },
+            ],
+        ),
+    ],
+)
+def test_data_reporter_with_patched_values(
+    pytester, run_mocked_pytest, session_uuid, patch_file_content, expected_result
+):
+    runpytest, fluent_sender = run_mocked_pytest
+    pytester.makefile(".json", patch_file=json.dumps(patch_file_content))
+    log_content = "Test running"
+    result = runpytest(
+        f"--session-uuid={session_uuid}",
+        "--stage-settings=patch_file.json",
+        "--extend-logging",
+        pyfile=f"""
+    import logging
+
+    def test_base():
+        logger = logging.getLogger()
+        logger.info("{log_content}")
+        assert True
+    """,
+    )
+    call_args = fluent_sender.emit_with_time.call_args_list
+    call_args = [x[0][2] for x in call_args]
+    result.assert_outcomes(passed=1)
+    assert len(call_args) == len(expected_result)
+    for report, expected in zip(call_args, expected_result):
+        for key in expected.keys():
+            assert key in report
+            if key in ["duration", "testId", "host", "markers"]:
+                continue
+            assert report[key] == expected[key]
diff --git a/tests/test_schema.py b/tests/test_schema.py
new file mode 100644
index 0000000..945de6d
--- /dev/null
+++ b/tests/test_schema.py
@@ -0,0 +1,49 @@
+import json
+
+import jsonschema
+import pytest
+
+try:
+    from importlib.resources import files
+except ImportError:
+    from importlib_resources import files
+
+pytest_fluent_resources = files("pytest_fluent")
+schema_file = pytest_fluent_resources / "data" / "schema.stage.json"
+default_file = pytest_fluent_resources / "data" / "default.stage.json"
+
+
+@pytest.fixture
+def default() -> dict:
+    with open(default_file, "r") as fp:
+        default = json.load(fp)
+    return default
+
+
+@pytest.fixture
+def schema() -> dict:
+    with open(schema_file, "r") as fp:
+        schema = json.load(fp)
+    return schema
+
+
+def test_default_compliance(default, schema):
+    jsonschema.validate(default, schema)
+    default["pytest_runtest_logstart"] = {"replace": {"status": "state"}}
+    jsonschema.validate(default, schema)
+    default["pytest_runtest_logstart"] = {"add": {"my_field": "${MY_FIELD}"}}
+    jsonschema.validate(default, schema)
+    default["pytest_runtest_logstart"] = {"tag": "", "label": ""}
+    jsonschema.validate(default, schema)
+
+
+def test_default_compliance_fail(default, schema):
+    default["any"] = {"tag": "x", "label": "y", "replace": {"where": "test"}}
+
+    with pytest.raises(jsonschema.ValidationError):
+        jsonschema.validate(default, schema)
+
+    default["any"] = {"replace": {"where": "test"}}
+
+    with pytest.raises(jsonschema.ValidationError):
+        jsonschema.validate(default, schema)
diff --git a/tox.ini b/tox.ini
index 78db77a..05871f2 100644
--- a/tox.ini
+++ b/tox.ini
@@ -39,7 +39,7 @@ description = Check code and tests for PEP 8 compliance and code complexity.
 skip_install = true
 envdir = {toxworkdir}/lint
 deps =
-    flake8 
+    flake8
     flake8-colors
     isort >= 5.0
 commands =
@@ -51,7 +51,7 @@ description = Check docstrings for PEP 257 compliance (reST style).
 skip_install = true
 envdir = {toxworkdir}/lint
 deps =
-    flake8 
+    flake8
     flake8-colors
     flake8-rst-docstrings
 commands =
     flake8 --select RST src/
@@ -62,7 +62,7 @@ envdir = {toxworkdir}/lint
 deps =
     flake8
     flake8-colors
-commands = 
+commands =
     flake8 --select F src/ tests/
 
 [testenv:pylint]
@@ -70,7 +70,7 @@ description = Find errors with static code analysis.
 envdir = {toxworkdir}/lint
 deps =
     pylint
-commands = 
+commands =
     pylint --output-format=colorized --errors-only src/pytest_fluent
 
 [testenv:errors]
@@ -137,10 +137,9 @@ commands =
 description = Generate API documentation.
 changedir = docs
 extras = docs
-setenv = 
+setenv =
     WORKSPACE_ROOT_PATH = {toxinidir}
 commands =
-    {envbindir}/python patch_readme.py
     sphinx-build -W -d {envtmpdir}/doctrees . {envtmpdir}/html
 
 [testenv:version]