diff --git a/.github/workflows/timeseries-build-pull-request.yml b/.github/workflows/timeseries-build-pull-request.yml
index b794b593f..d03f87ac2 100644
--- a/.github/workflows/timeseries-build-pull-request.yml
+++ b/.github/workflows/timeseries-build-pull-request.yml
@@ -114,6 +114,31 @@ jobs:
         with:
           name: unit-test-report
           path: /tmp/report.txt
+
+  timeseries-function-tests:
+    name: Timeseries Functional Tests
+    permissions:
+      contents: read
+      packages: read
+    runs-on: ubuntu-24.04
+    steps:
+      - uses: actions/checkout@v1
+        with:
+          path: timeseries
+          persist-credentials: false
+      - name: Run time-series-analytics-microservices function tests
+        run: |
+          cd "${{ github.workspace }}"
+          cd timeseries/microservices/time-series-analytics/tests-functional
+          echo "Running function tests"
+          pip3 install -r requirements.txt
+          rm -rf /tmp/test_report/report.html
+          pytest -v --html=/tmp/test_report/report.html test_docker.py::test_input_endpoint test_docker.py::test_health_check
+      - name: Upload HTML test report to Github
+        uses: actions/upload-artifact@v4
+        with:
+          name: function-test-report
+          path: /tmp/test_report
 
   timeseries-scans:
-    uses: ./.github/workflows/timeseries-scans-virus-bandit-pylint.yaml
+    uses: ./.github/workflows/timeseries-scans.yaml
diff --git a/.github/workflows/timeseries-weekly-functional-tests.yaml b/.github/workflows/timeseries-weekly-functional-tests.yaml
new file mode 100644
index 000000000..118cb5819
--- /dev/null
+++ b/.github/workflows/timeseries-weekly-functional-tests.yaml
@@ -0,0 +1,39 @@
+name: "[Time Series Analytics] Run weekly functional tests"
+run-name: "[Time Series Analytics] Run weekly functional tests"
+on:
+  schedule:
+    - cron: '0 14 * * 5' # 14:00 UTC every Friday
+  workflow_dispatch:
+permissions: {}
+
+jobs:
+  timeseries-weekly-functional-tests:
+    name: Weekly run of Time Series Analytics Microservice functional tests
+    runs-on: ubuntu-24.04
+    permissions:
+      contents: read
+      packages: write
+    strategy:
+      fail-fast: false
+    steps:
+      - name: Check out edge-ai-libraries repository
+        uses: actions/checkout@v1
+        with:
+          persist-credentials: false
+          path: edge-ai-libraries-repo
+
+      - name: Run time-series-analytics-microservices function tests
+        run: |
+          cd "${{ github.workspace }}"
+          cd edge-ai-libraries-repo/microservices/time-series-analytics/tests-functional
+          echo "Running function tests"
+          pip3 install -r requirements.txt
+          rm -rf /tmp/test_report/report.html
+          pytest -v --html=/tmp/test_report/report.html test_docker.py
+      - name: Upload HTML test report to Github
+        uses: actions/upload-artifact@v4
+        with:
+          name: function-test-report
+          path: /tmp/test_report
+
+
\ No newline at end of file
diff --git a/microservices/time-series-analytics/README.md b/microservices/time-series-analytics/README.md
index ea982893d..e3d7d682f 100644
--- a/microservices/time-series-analytics/README.md
+++ b/microservices/time-series-analytics/README.md
@@ -29,3 +29,14 @@ cd edge-ai-libraries/microservices/time-series-analytics
 echo "Running unit tests"
 ./tests/run_tests.sh
 ```
+
+## Running Functional tests
+
+Follow the steps below to run the functional tests.
+```bash
+git clone https://github.com/open-edge-platform/edge-ai-libraries
+cd edge-ai-libraries/microservices/time-series-analytics/tests-functional
+echo "Running functional tests"
+pip3 install -r requirements.txt
+pytest -v --html=./test_report/report.html .
+```
\ No newline at end of file
diff --git a/microservices/time-series-analytics/tests-functional/requirements.txt b/microservices/time-series-analytics/tests-functional/requirements.txt
new file mode 100644
index 000000000..b21a1e2b5
--- /dev/null
+++ b/microservices/time-series-analytics/tests-functional/requirements.txt
@@ -0,0 +1,3 @@
+pytest==8.4.1
+pytest-html==4.1.1
+requests==2.32.4
diff --git a/microservices/time-series-analytics/tests-functional/rest_api_utils.py b/microservices/time-series-analytics/tests-functional/rest_api_utils.py
new file mode 100644
index 000000000..f65c47a88
--- /dev/null
+++ b/microservices/time-series-analytics/tests-functional/rest_api_utils.py
@@ -0,0 +1,247 @@
+#
+# Apache v2 license
+# Copyright (C) 2025 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+#
+
+import pytest
+import requests
+import time
+import subprocess
+import json
+import os
+
+# Read the config.json file
+TS_DIR = os.getcwd() + "/../"
+config_file = json.load(open(TS_DIR + "config.json"))
+print(config_file)
+
+def run_command(command):
+    """Run a shell command and return the output."""
+    result = subprocess.run(command, shell=True, capture_output=True, text=True)
+    if result.returncode != 0:
+        raise RuntimeError(f"Command failed: {command}\n{result.stderr}")
+    return result.stdout.strip()
+
+## REST API Tests
+
+# Get health check /health endpoint
+def health_check(port):
+    """
+    Test the health check endpoint of the Time Series Analytics service.
+    """
+    url = f"http://localhost:{port}/health"
+    try:
+        response = requests.get(url)
+        assert response.status_code == 200
+        assert response.json() == {"status": "kapacitor daemon is running"}
+    except Exception as e:
+        pytest.fail(f"Health check failed: {e}")
+
+# Post the OPC UA alerts /opcua_alerts endpoint
+def opcua_alerts(port):
+    """
+    Test the OPC UA alerts endpoint of the Time Series Analytics service.
+    """
+    alert_message = {"message": "Test alert"}
+    try:
+        url = f"http://localhost:{port}/opcua_alerts"
+        response = requests.post(url, json=alert_message)
+        assert response.status_code == 500
+        assert response.json() == {'detail': '500: OPC UA alerts are not configured in the service'}
+    except Exception as e:
+        pytest.fail(f"Failed to post OPC UA alerts: {e}")
+
+# Post valid input data to the /input endpoint
+def input_endpoint(port):
+    """
+    Test the input endpoint of the Time Series Analytics service.
+    """
+    input_data = {
+        "topic": "point_data",
+        "tags": {
+        },
+        "fields": {
+            "temperature": 30
+        },
+        "timestamp": 0
+    }
+    try:
+        url = f"http://localhost:{port}/input"
+        response = requests.post(url, json=input_data)
+        assert response.status_code == 200
+        assert response.json() == {"status": "success", "message": "Data sent to Time Series Analytics microservice"}
+    except Exception as e:
+        pytest.fail(f"Failed to post valid input data: {e}")
+
+# Post invalid input data to the /input endpoint
+def input_endpoint_invalid_data(port):
+    """
+    Test the input endpoint of the Time Series Analytics service with invalid data.
+ """ + input_data = { + "topic": "point_data", + "tags": { + }, + "fields": { + "temperature": "invalid_value" # Invalid temperature value + }, + "timestamp": 0 + } + try: + url = f"http://localhost:{port}/input" + response = requests.post(url, json=input_data) + assert response.status_code == 500 + assert "400: unable to parse 'point_data temperature=invalid_value" in response.json().get("detail", "") + except Exception as e: + pytest.fail(f"Failed to post invalid input data: {e}") + input_data["fields"]["temperature"] = "" + try: + url = f"http://localhost:{port}/input" + response = requests.post(url, json=input_data) + assert response.status_code == 500 + assert "400: unable to parse 'point_data temperature=" in response.json().get("detail", "") + except Exception as e: + pytest.fail(f"Failed to post no input data: {e}") + +# Post no input data to the /input endpoint +def input_endpoint_no_data(port): + """ + Test the input endpoint of the Time Series Analytics service. + """ + input_data = { + "topic": "point_data", + "tags": { + }, + "fields": { + "temperature": "" # Invalid temperature value + }, + "timestamp": 0 + } + try: + url = f"http://localhost:{port}/input" + response = requests.post(url, json=input_data) + assert response.status_code == 500 + assert "400: unable to parse 'point_data temperature=" in response.json().get("detail", "") + except Exception as e: + pytest.fail(f"Failed to post no input data: {e}") + +# Get config data from the /config endpoint +def get_config_endpoint(port): + """ + Test the config endpoint of the Time Series Analytics service. + """ + url = f"http://localhost:{port}/config" + try: + response = requests.get(url) + assert response.status_code == 200 + assert response.json() == config_file + except Exception as e: + pytest.fail(f"Failed to get config data: {e}") + +# Post config data to the /config endpoint +def post_config_endpoint(port, cmd): + """ + Test the config endpoint of the Time Series Analytics service. + """ + url = f"http://localhost:{port}/config" + try: + response = requests.post(url, json=config_file) + assert response.status_code == 200 + assert response.json() == {"status": "success", "message": "Configuration updated successfully"} + time.sleep(10) # Wait for the configuration to be applied + command = f"{cmd} 2>&1 | grep -i 'Kapacitor daemon process has exited and was reaped.'" + output = run_command(command) + assert "Kapacitor daemon process has exited and was reaped." in output + except Exception as e: + pytest.fail(f"Failed to post config data: {e}") + +# Test concurrent API requests +def concurrent_api_requests(port): + """ + Test concurrent API requests to the Time Series Analytics service. 
+ """ + url = f"http://localhost:{port}" + input_data = { + "topic": "point_data", + "tags": {}, + "fields": {"temperature": 30}, + "timestamp": 0 + } + config_file_alerts = config_file.copy() + config_file_alerts["alerts"] = {} + opcua_alert = {"message": "Test alert"} + endpoints = ['/health', '/config', '/opcua_alerts', '/input' ] + print("config file alert", config_file_alerts) + print("config file", config_file) + def get_request(endpoint): + try: + response = requests.get(url + endpoint) + return response.status_code, response.text + except Exception as e: + return None, str(e) + + def post_request(endpoint, data): + try: + response = requests.post(url + endpoint, json=data) + return response.status_code, response.json() + except Exception as e: + return None, str(e) + + from concurrent.futures import ThreadPoolExecutor + with ThreadPoolExecutor(max_workers=5) as executor: + try: + future_get_health = executor.submit(get_request, endpoints[0]) + future_get_config = executor.submit(get_request, endpoints[1]) + + # Schedule the POST request + future_post_alert = executor.submit(post_request, endpoints[2], opcua_alert) + future_post_input = executor.submit(post_request, endpoints[3], input_data) + future_post_config = executor.submit(post_request, endpoints[1], config_file) + + # Retrieve results + get_health_result = future_get_health.result() + get_config_result = future_get_config.result() + post_alert_result = future_post_alert.result() + + print(f"GET /health: {get_health_result}") + print(f"GET /config: {get_config_result}") + print(f"POST /opcua_alerts: {post_alert_result}") + print(f"POST /input: {future_post_input.result()}") + print(f"POST /config: {future_post_config.result()}") + + health_status_code = [200, 500, 503] + health_status_json = [{"status": "kapacitor daemon is running"}, {"detail": "500: Kapacitor daemon is not running"}, {"status":"Port not accessible and kapacitor daemon not running"}] + assert get_health_result[0] in health_status_code + assert json.loads(get_health_result[1]) in health_status_json + assert get_config_result[0] == 200 + assert json.loads(get_config_result[1]) == config_file or json.loads(get_config_result[1]) == config_file_alerts + assert post_alert_result[0] == 500 + assert post_alert_result[1] == {'detail': '500: OPC UA alerts are not configured in the service'} + assert future_post_input.result()[0] == 200 or future_post_input.result()[0] == 500 + assert future_post_input.result()[1] == {"status": "success", "message": "Data sent to Time Series Analytics microservice"} or \ + future_post_input.result()[1] == {'detail': '500: Kapacitor daemon is not running'} + assert future_post_config.result()[0] == 200 + assert future_post_config.result()[1] == {"status": "success", "message": "Configuration updated successfully"} + except Exception as e: + pytest.fail(f"Concurrent API requests failed: {e}") + +# Post invalid config data to the /config endpoint +def post_invalid_config_endpoint(port, cmd): + """ + Test the config endpoint of the Time Series Analytics service. 
+ """ + url = f"http://localhost:{port}/config" + invalid_config_data = config_file.copy() + invalid_config_data["udfs"]["name"] = "udf_classifier" + try: + response = requests.post(url, json=invalid_config_data) + assert response.status_code == 200 + assert response.json() == {"status": "success", "message": "Configuration updated successfully"} + time.sleep(15) # Wait for the configuration to be applied + command = f"{cmd} 2>&1 | grep -i 'UDF deployment package directory udf_classifier does not exist. Please check and upload/copy the UDF deployment package.'" + output = run_command(command) + print(output) + assert "UDF deployment package directory udf_classifier does not exist. Please check and upload/copy the UDF deployment package." in output + except Exception as e: + pytest.fail(f"Failed to post config data: {e}") diff --git a/microservices/time-series-analytics/tests-functional/test_docker.py b/microservices/time-series-analytics/tests-functional/test_docker.py new file mode 100644 index 000000000..609caad69 --- /dev/null +++ b/microservices/time-series-analytics/tests-functional/test_docker.py @@ -0,0 +1,176 @@ +# +# Apache v2 license +# Copyright (C) 2025 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +import subprocess +import os +import pytest +import time +import rest_api_utils as utils + +CONTAINER_NAME = "ia-time-series-analytics-microservice" +TS_DOCKER_PORT = 5000 + +cwd = os.getcwd() +TS_DIR = cwd + "/../" +print(f"Current working directory: {cwd}") +if not os.path.exists(TS_DIR): + pytest.skip("Time Series Analytics directory not found. Skipping tests.") + +def build_docker_image(): + """Build the Docker image for the Time Series Analytics service.""" + print("Building Docker image...") + os.chdir(TS_DIR + "docker") + command = "docker compose build --no-cache" + output = utils.run_command(command) + print(output) + +def docker_compose_up(): + """Start the Docker containers using docker-compose.""" + print("Starting Docker containers...") + os.chdir(TS_DIR + "docker") + command = "docker compose up -d" + output = utils.run_command(command) + print(output) + +def docker_compose_down(): + """Stop and remove the Docker containers.""" + print("Stopping Docker containers...") + os.chdir(TS_DIR + "docker") + command = "docker compose down -v" + output = utils.run_command(command) + print(output) + +def docker_ps(): + """List the running Docker containers.""" + print("Listing running Docker containers...") + command = "docker ps" + output = utils.run_command(command) + print(output) + +@pytest.fixture(scope="function", autouse=True) +def setup_docker_environment(): + """Setup Docker environment before running tests.""" + print("Setting up Docker environment...") + docker_compose_down() + # Build the Docker image only once at the start of the session + if not getattr(setup_docker_environment, "_image_built", False): + build_docker_image() + setattr(setup_docker_environment, "_image_built", True) + docker_compose_up() + print("Started container") + time.sleep(30) # Wait for containers to start + print("yielding control to tests...") + yield + # Stop docker containers + print("Bringing down Docker container...") + docker_compose_down() + time.sleep(5) + +def test_timeseries_microservice_started_successfully(): + """ + Test to check if the required Docker container is running. 
+ """ + command = f"docker ps --filter 'name={CONTAINER_NAME}' --format '{{{{.Names}}}}'" + try: + output = utils.run_command(command) + assert CONTAINER_NAME in output + except Exception as e: + pytest.fail(f"Failed to check if Time Series Analytics Microservice is running: {e}") + +# Test to check if Time Series Analytics Microservice is built and running +def test_test_timeseries_microservice_start(): + """ + Test to check if 'Time Series Analytics Microservice Initialized Successfully. Ready to Receive the Data...' + is present in the Time Series Analytics Microservice container logs. + """ + try: + command = f"docker logs {CONTAINER_NAME} 2>&1 | grep -i 'Kapacitor Initialized Successfully'" + time.sleep(45) # Wait for the container to initialize + output = utils.run_command(command) + print(output) + assert "Kapacitor Initialized Successfully. Ready to Receive the Data..." in output + except Exception as e: + pytest.fail(f"Failed to check Time Series Analytics Microservice initialization: {e}") + +## REST API Tests + +def test_health_check(): + # Get health check /health endpoint + print("Testing health check endpoint in utils...") + utils.health_check(TS_DOCKER_PORT) + +# Post the OPC UA alerts /opcua_alerts endpoint +def test_opcua_alerts(): + """ + Test the OPC UA alerts endpoint of the Time Series Analytics service. + """ + utils.opcua_alerts(TS_DOCKER_PORT) + +# Post valid input data to the /input endpoint +def test_input_endpoint(): + """ + Test the input endpoint of the Time Series Analytics service. + """ + utils.input_endpoint(TS_DOCKER_PORT) + +# Post invalid input data to the /input endpoint +def test_input_endpoint_invalid_data(): + """ + Test the input endpoint of the Time Series Analytics service. + """ + utils.input_endpoint_invalid_data(TS_DOCKER_PORT) + utils.input_endpoint_no_data(TS_DOCKER_PORT) + +def test_get_config_endpoint(): + """ + Test the config endpoint of the Time Series Analytics service. + """ + utils.get_config_endpoint(TS_DOCKER_PORT) + +# Post config data to the /config endpoint +def test_post_config_endpoint(): + """ + Test the config endpoint of the Time Series Analytics service. + """ + cmd = f"docker logs {CONTAINER_NAME}" + utils.post_config_endpoint(TS_DOCKER_PORT, cmd) + +# Test concurrent API requests +def test_concurrent_api_requests(): + """ + Test concurrent API requests to the Time Series Analytics service. + """ + utils.concurrent_api_requests(TS_DOCKER_PORT) + +# Post config data to the /config endpoint +def test_post_invalid_config_endpoint(): + """ + Test the config endpoint of the Time Series Analytics service. + """ + cmd = f"docker logs {CONTAINER_NAME}" + utils.post_invalid_config_endpoint(TS_DOCKER_PORT, cmd) + +def test_temperature_input(): + """ + Test to check if the temperature simulator script runs without error. 
+ """ + os.chdir(TS_DIR) + command = "pip3 install -r simulator/requirements.txt" + utils.run_command(command) + command = ["timeout", "20", "python3", "simulator/temperature_input.py", "--port", str(TS_DOCKER_PORT) ] + try: + print("Starting temperature simulator...") + # Run the simulator for 20 seconds, then terminate + subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + time.sleep(10) + print("Temperature simulator started successfully.") + command = f"docker logs {CONTAINER_NAME} 2>&1 | grep -i 'is outside the range 20-25.'" + time.sleep(10) # Wait for the simulator to produce output + print("Checking Time Series Analytics Microservice logs for temperature data...") + output = utils.run_command(command) + assert "is outside the range 20-25." in output + except RuntimeError as e: + pytest.fail(f"Time Series Analytics Microservice failed for the temperature input data: {e}") diff --git a/microservices/time-series-analytics/tests-functional/test_helm.py b/microservices/time-series-analytics/tests-functional/test_helm.py new file mode 100644 index 000000000..32bc6ccc9 --- /dev/null +++ b/microservices/time-series-analytics/tests-functional/test_helm.py @@ -0,0 +1,212 @@ +# +# Apache v2 license +# Copyright (C) 2025 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +import subprocess +import os +import pytest +import time +import shutil +import rest_api_utils as utils + +NAMESPACE = "apps" +RELEASE_NAME = "time-series-analytics-microservice" +TS_HELM_PORT = 30002 + +cwd = os.getcwd() +print(f"Current working directory: {cwd}") +TS_DIR = cwd + "/../" +HELM_DIR = TS_DIR + "helm/" +if not os.path.exists(TS_DIR): + pytest.skip("Time Series Analytics directory not found. Skipping tests.") + + +def helm_install(release_name, chart_path, namespace): + """Install a Helm chart with specified parameters.""" + try: + # Construct the Helm install command + helm_command = [ + "helm", "install", release_name, chart_path, + "-n", namespace, "--create-namespace" + ] + + # Execute the Helm install command and capture output + print(f"Installing Helm chart...") + result = subprocess.run(helm_command, capture_output=True, text=True, check=True) + + # Print the output for debugging purposes + print(result.stdout) + return True + except subprocess.CalledProcessError as e: + print(f"Failed to install Helm chart: Error: INSTALLATION FAILED: values don't meet the specifications of the schema(s) in the following chart(s): {e.stderr}") + return False + +def helm_uninstall(release_name, namespace): + """Uninstall a Helm release with specified parameters.""" + try: + # Construct the Helm uninstall command + helm_command = [ + "helm", "uninstall", release_name, + "-n", namespace + ] + + # Execute the Helm uninstall command and capture output + print(f"Uninstalling Helm release '{release_name}' from namespace '{namespace}'...") + result = subprocess.run(helm_command, capture_output=True, text=True, check=True) + + # Print the output for debugging purposes + print(result.stdout) + return True + except subprocess.CalledProcessError as e: + print(f"Failed to uninstall Helm release: Error: uninstall: Release not loaded: ts-wind-turbine-anomaly: release: not found: {e.stderr}") + return False + +def get_pod_names(namespace): + """Fetch pod names in the given namespace.""" + try: + result = subprocess.run( + ["kubectl", "get", "pods", "-n", namespace, "-o", "jsonpath={.items[*].metadata.name}"], + capture_output=True, text=True, check=True + ) + + pod_names = result.stdout.strip().split() 
+        return pod_names
+    except subprocess.CalledProcessError as e:
+        print(f"Failed to fetch pod names: {e}")
+        return []
+
+@pytest.fixture(scope="function", autouse=True)
+def helm_setup():
+    """Fixture to set up and tear down Helm environment for each test."""
+
+    os.chdir(TS_DIR)
+    shutil.copy("config.json", HELM_DIR + "config.json")
+
+    # Install the Helm chart
+    if not helm_install(RELEASE_NAME, HELM_DIR, NAMESPACE):
+        pytest.fail("Helm installation failed.")
+    time.sleep(30)  # Wait for the Helm release to be ready
+    yield
+    # Uninstall the Helm release
+    if not helm_uninstall(RELEASE_NAME, NAMESPACE):
+        pytest.fail("Helm uninstallation failed.")
+    # Wait for pods to be terminated
+    time.sleep(30)
+
+def test_timeseries_microservice_start():
+    """Check that the Time Series Analytics Microservice pods are running."""
+    # Check if pods are running
+    pod_names = get_pod_names(NAMESPACE)
+    print(f"Found pods in namespace '{NAMESPACE}': {pod_names}")
+    if not pod_names:
+        pytest.fail("No pods found in the namespace.")
+
+    print("Time Series Analytics Microservice started successfully.")
+
+def test_timeseries_microservice_started_successfully():
+    """Check that the Time Series Analytics Microservice initialized successfully."""
+    # Check if pods are running
+    pod_names = get_pod_names(NAMESPACE)
+    print(f"Found pods in namespace '{NAMESPACE}': {pod_names}")
+    if not pod_names:
+        pytest.fail("No pods found in the namespace.")
+
+    try:
+        command = f"kubectl logs -n {NAMESPACE} {pod_names[0]} 2>&1 | grep -i 'Kapacitor Initialized Successfully'"
+        output = utils.run_command(command)
+        print(output)
+        assert "Kapacitor Initialized Successfully. Ready to Receive the Data..." in output
+    except Exception as e:
+        pytest.fail(f"Time Series Analytics Microservice did not start: {e}")
+
+## REST API Tests
+
+def test_health_check():
+    # Get health check /health endpoint
+    print("Testing health check endpoint in utils...")
+    utils.health_check(TS_HELM_PORT)
+
+# Post the OPC UA alerts /opcua_alerts endpoint
+def test_opcua_alerts():
+    """
+    Test the OPC UA alerts endpoint of the Time Series Analytics service.
+    """
+    utils.opcua_alerts(TS_HELM_PORT)
+
+# Post valid input data to the /input endpoint
+def test_input_endpoint():
+    """
+    Test the input endpoint of the Time Series Analytics service.
+    """
+    utils.input_endpoint(TS_HELM_PORT)
+
+# Post invalid input data to the /input endpoint
+def test_input_endpoint_invalid_data():
+    """
+    Test the input endpoint of the Time Series Analytics service with invalid data.
+    """
+    utils.input_endpoint_invalid_data(TS_HELM_PORT)
+    utils.input_endpoint_no_data(TS_HELM_PORT)
+
+def test_get_config_endpoint():
+    """
+    Test the config endpoint of the Time Series Analytics service.
+    """
+    utils.get_config_endpoint(TS_HELM_PORT)
+
+# Post config data to the /config endpoint
+def test_post_config_endpoint():
+    """
+    Test the config endpoint of the Time Series Analytics service.
+    """
+    podnames = get_pod_names(NAMESPACE)
+    print(f"Found pods in namespace '{NAMESPACE}': {podnames}")
+    if not podnames:
+        pytest.fail("No pods found in the namespace.")
+    cmd = f"kubectl logs -n {NAMESPACE} {podnames[0]}"
+    utils.post_config_endpoint(TS_HELM_PORT, cmd)
+
+# Test concurrent API requests
+def test_concurrent_api_requests():
+    """
+    Test concurrent API requests to the Time Series Analytics service.
+    """
+    utils.concurrent_api_requests(TS_HELM_PORT)
+
+# Post invalid config data to the /config endpoint
+def test_post_invalid_config_endpoint():
+    """
+    Test the config endpoint of the Time Series Analytics service with an invalid configuration.
+ """ + podnames = get_pod_names(NAMESPACE) + print(f"Found pods in namespace '{NAMESPACE}': {podnames}") + if not podnames: + pytest.fail("No pods found in the namespace.") + cmd = f"kubectl logs -n {NAMESPACE} {podnames[0]}" + utils.post_invalid_config_endpoint(TS_HELM_PORT, cmd) + +def test_temperature_input(): + """ + Test to check if the temperature simulator script runs without error. + """ + os.chdir(TS_DIR) + command = ["timeout", "20", "python3", "simulator/temperature_input.py", "--port", str(TS_HELM_PORT)] + try: + print("Starting temperature simulator...") + # Run the simulator for 20 seconds, then terminate + subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + time.sleep(10) + print("Temperature simulator started successfully.") + pod_names = get_pod_names(NAMESPACE) + print(f"Found pods in namespace '{NAMESPACE}': {pod_names}") + if not pod_names: + pytest.fail("No pods found in the namespace.") + command = f"kubectl logs -n {NAMESPACE} {pod_names[0]} 2>&1 | grep -i 'is outside the range 20-25.'" + time.sleep(10) # Wait for the simulator to produce output + print("Checking Time Series Analytics Microservice logs for temperature data...") + output = utils.run_command(command) + assert "is outside the range 20-25." in output + except RuntimeError as e: + pytest.fail(f"Time Series Analytics Microservice failed for the temperature input data: {e}")