diff --git a/manufacturing-ai-suite/industrial-edge-insights-vision/apps/pallet-defect-detection/benchmark_gpu_payload.json b/manufacturing-ai-suite/industrial-edge-insights-vision/apps/pallet-defect-detection/benchmark_gpu_payload.json new file mode 100644 index 000000000..c49a0a443 --- /dev/null +++ b/manufacturing-ai-suite/industrial-edge-insights-vision/apps/pallet-defect-detection/benchmark_gpu_payload.json @@ -0,0 +1,21 @@ +[ + { + "pipeline": "pallet_defect_detection_benchmarking", + "payload":{ + "parameters": { + "detection-properties": { + "model": "/home/pipeline-server/resources/models/pallet-defect-detection/deployment/Detection/model/model.xml", + "device": "GPU", + "batch-size": 8, + "model-instance-id": "instgpu0", + "inference-interval": 3, + "inference-region": 0, + "nireq": 2, + "ie-config": "NUM_STREAMS=2", + "pre-process-backend": "va-surface-sharing", + "threshold": 0.7 + } + } + } + } +] diff --git a/manufacturing-ai-suite/industrial-edge-insights-vision/apps/pallet-defect-detection/configs/pipeline-server-config.json b/manufacturing-ai-suite/industrial-edge-insights-vision/apps/pallet-defect-detection/configs/pipeline-server-config.json index 31a73442d..493cb2e41 100644 --- a/manufacturing-ai-suite/industrial-edge-insights-vision/apps/pallet-defect-detection/configs/pipeline-server-config.json +++ b/manufacturing-ai-suite/industrial-edge-insights-vision/apps/pallet-defect-detection/configs/pipeline-server-config.json @@ -117,7 +117,25 @@ } }, "auto_start": false + }, + { + "name": "pallet_defect_detection_benchmarking", + "source": "gstreamer", + "queue_maxsize": 50, + "pipeline": "multifilesrc location=/home/pipeline-server/resources/videos/warehouse_looped.avi loop=true ! parsebin ! vah264dec ! vapostproc ! video/x-raw(memory:VAMemory) ! gvadetect name=detection ! queue ! gvafpscounter ! 
appsink sync=false async=false", + "parameters": { + "type": "object", + "properties": { + "detection-properties": { + "element": { + "name": "detection", + "format": "element-properties" + } + } + } + }, + "auto_start": false } - ] + ] } } \ No newline at end of file diff --git a/manufacturing-ai-suite/industrial-edge-insights-vision/apps/pallet-defect-detection/docs/user-guide/how-to-benchmark.md b/manufacturing-ai-suite/industrial-edge-insights-vision/apps/pallet-defect-detection/docs/user-guide/how-to-benchmark.md new file mode 100644 index 000000000..46840a2a5 --- /dev/null +++ b/manufacturing-ai-suite/industrial-edge-insights-vision/apps/pallet-defect-detection/docs/user-guide/how-to-benchmark.md @@ -0,0 +1,186 @@ +# How to Run Benchmarking + +This guide provides step-by-step instructions for running the benchmarking script to evaluate the performance of the Pallet Defect Detection application. The script can help you determine the maximum number of concurrent video streams your system can handle while meeting a specific performance target (e.g., frames per second). + +## Overview of the Benchmarking Script + +The `benchmark_start.sh` script, located in the `manufacturing-ai-suite/industrial-edge-insights-vision` directory, automates the process of running performance tests on the DL Streamer Pipeline Server. It offers two primary modes of operation: + +* **Fixed Stream Mode:** Runs a specified number of concurrent video processing pipelines. This mode is useful for testing a specific, known workload. +* **Stream-Density Mode:** Automatically determines the maximum number of streams that can be processed while maintaining a target Frames Per Second (FPS). This is ideal for capacity planning and finding the performance limits of your hardware. 
+ +## Prerequisites + +Before running the benchmarking script, ensure you have the following: + +* A successful deployment of the Pallet Defect Detection application using Docker Compose or Helm, as described in the [Get Started](./get-started.md) guide. +* The `jq` command-line JSON processor must be installed. You can install it on Ubuntu with: + ```bash + sudo apt-get update && sudo apt-get install -y jq + ``` +* The `bc` calculator for floating-point arithmetic must be installed: + ```bash + sudo apt-get install -y bc + ``` +* The `benchmark_start.sh` script and payload configuration files must be available on your system. + +## Understanding the Payload File + +The benchmarking script requires a JSON payload file to configure the pipelines that will be tested. These payload files are located within the `apps/pallet-defect-detection/` directory. The script uses this file to specify the pipeline to run and the configuration for the video source, destination, and parameters. + +Here is an example of a payload file, `benchmark_gpu_payload.json`: + +```json +[ + { + "pipeline": "pallet_defect_detection_benchmarking", + "payload": { + "parameters": { + "detection-properties": { + "model": "/home/pipeline-server/resources/models/pallet-defect-detection/deployment/Detection/model/model.xml", + "device": "GPU", + "batch-size": 8, + "model-instance-id": "instgpu0", + "inference-interval": 3, + "inference-region": 0, + "nireq": 2, + "ie-config": "NUM_STREAMS=2", + "pre-process-backend": "va-surface-sharing", + "threshold": 0.7 + } + } + } + } +] +``` + +* `pipeline`: The name of the pipeline to execute (e.g., `pallet_defect_detection_benchmarking`). +* `payload`: An object containing the configuration for the pipeline instance. + * `parameters`: Allows you to set pipeline-specific parameters, such as the `device` (CPU, GPU, or AUTO) and other model-related properties including batch size, inference intervals, and OpenVINO™ configurations. 
+ +## Step 1: Configure the Benchmarking Script + +Before running the script, you may need to adjust the `DLSPS_NODE_IP` variable within `benchmark_start.sh` if your DL Streamer Pipeline Server is not running on `localhost`. + +```bash +# Edit the benchmark_start.sh script if needed +nano benchmark_start.sh +``` + +Change `DLSPS_NODE_IP="localhost"` to the correct IP address of the node where the service is exposed. + +## Step 2: Run the Benchmarking Script + +The script can be run in two different modes. + +Navigate to the `manufacturing-ai-suite/industrial-edge-insights-vision` directory to run the script. + +```bash +cd edge-ai-suites/manufacturing-ai-suite/industrial-edge-insights-vision/ +``` + +### Fixed Stream Mode + +In this mode, you specify the exact number of pipelines to run concurrently. This is useful for simulating a known workload. + +**To run 4 pipelines simultaneously using the GPU payload:** + +```bash +./benchmark_start.sh -p apps/pallet-defect-detection/benchmark_gpu_payload.json -n 4 +``` + +* `-p apps/pallet-defect-detection/benchmark_gpu_payload.json`: Specifies the path to your payload configuration file. +* `-n 4`: Sets the number of concurrent pipelines to run. + +The script will start the 4 pipelines and print their status. You can then monitor their performance via Grafana at `https://localhost/grafana`, by using the pipeline status API endpoint, or by checking the FPS directly: + +```bash +curl -k https://localhost/api/pipelines/status +``` + +### Stream-Density Mode + +In this mode, the script automatically finds the maximum number of streams that can run while maintaining a target FPS. This is useful for determining the capacity of your system. 
+ +**To find the maximum number of streams that can achieve at least 28.5 FPS on GPU:** + +```bash +./benchmark_start.sh -p apps/pallet-defect-detection/benchmark_gpu_payload.json -t 28.5 +``` + +* `-p apps/pallet-defect-detection/benchmark_gpu_payload.json`: Specifies the path to your payload configuration file. +* `-t 28.5`: Sets the target average FPS per stream. The default is `28.5`. +* `-i 60`: (Optional) Sets the monitoring interval in seconds for collecting FPS data. The default is `60`. + +The script will start with one stream, measure the FPS, and if the target is met, it will stop, add another stream, and repeat the process. This continues until the average FPS drops below the target. The script will then report the maximum number of streams that successfully met the performance goal. + +**Example Output:** + +``` +====================================================== +✅ FINAL RESULT: Stream-Density Benchmark Completed! + Maximum 3 stream(s) can achieve the target FPS of 28.5. + + Average FPS per stream for the optimal configuration: + - Stream 1: 29.2 FPS + - Stream 2: 28.8 FPS + - Stream 3: 28.6 FPS +====================================================== +``` + +### How Stream Performance is Evaluated + +In Stream-Density Mode, the script evaluates if the system can sustain a target FPS across all concurrent streams. The process is as follows: + +1. **Individual Stream Monitoring:** The script monitors each running pipeline instance (stream) independently. +2. **Sampling:** For the duration of the monitoring interval (e.g., 60 seconds), it samples the `avg_fps` value from each stream every 2 seconds. +3. **Averaging per Stream:** After the interval, it calculates the average FPS for *each stream* based on the samples collected for that specific stream. +4. **Validation:** The performance goal is considered met only if **every single stream's** calculated average FPS is greater than or equal to the target FPS. 
If even one stream falls below the target, the test fails for that number of concurrent streams. + +This ensures that the reported optimal stream count represents a stable configuration where all streams are performing adequately, rather than relying on a combined average that could hide underperforming streams. + +## Step 3: Stop the Benchmarking + +The benchmarking script automatically stops all pipelines when running in Stream-Density mode. However, if you're running in Fixed Stream mode or need to manually stop pipelines, you can stop all running pipelines by interrupting the script with `Ctrl+C` or by running: + +```bash +curl -k -X DELETE https://localhost/api/pipelines +``` + +Alternatively, you can stop individual pipelines by their ID: + +```bash +# Get pipeline IDs +curl -k https://localhost/api/pipelines/status + +# Stop specific pipeline +curl -k -X DELETE https://localhost/api/pipelines/ +``` + +## Performance Optimization Tips + +### GPU Optimization + +* **Batch Size:** Increase `batch-size` for better GPU utilization, but be mindful of memory constraints. +* **Parallel Inference:** Tune `nireq` parameter to match your GPU's parallel processing capabilities. +* **Stream Configuration:** Adjust `NUM_STREAMS` in `ie-config` to optimize for your specific GPU model. + +### CPU Optimization + +* **Inference Interval:** Increase `inference-interval` to reduce CPU load if real-time processing isn't critical. +* **Device Selection:** Use `device: "AUTO"` to let OpenVINO™ automatically select the best device. + +### Memory Optimization + +* **Pre-process Backend:** Use `va-surface-sharing` for GPU memory efficiency. +* **Model Precision:** Consider using INT8 or FP16 model precision for better performance. + +## Summary + +In this guide, you learned how to use the `benchmark_start.sh` script to run performance tests on your Pallet Defect Detection application. 
You can now measure performance for a fixed number of streams or automatically determine the maximum stream density your system can support. + +Key takeaways: +* Use Fixed Stream Mode for testing known workloads +* Use Stream-Density Mode for capacity planning and finding system limits +* Monitor individual stream performance to ensure consistent quality +* Optimize payload configurations based on your hardware capabilities diff --git a/manufacturing-ai-suite/industrial-edge-insights-vision/apps/pallet-defect-detection/docs/user-guide/index.rst b/manufacturing-ai-suite/industrial-edge-insights-vision/apps/pallet-defect-detection/docs/user-guide/index.rst index ea022f3fd..de9fcd166 100644 --- a/manufacturing-ai-suite/industrial-edge-insights-vision/apps/pallet-defect-detection/docs/user-guide/index.rst +++ b/manufacturing-ai-suite/industrial-edge-insights-vision/apps/pallet-defect-detection/docs/user-guide/index.rst @@ -91,6 +91,7 @@ This sample application offers the following features: how-to-view-telemetry-data how-to-use-gpu-for-inference how-to-start-mqtt-publisher + how-to-benchmark api-reference environment-variables diff --git a/manufacturing-ai-suite/industrial-edge-insights-vision/apps/pcb-anomaly-detection/benchmark_gpu_payload.json b/manufacturing-ai-suite/industrial-edge-insights-vision/apps/pcb-anomaly-detection/benchmark_gpu_payload.json new file mode 100644 index 000000000..14c8d1f78 --- /dev/null +++ b/manufacturing-ai-suite/industrial-edge-insights-vision/apps/pcb-anomaly-detection/benchmark_gpu_payload.json @@ -0,0 +1,20 @@ +[ + { + "pipeline": "pcb_anomaly_detection_benchmarking", + "payload":{ + "parameters": { + "classification-properties": { + "model": "/home/pipeline-server/resources/models/pcb-anomaly-detection/deployment/Anomaly classification/model/model.xml", + "device": "GPU", + "batch-size": 8, + "model-instance-id": "instgpu0", + "inference-interval": 3, + "inference-region": 0, + "nireq": 2, + "ie-config": "NUM_STREAMS=2", + 
"pre-process-backend": "va-surface-sharing" + } + } + } + } +] diff --git a/manufacturing-ai-suite/industrial-edge-insights-vision/apps/pcb-anomaly-detection/configs/pipeline-server-config.json b/manufacturing-ai-suite/industrial-edge-insights-vision/apps/pcb-anomaly-detection/configs/pipeline-server-config.json index 96b581a03..67dc3a61b 100644 --- a/manufacturing-ai-suite/industrial-edge-insights-vision/apps/pcb-anomaly-detection/configs/pipeline-server-config.json +++ b/manufacturing-ai-suite/industrial-edge-insights-vision/apps/pcb-anomaly-detection/configs/pipeline-server-config.json @@ -117,6 +117,24 @@ } }, "auto_start": false + }, + { + "name": "pcb_anomaly_detection_benchmarking", + "source": "gstreamer", + "queue_maxsize": 50, + "pipeline": "multifilesrc location=/home/pipeline-server/resources/videos/anomalib_pcb_test_looped.avi loop=true ! parsebin ! vah264dec ! vapostproc ! video/x-raw(memory:VAMemory) ! gvaclassify name=classification ! queue ! gvafpscounter ! appsink sync=false async=false", + "parameters": { + "type": "object", + "properties": { + "classification-properties": { + "element": { + "name": "classification", + "format": "element-properties" + } + } + } + }, + "auto_start": false } ] } diff --git a/manufacturing-ai-suite/industrial-edge-insights-vision/apps/pcb-anomaly-detection/docs/user-guide/how-to-benchmark.md b/manufacturing-ai-suite/industrial-edge-insights-vision/apps/pcb-anomaly-detection/docs/user-guide/how-to-benchmark.md new file mode 100644 index 000000000..54912a302 --- /dev/null +++ b/manufacturing-ai-suite/industrial-edge-insights-vision/apps/pcb-anomaly-detection/docs/user-guide/how-to-benchmark.md @@ -0,0 +1,186 @@ +# How to Run Benchmarking + +This guide provides step-by-step instructions for running the benchmarking script to evaluate the performance of the PCB Anomaly Detection application. 
The script can help you determine the maximum number of concurrent video streams your system can handle while meeting a specific performance target (e.g., frames per second). + +## Overview of the Benchmarking Script + +The `benchmark_start.sh` script, located in the `manufacturing-ai-suite/industrial-edge-insights-vision` directory, automates the process of running performance tests on the DL Streamer Pipeline Server. It offers two primary modes of operation: + +* **Fixed Stream Mode:** Runs a specified number of concurrent video processing pipelines. This mode is useful for testing a specific, known workload. +* **Stream-Density Mode:** Automatically determines the maximum number of streams that can be processed while maintaining a target Frames Per Second (FPS). This is ideal for capacity planning and finding the performance limits of your hardware. + +## Prerequisites + +Before running the benchmarking script, ensure you have the following: + +* A successful deployment of the PCB Anomaly Detection application using Docker Compose or Helm, as described in the [Get Started](./get-started.md) guide. +* The `jq` command-line JSON processor must be installed. You can install it on Ubuntu with: + ```bash + sudo apt-get update && sudo apt-get install -y jq + ``` +* The `bc` calculator for floating-point arithmetic must be installed: + ```bash + sudo apt-get install -y bc + ``` +* The `benchmark_start.sh` script and payload configuration files must be available on your system. + +## Understanding the Payload File + +The benchmarking script requires a JSON payload file to configure the pipelines that will be tested. These payload files are located within the `apps/pcb-anomaly-detection/` directory. The script uses this file to specify the pipeline to run and the configuration for the video source, destination, and parameters. 
+ +Here is an example of a payload file, `benchmark_gpu_payload.json`: + +```json +[ + { + "pipeline": "pcb_anomaly_detection_benchmarking", + "payload": { + "parameters": { + "classification-properties": { + "model": "/home/pipeline-server/resources/models/pcb-anomaly-detection/deployment/Anomaly classification/model/model.xml", + "device": "GPU", + "batch-size": 8, + "model-instance-id": "instgpu0", + "inference-interval": 3, + "inference-region": 0, + "nireq": 2, + "ie-config": "NUM_STREAMS=2", + "pre-process-backend": "va-surface-sharing" + } + } + } + } +] +``` + +* `pipeline`: The name of the pipeline to execute (e.g., `pcb_anomaly_detection_benchmarking`). +* `payload`: An object containing the configuration for the pipeline instance. + * `parameters`: Allows you to set pipeline-specific parameters, such as the `device` (CPU, GPU, or AUTO) and other model-related properties including batch size, inference intervals, and OpenVINO™ configurations. + +## Step 1: Configure the Benchmarking Script + +Before running the script, you may need to adjust the `DLSPS_NODE_IP` variable within `benchmark_start.sh` if your DL Streamer Pipeline Server is not running on `localhost`. + +```bash +# Edit the benchmark_start.sh script if needed +nano benchmark_start.sh +``` + +Change `DLSPS_NODE_IP="localhost"` to the correct IP address of the node where the service is exposed. + +## Step 2: Run the Benchmarking Script + +The script can be run in two different modes. + +Navigate to the `manufacturing-ai-suite/industrial-edge-insights-vision` directory to run the script. + +```bash +cd edge-ai-suites/manufacturing-ai-suite/industrial-edge-insights-vision/ +``` + +### Fixed Stream Mode + +In this mode, you specify the exact number of pipelines to run concurrently. This is useful for simulating a known workload. 
+ +**To run 4 pipelines simultaneously using the GPU payload:** + +```bash +./benchmark_start.sh -p apps/pcb-anomaly-detection/benchmark_gpu_payload.json -n 4 +``` + +* `-p apps/pcb-anomaly-detection/benchmark_gpu_payload.json`: Specifies the path to your payload configuration file. +* `-n 4`: Sets the number of concurrent pipelines to run. + +The script will start the 4 pipelines and print their status. You can then monitor their performance via Grafana at `https://localhost/grafana`, by using the pipeline status API endpoint, or by checking the FPS directly: + +```bash +curl -k https://localhost/api/pipelines/status +``` + +### Stream-Density Mode + +In this mode, the script automatically finds the maximum number of streams that can run while maintaining a target FPS. This is useful for determining the capacity of your system. + +**To find the maximum number of streams that can achieve at least 28.5 FPS on GPU:** + +```bash +./benchmark_start.sh -p apps/pcb-anomaly-detection/benchmark_gpu_payload.json -t 28.5 +``` + +* `-p apps/pcb-anomaly-detection/benchmark_gpu_payload.json`: Specifies the path to your payload configuration file. +* `-t 28.5`: Sets the target average FPS per stream. The default is `28.5`. +* `-i 60`: (Optional) Sets the monitoring interval in seconds for collecting FPS data. The default is `60`. + +The script will start with one stream, measure the FPS, and if the target is met, it will stop, add another stream, and repeat the process. This continues until the average FPS drops below the target. The script will then report the maximum number of streams that successfully met the performance goal. + +**Example Output:** + +``` +====================================================== +✅ FINAL RESULT: Stream-Density Benchmark Completed! + Maximum 3 stream(s) can achieve the target FPS of 28.5. 
+ + Average FPS per stream for the optimal configuration: + - Stream 1: 29.2 FPS + - Stream 2: 28.8 FPS + - Stream 3: 28.6 FPS +====================================================== +``` + +### How Stream Performance is Evaluated + +In Stream-Density Mode, the script evaluates if the system can sustain a target FPS across all concurrent streams. The process is as follows: + +1. **Individual Stream Monitoring:** The script monitors each running pipeline instance (stream) independently. +2. **Sampling:** For the duration of the monitoring interval (e.g., 60 seconds), it samples the `avg_fps` value from each stream every 2 seconds. +3. **Averaging per Stream:** After the interval, it calculates the average FPS for *each stream* based on the samples collected for that specific stream. +4. **Validation:** The performance goal is considered met only if **every single stream's** calculated average FPS is greater than or equal to the target FPS. If even one stream falls below the target, the test fails for that number of concurrent streams. + +This ensures that the reported optimal stream count represents a stable configuration where all streams are performing adequately, rather than relying on a combined average that could hide underperforming streams. + +## Step 3: Stop the Benchmarking + +The benchmarking script automatically stops all pipelines when running in Stream-Density mode. 
However, if you're running in Fixed Stream mode or need to manually stop pipelines, you can stop all running pipelines by interrupting the script with `Ctrl+C` or by running: + +```bash +curl -k -X DELETE https://localhost/api/pipelines +``` + +Alternatively, you can stop individual pipelines by their ID: + +```bash +# Get pipeline IDs +curl -k https://localhost/api/pipelines/status + +# Stop specific pipeline +curl -k -X DELETE https://localhost/api/pipelines/ +``` + +## Performance Optimization Tips + +### GPU Optimization + +* **Batch Size:** Increase `batch-size` for better GPU utilization, but be mindful of memory constraints. +* **Parallel Inference:** Tune `nireq` parameter to match your GPU's parallel processing capabilities. +* **Stream Configuration:** Adjust `NUM_STREAMS` in `ie-config` to optimize for your specific GPU model. + +### CPU Optimization + +* **Inference Interval:** Increase `inference-interval` to reduce CPU load if real-time processing isn't critical. +* **Device Selection:** Use `device: "AUTO"` to let OpenVINO™ automatically select the best device. + +### Memory Optimization + +* **Pre-process Backend:** Use `va-surface-sharing` for GPU memory efficiency. +* **Model Precision:** Consider using INT8 or FP16 model precision for better performance. + +## Summary + +In this guide, you learned how to use the `benchmark_start.sh` script to run performance tests on your PCB Anomaly Detection application. You can now measure performance for a fixed number of streams or automatically determine the maximum stream density your system can support. 
+ +Key takeaways: +* Use Fixed Stream Mode for testing known workloads +* Use Stream-Density Mode for capacity planning and finding system limits +* Monitor individual stream performance to ensure consistent quality +* Optimize payload configurations based on your hardware capabilities \ No newline at end of file diff --git a/manufacturing-ai-suite/industrial-edge-insights-vision/apps/pcb-anomaly-detection/docs/user-guide/index.rst b/manufacturing-ai-suite/industrial-edge-insights-vision/apps/pcb-anomaly-detection/docs/user-guide/index.rst index ca7ab2238..50d7dc57c 100644 --- a/manufacturing-ai-suite/industrial-edge-insights-vision/apps/pcb-anomaly-detection/docs/user-guide/index.rst +++ b/manufacturing-ai-suite/industrial-edge-insights-vision/apps/pcb-anomaly-detection/docs/user-guide/index.rst @@ -88,6 +88,7 @@ This sample application offers the following features: how-to-view-telemetry-data how-to-use-gpu-for-inference how-to-start-mqtt-publisher + how-to-benchmark api-reference environment-variables release_notes/Overview diff --git a/manufacturing-ai-suite/industrial-edge-insights-vision/apps/weld-porosity/benchmark_gpu_payload.json b/manufacturing-ai-suite/industrial-edge-insights-vision/apps/weld-porosity/benchmark_gpu_payload.json new file mode 100644 index 000000000..a5b60a5db --- /dev/null +++ b/manufacturing-ai-suite/industrial-edge-insights-vision/apps/weld-porosity/benchmark_gpu_payload.json @@ -0,0 +1,20 @@ +[ + { + "pipeline": "weld_porosity_classification_benchmarking", + "payload":{ + "parameters": { + "classification-properties": { + "model": "/home/pipeline-server/resources/models/weld-porosity/deployment/Classification/model/model.xml", + "device": "GPU", + "batch-size": 8, + "model-instance-id": "instgpu0", + "inference-interval": 3, + "inference-region": 0, + "nireq": 2, + "ie-config": "NUM_STREAMS=2", + "pre-process-backend": "va-surface-sharing" + } + } + } + } +] diff --git 
a/manufacturing-ai-suite/industrial-edge-insights-vision/apps/weld-porosity/configs/pipeline-server-config.json b/manufacturing-ai-suite/industrial-edge-insights-vision/apps/weld-porosity/configs/pipeline-server-config.json index ae78a3d66..71eba6c00 100644 --- a/manufacturing-ai-suite/industrial-edge-insights-vision/apps/weld-porosity/configs/pipeline-server-config.json +++ b/manufacturing-ai-suite/industrial-edge-insights-vision/apps/weld-porosity/configs/pipeline-server-config.json @@ -117,6 +117,24 @@ } }, "auto_start": false + }, + { + "name": "weld_porosity_classification_benchmarking", + "source": "gstreamer", + "queue_maxsize": 50, + "pipeline": "multifilesrc location=/home/pipeline-server/resources/videos/welding_looped.avi loop=true ! parsebin ! vah264dec ! vapostproc ! video/x-raw(memory:VAMemory) ! gvaclassify name=classification ! queue ! gvafpscounter ! appsink sync=false async=false", + "parameters": { + "type": "object", + "properties": { + "classification-properties": { + "element": { + "name": "classification", + "format": "element-properties" + } + } + } + }, + "auto_start": false } ] } diff --git a/manufacturing-ai-suite/industrial-edge-insights-vision/apps/weld-porosity/docs/user-guide/how-to-benchmark.md b/manufacturing-ai-suite/industrial-edge-insights-vision/apps/weld-porosity/docs/user-guide/how-to-benchmark.md new file mode 100644 index 000000000..e4d58bb2d --- /dev/null +++ b/manufacturing-ai-suite/industrial-edge-insights-vision/apps/weld-porosity/docs/user-guide/how-to-benchmark.md @@ -0,0 +1,186 @@ +# How to Run Benchmarking + +This guide provides step-by-step instructions for running the benchmarking script to evaluate the performance of the Weld Porosity Detection application. The script can help you determine the maximum number of concurrent video streams your system can handle while meeting a specific performance target (e.g., frames per second). 
+ +## Overview of the Benchmarking Script + +The `benchmark_start.sh` script, located in the `manufacturing-ai-suite/industrial-edge-insights-vision` directory, automates the process of running performance tests on the DL Streamer Pipeline Server. It offers two primary modes of operation: + +* **Fixed Stream Mode:** Runs a specified number of concurrent video processing pipelines. This mode is useful for testing a specific, known workload. +* **Stream-Density Mode:** Automatically determines the maximum number of streams that can be processed while maintaining a target Frames Per Second (FPS). This is ideal for capacity planning and finding the performance limits of your hardware. + +## Prerequisites + +Before running the benchmarking script, ensure you have the following: + +* A successful deployment of the Weld Porosity Detection application using Docker Compose or Helm, as described in the [Get Started](./get-started.md) guide. +* The `jq` command-line JSON processor must be installed. You can install it on Ubuntu with: + ```bash + sudo apt-get update && sudo apt-get install -y jq + ``` +* The `bc` calculator for floating-point arithmetic must be installed: + ```bash + sudo apt-get install -y bc + ``` +* The `benchmark_start.sh` script and payload configuration files must be available on your system. + +## Understanding the Payload File + +The benchmarking script requires a JSON payload file to configure the pipelines that will be tested. These payload files are located within the `apps/weld-porosity/` directory. The script uses this file to specify the pipeline to run and the configuration for the video source, destination, and parameters. 
+ +Here is an example of a payload file, `benchmark_gpu_payload.json`: + +```json +[ + { + "pipeline": "weld_porosity_classification_benchmarking", + "payload": { + "parameters": { + "classification-properties": { + "model": "/home/pipeline-server/resources/models/weld-porosity/deployment/Classification/model/model.xml", + "device": "GPU", + "batch-size": 8, + "model-instance-id": "instgpu0", + "inference-interval": 3, + "inference-region": 0, + "nireq": 2, + "ie-config": "NUM_STREAMS=2", + "pre-process-backend": "va-surface-sharing" + } + } + } + } +] +``` + +* `pipeline`: The name of the pipeline to execute (e.g., `weld_porosity_classification_benchmarking`). +* `payload`: An object containing the configuration for the pipeline instance. + * `parameters`: Allows you to set pipeline-specific parameters, such as the `device` (CPU, GPU, or AUTO) and other model-related properties including batch size, inference intervals, and OpenVINO™ configurations. + +## Step 1: Configure the Benchmarking Script + +Before running the script, you may need to adjust the `DLSPS_NODE_IP` variable within `benchmark_start.sh` if your DL Streamer Pipeline Server is not running on `localhost`. + +```bash +# Edit the benchmark_start.sh script if needed +nano benchmark_start.sh +``` + +Change `DLSPS_NODE_IP="localhost"` to the correct IP address of the node where the service is exposed. + +## Step 2: Run the Benchmarking Script + +The script can be run in two different modes. + +Navigate to the `manufacturing-ai-suite/industrial-edge-insights-vision` directory to run the script. + +```bash +cd edge-ai-suites/manufacturing-ai-suite/industrial-edge-insights-vision/ +``` + +### Fixed Stream Mode + +In this mode, you specify the exact number of pipelines to run concurrently. This is useful for simulating a known workload. 
+ +**To run 4 pipelines simultaneously using the GPU payload:** + +```bash +./benchmark_start.sh -p apps/weld-porosity/benchmark_gpu_payload.json -n 4 +``` + +* `-p apps/weld-porosity/benchmark_gpu_payload.json`: Specifies the path to your payload configuration file. +* `-n 4`: Sets the number of concurrent pipelines to run. + +The script will start the 4 pipelines and print their status. You can then monitor their performance via Grafana at `https://localhost/grafana`, by using the pipeline status API endpoint, or by checking the FPS directly: + +```bash +curl -k https://localhost/api/pipelines/status +``` + +### Stream-Density Mode + +In this mode, the script automatically finds the maximum number of streams that can run while maintaining a target FPS. This is useful for determining the capacity of your system. + +**To find the maximum number of streams that can achieve at least 28.5 FPS on GPU:** + +```bash +./benchmark_start.sh -p apps/weld-porosity/benchmark_gpu_payload.json -t 28.5 +``` + +* `-p apps/weld-porosity/benchmark_gpu_payload.json`: Specifies the path to your payload configuration file. +* `-t 28.5`: Sets the target average FPS per stream. The default is `28.5`. +* `-i 60`: (Optional) Sets the monitoring interval in seconds for collecting FPS data. The default is `60`. + +The script will start with one stream, measure the FPS, and if the target is met, it will stop, add another stream, and repeat the process. This continues until the average FPS drops below the target. The script will then report the maximum number of streams that successfully met the performance goal. + +**Example Output:** + +``` +====================================================== +✅ FINAL RESULT: Stream-Density Benchmark Completed! + Maximum 3 stream(s) can achieve the target FPS of 28.5. 
+ + Average FPS per stream for the optimal configuration: + - Stream 1: 29.2 FPS + - Stream 2: 28.8 FPS + - Stream 3: 28.6 FPS +====================================================== +``` + +### How Stream Performance is Evaluated + +In Stream-Density Mode, the script evaluates if the system can sustain a target FPS across all concurrent streams. The process is as follows: + +1. **Individual Stream Monitoring:** The script monitors each running pipeline instance (stream) independently. +2. **Sampling:** For the duration of the monitoring interval (e.g., 60 seconds), it samples the `avg_fps` value from each stream every 2 seconds. +3. **Averaging per Stream:** After the interval, it calculates the average FPS for *each stream* based on the samples collected for that specific stream. +4. **Validation:** The performance goal is considered met only if **every single stream's** calculated average FPS is greater than or equal to the target FPS. If even one stream falls below the target, the test fails for that number of concurrent streams. + +This ensures that the reported optimal stream count represents a stable configuration where all streams are performing adequately, rather than relying on a combined average that could hide underperforming streams. + +## Step 3: Stop the Benchmarking + +The benchmarking script automatically stops all pipelines when running in Stream-Density mode. 
However, if you're running in Fixed Stream mode or need to manually stop pipelines, you can stop all running pipelines by interrupting the script with `Ctrl+C` or by running:
+
+```bash
+curl -k -X DELETE https://localhost/api/pipelines
+```
+
+Alternatively, you can stop individual pipelines by their ID:
+
+```bash
+# Get pipeline IDs
+curl -k https://localhost/api/pipelines/status
+
+# Stop specific pipeline
+curl -k -X DELETE https://localhost/api/pipelines/<instance_id>
+```
+
+## Performance Optimization Tips
+
+### GPU Optimization
+
+* **Batch Size:** Increase `batch-size` for better GPU utilization, but be mindful of memory constraints.
+* **Parallel Inference:** Tune `nireq` parameter to match your GPU's parallel processing capabilities.
+* **Stream Configuration:** Adjust `NUM_STREAMS` in `ie-config` to optimize for your specific GPU model.
+
+### CPU Optimization
+
+* **Inference Interval:** Increase `inference-interval` to reduce CPU load if real-time processing isn't critical.
+* **Device Selection:** Use `device: "AUTO"` to let OpenVINO™ automatically select the best device.
+
+### Memory Optimization
+
+* **Pre-process Backend:** Use `va-surface-sharing` for GPU memory efficiency.
+* **Model Precision:** Consider using INT8 or FP16 model precision for better performance.
+
+## Summary
+
+In this guide, you learned how to use the `benchmark_start.sh` script to run performance tests on your Weld Porosity Detection application. You can now measure performance for a fixed number of streams or automatically determine the maximum stream density your system can support. 
+ +Key takeaways: +* Use Fixed Stream Mode for testing known workloads +* Use Stream-Density Mode for capacity planning and finding system limits +* Monitor individual stream performance to ensure consistent quality +* Optimize payload configurations based on your hardware capabilities \ No newline at end of file diff --git a/manufacturing-ai-suite/industrial-edge-insights-vision/apps/weld-porosity/docs/user-guide/index.rst b/manufacturing-ai-suite/industrial-edge-insights-vision/apps/weld-porosity/docs/user-guide/index.rst index f630f691f..83ae394bb 100644 --- a/manufacturing-ai-suite/industrial-edge-insights-vision/apps/weld-porosity/docs/user-guide/index.rst +++ b/manufacturing-ai-suite/industrial-edge-insights-vision/apps/weld-porosity/docs/user-guide/index.rst @@ -91,6 +91,7 @@ This sample application offers the following features: how-to-start-mqtt-publisher how-to-use-gpu-for-inference how-to-use-opcua-publisher + how-to-benchmark api-reference environment-variables release_notes/Overview diff --git a/manufacturing-ai-suite/industrial-edge-insights-vision/apps/worker-safety-gear-detection/benchmark_gpu_payload.json b/manufacturing-ai-suite/industrial-edge-insights-vision/apps/worker-safety-gear-detection/benchmark_gpu_payload.json new file mode 100644 index 000000000..dd9c1087d --- /dev/null +++ b/manufacturing-ai-suite/industrial-edge-insights-vision/apps/worker-safety-gear-detection/benchmark_gpu_payload.json @@ -0,0 +1,21 @@ +[ + { + "pipeline": "worker_safety_gear_detection_benchmarking", + "payload":{ + "parameters": { + "detection-properties": { + "model": "/home/pipeline-server/resources/models/worker-safety-gear-detection/deployment/Detection/model/model.xml", + "device": "GPU", + "batch-size": 8, + "model-instance-id": "instgpu0", + "inference-interval": 3, + "inference-region": 0, + "nireq": 2, + "ie-config": "NUM_STREAMS=2", + "pre-process-backend": "va-surface-sharing", + "threshold": 0.7 + } + } + } + } +] diff --git 
a/manufacturing-ai-suite/industrial-edge-insights-vision/apps/worker-safety-gear-detection/configs/pipeline-server-config.json b/manufacturing-ai-suite/industrial-edge-insights-vision/apps/worker-safety-gear-detection/configs/pipeline-server-config.json index 7693dd2b3..8a4639a6a 100644 --- a/manufacturing-ai-suite/industrial-edge-insights-vision/apps/worker-safety-gear-detection/configs/pipeline-server-config.json +++ b/manufacturing-ai-suite/industrial-edge-insights-vision/apps/worker-safety-gear-detection/configs/pipeline-server-config.json @@ -117,6 +117,24 @@ } }, "auto_start": false + }, + { + "name": "worker_safety_gear_detection_benchmarking", + "source": "gstreamer", + "queue_maxsize": 50, + "pipeline": "multifilesrc location=/home/pipeline-server/resources/videos/Safety_Full_Hat_and_Vest_looped.avi loop=true ! parsebin ! vah264dec ! vapostproc ! video/x-raw(memory:VAMemory) ! gvadetect name=detection ! queue ! gvafpscounter ! appsink sync=false async=false", + "parameters": { + "type": "object", + "properties": { + "detection-properties": { + "element": { + "name": "detection", + "format": "element-properties" + } + } + } + }, + "auto_start": false } ] } diff --git a/manufacturing-ai-suite/industrial-edge-insights-vision/apps/worker-safety-gear-detection/docs/user-guide/how-to-benchmark.md b/manufacturing-ai-suite/industrial-edge-insights-vision/apps/worker-safety-gear-detection/docs/user-guide/how-to-benchmark.md new file mode 100644 index 000000000..f3e3d03f6 --- /dev/null +++ b/manufacturing-ai-suite/industrial-edge-insights-vision/apps/worker-safety-gear-detection/docs/user-guide/how-to-benchmark.md @@ -0,0 +1,186 @@ +# How to Run Benchmarking + +This guide provides step-by-step instructions for running the benchmarking script to evaluate the performance of the Worker Safety Gear Detection application. 
The script can help you determine the maximum number of concurrent video streams your system can handle while meeting a specific performance target (e.g., frames per second). + +## Overview of the Benchmarking Script + +The `benchmark_start.sh` script, located in the `manufacturing-ai-suite/industrial-edge-insights-vision` directory, automates the process of running performance tests on the DL Streamer Pipeline Server. It offers two primary modes of operation: + +* **Fixed Stream Mode:** Runs a specified number of concurrent video processing pipelines. This mode is useful for testing a specific, known workload. +* **Stream-Density Mode:** Automatically determines the maximum number of streams that can be processed while maintaining a target Frames Per Second (FPS). This is ideal for capacity planning and finding the performance limits of your hardware. + +## Prerequisites + +Before running the benchmarking script, ensure you have the following: + +* A successful deployment of the Worker Safety Gear Detection application using Docker Compose or Helm, as described in the [Get Started](./get-started.md) guide. +* The `jq` command-line JSON processor must be installed. You can install it on Ubuntu with: + ```bash + sudo apt-get update && sudo apt-get install -y jq + ``` +* The `bc` calculator for floating-point arithmetic must be installed: + ```bash + sudo apt-get install -y bc + ``` +* The `benchmark_start.sh` script and payload configuration files must be available on your system. + +## Understanding the Payload File + +The benchmarking script requires a JSON payload file to configure the pipelines that will be tested. These payload files are located within the `apps/worker-safety-gear-detection/` directory. The script uses this file to specify the pipeline to run and the configuration for the video source, destination, and parameters. 
+ +Here is an example of a payload file, `benchmark_gpu_payload.json`: + +```json +[ + { + "pipeline": "worker_safety_gear_detection_benchmarking", + "payload": { + "parameters": { + "detection-properties": { + "model": "/home/pipeline-server/resources/models/worker-safety-gear-detection/deployment/Detection/model/model.xml", + "device": "GPU", + "batch-size": 8, + "model-instance-id": "instgpu0", + "inference-interval": 3, + "inference-region": 0, + "nireq": 2, + "ie-config": "NUM_STREAMS=2", + "pre-process-backend": "va-surface-sharing", + "threshold": 0.7 + } + } + } + } +] +``` + +* `pipeline`: The name of the pipeline to execute (e.g., `worker_safety_gear_detection_benchmarking`). +* `payload`: An object containing the configuration for the pipeline instance. + * `parameters`: Allows you to set pipeline-specific parameters, such as the `device` (CPU, GPU, or AUTO) and other model-related properties including batch size, inference intervals, and OpenVINO™ configurations. + +## Step 1: Configure the Benchmarking Script + +Before running the script, you may need to adjust the `DLSPS_NODE_IP` variable within `benchmark_start.sh` if your DL Streamer Pipeline Server is not running on `localhost`. + +```bash +# Edit the benchmark_start.sh script if needed +nano benchmark_start.sh +``` + +Change `DLSPS_NODE_IP="localhost"` to the correct IP address of the node where the service is exposed. + +## Step 2: Run the Benchmarking Script + +The script can be run in two different modes. + +Navigate to the `manufacturing-ai-suite/industrial-edge-insights-vision` directory to run the script. + +```bash +cd edge-ai-suites/manufacturing-ai-suite/industrial-edge-insights-vision/ +``` + +### Fixed Stream Mode + +In this mode, you specify the exact number of pipelines to run concurrently. This is useful for simulating a known workload. 
+ +**To run 4 pipelines simultaneously using the GPU payload:** + +```bash +./benchmark_start.sh -p apps/worker-safety-gear-detection/benchmark_gpu_payload.json -n 4 +``` + +* `-p apps/worker-safety-gear-detection/benchmark_gpu_payload.json`: Specifies the path to your payload configuration file. +* `-n 4`: Sets the number of concurrent pipelines to run. + +The script will start the 4 pipelines and print their status. You can then monitor their performance via Grafana at `https://localhost/grafana`, by using the pipeline status API endpoint, or by checking the FPS directly: + +```bash +curl -k https://localhost/api/pipelines/status +``` + +### Stream-Density Mode + +In this mode, the script automatically finds the maximum number of streams that can run while maintaining a target FPS. This is useful for determining the capacity of your system. + +**To find the maximum number of streams that can achieve at least 28.5 FPS on GPU:** + +```bash +./benchmark_start.sh -p apps/worker-safety-gear-detection/benchmark_gpu_payload.json -t 28.5 +``` + +* `-p apps/worker-safety-gear-detection/benchmark_gpu_payload.json`: Specifies the path to your payload configuration file. +* `-t 28.5`: Sets the target average FPS per stream. The default is `28.5`. +* `-i 60`: (Optional) Sets the monitoring interval in seconds for collecting FPS data. The default is `60`. + +The script will start with one stream, measure the FPS, and if the target is met, it will stop, add another stream, and repeat the process. This continues until the average FPS drops below the target. The script will then report the maximum number of streams that successfully met the performance goal. + +**Example Output:** + +``` +====================================================== +✅ FINAL RESULT: Stream-Density Benchmark Completed! + Maximum 3 stream(s) can achieve the target FPS of 28.5. 
+ + Average FPS per stream for the optimal configuration: + - Stream 1: 29.2 FPS + - Stream 2: 28.8 FPS + - Stream 3: 28.6 FPS +====================================================== +``` + +### How Stream Performance is Evaluated + +In Stream-Density Mode, the script evaluates if the system can sustain a target FPS across all concurrent streams. The process is as follows: + +1. **Individual Stream Monitoring:** The script monitors each running pipeline instance (stream) independently. +2. **Sampling:** For the duration of the monitoring interval (e.g., 60 seconds), it samples the `avg_fps` value from each stream every 2 seconds. +3. **Averaging per Stream:** After the interval, it calculates the average FPS for *each stream* based on the samples collected for that specific stream. +4. **Validation:** The performance goal is considered met only if **every single stream's** calculated average FPS is greater than or equal to the target FPS. If even one stream falls below the target, the test fails for that number of concurrent streams. + +This ensures that the reported optimal stream count represents a stable configuration where all streams are performing adequately, rather than relying on a combined average that could hide underperforming streams. + +## Step 3: Stop the Benchmarking + +The benchmarking script automatically stops all pipelines when running in Stream-Density mode. 
However, if you're running in Fixed Stream mode or need to manually stop pipelines, you can stop all running pipelines by interrupting the script with `Ctrl+C` or by running:
+
+```bash
+curl -k -X DELETE https://localhost/api/pipelines
+```
+
+Alternatively, you can stop individual pipelines by their ID:
+
+```bash
+# Get pipeline IDs
+curl -k https://localhost/api/pipelines/status
+
+# Stop specific pipeline
+curl -k -X DELETE https://localhost/api/pipelines/<instance_id>
+```
+
+## Performance Optimization Tips
+
+### GPU Optimization
+
+* **Batch Size:** Increase `batch-size` for better GPU utilization, but be mindful of memory constraints.
+* **Parallel Inference:** Tune `nireq` parameter to match your GPU's parallel processing capabilities.
+* **Stream Configuration:** Adjust `NUM_STREAMS` in `ie-config` to optimize for your specific GPU model.
+
+### CPU Optimization
+
+* **Inference Interval:** Increase `inference-interval` to reduce CPU load if real-time processing isn't critical.
+* **Device Selection:** Use `device: "AUTO"` to let OpenVINO™ automatically select the best device.
+
+### Memory Optimization
+
+* **Pre-process Backend:** Use `va-surface-sharing` for GPU memory efficiency.
+* **Model Precision:** Consider using INT8 or FP16 model precision for better performance.
+
+## Summary
+
+In this guide, you learned how to use the `benchmark_start.sh` script to run performance tests on your Worker Safety Gear Detection application. You can now measure performance for a fixed number of streams or automatically determine the maximum stream density your system can support. 
+ +Key takeaways: +* Use Fixed Stream Mode for testing known workloads +* Use Stream-Density Mode for capacity planning and finding system limits +* Monitor individual stream performance to ensure consistent quality +* Optimize payload configurations based on your hardware capabilities \ No newline at end of file diff --git a/manufacturing-ai-suite/industrial-edge-insights-vision/apps/worker-safety-gear-detection/docs/user-guide/index.rst b/manufacturing-ai-suite/industrial-edge-insights-vision/apps/worker-safety-gear-detection/docs/user-guide/index.rst index 31f7f68da..efdaac5a2 100644 --- a/manufacturing-ai-suite/industrial-edge-insights-vision/apps/worker-safety-gear-detection/docs/user-guide/index.rst +++ b/manufacturing-ai-suite/industrial-edge-insights-vision/apps/worker-safety-gear-detection/docs/user-guide/index.rst @@ -94,6 +94,7 @@ This sample application offers the following features: how-to-view-telemetry-data how-to-use-gpu-for-inference how-to-start-mqtt-publisher + how-to-benchmark api-reference environment-variables release_notes/Overview diff --git a/manufacturing-ai-suite/industrial-edge-insights-vision/benchmark_start.sh b/manufacturing-ai-suite/industrial-edge-insights-vision/benchmark_start.sh new file mode 100755 index 000000000..9031cfd38 --- /dev/null +++ b/manufacturing-ai-suite/industrial-edge-insights-vision/benchmark_start.sh @@ -0,0 +1,326 @@ +#!/bin/bash + +DLSPS_NODE_IP="localhost" + +function get_pipeline_status() { + curl -k -s "https://$DLSPS_NODE_IP/api/pipelines/status" +} + +function run_pipelines() { + local num_pipelines=$1 + local payload_data=$2 + local pipeline_name=$3 + + echo + echo -n ">>>>> Initialization: Starting $num_pipelines pipeline(s) of type '$pipeline_name'..." + + for (( x=1; x<=num_pipelines; x++ )); do + response=$(curl -k -s "https://$DLSPS_NODE_IP/api/pipelines/user_defined_pipelines/${pipeline_name}" \ + -X POST -H "Content-Type: application/json" -d "$payload_data") + + if [ $? 
-ne 0 ] || [[ "$response" == *"Error"* ]]; then + echo -e "\nError: curl command failed or pipeline returned error. Check the deployment status." + echo "Response: $response" + return 1 + fi + sleep 1 # Brief pause between requests + done + + # Wait for all pipelines to be in RUNNING state + echo -n ">>>>> Waiting for pipelines to initialize..." + local running_count=0 + local attempts=0 + while [ "$running_count" -lt "$num_pipelines" ] && [ "$attempts" -lt 60 ]; do + status_output=$(get_pipeline_status) + running_count=$(echo "$status_output" | jq '[.[] | select(.state=="RUNNING")] | length') + + echo -n "." + attempts=$((attempts + 1)) + sleep 2 + done + + if [ "$running_count" -ge "$num_pipelines" ]; then + echo " All pipelines are running." + return 0 + else + echo " Error: Not all pipelines entered RUNNING state." + get_pipeline_status | jq + return 1 + fi +} + +function stop_all_pipelines() { + echo + echo ">>>>> Attempting to stop all running pipelines." + + local pipelines_str + pipelines_str=$(get_pipeline_status | jq -r '[.[] | select(.state=="RUNNING") | .id] | join(",")') + + if [ $? -ne 0 ]; then + echo -e "\nError: Failed to get pipeline status." + return 1 + fi + + if [ -z "$pipelines_str" ]; then + echo "No running pipelines found." + return 0 + fi + + IFS=',' read -ra pipelines <<< "$pipelines_str" + + echo "Found ${#pipelines[@]} running pipelines to stop." + + for pipeline_id in "${pipelines[@]}"; do + curl -k -s --location -X DELETE "https://$DLSPS_NODE_IP/api/pipelines/${pipeline_id}" & + done + + wait + echo "All stop requests sent." + unset IFS + + echo -n ">>>>> Waiting for all pipelines to stop..." + local running=true + while $running; do + echo -n "." + local status + status=$(get_pipeline_status | jq '.[] | .state' | grep "RUNNING") + if [[ -z "$status" ]]; then + running=false + else + sleep 3 + fi + done + echo " done." 
+ echo + return 0 +} + +declare -gA final_avg_fps + +function check_all_streams_meet_target_fps() { + local duration=$1 + local target_fps=$2 + + # Get initial list of running pipeline IDs + local pipeline_ids + pipeline_ids=$(get_pipeline_status | jq -r '[.[] | select(.state=="RUNNING") | .id] | .[]') + + if [ -z "$pipeline_ids" ]; then + echo "No running streams to monitor." + return 1 # Fail if no streams are running + fi + + declare -A fps_sums + declare -A sample_counts + unset final_avg_fps + declare -gA final_avg_fps + + # Initialize sums and counts for each pipeline + for id in $pipeline_ids; do + fps_sums[$id]=0 + sample_counts[$id]=0 + done + + echo ">>>>> Monitoring FPS for $duration seconds..." + local start_time=$SECONDS + while (( SECONDS - start_time < duration )); do + local elapsed_time=$((SECONDS - start_time)) + echo -ne "Monitoring... ${elapsed_time}s / ${duration}s\r" + + local status_output + status_output=$(get_pipeline_status) + + for id in $pipeline_ids; do + # Extract avg_fps for the specific pipeline ID + local current_fps + current_fps=$(echo "$status_output" | jq -r --arg ID "$id" '.[] | select(.id==$ID) | .avg_fps') + + if [ -n "$current_fps" ] && [[ "$current_fps" != "null" ]]; then + fps_sums[$id]=$(echo "${fps_sums[$id]} + $current_fps" | bc) + sample_counts[$id]=$((sample_counts[$id] + 1)) + fi + done + sleep 2 + done + echo -ne "\n" # Move to next line after progress bar finishes + + # Now, check if the average of each stream met the target + local all_streams_met_target=true + for id in $pipeline_ids; do + local num_samples=${sample_counts[$id]} + if [ "$num_samples" -gt 0 ]; then + local total_fps=${fps_sums[$id]} + local avg_fps + avg_fps=$(echo "scale=2; $total_fps / $num_samples" | bc) + final_avg_fps[$id]=$avg_fps + + echo "Stream $id Average FPS: $avg_fps" + + if (( $(echo "$avg_fps < $target_fps" | bc -l) )); then + echo " -> ❌ FAILED to meet target FPS of $target_fps" + all_streams_met_target=false + else + echo " -> ✅ 
OK" + fi + else + echo "Stream $id: No FPS data collected." + all_streams_met_target=false + fi + done + + if $all_streams_met_target; then + return 0 # Success + else + return 1 # Failure + fi +} + +function run_stream_density_mode() { + local payload_file=$1 + local target_fps=$2 + local duration=$3 + + echo ">>>>> Running in Stream-Density Calculation Mode (Target FPS: $target_fps)" + + local optimal_streams=0 + local current_streams=1 + declare -A last_successful_fps + + # Extract pipeline name and payload body from the JSON file + local pipeline_name + pipeline_name=$(jq -r '.[0].pipeline' "$payload_file") + local payload_body + payload_body=$(jq '.[0].payload' "$payload_file") + + if [ -z "$pipeline_name" ] || [ -z "$payload_body" ]; then + echo "Error: Could not extract 'pipeline' or 'payload' from $payload_file" + exit 1 + fi + + while true; do + echo + echo "--- Testing with $current_streams stream(s) ---" + + run_pipelines "$current_streams" "$payload_body" "$pipeline_name" + if [ $? -ne 0 ]; then + echo "Failed to start pipelines. Aborting." + break + fi + + echo ">>>>> Waiting 10 seconds for stabilization..." + sleep 10 + + if check_all_streams_meet_target_fps "$duration" "$target_fps"; then + echo "✓ Target FPS met with $current_streams stream(s)." + optimal_streams=$current_streams + + # Save the FPS values from this successful run + unset last_successful_fps + declare -A last_successful_fps + for id in "${!final_avg_fps[@]}"; do + last_successful_fps[$id]=${final_avg_fps[$id]} + done + + stop_all_pipelines + sleep 5 + + current_streams=$((current_streams + 1)) + else + echo "❌ Target FPS not met with $current_streams stream(s)." + break + fi + done + + stop_all_pipelines + + echo + echo "======================================================" + if [ "$optimal_streams" -gt 0 ]; then + echo "✅ FINAL RESULT: Stream-Density Benchmark Completed!" + echo " Maximum $optimal_streams stream(s) can achieve the target FPS of $target_fps." 
+ echo + echo " Average FPS per stream for the optimal configuration:" + for id in "${!last_successful_fps[@]}"; do + echo " - Stream $id: ${last_successful_fps[$id]} FPS" + done + else + echo "❌ FINAL RESULT: Target FPS Not Achievable." + echo " No configuration could achieve the target FPS of $target_fps." + fi + echo "======================================================" +} + +# --- Main Script --- + +function usage() { + echo "Usage: $0 -p [-n | -t ] [-i ]" + echo + echo "Modes:" + echo " Fixed Stream Mode: Provide -n to run a specific number of pipelines." + echo " Stream-Density Mode: Omit -n and provide -t to find the optimal number of streams." + echo + echo "Arguments:" + echo " -p : (Required) Path to the benchmark payload JSON file." + echo " -n : Number of pipelines to run." + echo " -t : Target FPS for stream-density mode (default: 28.5)." + echo " -i : Monitoring interval in seconds for stream-density mode (default: 60)." + exit 1 +} + +num_pipelines="" +payload_file="" +target_fps="28.5" +interval=60 + +while getopts "n:p:t:i:" opt; do + case ${opt} in + n ) num_pipelines=$OPTARG ;; + p ) payload_file=$OPTARG ;; + t ) target_fps=$OPTARG ;; + i ) interval=$OPTARG ;; + \? ) usage ;; + esac +done + +if [ -z "$payload_file" ]; then + echo "Error: Payload file is required." + usage +fi + +if [ ! -f "$payload_file" ]; then + echo "Error: Benchmark payload file not found: $payload_file" + exit 1 +fi + +stop_all_pipelines +if [ $? -ne 0 ]; then + exit 1 +fi + +if [ -n "$num_pipelines" ]; then + if ! [[ "$num_pipelines" =~ ^[0-9]+$ ]] || [ "$num_pipelines" -le 0 ]; then + echo "Error: Number of pipelines (-n) must be a positive integer." 
+ usage + fi + + pipeline_name=$(jq -r '.[0].pipeline' "$payload_file") + payload_body=$(jq '.[0].payload' "$payload_file") + + if [ -z "$pipeline_name" ] || [ -z "$payload_body" ]; then + echo "Error: Could not extract 'pipeline' or 'payload' from $payload_file" + exit 1 + fi + + run_pipelines "$num_pipelines" "$payload_body" "$pipeline_name" + if [ $? -ne 0 ]; then + exit 1 + fi + + echo + echo ">>>>> $num_pipelines pipeline(s) are running." + echo ">>>>> Results can be visualized in Grafana at 'https://localhost/grafana'" + echo ">>>>> Pipeline status can be checked with 'curl -k https://localhost/api/pipelines/status'" + +else + run_stream_density_mode "$payload_file" "$target_fps" "$interval" +fi