diff --git a/inference/core/interfaces/stream_manager/manager_app/entities.py b/inference/core/interfaces/stream_manager/manager_app/entities.py
index c5c4e3022..4703a03f7 100644
--- a/inference/core/interfaces/stream_manager/manager_app/entities.py
+++ b/inference/core/interfaces/stream_manager/manager_app/entities.py
@@ -100,8 +100,8 @@ class WebRTCTURNConfig(BaseModel):
 
 class InitialiseWebRTCPipelinePayload(InitialisePipelinePayload):
     webrtc_offer: WebRTCOffer
     webrtc_turn_config: Optional[WebRTCTURNConfig] = None
-    stream_output: Optional[List[str]] = Field(default_factory=list)
-    data_output: Optional[List[str]] = Field(default_factory=list)
+    stream_output: Optional[List[Optional[str]]] = Field(default_factory=list)
+    data_output: Optional[List[Optional[str]]] = Field(default_factory=list)
     webrtc_peer_timeout: float = 1
     webcam_fps: Optional[float] = None
diff --git a/inference/core/interfaces/stream_manager/manager_app/inference_pipeline_manager.py b/inference/core/interfaces/stream_manager/manager_app/inference_pipeline_manager.py
index 4350ca2d6..1cc929854 100644
--- a/inference/core/interfaces/stream_manager/manager_app/inference_pipeline_manager.py
+++ b/inference/core/interfaces/stream_manager/manager_app/inference_pipeline_manager.py
@@ -11,6 +11,7 @@
 from types import FrameType
 from typing import Dict, Optional, Tuple
 
+import cv2 as cv
 from pydantic import ValidationError
 
 from inference.core import logger
@@ -275,11 +276,37 @@ def start_loop(loop: asyncio.AbstractEventLoop):
         def webrtc_sink(
             prediction: Dict[str, WorkflowImageData], video_frame: VideoFrame
         ) -> None:
-            if parsed_payload.stream_output[0] not in prediction:
-                from_inference_queue.sync_put(video_frame.image)
-                return
-            if prediction[parsed_payload.stream_output[0]] is None:
-                from_inference_queue.sync_put(video_frame.image)
+            errors = []
+            if not any(
+                isinstance(v, WorkflowImageData) for v in prediction.values()
+            ):
+                errors.append("Visualisation blocks were not executed")
+                errors.append("or workflow was not configured to output visuals.")
+                errors.append(
+                    "Please try to adjust the scene so models detect objects"
+                )
+                errors.append("or stop preview, update workflow and try again.")
+            elif parsed_payload.stream_output[0] not in prediction:
+                if not parsed_payload.stream_output[0]:
+                    errors.append("No stream output selected to show")
+                else:
+                    errors.append(
+                        f"{parsed_payload.stream_output[0]} not available in results"
+                    )
+                errors.append("Please stop, update outputs and try again")
+            if errors:
+                result_frame = video_frame.image.copy()
+                for row, error in enumerate(errors):
+                    result_frame = cv.putText(
+                        result_frame,
+                        error,
+                        (10, 20 + 30 * row),
+                        cv.FONT_HERSHEY_SIMPLEX,
+                        0.7,
+                        (0, 255, 0),
+                        2,
+                    )
+                from_inference_queue.sync_put(result_frame)
                 return
             from_inference_queue.sync_put(
                 prediction[parsed_payload.stream_output[0]].numpy_image
diff --git a/inference/core/version.py b/inference/core/version.py
index e8eb7c8d3..9cc9b6c89 100644
--- a/inference/core/version.py
+++ b/inference/core/version.py
@@ -1,4 +1,4 @@
-__version__ = "0.35.0rc2"
+__version__ = "0.35.0"
 
 
 if __name__ == "__main__":