
Commit 9d835b4

Merge pull request #962 from roboflow/feature/add_indexing_for_video_processing
Add changes to enable indexing video processing results in workflows CLI
2 parents (b175b5a + 1ef88a8), commit 9d835b4

File tree

4 files changed: +37 -13 lines


inference/core/version.py (+1 -1)

@@ -1,4 +1,4 @@
-__version__ = "0.34.0"
+__version__ = "0.35.0rc1"
 
 
 if __name__ == "__main__":

inference_cli/lib/workflows/core.py (+1 -1)

@@ -30,7 +30,7 @@ def run_video_processing_with_workflows(
 
     from inference_cli.lib.workflows.video_adapter import process_video_with_workflow
 
-    process_video_with_workflow(
+    _ = process_video_with_workflow(
         input_video_path=input_video_path,
         output_directory=output_directory,
         output_file_type=output_file_type,
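The only change here is that the previously ignored return value is now discarded explicitly. A minimal sketch of the convention, using a hypothetical stub so the snippet stands alone (the real call is the one shown above):

def run_workflow_stub() -> dict:
    # Hypothetical stand-in for process_video_with_workflow; the real function
    # now returns a VideoProcessingDetails instance instead of None.
    return {"structured_results_file": "./outputs/results.jsonl"}


_ = run_workflow_stub()        # CLI path: result intentionally discarded
details = run_workflow_stub()  # programmatic path: keep the returned details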

inference_cli/lib/workflows/entities.py (+6)

@@ -34,3 +34,9 @@ class ImagesDirectoryProcessingDetails:
     ]
     aggregated_results_path: Optional[str] = field(default=None)
     failures_report_path: Optional[str] = field(default=None)
+
+
+@dataclass(frozen=True)
+class VideoProcessingDetails:
+    structured_results_file: Optional[str]
+    video_outputs: Optional[Dict[str, str]]
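For reference, a small sketch of how the new frozen dataclass can be constructed and read; the file paths and the output name "visualisation" are illustrative assumptions, not values produced by this commit:

from inference_cli.lib.workflows.entities import VideoProcessingDetails

# Hypothetical values: path to the structured results file plus a mapping from
# workflow output name to the rendered output video path.
details = VideoProcessingDetails(
    structured_results_file="./outputs/structured_results.jsonl",
    video_outputs={"visualisation": "./outputs/visualisation_output_video.mp4"},
)

# Both fields are optional: either may be None when nothing was buffered or
# when image outputs were not saved as video.
if details.video_outputs is not None:
    for output_name, video_path in details.video_outputs.items():
        print(output_name, video_path)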

inference_cli/lib/workflows/video_adapter.py (+29 -11)

@@ -16,7 +16,7 @@
 from inference.core.utils.image_utils import load_image_bgr
 from inference_cli.lib.utils import dump_jsonl
 from inference_cli.lib.workflows.common import deduct_images, dump_objects_to_json
-from inference_cli.lib.workflows.entities import OutputFileType
+from inference_cli.lib.workflows.entities import OutputFileType, VideoProcessingDetails
 
 
 def process_video_with_workflow(
@@ -31,10 +31,11 @@ def process_video_with_workflow(
     max_fps: Optional[float] = None,
     save_image_outputs_as_video: bool = True,
     api_key: Optional[str] = None,
-) -> None:
+) -> VideoProcessingDetails:
     structured_sink = WorkflowsStructuredDataSink(
         output_directory=output_directory,
         output_file_type=output_file_type,
+        numbers_of_streams=1,
     )
     progress_sink = ProgressSink.init(input_video_path=input_video_path)
     sinks = [structured_sink.on_prediction, progress_sink.on_prediction]
@@ -61,9 +62,14 @@
     pipeline.start(use_main_thread=True)
     pipeline.join()
     progress_sink.stop()
-    structured_sink.flush()
+    structured_results_file = structured_sink.flush()[0]
+    video_outputs = None
     if video_sink is not None:
-        video_sink.release()
+        video_outputs = video_sink.release()
+    return VideoProcessingDetails(
+        structured_results_file=structured_results_file,
+        video_outputs=video_outputs,
+    )
 
 
 class WorkflowsStructuredDataSink:
@@ -72,10 +78,12 @@ def __init__(
         self,
         output_directory: str,
         output_file_type: OutputFileType,
+        numbers_of_streams: int = 1,
     ):
         self._output_directory = output_directory
         self._structured_results_buffer = defaultdict(list)
         self._output_file_type = output_file_type
+        self._numbers_of_streams = numbers_of_streams
 
     def on_prediction(
         self,
@@ -94,11 +102,17 @@ def on_prediction(
         }
         self._structured_results_buffer[stream_idx].append(prediction)
 
-    def flush(self) -> None:
+    def flush(self) -> List[Optional[str]]:
+        stream_idx2file_path = {}
         for stream_idx, buffer in self._structured_results_buffer.items():
-            self._flush_stream_buffer(stream_idx=stream_idx)
-
-    def _flush_stream_buffer(self, stream_idx: int) -> None:
+            file_path = self._flush_stream_buffer(stream_idx=stream_idx)
+            stream_idx2file_path[stream_idx] = file_path
+        return [
+            stream_idx2file_path.get(stream_idx)
+            for stream_idx in range(self._numbers_of_streams)
+        ]
+
+    def _flush_stream_buffer(self, stream_idx: int) -> Optional[str]:
         content = self._structured_results_buffer[stream_idx]
         if len(content) == 0:
             return None
@@ -114,6 +128,7 @@ def _flush_stream_buffer(self, stream_idx: int) -> None:
         else:
             dump_jsonl(path=file_path, content=content)
         self._structured_results_buffer[stream_idx] = []
+        return file_path
 
     def __del__(self):
         self.flush()
@@ -182,11 +197,14 @@ def on_prediction(
             image = load_image_bgr(value)
             stream_sinks[key].write_frame(frame=image)
 
-    def release(self) -> None:
-        for stream_sinks in self._video_sinks.values():
-            for sink in stream_sinks.values():
+    def release(self) -> Optional[Dict[str, str]]:
+        stream_idx2keys_videos: Dict[int, Dict[str, str]] = defaultdict(dict)
+        for stream_idx, stream_sinks in self._video_sinks.items():
+            for key, sink in stream_sinks.items():
                 sink.release()
+                stream_idx2keys_videos[stream_idx][key] = sink.target_path
         self._video_sinks = defaultdict(dict)
+        return stream_idx2keys_videos.get(0)
 
     def __del__(self):
         self.release()
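The indexing change in WorkflowsStructuredDataSink.flush() is that it now returns one entry per configured stream, with None for any stream that never buffered results (the empty-buffer case in _flush_stream_buffer() returns None). A standalone sketch of that pattern, assuming two streams of which only stream 0 produced output:

from typing import Dict, Optional

# Per-stream flush results keyed by stream index; stream 1 wrote nothing,
# so it never gets an entry in the mapping.
stream_idx2file_path: Dict[int, Optional[str]] = {0: "./outputs/stream_0_results.jsonl"}
numbers_of_streams = 2

# Same list-building pattern as the new flush(): missing streams map to None.
flushed_files = [
    stream_idx2file_path.get(stream_idx)
    for stream_idx in range(numbers_of_streams)
]
assert flushed_files == ["./outputs/stream_0_results.jsonl", None]

process_video_with_workflow configures the sink with numbers_of_streams=1 and reads only element [0] of this list; the video sink's release() likewise returns only the stream-0 mapping via stream_idx2keys_videos.get(0).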
