From d39bac7c530e1b7f1b7d17ef6ae3bf782b2c8bdb Mon Sep 17 00:00:00 2001 From: casmwenger Date: Thu, 12 Dec 2024 08:30:20 -0700 Subject: [PATCH 1/3] Base visualization copy and bonding box vis copy --- .../core_steps/visualizations/bounding_box/v1.py | 4 ++-- .../workflows/core_steps/visualizations/common/base.py | 4 ++-- .../core_steps/visualizations/common/base_colorable.py | 8 ++++---- inference/core/workflows/prototypes/block.py | 2 +- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/inference/core/workflows/core_steps/visualizations/bounding_box/v1.py b/inference/core/workflows/core_steps/visualizations/bounding_box/v1.py index 11373d8821..7843e959bd 100644 --- a/inference/core/workflows/core_steps/visualizations/bounding_box/v1.py +++ b/inference/core/workflows/core_steps/visualizations/bounding_box/v1.py @@ -41,13 +41,13 @@ class BoundingBoxManifest(ColorableVisualizationManifest): ) thickness: Union[int, Selector(kind=[INTEGER_KIND])] = Field( # type: ignore - description="Thickness of the bounding box in pixels.", + description="Set the thickness of the bounding box edges.", default=2, examples=[2, "$inputs.thickness"], ) roundness: Union[FloatZeroToOne, Selector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore - description="Roundness of the corners of the bounding box.", + description="Define the roundness of the bounding box corners.", default=0.0, examples=[0.0, "$inputs.roundness"], ) diff --git a/inference/core/workflows/core_steps/visualizations/common/base.py b/inference/core/workflows/core_steps/visualizations/common/base.py index dc6442afa6..e6f623dc0c 100644 --- a/inference/core/workflows/core_steps/visualizations/common/base.py +++ b/inference/core/workflows/core_steps/visualizations/common/base.py @@ -34,12 +34,12 @@ class VisualizationManifest(WorkflowBlockManifest, ABC): ) image: Selector(kind=[IMAGE_KIND]) = Field( title="Input Image", - description="The input image for this step.", + description="Select the input image to visualize on.", examples=["$inputs.image", "$steps.cropping.crops"], validation_alias=AliasChoices("image", "images"), ) copy_image: Union[bool, Selector(kind=[BOOLEAN_KIND])] = Field( # type: ignore - description="Duplicate the image contents (vs overwriting the image in place). Deselect for chained visualizations that should stack on previous ones where the intermediate state is not needed.", + description="Enable this option to create a copy of the input image for visualization, preserving the original. Use this when stacking multiple visualizations.", default=True, examples=[True, False], ) diff --git a/inference/core/workflows/core_steps/visualizations/common/base_colorable.py b/inference/core/workflows/core_steps/visualizations/common/base_colorable.py index bf15aefeab..7a6a92bd5e 100644 --- a/inference/core/workflows/core_steps/visualizations/common/base_colorable.py +++ b/inference/core/workflows/core_steps/visualizations/common/base_colorable.py @@ -77,7 +77,7 @@ class ColorableVisualizationManifest(PredictionsVisualizationManifest, ABC): Selector(kind=[STRING_KIND]), ] = Field( # type: ignore default="DEFAULT", - description="Color palette to use for annotations.", + description="Select a color palette for the bounding boxes.", examples=["DEFAULT", "$inputs.color_palette"], ) @@ -86,14 +86,14 @@ class ColorableVisualizationManifest(PredictionsVisualizationManifest, ABC): Selector(kind=[INTEGER_KIND]), ] = Field( # type: ignore default=10, - description="Number of colors in the color palette. 
Applies when using a matplotlib `color_palette`.", + description="Specify the number of colors in the palette. This applies when using custom or Matplotlib palettes.", examples=[10, "$inputs.palette_size"], ) custom_colors: Union[List[str], Selector(kind=[LIST_OF_VALUES_KIND])] = ( Field( # type: ignore default=[], - description='List of colors to use for annotations when `color_palette` is set to "CUSTOM".', + description='Define a list of custom colors for bounding boxes in HEX format.', examples=[["#FF0000", "#00FF00", "#0000FF"], "$inputs.custom_colors"], ) ) @@ -103,7 +103,7 @@ class ColorableVisualizationManifest(PredictionsVisualizationManifest, ABC): Selector(kind=[STRING_KIND]), ] = Field( # type: ignore default="CLASS", - description="Strategy to use for mapping colors to annotations.", + description="Choose how bounding box colors are assigned.", examples=["CLASS", "$inputs.color_axis"], ) diff --git a/inference/core/workflows/prototypes/block.py b/inference/core/workflows/prototypes/block.py index 6ad5db2dc8..025b75cbfe 100644 --- a/inference/core/workflows/prototypes/block.py +++ b/inference/core/workflows/prototypes/block.py @@ -24,7 +24,7 @@ class WorkflowBlockManifest(BaseModel, ABC): ) type: str - name: str = Field(title="Step Name", description="The unique name of this step.") + name: str = Field(title="Step Name", description="Enter a unique identifier for this step.") @classmethod @abstractmethod From ef210fd189498a37a97b37b5eddf16ebaa4cfae2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Fri, 20 Dec 2024 12:23:32 +0100 Subject: [PATCH 2/3] Apply fixes --- .../visualizations/common/base_colorable.py | 4 +-- inference/core/workflows/prototypes/block.py | 4 ++- .../unit_tests/lib/workflows/test_common.py | 35 +++++++++---------- .../unit_tests/core_steps/test_init_files.py | 17 ++++++--- 4 files changed, 34 insertions(+), 26 deletions(-) diff --git a/inference/core/workflows/core_steps/visualizations/common/base_colorable.py b/inference/core/workflows/core_steps/visualizations/common/base_colorable.py index 7a6a92bd5e..2fe26c356b 100644 --- a/inference/core/workflows/core_steps/visualizations/common/base_colorable.py +++ b/inference/core/workflows/core_steps/visualizations/common/base_colorable.py @@ -77,7 +77,7 @@ class ColorableVisualizationManifest(PredictionsVisualizationManifest, ABC): Selector(kind=[STRING_KIND]), ] = Field( # type: ignore default="DEFAULT", - description="Select a color palette for the bounding boxes.", + description="Select a color palette for the visualised elements.", examples=["DEFAULT", "$inputs.color_palette"], ) @@ -93,7 +93,7 @@ class ColorableVisualizationManifest(PredictionsVisualizationManifest, ABC): custom_colors: Union[List[str], Selector(kind=[LIST_OF_VALUES_KIND])] = ( Field( # type: ignore default=[], - description='Define a list of custom colors for bounding boxes in HEX format.', + description="Define a list of custom colors for bounding boxes in HEX format.", examples=[["#FF0000", "#00FF00", "#0000FF"], "$inputs.custom_colors"], ) ) diff --git a/inference/core/workflows/prototypes/block.py b/inference/core/workflows/prototypes/block.py index 025b75cbfe..b3be8b4fe4 100644 --- a/inference/core/workflows/prototypes/block.py +++ b/inference/core/workflows/prototypes/block.py @@ -24,7 +24,9 @@ class WorkflowBlockManifest(BaseModel, ABC): ) type: str - name: str = Field(title="Step Name", description="Enter a unique identifier for this step.") + name: str = Field( + title="Step Name", description="Enter a 
unique identifier for this step." + ) @classmethod @abstractmethod diff --git a/tests/inference_cli/unit_tests/lib/workflows/test_common.py b/tests/inference_cli/unit_tests/lib/workflows/test_common.py index d8b3a6f080..fbb3257e40 100644 --- a/tests/inference_cli/unit_tests/lib/workflows/test_common.py +++ b/tests/inference_cli/unit_tests/lib/workflows/test_common.py @@ -86,25 +86,22 @@ def test_aggregate_batch_processing_results_when_json_output_is_expected_and_res continue decoded_results.append(json.loads(line)) print(decoded_results) - assert ( - decoded_results - == [ - { - "some": "value", - "image": "other.jpg", - "other": 3.0, - "list_field": [1, 2, 3], - "object_field": {"nested": "value"}, - }, - { - "some": "value", - "image": "some.jpg", - "other": 3.0, - "list_field": [1, 2, 3], - "object_field": {"nested": "value"}, - } - ] - ) + assert decoded_results == [ + { + "some": "value", + "image": "other.jpg", + "other": 3.0, + "list_field": [1, 2, 3], + "object_field": {"nested": "value"}, + }, + { + "some": "value", + "image": "some.jpg", + "other": 3.0, + "list_field": [1, 2, 3], + "object_field": {"nested": "value"}, + }, + ] def test_aggregate_batch_processing_results_when_json_output_is_expected_and_results_not_present( diff --git a/tests/workflows/unit_tests/core_steps/test_init_files.py b/tests/workflows/unit_tests/core_steps/test_init_files.py index ecdb671781..cae1d4edc3 100644 --- a/tests/workflows/unit_tests/core_steps/test_init_files.py +++ b/tests/workflows/unit_tests/core_steps/test_init_files.py @@ -1,5 +1,6 @@ import os from pathlib import Path + import pytest @@ -9,7 +10,9 @@ def test_init_files_present_in_inference_core_workflows_core_steps(): missing_init_dirs = [] - for root, dirs, files in os.walk(str(project_root / "inference" / "core" / "workflows" / "core_steps")): + for root, dirs, files in os.walk( + str(project_root / "inference" / "core" / "workflows" / "core_steps") + ): if "__pycache__" in root: continue @@ -18,7 +21,9 @@ def test_init_files_present_in_inference_core_workflows_core_steps(): rel_path = os.path.relpath(root, project_root) missing_init_dirs.append(rel_path) - assert not missing_init_dirs, f"The following directories are missing __init__.py files:\n{chr(10).join(missing_init_dirs)}" + assert ( + not missing_init_dirs + ), f"The following directories are missing __init__.py files:\n{chr(10).join(missing_init_dirs)}" def test_init_files_present_in_inference_enterprise_workflows_core_steps(): @@ -27,7 +32,9 @@ def test_init_files_present_in_inference_enterprise_workflows_core_steps(): missing_init_dirs = [] - for root, dirs, files in os.walk(str(project_root / "inference" / "enterprise" / "workflows" / "core_steps")): + for root, dirs, files in os.walk( + str(project_root / "inference" / "enterprise" / "workflows" / "core_steps") + ): if "__pycache__" in root: continue @@ -36,4 +43,6 @@ def test_init_files_present_in_inference_enterprise_workflows_core_steps(): rel_path = os.path.relpath(root, project_root) missing_init_dirs.append(rel_path) - assert not missing_init_dirs, f"The following directories are missing __init__.py files:\n{chr(10).join(missing_init_dirs)}" + assert ( + not missing_init_dirs + ), f"The following directories are missing __init__.py files:\n{chr(10).join(missing_init_dirs)}" From 4cc715e9d6bf1e66c4b07aaa3aa6797e03fffd56 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Fri, 20 Dec 2024 12:23:55 +0100 Subject: [PATCH 3/3] Lint tests --- .../models_predictions_tests/test_owlv2.py | 35 ++++++---- 
.../models_predictions_tests/test_sam2.py | 1 + .../unit_tests/core_steps/cache/test_cache.py | 67 +++++-------------- .../core_steps/fusion/test_buffer.py | 28 ++------ .../core_steps/visualizations/test_grid.py | 26 +++---- 5 files changed, 56 insertions(+), 101 deletions(-) diff --git a/tests/inference/models_predictions_tests/test_owlv2.py b/tests/inference/models_predictions_tests/test_owlv2.py index dc8db543dc..7052f23feb 100644 --- a/tests/inference/models_predictions_tests/test_owlv2.py +++ b/tests/inference/models_predictions_tests/test_owlv2.py @@ -1,13 +1,20 @@ -import pytest import gc -from unittest.mock import MagicMock import os +from unittest.mock import MagicMock + +import pytest import torch + +from inference.core.cache.model_artifacts import get_cache_file_path from inference.core.entities.requests.inference import ObjectDetectionInferenceRequest from inference.core.entities.requests.owlv2 import OwlV2InferenceRequest -from inference.models.owlv2.owlv2 import OwlV2, SerializedOwlV2, Owlv2Singleton, LazyImageRetrievalWrapper from inference.core.env import OWLV2_VERSION_ID -from inference.core.cache.model_artifacts import get_cache_file_path +from inference.models.owlv2.owlv2 import ( + LazyImageRetrievalWrapper, + OwlV2, + Owlv2Singleton, + SerializedOwlV2, +) @pytest.mark.slow @@ -58,13 +65,14 @@ def test_owlv2(): assert abs(532 - posts[3].x) < 1.5 assert abs(572 - posts[4].x) < 1.5 + def test_owlv2_serialized(): image = { "type": "url", "value": "https://media.roboflow.com/inference/seawithdock.jpeg", } - training_data=[ + training_data = [ { "image": image, "boxes": [ @@ -93,19 +101,20 @@ def test_owlv2_serialized(): hf_id=f"google/{OWLV2_VERSION_ID}", ) assert os.path.exists(serialized_pt) - pt_path = get_cache_file_path(file=SerializedOwlV2.weights_file_path, model_id=model_id) + pt_path = get_cache_file_path( + file=SerializedOwlV2.weights_file_path, model_id=model_id + ) os.makedirs(os.path.dirname(pt_path), exist_ok=True) os.rename(serialized_pt, pt_path) serialized_owlv2 = SerializedOwlV2(model_id=model_id) - + # Get the image hash before inference image_wrapper = LazyImageRetrievalWrapper(request.image) image_hash = image_wrapper.image_hash assert image_hash in serialized_owlv2.owlv2.cpu_image_embed_cache - + response = serialized_owlv2.infer_from_request(request) - - + assert len(response.predictions) == 5 posts = [p for p in response.predictions if p.class_name == "post"] posts.sort(key=lambda x: x.x) @@ -114,15 +123,13 @@ def test_owlv2_serialized(): assert abs(264 - posts[2].x) < 1.5 assert abs(532 - posts[3].x) < 1.5 assert abs(572 - posts[4].x) < 1.5 - + pt_path = serialized_owlv2.save_small_model_without_image_embeds() assert os.path.exists(pt_path) pt_dict = torch.load(pt_path) assert len(pt_dict["image_embeds"]) == 0 - - @pytest.mark.slow def test_owlv2_multiple_prompts(): image = { @@ -411,6 +418,7 @@ def test_owlv2_multiple_training_images_repeated_inference(): assert p1.height == p2.height assert p1.confidence == p2.confidence + @pytest.mark.slow def test_owlv2_model_unloaded_when_garbage_collected(): model = OwlV2() @@ -418,5 +426,6 @@ def test_owlv2_model_unloaded_when_garbage_collected(): gc.collect() assert len(Owlv2Singleton._instances) == 0 + if __name__ == "__main__": test_owlv2() diff --git a/tests/inference/models_predictions_tests/test_sam2.py b/tests/inference/models_predictions_tests/test_sam2.py index ef8e5e698f..f3b7c99f70 100644 --- a/tests/inference/models_predictions_tests/test_sam2.py +++ 
b/tests/inference/models_predictions_tests/test_sam2.py @@ -17,6 +17,7 @@ from inference.core.workflows.core_steps.models.foundation.segment_anything2.v1 import ( convert_sam2_segmentation_response_to_inference_instances_seg_response, ) + try: from inference.models.sam2 import SegmentAnything2 from inference.models.sam2.segment_anything2 import ( diff --git a/tests/workflows/unit_tests/core_steps/cache/test_cache.py b/tests/workflows/unit_tests/core_steps/cache/test_cache.py index 6f12be02df..76322a1493 100644 --- a/tests/workflows/unit_tests/core_steps/cache/test_cache.py +++ b/tests/workflows/unit_tests/core_steps/cache/test_cache.py @@ -2,20 +2,16 @@ import numpy as np +from inference.core.workflows.core_steps.cache.cache_get.v1 import CacheGetBlockV1 +from inference.core.workflows.core_steps.cache.cache_set.v1 import CacheSetBlockV1 from inference.core.workflows.core_steps.common.entities import StepExecutionMode -from inference.core.workflows.core_steps.cache.cache_get.v1 import ( - CacheGetBlockV1, -) -from inference.core.workflows.core_steps.cache.cache_set.v1 import ( - CacheSetBlockV1, -) - from inference.core.workflows.execution_engine.entities.base import ( ImageParentMetadata, VideoMetadata, WorkflowImageData, ) + def test_cache_on_video() -> None: # given metadata = VideoMetadata( @@ -34,28 +30,19 @@ def test_cache_on_video() -> None: cache_set_block = CacheSetBlockV1(step_execution_mode=StepExecutionMode.LOCAL) # empty result - get_empty = cache_get_block.run( - image=image, - key="foo" - ) + get_empty = cache_get_block.run(image=image, key="foo") assert get_empty == { "output": False, } # set then get - cache_set_block.run( - image=image, - key="foo", - value="bar" - ) - get_full = cache_get_block.run( - image=image, - key="foo" - ) + cache_set_block.run(image=image, key="foo", value="bar") + get_full = cache_get_block.run(image=image, key="foo") assert get_full == { "output": "bar", } + def test_cache_with_no_metadata() -> None: # given image = WorkflowImageData( @@ -66,28 +53,19 @@ def test_cache_with_no_metadata() -> None: cache_set_block = CacheSetBlockV1(step_execution_mode=StepExecutionMode.LOCAL) # empty result - get_empty = cache_get_block.run( - image=image, - key="foo" - ) + get_empty = cache_get_block.run(image=image, key="foo") assert get_empty == { "output": False, } # set then get - cache_set_block.run( - image=image, - key="foo", - value="bar" - ) - get_full = cache_get_block.run( - image=image, - key="foo" - ) + cache_set_block.run(image=image, key="foo", value="bar") + get_full = cache_get_block.run(image=image, key="foo") assert get_full == { "output": "bar", } + def test_cache_on_multiple_videos() -> None: # given metadata_1 = VideoMetadata( @@ -120,33 +98,20 @@ def test_cache_on_multiple_videos() -> None: cache_set_block = CacheSetBlockV1(step_execution_mode=StepExecutionMode.LOCAL) # empty result - get_empty = cache_get_block.run( - image=image_1, - key="foo" - ) + get_empty = cache_get_block.run(image=image_1, key="foo") assert get_empty == { "output": False, } # set then get - cache_set_block.run( - image=image_1, - key="foo", - value="bar" - ) - get_full = cache_get_block.run( - image=image_1, - key="foo" - ) + cache_set_block.run(image=image_1, key="foo", value="bar") + get_full = cache_get_block.run(image=image_1, key="foo") assert get_full == { "output": "bar", } # make sure it doesn't bleed over - get_empty = cache_get_block.run( - image=image_2, - key="foo" - ) + get_empty = cache_get_block.run(image=image_2, key="foo") assert get_empty == { 
"output": False, - } \ No newline at end of file + } diff --git a/tests/workflows/unit_tests/core_steps/fusion/test_buffer.py b/tests/workflows/unit_tests/core_steps/fusion/test_buffer.py index 7ba7dc65d2..38ca75ba21 100644 --- a/tests/workflows/unit_tests/core_steps/fusion/test_buffer.py +++ b/tests/workflows/unit_tests/core_steps/fusion/test_buffer.py @@ -1,47 +1,33 @@ from inference.core.workflows.core_steps.fusion.buffer.v1 import BufferBlockV1 + def test_buffer() -> None: buffer_block = BufferBlockV1() # first result - first = buffer_block.run( - data=1, - length=2, - pad=False - ) + first = buffer_block.run(data=1, length=2, pad=False) assert first == { "output": [1], } # add more data - second = buffer_block.run( - data=2, - length=2, - pad=False - ) + second = buffer_block.run(data=2, length=2, pad=False) assert second == { "output": [2, 1], } # rollover - third = buffer_block.run( - data=3, - length=2, - pad=False - ) + third = buffer_block.run(data=3, length=2, pad=False) assert third == { "output": [3, 2], } + def test_with_padding() -> None: buffer_block = BufferBlockV1() # first result - first = buffer_block.run( - data=1, - length=2, - pad=True - ) + first = buffer_block.run(data=1, length=2, pad=True) assert first == { "output": [1, None], - } \ No newline at end of file + } diff --git a/tests/workflows/unit_tests/core_steps/visualizations/test_grid.py b/tests/workflows/unit_tests/core_steps/visualizations/test_grid.py index de541b4055..b3c1abd263 100644 --- a/tests/workflows/unit_tests/core_steps/visualizations/test_grid.py +++ b/tests/workflows/unit_tests/core_steps/visualizations/test_grid.py @@ -1,13 +1,14 @@ import numpy as np from inference.core.workflows.core_steps.visualizations.grid.v1 import ( - GridVisualizationBlockV1 + GridVisualizationBlockV1, ) from inference.core.workflows.execution_engine.entities.base import ( ImageParentMetadata, WorkflowImageData, ) + def test_grid_visualization_block_single() -> None: # given block = GridVisualizationBlockV1() @@ -17,11 +18,7 @@ def test_grid_visualization_block_single() -> None: numpy_image=np.zeros((1000, 1000, 3), dtype=np.uint8), ) - output = block.run( - images=[image], - width=1000, - height=1000 - ) + output = block.run(images=[image], width=1000, height=1000) assert output is not None assert "image" in output @@ -34,6 +31,7 @@ def test_grid_visualization_block_single() -> None: output.get("image").numpy_image, np.zeros((1000, 1000, 3), dtype=np.uint8) ) + def test_grid_visualization_block_2x2() -> None: # given block = GridVisualizationBlockV1() @@ -60,9 +58,7 @@ def test_grid_visualization_block_2x2() -> None: ) output = block.run( - images=[image_1, image_2, image_3, image_4], - width=400, - height=400 + images=[image_1, image_2, image_3, image_4], width=400, height=400 ) assert output is not None @@ -71,27 +67,25 @@ def test_grid_visualization_block_2x2() -> None: # dimensions of output match params assert output.get("image").numpy_image.shape == (400, 400, 3) - + # check that each quadrant is the right color # top left: black assert np.array_equal( output.get("image").numpy_image[:200, :200, :], - np.zeros((200, 200, 3), dtype=np.uint8) + np.zeros((200, 200, 3), dtype=np.uint8), ) # top right: white assert np.array_equal( output.get("image").numpy_image[:200, 200:, :], - np.array([[[255, 255, 255]] * 200] * 200, dtype=np.uint8) + np.array([[[255, 255, 255]] * 200] * 200, dtype=np.uint8), ) # bottom left: red assert np.array_equal( output.get("image").numpy_image[200:, :200, :], - np.array([[[255, 0, 0]] * 
200] * 200, dtype=np.uint8) + np.array([[[255, 0, 0]] * 200] * 200, dtype=np.uint8), ) # bottom right: green assert np.array_equal( output.get("image").numpy_image[200:, 200:, :], - np.array([[[0, 255, 0]] * 200] * 200, dtype=np.uint8) + np.array([[[0, 255, 0]] * 200] * 200, dtype=np.uint8), ) - -
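
A note for readers of the series: the user-facing effect of PATCH 1 and PATCH 2 is limited to the `description`/`title` metadata on manifest fields, which pydantic copies into the generated JSON schema (presumably the text a workflow UI surfaces). The sketch below is a minimal, self-contained illustration of that mechanism using plain pydantic: the field names, defaults, examples, and wording are taken from the diff, but the class itself is hypothetical and elides the Selector unions and kind annotations of the real manifests.

    from pydantic import BaseModel, Field


    class BoundingBoxManifestSketch(BaseModel):
        # Mirrors WorkflowBlockManifest.name as reworded in the series.
        name: str = Field(
            title="Step Name", description="Enter a unique identifier for this step."
        )
        # Mirrors VisualizationManifest.copy_image.
        copy_image: bool = Field(
            default=True,
            description=(
                "Enable this option to create a copy of the input image for "
                "visualization, preserving the original. Use this when stacking "
                "multiple visualizations."
            ),
        )
        # Mirrors the two BoundingBoxManifest fields touched by PATCH 1.
        thickness: int = Field(
            default=2,
            description="Set the thickness of the bounding box edges.",
            examples=[2, "$inputs.thickness"],
        )
        roundness: float = Field(
            default=0.0,
            description="Define the roundness of the bounding box corners.",
            examples=[0.0, "$inputs.roundness"],
        )


    # The reworded strings land in the schema's per-property "description" entries:
    schema = BoundingBoxManifestSketch.model_json_schema()
    print(schema["properties"]["thickness"]["description"])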