Skip to content

Commit 70dc9b5

Browse files
committed
Merge branch 'dev' into pythonicworkflow
Signed-off-by: YunLiu <[email protected]>
2 parents ec202e0 + d94df3f commit 70dc9b5

File tree

5 files changed

+172
-11
lines changed

5 files changed

+172
-11
lines changed

monai/bundle/reference_resolver.py

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -192,6 +192,16 @@ def get_resolved_content(self, id: str, **kwargs: Any) -> ConfigExpression | str
192192
"""
193193
return self._resolve_one_item(id=id, **kwargs)
194194

195+
def remove_resolved_content(self, id: str) -> Any | None:
196+
"""
197+
Remove the resolved ``ConfigItem`` by id.
198+
199+
Args:
200+
id: id name of the expected item.
201+
202+
"""
203+
return self.resolved_content.pop(id) if id in self.resolved_content else None
204+
195205
@classmethod
196206
def normalize_id(cls, id: str | int) -> str:
197207
"""

monai/bundle/workflows.py

Lines changed: 17 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -544,8 +544,23 @@ def check_properties(self) -> list[str] | None:
544544
ret.extend(wrong_props)
545545
return ret
546546

547-
def _run_expr(self, id: str, **kwargs: dict) -> Any:
548-
return self.parser.get_parsed_content(id, **kwargs) if id in self.parser else None
547+
def _run_expr(self, id: str, **kwargs: dict) -> list[Any]:
    """
    Evaluate the expression or expression list given by `id`. The resolved values from the evaluations are not stored,
    allowing this to be evaluated repeatedly (eg. in streaming applications) without restarting the hosting process.
    """
    if id not in self.parser:
        return []
    item = self.parser[id]
    # expressions may be supplied as a list; build the per-entry ids so each one
    # can be evaluated and then reset for the next run
    if isinstance(item, list):
        eval_ids = [f"{id}{ID_SEP_KEY}{index}" for index in range(len(item))]
    else:
        eval_ids = [id]
    results: list[Any] = []
    for eval_id in eval_ids:
        results.append(self.parser.get_parsed_content(eval_id, **kwargs))
        # drop the cached resolution so a later call re-evaluates the expression
        self.parser.ref_resolver.remove_resolved_content(eval_id)
    return results
549564

550565
def _get_prop_id(self, name: str, property: dict) -> Any:
551566
prop_id = property[BundlePropertyConfig.ID]

tests/test_bundle_workflow.py

Lines changed: 38 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,7 @@
2727
from monai.data import Dataset
2828
from monai.inferers import SimpleInferer, SlidingWindowInferer
2929
from monai.networks.nets import UNet
30-
from monai.transforms import Compose, LoadImage, LoadImaged
30+
from monai.transforms import Compose, LoadImage, LoadImaged, SaveImaged
3131
from tests.nonconfig_workflow import NonConfigWorkflow, PythonicWorkflowImpl
3232

3333
TEST_CASE_1 = [os.path.join(os.path.dirname(__file__), "testing_data", "inference.json")]
@@ -36,6 +36,8 @@
3636

3737
TEST_CASE_3 = [os.path.join(os.path.dirname(__file__), "testing_data", "config_fl_train.json")]
3838

39+
TEST_CASE_4 = [os.path.join(os.path.dirname(__file__), "testing_data", "responsive_inference.json")]
40+
3941
TEST_CASE_NON_CONFIG_WRONG_LOG = [None, "logging.conf", "Cannot find the logging config file: logging.conf."]
4042

4143

@@ -46,9 +48,9 @@ def setUp(self):
4648
self.expected_shape = (128, 128, 128)
4749
test_image = np.random.rand(*self.expected_shape)
4850
self.filename = os.path.join(self.data_dir, "image.nii")
49-
self.filename2 = os.path.join(self.data_dir, "image2.nii")
51+
self.filename1 = os.path.join(self.data_dir, "image1.nii")
5052
nib.save(nib.Nifti1Image(test_image, np.eye(4)), self.filename)
51-
nib.save(nib.Nifti1Image(test_image, np.eye(4)), self.filename2)
53+
nib.save(nib.Nifti1Image(test_image, np.eye(4)), self.filename1)
5254

5355
def tearDown(self):
5456
shutil.rmtree(self.data_dir)
@@ -119,6 +121,35 @@ def test_inference_config(self, config_file):
119121
self._test_inferer(inferer)
120122
self.assertEqual(inferer.workflow_type, "infer")
121123

124+
@parameterized.expand([TEST_CASE_4])
def test_responsive_inference_config(self, config_file):
    loader = LoadImaged(keys="image")
    saver = SaveImaged(keys="pred", output_dir=self.data_dir, output_postfix="seg")

    # test standard MONAI model-zoo config workflow
    workflow = ConfigWorkflow(
        workflow_type="infer",
        config_file=config_file,
        logging_file=os.path.join(os.path.dirname(__file__), "testing_data", "logging.conf"),
    )
    # FIXME: temp add the property for test, we should add it to some formal realtime infer properties
    workflow.add_property(name="dataflow", required=True, config_id="dataflow")

    workflow.initialize()
    workflow.dataflow.update(loader({"image": self.filename}))
    workflow.run()
    saver(workflow.dataflow)
    self.assertTrue(os.path.exists(os.path.join(self.data_dir, "image", "image_seg.nii.gz")))

    # bundle is instantiated and idle, just change the input for next inference
    workflow.dataflow.clear()
    workflow.dataflow.update(loader({"image": self.filename1}))
    workflow.run()
    saver(workflow.dataflow)
    self.assertTrue(os.path.exists(os.path.join(self.data_dir, "image1", "image1_seg.nii.gz")))

    workflow.finalize()
152+
122153
@parameterized.expand([TEST_CASE_3])
123154
def test_train_config(self, config_file):
124155
# test standard MONAI model-zoo config workflow
@@ -187,11 +218,11 @@ def test_pythonic_workflow(self):
187218
self.assertEqual(workflow.inferer.roi_size, (64, 64, 32))
188219
workflow.run()
189220
# update input data and run again
190-
workflow.dataflow.update(input_loader({"image": self.filename2}))
221+
workflow.dataflow.update(input_loader({"image": self.filename1}))
191222
workflow.run()
192223
pred = workflow.dataflow["pred"]
193224
self.assertEqual(pred.shape[2:], self.expected_shape)
194-
self.assertEqual(pred.meta["filename_or_obj"], self.filename2)
225+
self.assertEqual(pred.meta["filename_or_obj"], self.filename1)
195226
workflow.finalize()
196227

197228
def test_create_pythonic_workflow(self):
@@ -223,11 +254,11 @@ def test_create_pythonic_workflow(self):
223254

224255
workflow.run()
225256
# update input data and run again
226-
workflow.dataflow.update(input_loader({"image": self.filename2}))
257+
workflow.dataflow.update(input_loader({"image": self.filename1}))
227258
workflow.run()
228259
pred = workflow.dataflow["pred"]
229260
self.assertEqual(pred.shape[2:], self.expected_shape)
230-
self.assertEqual(pred.meta["filename_or_obj"], self.filename2)
261+
self.assertEqual(pred.meta["filename_or_obj"], self.filename1)
231262

232263
# test add properties
233264
workflow.add_property(name="net", required=True, desc="network for the training.")

tests/test_module_list.py

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -58,13 +58,17 @@ def test_transform_api(self):
5858
continue
5959
with self.subTest(n=n):
6060
basename = n[:-1] # Transformd basename is Transform
61+
62+
# remove aliases to check, do this before the assert below so that a failed assert does skip this
63+
for postfix in ("D", "d", "Dict"):
64+
remained.remove(f"{basename}{postfix}")
65+
6166
for docname in (f"{basename}", f"{basename}d"):
6267
if docname in to_exclude_docs:
6368
continue
6469
if (contents is not None) and f"`{docname}`" not in f"{contents}":
6570
self.assertTrue(False, f"please add `{docname}` to docs/source/transforms.rst")
66-
for postfix in ("D", "d", "Dict"):
67-
remained.remove(f"{basename}{postfix}")
71+
6872
self.assertFalse(remained)
6973

7074

tests/testing_data/responsive_inference.json

Lines changed: 101 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,101 @@
{
    "imports": [
        "$from collections import defaultdict"
    ],
    "bundle_root": "will override",
    "device": "$torch.device('cpu')",
    "network_def": {
        "_target_": "UNet",
        "spatial_dims": 3,
        "in_channels": 1,
        "out_channels": 2,
        "channels": [
            2,
            2,
            4,
            8,
            4
        ],
        "strides": [
            2,
            2,
            2,
            2
        ],
        "num_res_units": 2,
        "norm": "batch"
    },
    "network": "$@network_def.to(@device)",
    "dataflow": "$defaultdict()",
    "preprocessing": {
        "_target_": "Compose",
        "transforms": [
            {
                "_target_": "EnsureChannelFirstd",
                "keys": "image"
            },
            {
                "_target_": "ScaleIntensityd",
                "keys": "image"
            },
            {
                "_target_": "RandRotated",
                "_disabled_": true,
                "keys": "image"
            }
        ]
    },
    "dataset": {
        "_target_": "Dataset",
        "data": [
            "@dataflow"
        ],
        "transform": "@preprocessing"
    },
    "dataloader": {
        "_target_": "DataLoader",
        "dataset": "@dataset",
        "batch_size": 1,
        "shuffle": false,
        "num_workers": 0
    },
    "inferer": {
        "_target_": "SlidingWindowInferer",
        "roi_size": [
            64,
            64,
            32
        ],
        "sw_batch_size": 4,
        "overlap": 0.25
    },
    "postprocessing": {
        "_target_": "Compose",
        "transforms": [
            {
                "_target_": "Activationsd",
                "keys": "pred",
                "softmax": true
            },
            {
                "_target_": "AsDiscreted",
                "keys": "pred",
                "argmax": true
            }
        ]
    },
    "evaluator": {
        "_target_": "SupervisedEvaluator",
        "device": "@device",
        "val_data_loader": "@dataloader",
        "network": "@network",
        "inferer": "@inferer",
        "postprocessing": "@postprocessing",
        "amp": false,
        "epoch_length": 1
    },
    "run": [
        "[email protected]()",
        "[email protected](@evaluator.state.output[0])"
    ]
}

0 commit comments

Comments
 (0)