Commit fd939be

[pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
1 parent 004a5d2 commit fd939be

File tree: 1 file changed (+32, -37 lines)

hugging_face/hugging_face_pipeline_for_monai.ipynb

Lines changed: 32 additions & 37 deletions
@@ -173,7 +173,7 @@
 " strides=(2, 2, 2, 2),\n",
 " num_res_units=2,\n",
 " norm=\"batch\",\n",
-" **kwargs\n",
+" **kwargs,\n",
 " ):\n",
 " super().__init__(**kwargs)\n",
 " self.spatial_dims = spatial_dims\n",
@@ -182,7 +182,7 @@
 " self.channels = channels\n",
 " self.strides = strides\n",
 " self.num_res_units = num_res_units\n",
-" self.norm=norm\n"
+" self.norm = norm"
 ]
 },
 {
@@ -214,7 +214,7 @@
 " channels=config.channels,\n",
 " strides=config.strides,\n",
 " num_res_units=config.num_res_units,\n",
-" norm=config.norm\n",
+" norm=config.norm,\n",
 " )\n",
 "\n",
 " def forward(self, x):\n",
@@ -271,20 +271,15 @@
 "\n",
 " def _init_preprocessing_transforms(self, image_key=\"image\", load_image=True):\n",
 " transform_list = [LoadImaged(keys=image_key)] if load_image else []\n",
-" transform_list = transform_list.extend([\n",
-" EnsureChannelFirstd(keys=image_key),\n",
-" Orientationd(keys=image_key, axcodes=\"RAS\"),\n",
-" Spacingd(keys=image_key, pixdim=(1.5, 1.5, 2.0), mode=\"bilinear\"),\n",
-" ScaleIntensityRanged(\n",
-" keys=image_key,\n",
-" a_min=-57,\n",
-" a_max=164,\n",
-" b_min=0,\n",
-" b_max=1,\n",
-" clip=True\n",
-" ),\n",
-" EnsureTyped(keys=image_key)\n",
-" ])\n",
+" transform_list = transform_list.extend(\n",
+" [\n",
+" EnsureChannelFirstd(keys=image_key),\n",
+" Orientationd(keys=image_key, axcodes=\"RAS\"),\n",
+" Spacingd(keys=image_key, pixdim=(1.5, 1.5, 2.0), mode=\"bilinear\"),\n",
+" ScaleIntensityRanged(keys=image_key, a_min=-57, a_max=164, b_min=0, b_max=1, clip=True),\n",
+" EnsureTyped(keys=image_key),\n",
+" ]\n",
+" )\n",
 " preprocessing_transforms = Compose(transform_list)\n",
 " return preprocessing_transforms\n",
 "\n",
@@ -306,33 +301,35 @@
 " transform=copy.deepcopy(self.preprocessing_transforms),\n",
 " orig_keys=image_key,\n",
 " nearest_interp=False,\n",
-" to_tensor=True\n",
+" to_tensor=True,\n",
 " ),\n",
 " AsDiscreted(keys=pred_key, argmax=True),\n",
 " ]\n",
-" transform_list = transform_list.append(SaveImaged(\n",
-" keys=pred_key,\n",
-" output_dir=output_dir,\n",
-" output_ext=output_ext,\n",
-" output_dtype=output_dtype,\n",
-" output_postfix=output_postfix,\n",
-" separate_folder=separate_folder\n",
-" )) if save_output else transform_list\n",
+" transform_list = (\n",
+" transform_list.append(\n",
+" SaveImaged(\n",
+" keys=pred_key,\n",
+" output_dir=output_dir,\n",
+" output_ext=output_ext,\n",
+" output_dtype=output_dtype,\n",
+" output_postfix=output_postfix,\n",
+" separate_folder=separate_folder,\n",
+" )\n",
+" )\n",
+" if save_output\n",
+" else transform_list\n",
+" )\n",
 "\n",
 " postprocessing_transforms = Compose(transform_list)\n",
 " return postprocessing_transforms\n",
-" \n",
+"\n",
 " def _init_inferer(\n",
 " self,\n",
 " roi_size=(96, 96, 96),\n",
 " overlap=0.5,\n",
 " sw_batch_size=4,\n",
 " ):\n",
-" return SlidingWindowInferer(\n",
-" roi_size=roi_size,\n",
-" sw_batch_size=sw_batch_size,\n",
-" overlap=overlap\n",
-" )\n",
+" return SlidingWindowInferer(roi_size=roi_size, sw_batch_size=sw_batch_size, overlap=overlap)\n",
 "\n",
 " def _sanitize_parameters(self, **kwargs):\n",
 " preprocessing_kwargs = {}\n",
@@ -359,9 +356,7 @@
 " ):\n",
 " for key, value in kwargs.items():\n",
 " if key in self._preprocess_params and value != self._preprocess_params[key]:\n",
-" logging.warning(\n",
-" f\"Please set the parameter {key} during initialization.\"\n",
-" )\n",
+" logging.warning(f\"Please set the parameter {key} during initialization.\")\n",
 "\n",
 " if key not in self.PREPROCESSING_EXTRA_ARGS:\n",
 " logging.warning(f\"Cannot set parameter {key} for preprocessing.\")\n",
@@ -375,7 +370,7 @@
 " ):\n",
 " inputs.to(self.device)\n",
 " self.model.to(self.device)\n",
-" mode=eval_mode,\n",
+" mode = (eval_mode,)\n",
 " outputs = {Keys.IMAGE: inputs, Keys.LABEL: None}\n",
 " with mode(self.model):\n",
 " if amp:\n",
@@ -391,7 +386,7 @@
 " logging.warning(f\"Cannot set parameter {key} for postprocessing.\")\n",
 "\n",
 " outputs = self.postprocessing_transforms(decollate_batch(outputs))\n",
-" return outputs\n"
+" return outputs"
 ]
 },
 {
