Skip to content

Commit bcbe85f

Browse files
authored
Merge branch 'dev' into videodataset
2 parents 89e7c06 + 90de55b commit bcbe85f

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

58 files changed

+1075
-430
lines changed

docs/source/networks.rst

Lines changed: 35 additions & 0 deletions
Original file line number · Diff line number · Diff line change
@@ -750,3 +750,38 @@ Utilities
750750

751751
.. automodule:: monai.apps.reconstruction.networks.nets.utils
752752
:members:
753+
754+
Noise Schedulers
755+
----------------
756+
.. automodule:: monai.networks.schedulers
757+
.. currentmodule:: monai.networks.schedulers
758+
759+
`Scheduler`
760+
~~~~~~~~~~~
761+
.. autoclass:: Scheduler
762+
:members:
763+
764+
`NoiseSchedules`
765+
~~~~~~~~~~~~~~~~
766+
.. autoclass:: NoiseSchedules
767+
:members:
768+
769+
`DDPMScheduler`
770+
~~~~~~~~~~~~~~~
771+
.. autoclass:: DDPMScheduler
772+
:members:
773+
774+
`DDIMScheduler`
775+
~~~~~~~~~~~~~~~
776+
.. autoclass:: DDIMScheduler
777+
:members:
778+
779+
`PNDMScheduler`
780+
~~~~~~~~~~~~~~~
781+
.. autoclass:: PNDMScheduler
782+
:members:
783+
784+
`RFlowScheduler`
785+
~~~~~~~~~~~~~~~~
786+
.. autoclass:: RFlowScheduler
787+
:members:

monai/apps/deepedit/interaction.py

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -72,7 +72,7 @@ def __call__(self, engine: SupervisedTrainer | SupervisedEvaluator, batchdata: d
7272

7373
with torch.no_grad():
7474
if engine.amp:
75-
with torch.cuda.amp.autocast():
75+
with torch.autocast("cuda"):
7676
predictions = engine.inferer(inputs, engine.network)
7777
else:
7878
predictions = engine.inferer(inputs, engine.network)

monai/apps/deepgrow/interaction.py

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -67,7 +67,7 @@ def __call__(self, engine: SupervisedTrainer | SupervisedEvaluator, batchdata: d
6767
engine.network.eval()
6868
with torch.no_grad():
6969
if engine.amp:
70-
with torch.cuda.amp.autocast():
70+
with torch.autocast("cuda"):
7171
predictions = engine.inferer(inputs, engine.network)
7272
else:
7373
predictions = engine.inferer(inputs, engine.network)

monai/apps/detection/networks/retinanet_detector.py

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -180,7 +180,7 @@ def forward(self, images: torch.Tensor):
180180
nesterov=True,
181181
)
182182
torch.save(detector.network.state_dict(), 'model.pt') # save model
183-
detector.network.load_state_dict(torch.load('model.pt')) # load model
183+
detector.network.load_state_dict(torch.load('model.pt', weights_only=True)) # load model
184184
"""
185185

186186
def __init__(

monai/apps/detection/networks/retinanet_network.py

Lines changed: 5 additions & 5 deletions
Original file line number · Diff line number · Diff line change
@@ -88,8 +88,8 @@ def __init__(
8888

8989
for layer in self.conv.children():
9090
if isinstance(layer, conv_type): # type: ignore
91-
torch.nn.init.normal_(layer.weight, std=0.01)
92-
torch.nn.init.constant_(layer.bias, 0)
91+
torch.nn.init.normal_(layer.weight, std=0.01) # type: ignore[arg-type]
92+
torch.nn.init.constant_(layer.bias, 0) # type: ignore[arg-type]
9393

9494
self.cls_logits = conv_type(in_channels, num_anchors * num_classes, kernel_size=3, stride=1, padding=1)
9595
torch.nn.init.normal_(self.cls_logits.weight, std=0.01)
@@ -167,8 +167,8 @@ def __init__(self, in_channels: int, num_anchors: int, spatial_dims: int):
167167

168168
for layer in self.conv.children():
169169
if isinstance(layer, conv_type): # type: ignore
170-
torch.nn.init.normal_(layer.weight, std=0.01)
171-
torch.nn.init.zeros_(layer.bias)
170+
torch.nn.init.normal_(layer.weight, std=0.01) # type: ignore[arg-type]
171+
torch.nn.init.zeros_(layer.bias) # type: ignore[arg-type]
172172

173173
def forward(self, x: list[Tensor]) -> list[Tensor]:
174174
"""
@@ -297,7 +297,7 @@ def __init__(
297297
)
298298
self.feature_extractor = feature_extractor
299299

300-
self.feature_map_channels: int = self.feature_extractor.out_channels
300+
self.feature_map_channels: int = self.feature_extractor.out_channels # type: ignore[assignment]
301301
self.num_anchors = num_anchors
302302
self.classification_head = RetinaNetClassificationHead(
303303
self.feature_map_channels, self.num_anchors, self.num_classes, spatial_dims=self.spatial_dims

monai/apps/detection/utils/box_coder.py

Lines changed: 2 additions & 2 deletions
Original file line number · Diff line number · Diff line change
@@ -221,15 +221,15 @@ def decode_single(self, rel_codes: Tensor, reference_boxes: Tensor) -> Tensor:
221221

222222
pred_ctr_xyx_axis = dxyz_axis * whd_axis[:, None] + ctr_xyz_axis[:, None]
223223
pred_whd_axis = torch.exp(dwhd_axis) * whd_axis[:, None]
224-
pred_whd_axis = pred_whd_axis.to(dxyz_axis.dtype)
224+
pred_whd_axis = pred_whd_axis.to(dxyz_axis.dtype) # type: ignore[union-attr]
225225

226226
# When convert float32 to float16, Inf or Nan may occur
227227
if torch.isnan(pred_whd_axis).any() or torch.isinf(pred_whd_axis).any():
228228
raise ValueError("pred_whd_axis is NaN or Inf.")
229229

230230
# Distance from center to box's corner.
231231
c_to_c_whd_axis = (
232-
torch.tensor(0.5, dtype=pred_ctr_xyx_axis.dtype, device=pred_whd_axis.device) * pred_whd_axis
232+
torch.tensor(0.5, dtype=pred_ctr_xyx_axis.dtype, device=pred_whd_axis.device) * pred_whd_axis # type: ignore[arg-type]
233233
)
234234

235235
pred_boxes.append(pred_ctr_xyx_axis - c_to_c_whd_axis)

monai/apps/generation/maisi/networks/autoencoderkl_maisi.py

Lines changed: 4 additions & 0 deletions
Original file line number · Diff line number · Diff line change
@@ -232,6 +232,10 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
232232
if self.print_info:
233233
logger.info(f"Number of splits: {self.num_splits}")
234234

235+
if self.dim_split <= 1 and self.num_splits <= 1:
236+
x = self.conv(x)
237+
return x
238+
235239
# compute size of splits
236240
l = x.size(self.dim_split + 2)
237241
split_size = l // self.num_splits

monai/apps/mmars/mmars.py

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -241,7 +241,7 @@ def load_from_mmar(
241241
return torch.jit.load(_model_file, map_location=map_location)
242242

243243
# loading with `torch.load`
244-
model_dict = torch.load(_model_file, map_location=map_location)
244+
model_dict = torch.load(_model_file, map_location=map_location, weights_only=True)
245245
if weights_only:
246246
return model_dict.get(model_key, model_dict) # model_dict[model_key] or model_dict directly
247247

monai/apps/reconstruction/networks/blocks/varnetblock.py

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -55,7 +55,7 @@ def soft_dc(self, x: Tensor, ref_kspace: Tensor, mask: Tensor) -> Tensor:
5555
Returns:
5656
Output of DC block with the same shape as x
5757
"""
58-
return torch.where(mask, x - ref_kspace, self.zeros) * self.dc_weight
58+
return torch.where(mask, x - ref_kspace, self.zeros) * self.dc_weight # type: ignore
5959

6060
def forward(self, current_kspace: Tensor, ref_kspace: Tensor, mask: Tensor, sens_maps: Tensor) -> Tensor:
6161
"""

monai/bundle/scripts.py

Lines changed: 3 additions & 4 deletions
Original file line number · Diff line number · Diff line change
@@ -760,7 +760,7 @@ def load(
760760
if load_ts_module is True:
761761
return load_net_with_metadata(full_path, map_location=torch.device(device), more_extra_files=config_files)
762762
# loading with `torch.load`
763-
model_dict = torch.load(full_path, map_location=torch.device(device))
763+
model_dict = torch.load(full_path, map_location=torch.device(device), weights_only=True)
764764

765765
if not isinstance(model_dict, Mapping):
766766
warnings.warn(f"the state dictionary from {full_path} should be a dictionary but got {type(model_dict)}.")
@@ -1279,9 +1279,8 @@ def verify_net_in_out(
12791279
if input_dtype == torch.float16:
12801280
# fp16 can only be executed in gpu mode
12811281
net.to("cuda")
1282-
from torch.cuda.amp import autocast
12831282

1284-
with autocast():
1283+
with torch.autocast("cuda"):
12851284
output = net(test_data.cuda(), **extra_forward_args_)
12861285
net.to(device_)
12871286
else:
@@ -1330,7 +1329,7 @@ def _export(
13301329
# here we use ignite Checkpoint to support nested weights and be compatible with MONAI CheckpointSaver
13311330
Checkpoint.load_objects(to_load={key_in_ckpt: net}, checkpoint=ckpt_file)
13321331
else:
1333-
ckpt = torch.load(ckpt_file)
1332+
ckpt = torch.load(ckpt_file, weights_only=True)
13341333
copy_model_state(dst=net, src=ckpt if key_in_ckpt == "" else ckpt[key_in_ckpt])
13351334

13361335
# Use the given converter to convert a model and save with metadata, config content

0 commit comments

Comments (0)