
Commit 5d14afd

digger-yu and tjruwase authored
fix typo deepspeed/runtime (#3663)
Co-authored-by: Olatunji Ruwase <[email protected]>
1 parent 460bec4 commit 5d14afd

File tree

3 files changed: +7 -7 lines changed

deepspeed/runtime/zero/partition_parameters.py

Lines changed: 4 additions & 4 deletions
@@ -800,9 +800,9 @@ def _validate_remote_device(self, remote_device, ds_config):
                     f'"nvme_path" in DeepSpeed Config cannot be None if remote device is {OffloadDeviceEnum.nvme}'
 
     def _post_init_method(self, module):
-        #see_memory_usage(f"Before converting parmas in {module.__class__.__name__}", force=False)
+        #see_memory_usage(f"Before converting params in {module.__class__.__name__}", force=False)
         print_rank_0(f'Converting Params in {module.__class__.__name__}', force=False)
-        see_memory_usage(f"Before converting and partitioning parmas in {module.__class__.__name__}", force=False)
+        see_memory_usage(f"Before converting and partitioning params in {module.__class__.__name__}", force=False)
 
         global param_count
         for name, param in module.named_parameters(recurse=False):
@@ -825,7 +825,7 @@ def _post_init_method(self, module):
 
             param.partition()
         see_memory_usage(
-            f"Param count {param_count}. After converting and partitioning parmas in {module.__class__.__name__}",
+            f"Param count {param_count}. After converting and partitioning params in {module.__class__.__name__}",
             force=False)
 
     def _convert_to_deepspeed_param(self, param):
@@ -1404,7 +1404,7 @@ def _reduce_scatter_gradients(self, param_list):
             partition_size = param.ds_tensor.ds_numel
             start = self.get_partition_rank() * partition_size
             end = start + partition_size
-            #print_rank_0("REduce scatter was executed for praam {param.ds_id}")
+            #print_rank_0("REduce scatter was executed for param {param.ds_id}")
             if start < param.ds_numel and end > param.ds_numel:
                 elements = param.ds_numel - start
                 param.grad.view(-1).narrow(0, start, elements).copy_(reduced_partition.narrow(0, 0, elements))
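
The change in _reduce_scatter_gradients only touches a commented-out log line, but the surrounding context shows how a reduce-scattered gradient slice is copied back when a rank's padded partition runs past the end of the parameter. A minimal standalone sketch of that boundary arithmetic (not DeepSpeed's implementation; the function and variable names here are illustrative):

import torch


def copy_reduced_partition(flat_grad: torch.Tensor, reduced_partition: torch.Tensor, rank: int,
                           partition_size: int, ds_numel: int) -> None:
    # Each rank owns a fixed-size (possibly padded) partition of the flattened gradient.
    start = rank * partition_size
    end = start + partition_size
    if start < ds_numel and end > ds_numel:
        # The last partition straddles the true parameter size: copy only the valid tail.
        elements = ds_numel - start
        flat_grad.narrow(0, start, elements).copy_(reduced_partition.narrow(0, 0, elements))
    elif start < ds_numel:
        # Partition lies fully inside the parameter: copy it whole.
        flat_grad.narrow(0, start, partition_size).copy_(reduced_partition)


# Hypothetical usage: a 10-element gradient split into 4-element padded partitions across 3 ranks.
grad = torch.zeros(10)
copy_reduced_partition(grad, torch.ones(4), rank=2, partition_size=4, ds_numel=10)
print(grad)  # rank 2 writes only the last two valid elements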

deepspeed/runtime/zero/stage3.py

Lines changed: 2 additions & 2 deletions
@@ -892,7 +892,7 @@ def initialize_optimizer_states(self):
             else:
                 self.fp32_partitioned_groups_flat[i].grad = gradient_buffer.narrow(0, 0, num_elements)
 
-            # Initialize the optimizer states with the flattended fp32 partition.
+            # Initialize the optimizer states with the flattened fp32 partition.
             if not is_adagrad:
                 self._optimizer_step(i)
 
@@ -906,7 +906,7 @@ def initialize_optimizer_states(self):
                 f'[End] Initialize optimizer states {i} / {num_subgroups} subgroups, num_elems: {num_elements}, swappable opt/param:{swappable_optimizer_subgroup}/{swappable_param_subgroup}',
                 force=False)
 
-        # Initialize the optimizer states with the flattended fp32 partition.
+        # Initialize the optimizer states with the flattened fp32 partition.
         if is_adagrad:
            self.optimizer = torch.optim.Adagrad(self.fp32_partitioned_groups_flat, **self.optimizer.defaults)
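
Both hunks touch the same comment in initialize_optimizer_states. The surrounding logic initializes optimizer state eagerly against the flattened fp32 partitions: for most optimizers a dummy step triggers lazy state allocation, while Adagrad allocates its state at construction and is therefore rebuilt once the gradients are attached. A rough standalone sketch of that pattern (illustrative only, using plain torch.optim rather than the stage-3 machinery):

import torch

# Two flattened fp32 partitions standing in for fp32_partitioned_groups_flat.
flat_partitions = [torch.zeros(8, requires_grad=True), torch.zeros(4, requires_grad=True)]

# Attach zero gradients so state tensors are allocated with the right shapes.
for p in flat_partitions:
    p.grad = torch.zeros_like(p)

optimizer = torch.optim.Adam(flat_partitions, lr=1e-3)

if not isinstance(optimizer, torch.optim.Adagrad):
    # Adam (and most optimizers) create their state lazily on the first step.
    optimizer.step()
else:
    # Adagrad allocates its state at construction, so rebuild it after the
    # dummy gradients are in place, reusing the existing hyperparameters.
    optimizer = torch.optim.Adagrad(flat_partitions, **optimizer.defaults)

print(list(optimizer.state.values())[0].keys())  # e.g. step / exp_avg / exp_avg_sq for Adam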

deepspeed/runtime/zero/stage_1_and_2.py

Lines changed: 1 addition & 1 deletion
@@ -611,7 +611,7 @@ def initialize_optimizer_states(self):
             self.single_partition_of_fp32_groups[i].grad = get_accelerator().pin_memory(
                 single_grad_partition) if self.cpu_offload else single_grad_partition
 
-        # Initialize the optimizer states with the flattended fp32 partition.
+        # Initialize the optimizer states with the flattened fp32 partition.
         # State initialization for the Adagrad optimizer occurs at construction as opposed to other optimizers
         # which do lazy initialization of the state at the first call to step.
         if isinstance(self.optimizer, torch.optim.Adagrad):
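
The context lines show the ZeRO stage 1/2 variant of the same eager initialization: when CPU offload is enabled, the flat fp32 gradient partition is placed in pinned (page-locked) host memory so host-device copies can be issued asynchronously. A small sketch of that idea using torch's pin_memory() directly rather than DeepSpeed's get_accelerator() abstraction (the flag and tensor names are illustrative):

import torch

cpu_offload = True  # illustrative stand-in for self.cpu_offload

# A flat fp32 gradient partition kept on the host.
single_grad_partition = torch.zeros(1024, dtype=torch.float32)

# Pin the buffer when offloading so copies to/from the accelerator can overlap compute;
# pinning needs a CUDA-capable runtime, so fall back to pageable memory otherwise.
if cpu_offload and torch.cuda.is_available():
    grad_partition = single_grad_partition.pin_memory()
else:
    grad_partition = single_grad_partition

print(grad_partition.is_pinned() if torch.cuda.is_available() else "CUDA unavailable: buffer left pageable")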
