Fix gradient checkpointing with PEFT (#634)
michaelbenayoun authored Jun 19, 2024
1 parent 047c65e commit 0916a11
Showing 1 changed file with 5 additions and 1 deletion.
optimum/neuron/accelerate/utils/misc.py (6 changes: 5 additions & 1 deletion)
@@ -25,6 +25,7 @@
 from ....utils import logging
 from ...utils import is_torch_neuronx_available, is_torch_xla_available, patch_everywhere
 from ...utils.patching import Patcher
+from ...utils.peft_utils import NeuronPeftModel
 from ...utils.require_utils import requires_neuronx_distributed, requires_safetensors, requires_torch_xla


@@ -186,12 +187,15 @@ def patched_gradient_checkpointing_enable(self, gradient_checkpointing_kwargs=None):


 @requires_neuronx_distributed
-def apply_activation_checkpointing(model: Union["PreTrainedModel", "NxDPPModel"]):
+def apply_activation_checkpointing(model: Union["PreTrainedModel", "NxDPPModel", NeuronPeftModel]):
     from neuronx_distributed.pipeline import NxDPPModel
     from neuronx_distributed.utils.activation_checkpoint import (
         apply_activation_checkpointing as nxd_apply_activation_checkpointing,
     )

+    if isinstance(model, NeuronPeftModel):
+        model._prepare_model_for_gradient_checkpointing(model.get_base_model())
+
     if isinstance(model, NxDPPModel):
         modules = model.local_module.modules()
     else:
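The fix in context: before this commit, apply_activation_checkpointing only special-cased NxDPPModel, so a NeuronPeftModel fell through without its base model being prepared for gradient checkpointing (peft's _prepare_model_for_gradient_checkpointing hook, which, roughly, makes the frozen base model's inputs require gradients so checkpointed segments still receive them). The new branch runs that preparation before the modules are handed to neuronx_distributed.

A minimal usage sketch of the fixed code path, not part of the commit. It assumes a Neuron environment with optimum-neuron, peft, transformers, and neuronx_distributed installed; the model name and LoRA config are illustrative, and constructing NeuronPeftModel directly is a shortcut for how optimum-neuron's training flow normally wraps the model.

    from peft import LoraConfig
    from transformers import AutoModelForCausalLM

    from optimum.neuron.accelerate.utils.misc import apply_activation_checkpointing
    from optimum.neuron.utils.peft_utils import NeuronPeftModel

    # Illustrative base model and adapter config.
    base_model = AutoModelForCausalLM.from_pretrained("gpt2")
    peft_model = NeuronPeftModel(base_model, LoraConfig(r=8, lora_alpha=16))

    # With this commit, the isinstance(model, NeuronPeftModel) branch runs first:
    # the wrapper prepares its base model for gradient checkpointing before the
    # modules are passed to neuronx_distributed's activation-checkpointing helper.
    apply_activation_checkpointing(peft_model)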
