torch compile and cuda alloc improvements (#1755)
* enable experimental expandable_segments

* HF trainer seems to be missing torch compile

* disable PYTORCH_CUDA_ALLOC_CONF to see if that fixes CI/CD
winglian committed Jul 16, 2024
1 parent e1725ae commit cfc533a
Showing 2 changed files with 19 additions and 0 deletions.
src/axolotl/core/trainer_builder.py (12 additions, 0 deletions)
@@ -290,6 +290,18 @@ def __init__(
         if self.args.orpo_alpha:
             self.loss_fct = torch.nn.CrossEntropyLoss(reduction="none")
 
+    def _wrap_model(self, model, training=True, dataloader=None):
+        if self.args.torch_compile:
+            torch._dynamo.config.accumulated_cache_size_limit = (  # pylint: disable=protected-access
+                256
+            )
+            model = torch.compile(
+                model,
+                backend=self.args.torch_compile_backend,
+                mode=self.args.torch_compile_mode,
+            )
+        return super()._wrap_model(model, training=training, dataloader=dataloader)
+
     def create_optimizer(self):
         if (
             self.args.loraplus_lr_ratio is None
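For reference, a minimal standalone sketch of the same pattern outside the trainer (the toy nn.Linear model and the literal backend/mode strings are illustrative; in the trainer they come from self.args):

import torch
import torch.nn as nn

# Raise dynamo's recompile budget before compiling; the lower default can
# make dynamo fall back to eager execution on recompile-heavy models.
torch._dynamo.config.accumulated_cache_size_limit = 256  # pylint: disable=protected-access

model = nn.Linear(16, 16)  # stand-in for the real model
compiled = torch.compile(model, backend="inductor", mode="default")
out = compiled(torch.randn(2, 16))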
src/axolotl/train.py (7 additions, 0 deletions)
@@ -52,6 +52,13 @@ class TrainDatasetMeta:
 def train(
     *, cfg: DictDefault, cli_args: TrainerCliArgs, dataset_meta: TrainDatasetMeta
 ) -> Tuple[Union[PeftModel, PreTrainedModel], PreTrainedTokenizer]:
+    # enable expandable segments for cuda allocation to improve VRAM usage
+    # torch_version = torch.__version__.split(".")
+    # torch_major, torch_minor = int(torch_version[0]), int(torch_version[1])
+    # if torch_major == 2 and torch_minor >= 2:
+    #     if os.getenv("PYTORCH_CUDA_ALLOC_CONF") is None:
+    #         os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+
     # load the tokenizer first
     LOG.debug(
         f"loading tokenizer... {cfg.tokenizer_config or cfg.base_model_config}",
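The commented-out allocator tweak, written out as a runnable sketch (it mirrors the guard above; the env var must be set before the first CUDA allocation to take effect):

import os
import torch

# Opt in to the experimental expandable-segments CUDA allocator on
# torch 2.2+, but only if the user hasn't already configured the allocator.
major, minor = (int(v) for v in torch.__version__.split(".")[:2])
if major == 2 and minor >= 2 and os.getenv("PYTORCH_CUDA_ALLOC_CONF") is None:
    os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"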
