From a122a44150682a3cb6e55b195fc46dfdfc9eea0a Mon Sep 17 00:00:00 2001
From: Ahmed Gheith
Date: Fri, 21 Jun 2024 11:47:10 -0700
Subject: [PATCH] adhere to lazy import rules (#806)

Summary:
Pull Request resolved: https://github.com/facebookresearch/ClassyVision/pull/806

Lazy import changes `Python` import semantics, specifically when it comes to initialization of packages/modules: https://www.internalfb.com/intern/wiki/Python/Cinder/Onboarding/Tutorial/Lazy_Imports/Troubleshooting/

For example, this pattern is not guaranteed to work:

```
import torch.optim
...
torch.optim._multi_tensor.Adam  # may fail to resolve _multi_tensor
```

And this is guaranteed to work:

```
import torch.optim._multi_tensor
...
torch.optim._multi_tensor.Adam  # will always work
```

A recent change to `PyTorch` changed module initialization logic in a way that exposed this issue.

But the code has been working for years, so why does it break now? That is the nature of undefined behavior: any change in the environment (in this case, the `PyTorch` code base) can make it fail.

Differential Revision: D58881291
---
 classy_vision/optim/adamw_mt.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/classy_vision/optim/adamw_mt.py b/classy_vision/optim/adamw_mt.py
index 917e320f6..e511ae444 100644
--- a/classy_vision/optim/adamw_mt.py
+++ b/classy_vision/optim/adamw_mt.py
@@ -7,6 +7,7 @@
 from typing import Any, Dict, Tuple
 
 import torch.optim
+from torch.optim import _multi_tensor
 
 from . import ClassyOptimizer, register_optimizer
 
@@ -30,7 +31,7 @@ def __init__(
         self._amsgrad = amsgrad
 
     def prepare(self, param_groups) -> None:
-        self.optimizer = torch.optim._multi_tensor.AdamW(
+        self.optimizer = _multi_tensor.AdamW(
             param_groups,
             lr=self._lr,
             betas=self._betas,
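
For reference, a minimal standalone sketch of the eager-import pattern the diff adopts. Note that `torch.optim._multi_tensor` is a private PyTorch module whose availability depends on the PyTorch version, and the helper `build_multi_tensor_adamw` is hypothetical, not part of the patch:

```
# Eager import: binds torch.optim._multi_tensor at import time, so the
# attribute access below cannot fail under lazy-import semantics.
from torch.optim import _multi_tensor  # private module; may not exist in all PyTorch versions


def build_multi_tensor_adamw(
    param_groups,
    lr=1e-3,
    betas=(0.9, 0.999),
    weight_decay=0.01,
    amsgrad=False,
):
    # Hypothetical helper mirroring the patched prepare() call in adamw_mt.py.
    return _multi_tensor.AdamW(
        param_groups,
        lr=lr,
        betas=betas,
        weight_decay=weight_decay,
        amsgrad=amsgrad,
    )
```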