Skip to content

Commit f1c9b64

Browse files
jiayulu authored and facebook-github-bot committed
fix format (#3358)
Summary: Pull Request resolved: #3358 this is a follow up to D81154653. fix code format Reviewed By: aliafzal Differential Revision: D81817621 fbshipit-source-id: aea266b14d29ae97a3bbb36dbced4d5ccc29ab22
1 parent 4013381 commit f1c9b64

File tree

1 file changed

+4
-2
lines changed

1 file changed

+4
-2
lines changed

torchrec/distributed/utils.py

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -696,7 +696,7 @@ def quantize_embedding_modules(
696696
sharded_embs = _group_sharded_modules(module)
697697
sharded_embs.sort(key=weights_bytes_in_emb_kernel)
698698
logger.info(
699-
f"convert embedding modules to converted_dtype={converted_dtype.value} quantization"
699+
f"[TorchRec] Converting embedding modules to converted_dtype={converted_dtype.value} quantization"
700700
)
701701
converted_sparse_dtype = data_type_to_sparse_type(converted_dtype)
702702

@@ -727,7 +727,9 @@ def recreate_embedding_modules(
727727
sharded_embs.sort(key=weights_bytes_in_emb_kernel)
728728

729729
for emb_kernel in sharded_embs:
730-
converted_sparse_dtype = self._emb_kernel_to_sparse_dtype[emb_kernel] # pyre-ignore [6]: Incompatible parameter type
730+
converted_sparse_dtype = self._emb_kernel_to_sparse_dtype[
731+
emb_kernel # pyre-ignore [6]: Incompatible parameter type
732+
]
731733

732734
emb_kernel.weights_dev = _convert_weights( # pyre-ignore [16]
733735
emb_kernel.weights_dev, # pyre-ignore [6]

0 commit comments

Comments (0)