Add test
Signed-off-by: Vibhu Jawa <[email protected]>
VibhuJawa committed Oct 3, 2024
1 parent 7018229 commit 5569026
Showing 1 changed file with 30 additions and 0 deletions.
tests/op/test_fit_memory_estimate.py (30 additions, 0 deletions)
@@ -0,0 +1,30 @@
import pytest
from sklearn.linear_model import LinearRegression

from crossfit.backend.torch.hf.memory_curve_utils import fit_memory_estimate_curve

transformers = pytest.importorskip("transformers")
torch = pytest.importorskip("torch")
rmm_torch_allocator = pytest.importorskip(
    "rmm.allocators.torch", reason="rmm_torch_allocator is not available."
).rmm_torch_allocator

MODEL_NAME = "microsoft/deberta-v3-base"

# The allocator swap has to happen globally, before any CUDA allocations are made.
# TODO: Ask for a better way to scope this to a single test.
torch.cuda.memory.change_current_allocator(rmm_torch_allocator)


def test_fit_memory_estimate_curve(tmp_path):
    # Setup: fit the memory-estimation curve for the model and persist it to a temp path.
    mem_model_path = tmp_path / "test_memory_model.joblib"
    model = transformers.AutoModel.from_pretrained(MODEL_NAME).to("cuda")
    result = fit_memory_estimate_curve(
        model=model, path_or_name=MODEL_NAME, mem_model_path=str(mem_model_path)
    )
    # Assertions
    assert isinstance(result, LinearRegression)
    assert result.coef_.shape == (3,)  # [batch_size, seq_len, seq_len**2]
    assert isinstance(result.intercept_, float)
    assert mem_model_path.exists()
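For context, a minimal sketch of how the persisted curve might be consumed downstream, assuming the [batch_size, seq_len, seq_len**2] feature layout noted in the test's coefficient comment; predict_memory is a hypothetical helper for illustration (not part of crossfit's API), and the unit of the predicted value depends on how the curve was fitted:

import joblib
import numpy as np


def predict_memory(mem_model_path: str, batch_size: int, seq_len: int) -> float:
    # Load the LinearRegression fitted by fit_memory_estimate_curve.
    reg = joblib.load(mem_model_path)
    # Feature layout assumed from the test's coefficient comment:
    # [batch_size, seq_len, seq_len**2].
    features = np.array([[batch_size, seq_len, seq_len**2]])
    return float(reg.predict(features)[0])


# e.g. estimated memory usage for a batch of 16 sequences of length 512:
# predict_memory(str(mem_model_path), 16, 512)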
