From c69d1ace13a6bba3168173e2ae9bac19ab864df3 Mon Sep 17 00:00:00 2001
From: Sarah Yurick
Date: Tue, 13 Aug 2024 13:16:37 -0700
Subject: [PATCH 1/2] check for pd series

---
 crossfit/op/tokenize.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/crossfit/op/tokenize.py b/crossfit/op/tokenize.py
index 6a64855..958a0d7 100644
--- a/crossfit/op/tokenize.py
+++ b/crossfit/op/tokenize.py
@@ -17,6 +17,7 @@
 import cudf
 import cupy as cp
+import pandas as pd
 import torch
 from cudf.core.subword_tokenizer import SubwordTokenizer, _cast_to_appropriate_type
 from cudf.utils.hash_vocab_utils import hash_vocab
@@ -61,6 +62,8 @@ def tokenize_strings(self, sentences, max_length=None):
         self.padding_side = tokenizer.padding_side
         self.pad_token_id = tokenizer.pad_token_id
 
+        if isinstance(sentences, pd.Series):
+            sentences = cudf.from_pandas(sentences)
         if isinstance(sentences, cudf.Series):
             sentences = sentences.to_arrow().to_pylist()
 

From 1aa28549b2f0538a29cd3da3497c2a74be0626d0 Mon Sep 17 00:00:00 2001
From: Sarah Yurick
Date: Tue, 13 Aug 2024 14:21:44 -0700
Subject: [PATCH 2/2] add pytest

---
 crossfit/op/tokenize.py   |  4 ++--
 tests/op/test_tokenize.py | 11 +++++++++++
 2 files changed, 13 insertions(+), 2 deletions(-)

diff --git a/crossfit/op/tokenize.py b/crossfit/op/tokenize.py
index 958a0d7..901f754 100644
--- a/crossfit/op/tokenize.py
+++ b/crossfit/op/tokenize.py
@@ -62,10 +62,10 @@ def tokenize_strings(self, sentences, max_length=None):
         self.padding_side = tokenizer.padding_side
         self.pad_token_id = tokenizer.pad_token_id
 
-        if isinstance(sentences, pd.Series):
-            sentences = cudf.from_pandas(sentences)
         if isinstance(sentences, cudf.Series):
             sentences = sentences.to_arrow().to_pylist()
+        elif isinstance(sentences, pd.Series):
+            sentences = sentences.to_list()
 
         with torch.no_grad():
             tokenized_data = tokenizer.batch_encode_plus(

diff --git a/tests/op/test_tokenize.py b/tests/op/test_tokenize.py
index 6123208..8c93bca 100644
--- a/tests/op/test_tokenize.py
+++ b/tests/op/test_tokenize.py
@@ -18,6 +18,8 @@
 cp = pytest.importorskip("cupy")
 cudf = pytest.importorskip("cudf")
 dask_cudf = pytest.importorskip("dask_cudf")
+dd = pytest.importorskip("dask.dataframe")
+pd = pytest.importorskip("pandas")
 transformers = pytest.importorskip("transformers")
 torch = pytest.importorskip("torch")
 
@@ -144,3 +146,12 @@ def test_clip_tokens_no_clipping_needed():
     assert result["attention_mask"].shape == (2, 3)
     assert torch.equal(result["input_ids"].to("cpu"), torch.tensor([[1, 2, 3], [4, 5, 6]]))
     assert torch.equal(result["attention_mask"].to("cpu"), torch.tensor([[1, 1, 1], [1, 1, 1]]))
+
+
+def test_tokenize_strings_cpu(model_name="microsoft/deberta-v3-base"):
+    model = cf.HFModel(model_name)
+    tokenizer = op.Tokenizer(model, cols=["text"], tokenizer_type="spm")
+    input_strings = ["hello world", "this is a sentence"]
+    ddf = dd.from_pandas(pd.DataFrame({"text": input_strings}), npartitions=1)
+    results = tokenizer(ddf)
+    results = results.compute()
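
Below is a minimal usage sketch of the pandas/CPU path these two patches enable, mirroring the
new test_tokenize_strings_cpu test above; the model name, column name, and input strings are the
illustrative values from that test, and the cf.HFModel / op.Tokenizer calls are assumed to match
the imports already used in tests/op/test_tokenize.py:

    import crossfit as cf
    import dask.dataframe as dd
    import pandas as pd
    from crossfit import op

    # A pandas-backed (CPU) dask DataFrame; before these patches tokenize_strings
    # only special-cased cudf.Series input.
    df = pd.DataFrame({"text": ["hello world", "this is a sentence"]})
    ddf = dd.from_pandas(df, npartitions=1)

    model = cf.HFModel("microsoft/deberta-v3-base")
    tokenizer = op.Tokenizer(model, cols=["text"], tokenizer_type="spm")

    # pd.Series partitions are now converted with .to_list() before batch_encode_plus.
    tokens = tokenizer(ddf).compute()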