Merge pull request #108 from joaonadkarni/update-dependencies
Make dependencies looser
jalammar authored Aug 15, 2024
2 parents e292773 + d3998ba commit 2a38a13
Showing 5 changed files with 29 additions and 23 deletions.
.github/workflows/build_and_test.yml (2 changes: 1 addition & 1 deletion)
@@ -8,7 +8,7 @@ jobs:
     runs-on: ubuntu-20.04
     strategy:
       matrix:
-        python-version: [3.6, 3.7, 3.8]
+        python-version: ["3.7", "3.8", "3.9", "3.10", "3.11"]

     steps:
       - uses: actions/checkout@v2
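A note on the new matrix values being quoted: YAML parses an unquoted 3.10 as the float 3.1, so the versions must be strings for actions/setup-python to receive "3.10" rather than "3.1". A quick sanity check with PyYAML (already one of this project's dependencies) shows the trap:

    import yaml

    # Unquoted version numbers are YAML floats: 3.10 collapses to 3.1.
    print(yaml.safe_load("python-version: [3.9, 3.10]"))
    # {'python-version': [3.9, 3.1]}

    # Quoted values stay strings, which is what the action expects.
    print(yaml.safe_load('python-version: ["3.9", "3.10"]'))
    # {'python-version': ['3.9', '3.10']}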
.gitignore (3 changes: 3 additions & 0 deletions)
@@ -1,3 +1,6 @@
+# ignore files dumped in tests
+tmp/
+
 *.py[cod]
 __pycache__

requirements.txt (22 changes: 11 additions & 11 deletions)
@@ -1,11 +1,11 @@
-matplotlib~=3.3.1
-numpy~=1.19.1
-ipython~=7.16.1
-scikit-learn~=0.24.2
-seaborn~=0.11.0
-transformers~=4.6.1
-pytest~=6.1.2
-setuptools~=49.6.0
-torch~=1.9.0
-PyYAML==5.4.1
-captum==0.4.1
+matplotlib>=3.3
+numpy>=1.19
+ipython>=7.16
+scikit-learn>=0.24.2,<2
+seaborn>=0.11
+transformers~=4.6
+pytest>=6.1.2
+setuptools>=49.6.0
+torch>=1.9.0,<3
+PyYAML>=6.0
+captum~=0.4.1
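The substance of the change is the move from ~= (compatible release) pins to >= floors. A pin like matplotlib~=3.3.1 means >=3.3.1,<3.4 and locks the minor series, whereas matplotlib>=3.3 accepts any later release; upper bounds survive only where the next major version is being guarded against (scikit-learn<2, torch<3), and transformers alone keeps a compatible-release pin (~=4.6), presumably because its API moves fastest. A short sketch using the packaging library (the same one behind the version.parse calls in lm.py below) illustrates the semantics:

    from packaging.specifiers import SpecifierSet

    compatible = SpecifierSet("~=3.3.1")    # equivalent to >=3.3.1,<3.4
    loose = SpecifierSet(">=3.3")           # any release from 3.3 onward
    capped = SpecifierSet(">=0.24.2,<2")    # loose, but excludes the next major

    print("3.4.0" in compatible)  # False: ~= locks the 3.3.x series
    print("3.4.0" in loose)       # True
    print("1.3.0" in capped)      # True
    print("2.0.0" in capped)      # False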
setup.py (10 changes: 6 additions & 4 deletions)
@@ -52,6 +52,8 @@ def read(*names, **kwargs):
         'Programming Language :: Python :: 3.7',
         'Programming Language :: Python :: 3.8',
         'Programming Language :: Python :: 3.9',
+        'Programming Language :: Python :: 3.10',
+        'Programming Language :: Python :: 3.11',
         'Programming Language :: Python :: Implementation :: CPython',
         'Programming Language :: Python :: Implementation :: PyPy',
         'Topic :: Utilities',
@@ -63,12 +65,12 @@ def read(*names, **kwargs):
     keywords=[
         'Natural Language Processing', 'Explainable AI', 'keyword3',
     ],
-    python_requires='!=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',
+    python_requires='!=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*',
     install_requires=[
         "transformers ~= 4.2",
-        "seaborn ~= 0.11",
-        "scikit-learn~=0.23",
-        "PyYAML~=5.4",
+        "seaborn >= 0.11",
+        "scikit-learn>=0.23,<2",
+        "PyYAML>=6.0",
         "captum ~= 0.4"
     ],
     extras_require={
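The python_requires change raises the effective floor from 3.5 to 3.7 by extending the exclusion list, matching the dropped 3.6 entry in the CI matrix. The same SpecifierSet machinery can confirm what the new string admits; note that, written as pure exclusions with no >= floor, it technically leaves pre-3.0 interpreters unconstrained (a quick illustrative check, not part of the commit):

    from packaging.specifiers import SpecifierSet

    requires = SpecifierSet(
        "!=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*"
    )

    print("3.6.15" in requires)  # False: 3.6 is now excluded
    print("3.7.0" in requires)   # True
    print("3.11.4" in requires)  # True
    print("2.7.18" in requires)  # True: exclusion-only strings do not rule out
                                 # old majors the way an explicit >=3.7 would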
src/ecco/lm.py (15 changes: 8 additions & 7 deletions)
@@ -199,12 +199,12 @@ def generate(self, input_str: str,
             raise ValueError(
                 "max_length set to {} while input token has more tokens ({}). Consider increasing max_length" \
                     .format(max_length, cur_len))

         # Get decoder input ids
         if self.model_type == 'enc-dec':  # FIXME: only done because causal LMs like GPT-2 have the _prepare_decoder_input_ids_for_generation method but do not use it
             assert len(input_ids.size()) == 2  # will break otherwise
             if version.parse(transformers.__version__) >= version.parse('4.13'):

                 # following the code in https://github.com/huggingface/transformers/blob/d0c1aebea467af499331234e7b285a6bf91ea073/tests/generation/test_utils.py#L2099
                 model_kwargs = self.model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {})
                 decoder_input_ids, model_kwargs = self.model._prepare_decoder_input_ids_for_generation(
@@ -649,7 +649,7 @@ def display_input_sequence(self, input_ids):
             parentDiv: '{viz_id}',
             data: {json.dumps(data)},
             tokenization_config: {json.dumps(self.model_config['tokenizer_config'])}
         }})
     }}, function (err) {{
         console.log(err);
@@ -729,10 +729,11 @@ def sample_output_token(scores, do_sample, temperature, top_k, top_p):
     if temperature != 1.0:
         scores = scores / temperature
     # Top-p/top-k filtering
-    next_token_logscores = transformers.generation_utils. \
-        top_k_top_p_filtering(scores,
-                              top_k=top_k,
-                              top_p=top_p)
+    if version.parse(transformers.__version__) >= version.parse('4.25.1'):
+        top_k_top_p_filtering_fn = transformers.generation.utils.top_k_top_p_filtering
+    else:
+        top_k_top_p_filtering_fn = transformers.generation_utils.top_k_top_p_filtering
+    next_token_logscores = top_k_top_p_filtering_fn(scores, top_k=top_k, top_p=top_p)
     # Sample
     probs = F.softmax(next_token_logscores, dim=-1)

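The final hunk tracks a reorganization in transformers: top_k_top_p_filtering moved from transformers.generation_utils to transformers.generation.utils, so the code now picks the import path by version at call time. For readers unfamiliar with the filtering step itself, here is a minimal self-contained sketch of top-k/top-p (nucleus) filtering in the same spirit, following the widely circulated Hugging Face recipe; it is an illustration, not the library's exact implementation:

    import torch
    import torch.nn.functional as F

    def top_k_top_p_filter(logits: torch.Tensor, top_k: int = 0, top_p: float = 1.0,
                           filter_value: float = float("-inf")) -> torch.Tensor:
        """Mask logits outside the top-k set and outside the top-p probability mass."""
        if top_k > 0:
            # Remove every token scoring below the k-th best in its row.
            kth_best = torch.topk(logits, top_k)[0][..., -1, None]
            logits = logits.masked_fill(logits < kth_best, filter_value)
        if top_p < 1.0:
            sorted_logits, sorted_indices = torch.sort(logits, descending=True)
            cumulative = F.softmax(sorted_logits, dim=-1).cumsum(dim=-1)
            # Mark tokens after the cumulative probability passes top_p,
            # shifting right so the first token crossing the threshold survives.
            to_remove = cumulative > top_p
            to_remove[..., 1:] = to_remove[..., :-1].clone()
            to_remove[..., 0] = False
            # Map the mask from sorted order back to original token positions.
            mask = to_remove.scatter(-1, sorted_indices, to_remove)
            logits = logits.masked_fill(mask, filter_value)
        return logits

    # Example: keep the nucleus covering 90% of the probability of one row of scores.
    scores = torch.tensor([[2.0, 1.0, 0.5, -1.0, -3.0]])
    probs = F.softmax(top_k_top_p_filter(scores, top_p=0.9), dim=-1)

Sampling then draws from probs, exactly as sample_output_token does with torch.multinomial after its own softmax.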
