Skip to content

Commit 291f535

Browse files
Pin numpy v1 for onnxruntime (#1921)
* fix offline ci * pin numpy v1 for now * pin numpy 1 in exporters as well * pin numpy v1 everywhere for transformers
1 parent 2db03d4 commit 291f535

File tree

2 files changed

+25
-21
lines changed

2 files changed

+25
-21
lines changed

.github/workflows/test_offline.yml

+23-19
Original file line numberDiff line numberDiff line change
@@ -2,9 +2,9 @@ name: Offline usage / Python - Test
22

33
on:
44
push:
5-
branches: [ main ]
5+
branches: [main]
66
pull_request:
7-
branches: [ main ]
7+
branches: [main]
88

99
concurrency:
1010
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
@@ -15,29 +15,33 @@ jobs:
1515
strategy:
1616
fail-fast: false
1717
matrix:
18-
python-version: [3.9]
18+
python-version: [3.8, 3.9]
1919
os: [ubuntu-20.04]
2020

2121
runs-on: ${{ matrix.os }}
2222
steps:
23-
- uses: actions/checkout@v2
24-
- name: Setup Python ${{ matrix.python-version }}
25-
uses: actions/setup-python@v2
26-
with:
27-
python-version: ${{ matrix.python-version }}
28-
- name: Install dependencies for pytorch export
29-
run: |
30-
pip install .[tests,exporters,onnxruntime]
31-
- name: Test with unittest
32-
run: |
33-
HF_HOME=/tmp/ huggingface-cli download hf-internal-testing/tiny-random-gpt2
23+
- name: Checkout code
24+
uses: actions/checkout@v4
3425

35-
HF_HOME=/tmp/ HF_HUB_OFFLINE=1 optimum-cli export onnx --model hf-internal-testing/tiny-random-gpt2 gpt2_onnx --task text-generation
26+
- name: Setup Python ${{ matrix.python-version }}
27+
uses: actions/setup-python@v5
28+
with:
29+
python-version: ${{ matrix.python-version }}
3630

37-
huggingface-cli download hf-internal-testing/tiny-random-gpt2
31+
- name: Install dependencies for pytorch export
32+
run: |
33+
pip install .[tests,exporters,onnxruntime]
3834
39-
HF_HUB_OFFLINE=1 optimum-cli export onnx --model hf-internal-testing/tiny-random-gpt2 gpt2_onnx --task text-generation
35+
- name: Test with pytest
36+
run: |
37+
HF_HOME=/tmp/ huggingface-cli download hf-internal-testing/tiny-random-gpt2
4038
41-
pytest tests/onnxruntime/test_modeling.py -k "test_load_model_from_hub and not from_hub_onnx" -s -vvvvv
39+
HF_HOME=/tmp/ HF_HUB_OFFLINE=1 optimum-cli export onnx --model hf-internal-testing/tiny-random-gpt2 gpt2_onnx --task text-generation
4240
43-
HF_HUB_OFFLINE=1 pytest tests/onnxruntime/test_modeling.py -k "test_load_model_from_hub and not from_hub_onnx" -s -vvvvv
41+
huggingface-cli download hf-internal-testing/tiny-random-gpt2
42+
43+
HF_HUB_OFFLINE=1 optimum-cli export onnx --model hf-internal-testing/tiny-random-gpt2 gpt2_onnx --task text-generation
44+
45+
pytest tests/onnxruntime/test_modeling.py -k "test_load_model_from_hub and not from_hub_onnx" -s -vvvvv
46+
47+
HF_HUB_OFFLINE=1 pytest tests/onnxruntime/test_modeling.py -k "test_load_model_from_hub and not from_hub_onnx" -s -vvvvv

setup.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@
1818
"transformers[sentencepiece]>=4.26.0,<4.42.0",
1919
"torch>=1.11",
2020
"packaging",
21-
"numpy",
21+
"numpy<2.0", # transformers requires numpy<2.0 https://github.com/huggingface/transformers/pull/31569
2222
"huggingface_hub>=0.8.0",
2323
"datasets",
2424
]
@@ -79,10 +79,10 @@
7979
"openvino": "optimum-intel[openvino]>=1.16.0",
8080
"nncf": "optimum-intel[nncf]>=1.16.0",
8181
"neural-compressor": "optimum-intel[neural-compressor]>=1.16.0",
82-
"graphcore": "optimum-graphcore",
8382
"habana": ["optimum-habana", "transformers >= 4.38.0, < 4.39.0"],
8483
"neuron": ["optimum-neuron[neuron]>=0.0.20", "transformers >= 4.36.2, < 4.42.0"],
8584
"neuronx": ["optimum-neuron[neuronx]>=0.0.20", "transformers >= 4.36.2, < 4.42.0"],
85+
"graphcore": "optimum-graphcore",
8686
"furiosa": "optimum-furiosa",
8787
"amd": "optimum-amd",
8888
"dev": TESTS_REQUIRE + QUALITY_REQUIRE,

0 commit comments

Comments
 (0)