@@ -2,9 +2,9 @@ name: Offline usage / Python - Test
2
2
3
3
on :
4
4
push :
5
- branches : [ main ]
5
+ branches : [main]
6
6
pull_request :
7
- branches : [ main ]
7
+ branches : [main]
8
8
9
9
concurrency :
10
10
group : ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
@@ -15,29 +15,33 @@ jobs:
15
15
strategy :
16
16
fail-fast : false
17
17
matrix :
18
- python-version : [3.9]
18
+ python-version : [3.8, 3.9]
19
19
os : [ubuntu-20.04]
20
20
21
21
runs-on : ${{ matrix.os }}
22
22
steps :
23
- - uses : actions/checkout@v2
24
- - name : Setup Python ${{ matrix.python-version }}
25
- uses : actions/setup-python@v2
26
- with :
27
- python-version : ${{ matrix.python-version }}
28
- - name : Install dependencies for pytorch export
29
- run : |
30
- pip install .[tests,exporters,onnxruntime]
31
- - name : Test with unittest
32
- run : |
33
- HF_HOME=/tmp/ huggingface-cli download hf-internal-testing/tiny-random-gpt2
23
+ - name : Checkout code
24
+ uses : actions/checkout@v4
34
25
35
- HF_HOME=/tmp/ HF_HUB_OFFLINE=1 optimum-cli export onnx --model hf-internal-testing/tiny-random-gpt2 gpt2_onnx --task text-generation
26
+ - name : Setup Python ${{ matrix.python-version }}
27
+ uses : actions/setup-python@v5
28
+ with :
29
+ python-version : ${{ matrix.python-version }}
36
30
37
- huggingface-cli download hf-internal-testing/tiny-random-gpt2
31
+ - name : Install dependencies for pytorch export
32
+ run : |
33
+ pip install .[tests,exporters,onnxruntime]
38
34
39
- HF_HUB_OFFLINE=1 optimum-cli export onnx --model hf-internal-testing/tiny-random-gpt2 gpt2_onnx --task text-generation
35
+ - name : Test with pytest
36
+ run : |
37
+ HF_HOME=/tmp/ huggingface-cli download hf-internal-testing/tiny-random-gpt2
40
38
41
- pytest tests/onnxruntime/test_modeling.py -k "test_load_model_from_hub and not from_hub_onnx" -s -vvvvv
39
+ HF_HOME=/tmp/ HF_HUB_OFFLINE=1 optimum-cli export onnx --model hf-internal-testing/tiny-random-gpt2 gpt2_onnx --task text-generation
42
40
43
- HF_HUB_OFFLINE=1 pytest tests/onnxruntime/test_modeling.py -k "test_load_model_from_hub and not from_hub_onnx" -s -vvvvv
41
+ huggingface-cli download hf-internal-testing/tiny-random-gpt2
42
+
43
+ HF_HUB_OFFLINE=1 optimum-cli export onnx --model hf-internal-testing/tiny-random-gpt2 gpt2_onnx --task text-generation
44
+
45
+ pytest tests/onnxruntime/test_modeling.py -k "test_load_model_from_hub and not from_hub_onnx" -s -vvvvv
46
+
47
+ HF_HUB_OFFLINE=1 pytest tests/onnxruntime/test_modeling.py -k "test_load_model_from_hub and not from_hub_onnx" -s -vvvvv
0 commit comments