fix(materialize): onnx loading with torch model available (#134) #304
name: CI

on:
  pull_request:
    branches: [main]
    types: [opened, synchronize]
  push:
    branches: [main]

# Cancel superseded in-progress runs for the same workflow and ref.
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  pytest:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: ./.github/actions/setup
        with:
          mode: all
      - name: Test Doc
        run: |
          cd doc && python bug_summary.py
      - name: Test core
        run: |
          pytest -x tests/core
      - name: Test PyTorch
        run: |
          # Install the latest pre-release wheels of the systems under test.
          pip install -r requirements/sys/torch.txt --pre --upgrade
          pip install -r requirements/sys/onnx.txt --pre --upgrade
          pip install -r requirements/sys/tvm.txt --pre --upgrade
          pip install -r requirements/sys/onnxruntime.txt --pre --upgrade
          pytest -x tests/torch
          yes | python nnsmith/cli/model_gen.py debug.viz=true model.type=torch mgen.method=symbolic
          yes | python nnsmith/cli/model_gen.py debug.viz=true model.type=torch mgen.method=symbolic-cinit
          yes | python nnsmith/cli/model_gen.py debug.viz=true model.type=torch backend.type="pt2 backend@inductor" mgen.method=concolic
          yes | python nnsmith/cli/model_gen.py model.type=torch mgen.method=symbolic-cinit mgen.rank_choices="[4]" mgen.dtype_choices="[f32]" mgen.include="[core.NCHWConv2d, core.ReLU]" mgen.patch_requires=./tests/mock/requires_patch.py
          yes | python nnsmith/cli/model_gen.py model.type=torch mgen.method=symbolic-cinit mgen.rank_choices="[4]" mgen.dtype_choices="[f32]" mgen.include="[core.NCHWConv2d, core.ReLU]" mgen.patch_requires=./tests/mock/requires_patch.py backend.type=torchjit
          yes | python nnsmith/cli/model_gen.py model.type=torch mgen.method=symbolic-cinit mgen.rank_choices="[4]" mgen.dtype_choices="[f32]" mgen.include="[core.NCHWConv2d, core.ReLU]" mgen.patch_requires=./tests/mock/requires_patch.py backend.type=torchjit mgen.grad_check=true
          yes | python nnsmith/cli/model_gen.py model.type=torch mgen.method=symbolic-cinit mgen.rank_choices="[4]" mgen.dtype_choices="[f32]" mgen.include="[core.NCHWConv2d, core.ReLU]" mgen.patch_requires=./tests/mock/requires_patch.py backend.type=pt2 mgen.grad_check=true
      - name: Test ONNX + ONNXRuntime
        run: |
          pytest -x tests/onnxruntime
          yes | python nnsmith/cli/model_gen.py model.type=onnx mgen.method=symbolic
          yes | python nnsmith/cli/model_gen.py model.type=onnx backend.type=onnxruntime mgen.method=concolic
          python nnsmith/cli/model_exec.py model.type=onnx backend.type=onnxruntime model.path=nnsmith_output/model.onnx
      - name: Test ONNX + TVM
        run: |
          pytest -x tests/tvm
      - name: Test ONNX + TRT
        run: |
          pytest -x tests/tensorrt
      - name: Test TensorFlow
        run: |
          pip install -r requirements/sys/tensorflow.txt --pre --upgrade
          pytest -x tests/tensorflow --log-cli-level=DEBUG
          yes | python nnsmith/cli/model_gen.py model.type=tensorflow mgen.method=symbolic
          python nnsmith/cli/model_exec.py model.type=tensorflow backend.type=xla model.path=nnsmith_output/model/
          yes | python nnsmith/cli/model_gen.py model.type=tensorflow mgen.method=concolic
          python nnsmith/cli/model_exec.py model.type=tensorflow backend.type=xla model.path=nnsmith_output/model/
          yes | python nnsmith/cli/fuzz.py fuzz.time=10s fuzz.root=fuzz_report model.type=tensorflow backend.type=xla filter.type="[nan,inf,test_fn,test_cls]" filter.patch=./tests/mock/filter_patch.py
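
To check the ONNX path exercised by this CI job locally, the commands below are a minimal sketch, assuming a local nnsmith checkout with the requirement files from this repository; they repeat the exact CI commands, and nnsmith_output/ is the default output directory used by model_gen.py above.

# Install the ONNX and ONNXRuntime dependencies, as the "Test ONNX + ONNXRuntime" step does.
pip install -r requirements/sys/onnx.txt --pre --upgrade
pip install -r requirements/sys/onnxruntime.txt --pre --upgrade
# Generate a random ONNX model symbolically, then execute it with the ONNXRuntime backend.
yes | python nnsmith/cli/model_gen.py model.type=onnx mgen.method=symbolic
python nnsmith/cli/model_exec.py model.type=onnx backend.type=onnxruntime model.path=nnsmith_output/model.onnx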