[Optimize] 1.5x times faster training of MLP #74

Workflow file for this run

name: CI
on:
  push:
    branches:
      - master
  pull_request:
    branches:
      - master
jobs:
  test:
    name: ${{ matrix.lisp }} on ${{ matrix.os }}
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        lisp: [sbcl-bin]
        os: [ubuntu-latest]
        target:
          - normal
        archcpu:
          - AVX2
    steps:
      - uses: actions/checkout@v1
      - name: Setting up environments
        env:
          LISP: ${{ matrix.lisp }}
        run: |
          curl -L https://raw.githubusercontent.com/roswell/roswell/master/scripts/install-for-ci.sh | sh
      - name: Update $PATH
        run: |
          echo $PATH
          echo "PATH=$HOME/bin:$PATH" >> $GITHUB_ENV
      - name: Check $PATH
        run: echo $PATH
      - name: Downloading OpenBLAS
        run: |
          sudo apt install libblas-dev
      - name: Operating tests
        run: |
          ros config set dynamic-space-size 4gb
          ros --eval '(progn (defparameter cl-user::*cl-waffe-config* `((:libblas "libblas.so"))))' ./roswell/cl-waffe2-test.ros
          ros --eval '(progn (defparameter cl-user::*cl-waffe-config* `((:libblas "libblas.so"))))' ./roswell/cl-waffe2-test.ros vm-test
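For reference, the "Operating tests" step above can also be run locally. The commands below are a minimal sketch taken from that step; it assumes Roswell (with sbcl-bin) and libblas-dev are already installed on the machine, as the earlier workflow steps set up in CI.

# Local reproduction of the "Operating tests" step (assumes Roswell,
# sbcl-bin, and libblas-dev are installed, as in the workflow above).
ros config set dynamic-space-size 4gb
ros --eval '(progn (defparameter cl-user::*cl-waffe-config* `((:libblas "libblas.so"))))' ./roswell/cl-waffe2-test.ros
ros --eval '(progn (defparameter cl-user::*cl-waffe-config* `((:libblas "libblas.so"))))' ./roswell/cl-waffe2-test.ros vm-test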