diff --git a/.github/workflows/python-app.yml b/.github/workflows/python-app.yml index bd615c61..16583f48 100644 --- a/.github/workflows/python-app.yml +++ b/.github/workflows/python-app.yml @@ -27,6 +27,7 @@ jobs: run: | python -m pip install --upgrade pip pip install flake8 pytest + pip install -e . if [ -f requirements.txt ]; then pip install -r requirements.txt; fi - name: Lint with flake8 run: | @@ -34,7 +35,3 @@ jobs: - name: Test with pytest run: | pytest - - name: Upload coverage reports to Codecov - uses: codecov/codecov-action@v3 - env: - CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} diff --git a/aspen/modelloader.py b/aspen/modelloader.py index 49a0484d..4477d42f 100644 @@ -122,10 +122,10 @@ def load_random_lora_7b_weight(model: LlamaModel, adapter_name, "lora_B", lora_b_weight) -def save_lora_model(model: LlamaModel, config: Dict[str, str]): +def save_lora_model(model: LlamaModel, config: Dict[str, str], dir_suffix=""): for lora_config in config["lora"]: lora_name = lora_config["name"] - lora_output_dir = lora_config["output"] + lora_output_dir = lora_config["output"] + ("_" + dir_suffix if dir_suffix else "") if not os.path.exists(lora_output_dir): os.makedirs(lora_output_dir) diff --git a/legacy.py b/legacy.py index 23b57211..57bd542d 100644 @@ -80,6 +80,6 @@ def init_lora_model(llama_model: aspen.LlamaModel): optimizer.step() if step_cnt % config["save_step"] == 0: - aspen.save_lora_model(llama_model, config) + aspen.save_lora_model(llama_model, config, f"{step_cnt}") aspen.save_lora_model(llama_model, config) diff --git a/requirements.txt b/requirements.txt index 8e348949..625740b2 100644 @@ -4,3 +4,4 @@ xformers transformers bitsandbytes sentencepiece +scipy