From 81a3e6d9bcd966968d2383bb03a7a4edb2dcbd9d Mon Sep 17 00:00:00 2001 From: Haodong Duan Date: Tue, 19 Mar 2024 23:15:43 +0800 Subject: [PATCH] [Improvement] Support load_dotenv (#120) * update * update * update precommit-config * update pre-commit * update * pre-commit to format code * update * support load_env * Merge branch 'main' of github.com:open-compas * update * update * update --- Quickstart.md | 22 +++++++++++++++++++++- requirements.txt | 1 + run.py | 1 + vlmeval/__init__.py | 2 ++ vlmeval/evaluate/misc.py | 2 ++ vlmeval/smp/misc.py | 21 +++++++++++++++++++++ 6 files changed, 48 insertions(+), 1 deletion(-) diff --git a/Quickstart.md b/Quickstart.md index c6adcd51c..5bf8c2720 100644 --- a/Quickstart.md +++ b/Quickstart.md @@ -4,7 +4,9 @@ Before running the evaluation script, you need to **configure** the VLMs and set After that, you can use a single script `run.py` to inference and evaluate multiple VLMs and benchmarks at a same time. -## Step0. Installation +## Step0. Installation & Setup essential keys + +**Installation.** ```bash git clone https://github.com/open-compass/VLMEvalKit.git cd VLMEvalKit pip install -e . ``` +**Setup Keys.** + +- To infer with API models (GPT-4v, Gemini-Pro-V, etc.) or use LLM APIs as the **judge or choice extractor**, you need to first set up API keys. You can place the required keys in `$VLMEvalKit/.env` or directly set them as the environment variable. If you choose to create a `.env` file, its content will look like: + + ```bash + # The .env file, place it under $VLMEvalKit + # Alles-apin-token, for intra-org use only + ALLES= + # API Keys of Proprietary VLMs + DASHSCOPE_API_KEY= + GOOGLE_API_KEY= + OPENAI_API_KEY= + OPENAI_API_BASE= + STEPAI_API_KEY= + ``` + +- Fill the blanks with your API keys (if necessary). Those API keys will be automatically loaded when doing the inference and evaluation. + ## Step1. 
Configuration **VLM Configuration**: All VLMs are configured in `vlmeval/config.py`, for some VLMs, you need to configure the code root (MiniGPT-4, PandaGPT, etc.) or the model_weight root (LLaVA-v1-7B, etc.) before conducting the evaluation. During evaluation, you should use the model name specified in `supported_VLM` in `vlmeval/config.py` to select the VLM. For MiniGPT-4 and InstructBLIP, you also need to modify the config files in `vlmeval/vlm/misc` to configure LLM path and ckpt path. diff --git a/requirements.txt b/requirements.txt index 83350f42f..ffe0011f9 100644 --- a/requirements.txt +++ b/requirements.txt @@ -11,6 +11,7 @@ pandas>=1.5.3 pillow portalocker pycocoevalcap +python-dotenv requests rich seaborn diff --git a/run.py b/run.py index 126870346..0358fef89 100644 --- a/run.py +++ b/run.py @@ -144,4 +144,5 @@ def main(): if __name__ == '__main__': + load_env() main() diff --git a/vlmeval/__init__.py b/vlmeval/__init__.py index b8e96c287..336a61a61 100644 --- a/vlmeval/__init__.py +++ b/vlmeval/__init__.py @@ -9,3 +9,5 @@ from .utils import * from .vlm import * from .config import * + +load_env() diff --git a/vlmeval/evaluate/misc.py b/vlmeval/evaluate/misc.py index 423dce13c..d900d7f35 100644 --- a/vlmeval/evaluate/misc.py +++ b/vlmeval/evaluate/misc.py @@ -1,10 +1,12 @@ import os from vlmeval.api import OpenAIWrapper, OpenAIWrapperInternal +from vlmeval.smp import load_env INTERNAL = os.environ.get('INTERNAL', 0) def build_judge(version, **kwargs): + load_env() model_map = { 'gpt-4-turbo': 'gpt-4-1106-preview', 'gpt-4-0613': 'gpt-4-0613', diff --git a/vlmeval/smp/misc.py b/vlmeval/smp/misc.py index 2a6fa7fc0..8a0a021cd 100644 --- a/vlmeval/smp/misc.py +++ b/vlmeval/smp/misc.py @@ -147,3 +147,24 @@ def run_command(cmd): if isinstance(cmd, str): cmd = cmd.split() return subprocess.check_output(cmd) + +def load_env(): + try: + import vlmeval + except ImportError: + warnings.warn('VLMEval is not installed. 
Failed to import environment variables from .env file. ') + return + pth = osp.realpath(vlmeval.__path__[0]) + pth = osp.join(pth, '../.env') + pth = osp.realpath(pth) + if not osp.exists(pth): + warnings.warn(f'Did not detect the .env file at {pth}, failed to load. ') + return + + from dotenv import dotenv_values + values = dotenv_values(pth) + for k, v in values.items(): + if v is not None and len(v): + os.environ[k] = v + print(f'API Keys successfully loaded from {pth}') + return