diff --git a/.github/workflows/docker/hpu.dockerfile b/.github/workflows/docker/hpu.dockerfile
deleted file mode 100644
index b400ef8b..00000000
--- a/.github/workflows/docker/hpu.dockerfile
+++ /dev/null
@@ -1,27 +0,0 @@
-FROM vault.habana.ai/gaudi-docker/1.18.0/ubuntu22.04/habanalabs/pytorch-installer-2.4.0:latest as hpu
-
-ENV LANG=en_US.UTF-8
-ENV PYTHONPATH=/root:/usr/lib/habanalabs/
-ARG REPO=https://github.com/intel/genaieval.git
-ARG REPO_PATH=""
-ARG BRANCH=main
-
-RUN apt-get update && \
-    apt-get install git-lfs && \
-    git-lfs install
-
-# Download code
-SHELL ["/bin/bash", "--login", "-c"]
-RUN mkdir -p /genaieval
-COPY ${REPO_PATH} /genaieval
-RUN if [ "$REPO_PATH" == "" ]; then rm -rf /genaieval/* && rm -rf /genaieval/.* ; git clone --single-branch --branch=${BRANCH} ${REPO} /genaieval ; fi
-
-# Build From Source
-RUN pip install --upgrade pip setuptools==69.5.1
-RUN cd /genaieval && \
-    pip install -r requirements.txt && \
-    python setup.py install && \
-    pip install --upgrade-strategy eager optimum[habana] && \
-    pip list
-
-WORKDIR /genaieval/
diff --git a/docker/hpu.dockerfile b/docker/hpu.dockerfile
index 294eee1d..5f3863e7 100644
--- a/docker/hpu.dockerfile
+++ b/docker/hpu.dockerfile
@@ -19,7 +19,7 @@ RUN pip install --upgrade pip setuptools==69.5.1
 
 # Build From Source
 RUN cd /GenAIEval && \
-    pip install -r requirements.txt && \
+    pip install -r requirements_hpu.txt && \
     python setup.py install && \
     pip list
 
diff --git a/evals/evaluation/lm_evaluation_harness/lm_eval/models/huggingface.py b/evals/evaluation/lm_evaluation_harness/lm_eval/models/huggingface.py
index bc1868a6..7098ec4b 100644
--- a/evals/evaluation/lm_evaluation_harness/lm_eval/models/huggingface.py
+++ b/evals/evaluation/lm_evaluation_harness/lm_eval/models/huggingface.py
@@ -1176,11 +1176,14 @@ def __init__(self, *args, **kwargs):
         else:
             self.static_shapes = False
 
+        # TODO
+        """
         if self.static_shapes:
             print("use hpu graphs.")
             from habana_frameworks.torch.hpu import wrap_in_hpu_graph
 
             self._model = wrap_in_hpu_graph(self._model)
+        """
 
         print("lm-eval warmup starting for Gaudi.")
         self.warm_up()
diff --git a/requirements_hpu.txt b/requirements_hpu.txt
new file mode 100644
index 00000000..a27fd635
--- /dev/null
+++ b/requirements_hpu.txt
@@ -0,0 +1,23 @@
+bigcode-eval@git+https://github.com/bigcode-project/bigcode-evaluation-harness.git
+click
+deepdiff
+deepeval==1.4.0
+docker
+evaluate
+flask
+jieba
+kubernetes
+langchain_community
+langchain_huggingface
+lm-eval==0.4.3
+locust
+numpy < 2.0
+optimum-habana
+prometheus_client
+pytest
+pyyaml
+ragas==v0.1.19
+requests
+rouge_score
+sseclient-py
+transformers
diff --git a/tests/test_lm_eval.py b/tests/test_lm_eval.py
index bc9c21a6..2523ed39 100644
--- a/tests/test_lm_eval.py
+++ b/tests/test_lm_eval.py
@@ -13,12 +13,9 @@ class TestLMEval(unittest.TestCase):
 
     def test_lm_eval(self):
         model_name_or_path = "facebook/opt-125m"
-        user_model = AutoModelForCausalLM.from_pretrained(model_name_or_path)
-        tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
         args = LMEvalParser(
             model="hf",
-            user_model=user_model,
-            tokenizer=tokenizer,
+            model_args=f"pretrained={model_name_or_path}",
             tasks="lambada_openai",
             device="cpu",
             batch_size=1,
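For reference, the updated test drops the pre-built user_model/tokenizer objects and lets the harness load the model itself from model_args. A minimal sketch of the resulting call pattern follows; the import path and the evaluate() entry point are assumptions based on the rest of the repo, since neither appears in the hunk above:

    # Sketch only: the import path and evaluate() are assumed, not shown in this diff.
    from evals.evaluation.lm_evaluation_harness import LMEvalParser, evaluate

    args = LMEvalParser(
        model="hf",
        # The harness now builds the model/tokenizer from model_args itself,
        # so the test no longer calls AutoModelForCausalLM/AutoTokenizer.
        model_args="pretrained=facebook/opt-125m",
        tasks="lambada_openai",
        device="cpu",
        batch_size=1,
    )
    results = evaluate(args)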