Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
175 changes: 163 additions & 12 deletions comps/guardrails/src/guardrails/requirements-cpu.txt

Large diffs are not rendered by default.

219 changes: 182 additions & 37 deletions comps/guardrails/src/guardrails/requirements-gpu.txt

Large diffs are not rendered by default.

4 changes: 2 additions & 2 deletions comps/image2image/src/Dockerfile.intel_hpu
Original file line number Diff line number Diff line change
Expand Up @@ -19,9 +19,9 @@ ENV PYTHONPATH=/home/user:/usr/lib/habanalabs/:/home/user/optimum-habana

# Install requirements and optimum habana, numpy==1.23.5 required by vault.habana.ai/gaudi-docker/1.20.1/ubuntu22.04/habanalabs/pytorch-installer-2.6.0
ARG uvpip='uv pip install --system --no-cache-dir'
RUN pip install --no-cache-dir --upgrade pip setuptools && \
RUN pip install --no-cache-dir --upgrade pip setuptools uv && \
$uvpip -r /home/user/comps/image2image/src/requirements-cpu.txt && \
$uvpip numpy==1.23.5 && \
$uvpip numpy==1.26.4 && \
$uvpip optimum[habana]

USER user
Expand Down
6 changes: 3 additions & 3 deletions comps/llms/src/text-generation/Dockerfile.intel_hpu
Original file line number Diff line number Diff line change
Expand Up @@ -24,8 +24,8 @@ COPY comps /home/user/comps

ARG uvpip='uv pip install --system --no-cache-dir'
RUN pip install --no-cache-dir --upgrade pip setuptools uv && \
pip install --no-cache-dir --upgrade-strategy eager optimum[habana] && \
$uvpip git+https://github.com/HabanaAI/DeepSpeed.git@1.20.0
pip install --no-cache-dir optimum-habana && \
pip install --no-cache-dir git+https://github.com/HabanaAI/DeepSpeed.git@1.20.0

RUN git clone ${REPO} --depth 1 --branch ${REPO_VER} /home/user/optimum-habana && rm -rf /home/user/optimum-habana/.git

Expand All @@ -34,7 +34,7 @@ RUN $uvpip -r requirements.txt

WORKDIR /home/user/comps/llms/src/text-generation/
RUN $uvpip -r requirements.txt && \
$uvpip --upgrade --force-reinstall pydantic numpy==1.25
$uvpip --upgrade --force-reinstall pydantic numpy==1.26.4 transformers==4.49.0

ENV PYTHONPATH=/root:/home/user
ENV HABANA_LOGS=/home/user/logs
Expand Down
5 changes: 3 additions & 2 deletions comps/llms/src/text-generation/Dockerfile.intel_hpu_phi4
Original file line number Diff line number Diff line change
Expand Up @@ -23,13 +23,14 @@ COPY comps /home/user/comps
ARG uvpip='uv pip install --system --no-cache-dir'
RUN pip install --no-cache-dir --upgrade pip setuptools uv && \
pip install --no-cache-dir --upgrade-strategy eager optimum[habana] && \
$uvpip git+https://github.com/HabanaAI/DeepSpeed.git@1.20.0
pip install --no-cache-dir git+https://github.com/HabanaAI/DeepSpeed.git@1.20.0

RUN $uvpip git+https://github.com/huggingface/optimum-habana.git@transformers_future

WORKDIR /home/user/comps/llms/src/text-generation
RUN $uvpip -r requirements.txt && \
$uvpip soundfile peft backoff
$uvpip soundfile peft backoff && \
$uvpip numpy==1.26.4

ENV PYTHONPATH=/root:/home/user
ENV HABANA_LOGS=/home/user/logs
Expand Down
3 changes: 2 additions & 1 deletion comps/router/src/Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -17,9 +17,10 @@ COPY comps /home/user/comps
ARG uvpip='uv pip install --system --no-cache-dir'
RUN pip install --no-cache-dir --upgrade pip uv && \
$uvpip -r /home/user/comps/router/src/requirements.txt && \
$uvpip torch --index-url https://download.pytorch.org/whl/cpu && \
git clone --depth 1 https://github.com/lm-sys/RouteLLM.git /tmp/RouteLLM && \
patch -p1 -d /tmp/RouteLLM < /home/user/comps/router/src/hf_compatibility.patch && \
$uvpip /tmp/RouteLLM && rm -rf /tmp/RouteLLM
$uvpip /tmp/RouteLLM && rm -rf /tmp/RouteLLM

# Make imports work
ENV PYTHONPATH=/home/user
Expand Down
4 changes: 2 additions & 2 deletions comps/text2cypher/src/Dockerfile.intel_hpu
Original file line number Diff line number Diff line change
Expand Up @@ -26,14 +26,14 @@ COPY comps /root/comps

ARG uvpip='uv pip install --system --no-cache-dir'
RUN pip install --no-cache-dir --upgrade pip setuptools uv && \
pip install --no-cache-dir --upgrade-strategy eager optimum[habana] && \
pip install --no-cache-dir optimum-habana && \
$uvpip git+https://github.com/HabanaAI/DeepSpeed.git@1.19.0

RUN git clone --depth 1 --branch ${REPO_VER} ${REPO}

WORKDIR /root/comps/text2cypher/src
RUN $uvpip -r requirements-cpu.txt && \
$uvpip --upgrade --force-reinstall pydantic numpy==1.26.3
$uvpip --upgrade --force-reinstall pydantic numpy==1.26.3 transformers==4.49.0

# Set environment variables
ENV PYTHONPATH=/root:/usr/lib/habanalabs/:/root/optimum-habana
Expand Down
44 changes: 15 additions & 29 deletions comps/text2cypher/src/requirements-cpu.txt
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
# This file was autogenerated by uv via the following command:
# uv pip compile --index-strategy unsafe-best-match ./comps/text2cypher/src/requirements.in --universal -o ./comps/text2cypher/src/requirements-cpu.txt
accelerate==1.7.0
accelerate==1.8.0
# via
# -r ./comps/text2cypher/src/requirements.in
# peft
Expand Down Expand Up @@ -85,6 +85,7 @@ deprecated==1.2.18
# via
# banks
# llama-index-core
# llama-index-instrumentation
dirtyjson==1.0.8
# via llama-index-core
distro==1.9.0
Expand All @@ -102,7 +103,6 @@ fastapi==0.115.13
filelock==3.18.0
# via
# huggingface-hub
# torch
# transformers
filetype==1.2.0
# via
Expand All @@ -116,7 +116,6 @@ fsspec==2025.5.1
# via
# huggingface-hub
# llama-index-core
# torch
googleapis-common-protos==1.70.0
# via
# opentelemetry-exporter-otlp-proto-grpc
Expand Down Expand Up @@ -173,14 +172,13 @@ jaxlib==0.6.2
jinja2==3.1.6
# via
# banks
# torch
jiter==0.10.0
# via openai
joblib==1.5.1
# via
# nltk
# scikit-learn
json-repair==0.46.2
json-repair==0.47.1
# via -r ./comps/text2cypher/src/requirements.in
jsonpatch==1.33
# via langchain-core
Expand Down Expand Up @@ -224,15 +222,15 @@ llama-cloud==0.1.26
# llama-index-indices-managed-llama-cloud
llama-cloud-services==0.6.34
# via llama-parse
llama-index==0.12.42
llama-index==0.12.43
# via -r ./comps/text2cypher/src/requirements.in
llama-index-agent-openai==0.4.11
# via
# llama-index
# llama-index-program-openai
llama-index-cli==0.4.3
# via llama-index
llama-index-core==0.12.42
llama-index-core==0.12.43
# via
# llama-cloud-services
# llama-index
Expand Down Expand Up @@ -263,6 +261,8 @@ llama-index-graph-stores-neo4j==0.4.6
# via -r ./comps/text2cypher/src/requirements.in
llama-index-indices-managed-llama-cloud==0.7.7
# via llama-index
llama-index-instrumentation==0.1.0
# via llama-index-workflows
llama-index-llms-huggingface==0.5.0
# via -r ./comps/text2cypher/src/requirements.in
llama-index-llms-huggingface-api==0.5.0
Expand All @@ -287,6 +287,8 @@ llama-index-readers-file==0.4.8
# via llama-index
llama-index-readers-llama-parse==0.4.0
# via llama-index
llama-index-workflows==0.2.2
# via llama-index-core
llama-parse==0.6.34
# via llama-index-readers-llama-parse
lxml==5.4.0
Expand Down Expand Up @@ -330,12 +332,10 @@ nest-asyncio==1.6.0
networkx==3.4.2 ; python_full_version < '3.11'
# via
# llama-index-core
# torch
# trimesh
networkx==3.5 ; python_full_version >= '3.11'
# via
# llama-index-core
# torch
# trimesh
nltk==3.9.1
# via
Expand Down Expand Up @@ -486,6 +486,8 @@ pydantic==2.11.7
# llama-cloud
# llama-cloud-services
# llama-index-core
# llama-index-instrumentation
# llama-index-workflows
# openai
# pydantic-settings
# unstructured-client
Expand Down Expand Up @@ -586,8 +588,9 @@ sentence-transformers==4.1.0
# via
# -r ./comps/text2cypher/src/requirements.in
# llama-index-embeddings-huggingface
setuptools==80.9.0 ; python_full_version >= '3.12'
# via torch
setuptools==80.9.0
# via
# llama-index-core
shapely==2.1.1
# via trimesh
shortuuid==1.0.13
Expand Down Expand Up @@ -616,8 +619,6 @@ striprtf==0.0.26
# via llama-index-readers-file
svg-path==6.3
# via trimesh
sympy==1.13.1
# via torch
tenacity==9.1.2
# via
# langchain-community
Expand All @@ -631,20 +632,6 @@ tokenizers==0.21.1
# via
# langchain-huggingface
# transformers
torch==2.6.0 ; sys_platform == 'darwin'
# via
# accelerate
# llama-index-llms-huggingface
# peft
# sentence-transformers
# transformers
torch==2.6.0+cpu ; sys_platform != 'darwin'
# via
# accelerate
# llama-index-llms-huggingface
# peft
# sentence-transformers
# transformers
tqdm==4.67.1
# via
# huggingface-hub
Expand Down Expand Up @@ -693,7 +680,6 @@ typing-extensions==4.14.0
# rich
# sentence-transformers
# sqlalchemy
# torch
# typing-inspect
# typing-inspection
# unstructured
Expand All @@ -713,7 +699,7 @@ unstructured==0.17.2
# via -r ./comps/text2cypher/src/requirements.in
unstructured-client==0.36.0
# via unstructured
urllib3==2.4.0
urllib3==2.5.0
# via
# -r ./comps/text2cypher/src/requirements.in
# requests
Expand Down
3 changes: 1 addition & 2 deletions comps/third_parties/speecht5/src/Dockerfile.intel_hpu
Original file line number Diff line number Diff line change
Expand Up @@ -24,8 +24,7 @@ COPY --chown=user:user comps /home/user/comps
ARG uvpip='uv pip install --system --no-cache-dir'
RUN pip install --no-cache-dir --upgrade pip setuptools uv && \
$uvpip -r /home/user/comps/third_parties/speecht5/src/requirements-cpu.txt && \
$uvpip numpy==1.23.5 && \
$uvpip --upgrade transformers && \
$uvpip numpy==1.26.4 && \
$uvpip optimum[habana]

ENV PYTHONPATH=$PYTHONPATH:/home/user
Expand Down
3 changes: 2 additions & 1 deletion comps/third_parties/video-llama/src/Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -22,11 +22,12 @@ RUN mkdir /home/user/model && chown user:user -R /home/user/model
COPY --chown=user:user comps /home/user/comps
WORKDIR /home/user/comps/third_parties/video-llama/src

# install the fixed torch version again after install cpu requirements, make sure the deps are compatible
ARG uvpip='uv pip install --system --no-cache-dir'
RUN pip install --no-cache-dir --upgrade pip setuptools uv && \
if [ ${ARCH} = "cpu" ]; then \
$uvpip torch==2.5.1 torchaudio~=2.5.1 torchvision==0.20.1 --index-url https://download.pytorch.org/whl/cpu; \
$uvpip -r /home/user/comps/third_parties/video-llama/src/requirements-cpu.txt; \
$uvpip torch==2.5.1 torchvision==0.20.1 torchaudio~=2.5.1 --index-url https://download.pytorch.org/whl/cpu; \
else \
$uvpip -r /home/user/comps/third_parties/video-llama/src/requirements-gpu.txt; \
fi
Expand Down
Loading
Loading