From ad925a17cf051128a64c54cd0cc1ba05b305aee3 Mon Sep 17 00:00:00 2001 From: Travis Cline Date: Mon, 27 Nov 2023 23:35:19 -0800 Subject: [PATCH 1/5] Simplify requirements file As of ed0997173f98eaf8f4edf7ba5fe8f15c6b877fd3 the pytorch 1 requirements/support has been pulled (or at least undocumented). This simplifies specification/management of the pip requirements --- .github/workflows/test-build.yaml | 5 ++--- .gitignore | 5 ++--- Makefile | 9 +++++++++ README.md | 6 +++--- pyproject.toml | 2 +- requirements/pt2.txt => requirements.txt | 0 6 files changed, 17 insertions(+), 10 deletions(-) create mode 100644 Makefile rename requirements/pt2.txt => requirements.txt (100%) diff --git a/.github/workflows/test-build.yaml b/.github/workflows/test-build.yaml index 8aabe376..698fdb36 100644 --- a/.github/workflows/test-build.yaml +++ b/.github/workflows/test-build.yaml @@ -13,7 +13,6 @@ jobs: fail-fast: false matrix: python-version: ["3.8", "3.10"] - requirements-file: ["pt2", "pt13"] steps: - uses: actions/checkout@v2 - name: Set up Python ${{ matrix.python-version }} @@ -23,5 +22,5 @@ jobs: - name: Install dependencies run: | python -m pip install --upgrade pip - pip install -r requirements/${{ matrix.requirements-file }}.txt - pip install . \ No newline at end of file + pip install -r requirements.txt + pip install . 
diff --git a/.gitignore b/.gitignore index 5506c38d..4abb5b6e 100644 --- a/.gitignore +++ b/.gitignore @@ -3,12 +3,11 @@ *.py[cod] # envs -.pt13 -.pt2 +.venv # directories /checkpoints /dist /outputs /build -/src \ No newline at end of file +/src diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..63611d99 --- /dev/null +++ b/Makefile @@ -0,0 +1,9 @@ +# Contains helpful make targets for development + +.venv: requirements.txt ## Create a virtual environment and install dependencies + python3 -m venv --clear .venv + .venv/bin/pip install -r requirements.txt + +.PHONY: clean +clean: ## Remove the virtual environment + @rm -rf .venv diff --git a/README.md b/README.md index 61fa687b..ad4afced 100644 --- a/README.md +++ b/README.md @@ -99,9 +99,9 @@ This is assuming you have navigated to the `generative-models` root after clonin ```shell # install required packages from pypi -python3 -m venv .pt2 -source .pt2/bin/activate -pip3 install -r requirements/pt2.txt +python3 -m venv .venv +source .venv/bin/activate +pip3 install -r requirements.txt ``` #### 3. 
Install `sgm` diff --git a/pyproject.toml b/pyproject.toml index 2cc50216..89c38d1e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -43,6 +43,6 @@ dependencies = [ [tool.hatch.envs.ci.scripts] test-inference = [ "pip install torch==2.0.1+cu118 torchvision==0.15.2+cu118 torchaudio==2.0.2+cu118 --index-url https://download.pytorch.org/whl/cu118", - "pip install -r requirements/pt2.txt", + "pip install -r requirements.txt", "pytest -v tests/inference/test_inference.py {args}", ] diff --git a/requirements/pt2.txt b/requirements.txt similarity index 100% rename from requirements/pt2.txt rename to requirements.txt From d38b43d134a35ab3ef6b5ce0675788f4c6c47eaa Mon Sep 17 00:00:00 2001 From: Travis Cline Date: Tue, 28 Nov 2023 00:15:18 -0800 Subject: [PATCH 2/5] requirements: Adopt pip-compile --- Makefile | 20 ++ requirements.in | 39 ++ requirements.txt | 452 ++++++++++++++++++++++-- scripts/Dockerfile.compile-requirements | 14 + 4 files changed, 496 insertions(+), 29 deletions(-) create mode 100644 requirements.in create mode 100644 scripts/Dockerfile.compile-requirements diff --git a/Makefile b/Makefile index 63611d99..7aceb615 100644 --- a/Makefile +++ b/Makefile @@ -2,8 +2,28 @@ .venv: requirements.txt ## Create a virtual environment and install dependencies python3 -m venv --clear .venv + .venv/bin/pip install wheel pip-tools + .venv/bin/pip-compile requirements.in .venv/bin/pip install -r requirements.txt +.PHONY: compile-requirements +compile-requirements: .venv ## Compile requirements.in to requirements.txt + .venv/bin/pip-compile requirements.in + .venv/bin/pip install -r requirements.txt + +.PHONY: compile-requirements-linux +compile-requirements-linux: ## Compile requirements.in to requirements.txt (in a linux container) + # Build the docker image + docker build --platform=linux/amd64 \ + -t sd-compile-requirements \ + -f scripts/Dockerfile.compile-requirements \ + . 
+ # Run the docker image (to copy the requirements.txt file out) + docker run --platform=linux/amd64 \ + -v $(PWD):/app \ + -t sd-compile-requirements \ + cp /tmp/requirements.txt requirements.txt + .PHONY: clean clean: ## Remove the virtual environment @rm -rf .venv diff --git a/requirements.in b/requirements.in new file mode 100644 index 00000000..1946d42a --- /dev/null +++ b/requirements.in @@ -0,0 +1,39 @@ +black==23.7.0 +chardet==5.1.0 +clip @ git+https://github.com/openai/CLIP.git +einops>=0.6.1 +fairscale>=0.4.13 +fire>=0.5.0 +fsspec>=2023.6.0 +invisible-watermark>=0.2.0 +kornia==0.6.9 +matplotlib>=3.7.2 +natsort>=8.4.0 +ninja>=1.11.1 +numpy>=1.24.4 +omegaconf>=2.3.0 +open-clip-torch>=2.20.0 +opencv-python==4.6.0.66 +pandas>=2.0.3 +pillow>=9.5.0 +pudb>=2022.1.3 +pytorch-lightning==2.0.1 +pyyaml>=6.0.1 +scipy>=1.10.1 +streamlit>=0.73.1 +tensorboardx==2.6 +timm>=0.9.2 +tokenizers==0.12.1 +torch>=2.0.1 +torchaudio>=2.0.2 +torchdata==0.6.1 +torchmetrics>=1.0.1 +torchvision>=0.15.2 +tqdm>=4.65.0 +transformers==4.19.1 +triton==2.0.0; sys_platform == "linux" +urllib3<1.27,>=1.25.4 +wandb>=0.15.6 +webdataset>=0.2.33 +wheel>=0.41.0 +xformers>=0.0.20 diff --git a/requirements.txt b/requirements.txt index 003a5264..fb700dab 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,39 +1,433 @@ +# +# This file is autogenerated by pip-compile with Python 3.10 +# by the following command: +# +# pip-compile requirements.in +# +aiohttp==3.9.1 + # via fsspec +aiosignal==1.3.1 + # via aiohttp +altair==5.2.0 + # via streamlit +antlr4-python3-runtime==4.9.3 + # via omegaconf +appdirs==1.4.4 + # via wandb +async-timeout==4.0.3 + # via aiohttp +attrs==23.1.0 + # via + # aiohttp + # jsonschema + # referencing black==23.7.0 + # via -r requirements.in +blinker==1.7.0 + # via streamlit +braceexpand==0.1.7 + # via webdataset +cachetools==5.3.2 + # via streamlit +certifi==2023.11.17 + # via + # requests + # sentry-sdk chardet==5.1.0 + # via -r requirements.in 
+charset-normalizer==3.3.2 + # via requests +click==8.1.7 + # via + # black + # streamlit + # wandb clip @ git+https://github.com/openai/CLIP.git -einops>=0.6.1 -fairscale>=0.4.13 -fire>=0.5.0 -fsspec>=2023.6.0 -invisible-watermark>=0.2.0 + # via -r requirements.in +cmake==3.27.7 + # via triton +contourpy==1.2.0 + # via matplotlib +cycler==0.12.1 + # via matplotlib +docker-pycreds==0.4.0 + # via wandb +einops==0.7.0 + # via -r requirements.in +fairscale==0.4.13 + # via -r requirements.in +filelock==3.13.1 + # via + # huggingface-hub + # torch + # transformers + # triton +fire==0.5.0 + # via -r requirements.in +fonttools==4.45.1 + # via matplotlib +frozenlist==1.4.0 + # via + # aiohttp + # aiosignal +fsspec[http]==2023.10.0 + # via + # -r requirements.in + # huggingface-hub + # pytorch-lightning +ftfy==6.1.3 + # via + # clip + # open-clip-torch +gitdb==4.0.11 + # via gitpython +gitpython==3.1.40 + # via + # streamlit + # wandb +huggingface-hub==0.19.4 + # via + # open-clip-torch + # timm + # transformers +idna==3.6 + # via + # requests + # yarl +importlib-metadata==6.8.0 + # via streamlit +invisible-watermark==0.2.0 + # via -r requirements.in +jedi==0.19.1 + # via pudb +jinja2==3.1.2 + # via + # altair + # pydeck + # torch +jsonschema==4.20.0 + # via altair +jsonschema-specifications==2023.11.1 + # via jsonschema +kiwisolver==1.4.5 + # via matplotlib kornia==0.6.9 -matplotlib>=3.7.2 -natsort>=8.4.0 -ninja>=1.11.1 -numpy>=1.24.4 -omegaconf>=2.3.0 -open-clip-torch>=2.20.0 + # via -r requirements.in +lightning-utilities==0.10.0 + # via + # pytorch-lightning + # torchmetrics +lit==17.0.5 + # via triton +markdown-it-py==3.0.0 + # via rich +markupsafe==2.1.3 + # via jinja2 +matplotlib==3.8.2 + # via -r requirements.in +mdurl==0.1.2 + # via markdown-it-py +mpmath==1.3.0 + # via sympy +multidict==6.0.4 + # via + # aiohttp + # yarl +mypy-extensions==1.0.0 + # via black +natsort==8.4.0 + # via -r requirements.in +networkx==3.2.1 + # via torch +ninja==1.11.1.1 + # via -r 
requirements.in +numpy==1.26.2 + # via + # -r requirements.in + # altair + # contourpy + # fairscale + # invisible-watermark + # matplotlib + # opencv-python + # pandas + # pyarrow + # pydeck + # pytorch-lightning + # pywavelets + # scipy + # streamlit + # tensorboardx + # torchmetrics + # torchvision + # transformers + # webdataset + # xformers +nvidia-cublas-cu11==11.10.3.66 + # via + # nvidia-cudnn-cu11 + # nvidia-cusolver-cu11 + # torch +nvidia-cuda-cupti-cu11==11.7.101 + # via torch +nvidia-cuda-nvrtc-cu11==11.7.99 + # via torch +nvidia-cuda-runtime-cu11==11.7.99 + # via torch +nvidia-cudnn-cu11==8.5.0.96 + # via torch +nvidia-cufft-cu11==10.9.0.58 + # via torch +nvidia-curand-cu11==10.2.10.91 + # via torch +nvidia-cusolver-cu11==11.4.0.1 + # via torch +nvidia-cusparse-cu11==11.7.4.91 + # via torch +nvidia-nccl-cu11==2.14.3 + # via torch +nvidia-nvtx-cu11==11.7.91 + # via torch +omegaconf==2.3.0 + # via -r requirements.in +open-clip-torch==2.23.0 + # via -r requirements.in opencv-python==4.6.0.66 -pandas>=2.0.3 -pillow>=9.5.0 -pudb>=2022.1.3 + # via + # -r requirements.in + # invisible-watermark +packaging==23.2 + # via + # altair + # black + # huggingface-hub + # kornia + # lightning-utilities + # matplotlib + # pudb + # pytorch-lightning + # streamlit + # tensorboardx + # transformers +pandas==2.1.3 + # via + # -r requirements.in + # altair + # streamlit +parso==0.8.3 + # via jedi +pathspec==0.11.2 + # via black +pillow==10.1.0 + # via + # -r requirements.in + # invisible-watermark + # matplotlib + # streamlit + # torchvision +platformdirs==4.0.0 + # via black +protobuf==3.20.3 + # via + # open-clip-torch + # streamlit + # tensorboardx + # wandb +psutil==5.9.6 + # via wandb +pudb==2023.1 + # via -r requirements.in +pyarrow==14.0.1 + # via streamlit +pydeck==0.8.1b0 + # via streamlit +pygments==2.17.2 + # via + # pudb + # rich +pyparsing==3.1.1 + # via matplotlib +python-dateutil==2.8.2 + # via + # matplotlib + # pandas + # streamlit pytorch-lightning==2.0.1 
-pyyaml>=6.0.1 -scipy>=1.10.1 -streamlit>=0.73.1 + # via -r requirements.in +pytz==2023.3.post1 + # via pandas +pywavelets==1.5.0 + # via invisible-watermark +pyyaml==6.0.1 + # via + # -r requirements.in + # huggingface-hub + # omegaconf + # pytorch-lightning + # timm + # transformers + # wandb + # webdataset +referencing==0.31.0 + # via + # jsonschema + # jsonschema-specifications +regex==2023.10.3 + # via + # clip + # open-clip-torch + # transformers +requests==2.31.0 + # via + # fsspec + # huggingface-hub + # streamlit + # torchdata + # torchvision + # transformers + # wandb +rich==13.7.0 + # via streamlit +rpds-py==0.13.1 + # via + # jsonschema + # referencing +safetensors==0.4.1 + # via timm +scipy==1.11.4 + # via -r requirements.in +sentencepiece==0.1.99 + # via open-clip-torch +sentry-sdk==1.37.1 + # via wandb +setproctitle==1.3.3 + # via wandb +six==1.16.0 + # via + # docker-pycreds + # fire + # python-dateutil +smmap==5.0.1 + # via gitdb +streamlit==1.28.2 + # via -r requirements.in +sympy==1.12 + # via torch +tenacity==8.2.3 + # via streamlit tensorboardx==2.6 -timm>=0.9.2 + # via -r requirements.in +termcolor==2.3.0 + # via fire +timm==0.9.12 + # via + # -r requirements.in + # open-clip-torch tokenizers==0.12.1 -torch>=2.0.1 -torchaudio>=2.0.2 + # via + # -r requirements.in + # transformers +toml==0.10.2 + # via streamlit +tomli==2.0.1 + # via black +toolz==0.12.0 + # via altair +torch==2.0.1 + # via + # -r requirements.in + # clip + # fairscale + # invisible-watermark + # kornia + # open-clip-torch + # pytorch-lightning + # timm + # torchaudio + # torchdata + # torchmetrics + # torchvision + # triton + # xformers +torchaudio==2.0.2 + # via -r requirements.in torchdata==0.6.1 -torchmetrics>=1.0.1 -torchvision>=0.15.2 -tqdm>=4.65.0 + # via -r requirements.in +torchmetrics==1.2.0 + # via + # -r requirements.in + # pytorch-lightning +torchvision==0.15.2 + # via + # -r requirements.in + # clip + # open-clip-torch + # timm +tornado==6.3.3 + # via streamlit 
+tqdm==4.66.1 + # via + # -r requirements.in + # clip + # huggingface-hub + # open-clip-torch + # pytorch-lightning + # transformers transformers==4.19.1 -triton==2.0.0 -urllib3<1.27,>=1.25.4 -wandb>=0.15.6 -webdataset>=0.2.33 -wheel>=0.41.0 -xformers>=0.0.20 + # via -r requirements.in +triton==2.0.0 ; sys_platform == "linux" + # via + # -r requirements.in + # torch +typing-extensions==4.8.0 + # via + # altair + # huggingface-hub + # lightning-utilities + # pytorch-lightning + # streamlit + # torch +tzdata==2023.3 + # via pandas +tzlocal==5.2 + # via streamlit +urllib3==1.26.18 + # via + # -r requirements.in + # requests + # sentry-sdk + # torchdata +urwid==2.2.3 + # via + # pudb + # urwid-readline +urwid-readline==0.13 + # via pudb +validators==0.22.0 + # via streamlit +wandb==0.16.0 + # via -r requirements.in +watchdog==3.0.0 + # via streamlit +wcwidth==0.2.12 + # via ftfy +webdataset==0.2.79 + # via -r requirements.in +wheel==0.42.0 + # via + # -r requirements.in + # nvidia-cublas-cu11 + # nvidia-cuda-cupti-cu11 + # nvidia-cuda-runtime-cu11 + # nvidia-curand-cu11 + # nvidia-cusparse-cu11 + # nvidia-nvtx-cu11 +xformers==0.0.22 + # via -r requirements.in +yarl==1.9.3 + # via aiohttp +zipp==3.17.0 + # via importlib-metadata + +# The following packages are considered to be unsafe in a requirements file: +# setuptools diff --git a/scripts/Dockerfile.compile-requirements b/scripts/Dockerfile.compile-requirements new file mode 100644 index 00000000..0d3bb99a --- /dev/null +++ b/scripts/Dockerfile.compile-requirements @@ -0,0 +1,14 @@ +# Dockerfile for compiling requirements.txt +# See top level Makefile for more details on how to use this file. 
+FROM python:3.10 + +WORKDIR /app + +RUN pip install -U pip +RUN pip install wheel pip-tools + +COPY requirements.in /app/requirements.in +COPY requirements.txt /app/requirements.txt +COPY Makefile /app/Makefile +RUN make .venv +RUN cp -r requirements.txt /tmp/requirements.txt From e7fe2d09cae7b93c6c70c1baf461f91c7dfa9a20 Mon Sep 17 00:00:00 2001 From: Travis Cline Date: Tue, 28 Nov 2023 01:30:17 -0800 Subject: [PATCH 3/5] requirements: Iterate on requirements compilation --- Makefile | 44 ++++++++++++++++++++----- pyproject.toml | 1 - requirements.in | 1 + scripts/Dockerfile.compile-requirements | 35 ++++++++++++++++++-- 4 files changed, 69 insertions(+), 12 deletions(-) diff --git a/Makefile b/Makefile index 7aceb615..9f93e3af 100644 --- a/Makefile +++ b/Makefile @@ -1,20 +1,27 @@ # Contains helpful make targets for development +UNAME_S?=$(shell uname -s) +REQUIREMENTS_FILE?=requirements.txt +CUDA_DOCKER_VERSION?=11.8.0 +ifeq ($(UNAME_S), Darwin) + REQUIREMENTS_FILE=requirements-macos.txt +endif -.venv: requirements.txt ## Create a virtual environment and install dependencies +.venv: requirements.in ## Create a virtual environment and install dependencies python3 -m venv --clear .venv .venv/bin/pip install wheel pip-tools - .venv/bin/pip-compile requirements.in - .venv/bin/pip install -r requirements.txt + .venv/bin/pip-compile requirements.in --output-file=$(REQUIREMENTS_FILE) + .venv/bin/pip install -r $(REQUIREMENTS_FILE) .PHONY: compile-requirements compile-requirements: .venv ## Compile requirements.in to requirements.txt - .venv/bin/pip-compile requirements.in - .venv/bin/pip install -r requirements.txt + .venv/bin/pip-compile requirements.in --output-file=$(REQUIREMENTS_FILE) -.PHONY: compile-requirements-linux -compile-requirements-linux: ## Compile requirements.in to requirements.txt (in a linux container) +.PHONY: compile-requirements-docker +compile-requirements-docker: ## Compile requirements.in to requirements.txt (in a docker container) # Build the 
docker image docker build --platform=linux/amd64 \ + --build-arg CUDA_DOCKER_VERSION=$(CUDA_DOCKER_VERSION) \ + --target final \ -t sd-compile-requirements \ -f scripts/Dockerfile.compile-requirements \ . @@ -22,7 +29,28 @@ compile-requirements-linux: ## Compile requirements.in to requirements.txt (in a docker run --platform=linux/amd64 \ -v $(PWD):/app \ -t sd-compile-requirements \ - cp /tmp/requirements.txt requirements.txt + cp /tmp/requirements.txt $(REQUIREMENTS_FILE) + +.PHONY: test +test: test-inference ## Run tests + +.PHONY: test-inference +test-inference: .venv ## Run inference tests + .venv/bin/pytest -v tests/inference/test_inference.py + +.PHONY: test-inference-docker +test-inference-docker: ## Run inference tests (in a docker container) + # Build the docker image + docker build --platform=linux/amd64 \ + --build-arg CUDA_DOCKER_VERSION=$(CUDA_DOCKER_VERSION) \ + --target test-inference \ + -t sd-test-inference \ + -f scripts/Dockerfile.compile-requirements \ + . + # Run the docker image + docker run --platform=linux/amd64 \ + -v $(PWD):/app \ + -t sd-test-inference .PHONY: clean clean: ## Remove the virtual environment diff --git a/pyproject.toml b/pyproject.toml index 89c38d1e..80ac82e3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -42,7 +42,6 @@ dependencies = [ [tool.hatch.envs.ci.scripts] test-inference = [ - "pip install torch==2.0.1+cu118 torchvision==0.15.2+cu118 torchaudio==2.0.2+cu118 --index-url https://download.pytorch.org/whl/cu118", "pip install -r requirements.txt", "pytest -v tests/inference/test_inference.py {args}", ] diff --git a/requirements.in b/requirements.in index 1946d42a..616bb5c7 100644 --- a/requirements.in +++ b/requirements.in @@ -1,3 +1,4 @@ +--find-links https://download.pytorch.org/whl/torch_stable.html black==23.7.0 chardet==5.1.0 clip @ git+https://github.com/openai/CLIP.git diff --git a/scripts/Dockerfile.compile-requirements b/scripts/Dockerfile.compile-requirements index 0d3bb99a..f2887c8b 100644 --- 
a/scripts/Dockerfile.compile-requirements +++ b/scripts/Dockerfile.compile-requirements @@ -1,14 +1,43 @@ # Dockerfile for compiling requirements.txt # See top level Makefile for more details on how to use this file. -FROM python:3.10 + +ARG CUDA_DOCKER_VERSION=11.8.0 +ARG CUDA_UBUNTU_VERSION=ubuntu22.04 +FROM nvidia/cuda:${CUDA_DOCKER_VERSION}-devel-${CUDA_UBUNTU_VERSION} as build WORKDIR /app +RUN apt-get update # 2021-11-28 +RUN apt-get -y install python3-pip python3-venv +RUN apt-get -y install git + RUN pip install -U pip RUN pip install wheel pip-tools COPY requirements.in /app/requirements.in COPY requirements.txt /app/requirements.txt COPY Makefile /app/Makefile -RUN make .venv -RUN cp -r requirements.txt /tmp/requirements.txt +RUN --mount=type=cache,target=/root/.cache/pip \ + --mount=type=cache,target=/root/.cache/pip-tools \ + make .venv + +RUN cp requirements.txt /tmp/requirements.txt + +# Layer to test inference +FROM nvidia/cuda:${CUDA_DOCKER_VERSION}-devel-${CUDA_UBUNTU_VERSION} as test-inference + +RUN apt-get update # 2021-11-28 +RUN apt-get -y install python3-pip python3-venv +COPY --from=build /app/.venv /app/.venv +COPY Makefile /app/Makefile +RUN make test-inference + +# Final layer +FROM nvidia/cuda:${CUDA_DOCKER_VERSION}-runtime-${CUDA_UBUNTU_VERSION} as final + +WORKDIR /app + +RUN apt-get update # 2021-11-28 +RUN apt-get -y install python3-pip python3-venv +COPY --from=build /app/.venv /app/.venv +COPY --from=build /tmp/requirements.txt /app/requirements.txt From e3e02a6196a6fa28ba2899c6a8892266d4c46301 Mon Sep 17 00:00:00 2001 From: Travis Cline Date: Tue, 28 Nov 2023 01:36:12 -0800 Subject: [PATCH 4/5] requirements: iterate --- Makefile | 3 +++ scripts/Dockerfile.compile-requirements | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 9f93e3af..90fdb367 100644 --- a/Makefile +++ b/Makefile @@ -55,3 +55,6 @@ test-inference-docker: ## Run inference tests (in a docker container) .PHONY: clean clean: 
## Remove the virtual environment @rm -rf .venv + +.DELETE_ON_ERROR: ## Configure make to delete the target of a rule if it has an error + diff --git a/scripts/Dockerfile.compile-requirements b/scripts/Dockerfile.compile-requirements index f2887c8b..3f8fd7a0 100644 --- a/scripts/Dockerfile.compile-requirements +++ b/scripts/Dockerfile.compile-requirements @@ -40,4 +40,4 @@ WORKDIR /app RUN apt-get update # 2021-11-28 RUN apt-get -y install python3-pip python3-venv COPY --from=build /app/.venv /app/.venv -COPY --from=build /tmp/requirements.txt /app/requirements.txt +COPY --from=build /tmp/requirements.txt /tmp/requirements.txt From 187e74dbe155b5f8054e5e2464e10b7d065ce65d Mon Sep 17 00:00:00 2001 From: Travis Cline Date: Tue, 28 Nov 2023 21:41:05 -0800 Subject: [PATCH 5/5] requirements: iterate further --- .github/workflows/test-inference.yml | 4 +--- Makefile | 9 +++++---- requirements.in => requirements/requirements.in | 2 +- requirements.txt => requirements/requirements.txt | 0 scripts/Dockerfile.compile-requirements | 6 +++--- scripts/cuda-version.sh | 15 +++++++++++++++ 6 files changed, 25 insertions(+), 11 deletions(-) rename requirements.in => requirements/requirements.in (97%) rename requirements.txt => requirements/requirements.txt (100%) create mode 100755 scripts/cuda-version.sh diff --git a/.github/workflows/test-inference.yml b/.github/workflows/test-inference.yml index 88b879cc..49312373 100644 --- a/.github/workflows/test-inference.yml +++ b/.github/workflows/test-inference.yml @@ -2,9 +2,7 @@ name: Test inference on: pull_request: - push: - branches: - - main + push: {} jobs: test: diff --git a/Makefile b/Makefile index 90fdb367..4a7b2555 100644 --- a/Makefile +++ b/Makefile @@ -6,15 +6,15 @@ ifeq ($(UNAME_S), Darwin) REQUIREMENTS_FILE=requirements-macos.txt endif -.venv: requirements.in ## Create a virtual environment and install dependencies +.venv: requirements/requirements.in ## Create a virtual environment and install dependencies python3 -m 
venv --clear .venv .venv/bin/pip install wheel pip-tools - .venv/bin/pip-compile requirements.in --output-file=$(REQUIREMENTS_FILE) + .venv/bin/pip-compile requirements/requirements.in --output-file=requirements/$(REQUIREMENTS_FILE) .venv/bin/pip install -r $(REQUIREMENTS_FILE) .PHONY: compile-requirements compile-requirements: .venv ## Compile requirements.in to requirements.txt - .venv/bin/pip-compile requirements.in --output-file=$(REQUIREMENTS_FILE) + .venv/bin/pip-compile requirements/requirements.in --output-file=requirements/$(REQUIREMENTS_FILE) .PHONY: compile-requirements-docker compile-requirements-docker: ## Compile requirements.in to requirements.txt (in a docker container) @@ -27,9 +27,10 @@ compile-requirements-docker: ## Compile requirements.in to requirements.txt (in . # Run the docker image (to copy the requirements.txt file out) docker run --platform=linux/amd64 \ + --gpus all \ -v $(PWD):/app \ -t sd-compile-requirements \ - cp /tmp/requirements.txt $(REQUIREMENTS_FILE) + cp /tmp/requirements.txt requirements/$(REQUIREMENTS_FILE) .PHONY: test test: test-inference ## Run tests diff --git a/requirements.in b/requirements/requirements.in similarity index 97% rename from requirements.in rename to requirements/requirements.in index 616bb5c7..5c96fe41 100644 --- a/requirements.in +++ b/requirements/requirements.in @@ -37,4 +37,4 @@ urllib3<1.27,>=1.25.4 wandb>=0.15.6 webdataset>=0.2.33 wheel>=0.41.0 -xformers>=0.0.20 +xformers diff --git a/requirements.txt b/requirements/requirements.txt similarity index 100% rename from requirements.txt rename to requirements/requirements.txt diff --git a/scripts/Dockerfile.compile-requirements b/scripts/Dockerfile.compile-requirements index 3f8fd7a0..11a56e67 100644 --- a/scripts/Dockerfile.compile-requirements +++ b/scripts/Dockerfile.compile-requirements @@ -14,14 +14,14 @@ RUN apt-get -y install git RUN pip install -U pip RUN pip install wheel pip-tools -COPY requirements.in /app/requirements.in -COPY 
requirements.txt /app/requirements.txt +COPY requirements/ /app/requirements/ COPY Makefile /app/Makefile + RUN --mount=type=cache,target=/root/.cache/pip \ --mount=type=cache,target=/root/.cache/pip-tools \ make .venv -RUN cp requirements.txt /tmp/requirements.txt +RUN cp requirements/requirements.txt /tmp/requirements.txt # Layer to test inference FROM nvidia/cuda:${CUDA_DOCKER_VERSION}-devel-${CUDA_UBUNTU_VERSION} as test-inference diff --git a/scripts/cuda-version.sh b/scripts/cuda-version.sh new file mode 100755 index 00000000..f459a87a --- /dev/null +++ b/scripts/cuda-version.sh @@ -0,0 +1,15 @@ +#!/bin/bash +set -euo pipefail +if nvcc --version &> /dev/null; then + # Determine CUDA version using default nvcc binary + CUDA_VERSION=$(nvcc --version | sed -n 's/^.*release \([0-9]\+\.[0-9]\+\).*$/\1/p'); +elif /usr/local/cuda/bin/nvcc --version &> /dev/null; then + # Determine CUDA version using /usr/local/cuda/bin/nvcc binary + CUDA_VERSION=$(/usr/local/cuda/bin/nvcc --version | sed -n 's/^.*release \([0-9]\+\.[0-9]\+\).*$/\1/p'); +elif [ -f "/usr/local/cuda/version.txt" ]; then + # Determine CUDA version using /usr/local/cuda/version.txt file + CUDA_VERSION=$(cat /usr/local/cuda/version.txt | sed 's/.* \([0-9]\+\.[0-9]\+\).*/\1/') +else + CUDA_VERSION="" +fi +echo "$CUDA_VERSION"