diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
new file mode 100644
index 00000000..6c7471a3
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -0,0 +1,33 @@
+---
+name: Bug report
+about: Create a report to help us improve
+title: ""
+labels: ""
+assignees: ""
+---
+
+### Describe the bug
+
+A clear and concise description of what the bug is.
+
+### To reproduce
+
+Steps to reproduce the behavior:
+
+1. Deploy the NGINX Docker image using ...
+2. View output/logs/configuration on ...
+3. See error
+
+### Expected behavior
+
+A clear and concise description of what you expected to happen.
+
+### Your environment
+
+- Version/release of Docker and method of installation (e.g. Docker Desktop / Docker Server)
+- Version/tag of the NGINX Docker image (e.g. `nginx:alpine`)
+- Target deployment platform (e.g. OpenShift / Kubernetes / Docker Compose / etc...)
+
+### Additional context
+
+Add any other context about the problem here.
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100644
index 00000000..e2242abb
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,23 @@
+---
+name: Feature request
+about: Suggest an idea for this project
+title: ""
+labels: ""
+assignees: ""
+---
+
+### Is your feature request related to a problem? Please describe
+
+A clear and concise description of what the problem is. Ex. I'm always frustrated when ...
+
+### Describe the solution you'd like
+
+A clear and concise description of what you want to happen.
+
+### Describe alternatives you've considered
+
+A clear and concise description of any alternative solutions or features you've considered.
+
+### Additional context
+
+Add any other context or screenshots about the feature request here.
diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
new file mode 100644
index 00000000..e869fe51
--- /dev/null
+++ b/.github/pull_request_template.md
@@ -0,0 +1,12 @@
+### Proposed changes
+
+Describe the use case and details of the change. If this PR addresses an issue on GitHub, make sure to include a link to that issue using one of the [supported keywords](https://docs.github.com/en/github/managing-your-work-on-github/linking-a-pull-request-to-an-issue) here in this description (not in the title of the PR).
+
+### Checklist
+
+Before creating a PR, run through this checklist and mark each as complete:
+- [ ] I have read the [`CONTRIBUTING`](https://github.com/nginxinc/docker-nginx/blob/master/CONTRIBUTING.md) document
+- [ ] I have run `./update.sh` and ensured all entrypoint/Dockerfile template changes have been applied to the relevant image entrypoint scripts & Dockerfiles
+- [ ] If applicable, I have added tests that prove my fix is effective or that my feature works
+- [ ] If applicable, I have checked that any relevant tests pass after adding my changes
+- [ ] I have updated any relevant documentation
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
new file mode 100644
index 00000000..37d7a621
--- /dev/null
+++ b/.github/workflows/ci.yml
@@ -0,0 +1,49 @@
+name: GitHub CI
+
+on:
+  pull_request:
+  push:
+  schedule:
+    - cron: 0 10 * * Mon
+
+defaults:
+  run:
+    shell: 'bash -Eeuo pipefail -x {0}'
+
+jobs:
+
+  generate-jobs:
+    name: Generate Jobs
+    runs-on: ubuntu-latest
+    outputs:
+      strategy: ${{ steps.generate-jobs.outputs.strategy }}
+    steps:
+      - uses: actions/checkout@v3
+      - uses: docker-library/bashbrew@v0.1.12
+      - id: generate-jobs
+        name: Generate Jobs
+        run: |
+          strategy="$(GITHUB_REPOSITORY=nginx "$BASHBREW_SCRIPTS/github-actions/generate.sh")"
+          strategy="$(GITHUB_REPOSITORY=nginx "$BASHBREW_SCRIPTS/github-actions/munge-i386.sh" -c <<<"$strategy")"
+          echo "strategy=$strategy" >> "$GITHUB_OUTPUT"
+          jq . <<<"$strategy" # sanity check / debugging aid
+
+  test:
+    needs: generate-jobs
+    strategy: ${{ fromJson(needs.generate-jobs.outputs.strategy) }}
+    name: ${{ matrix.name }}
+    runs-on: ${{ matrix.os }}
+    steps:
+      - uses: actions/checkout@v3
+      - name: Prepare Environment
+        run: ${{ matrix.runs.prepare }}
+      - name: Pull Dependencies
+        run: ${{ matrix.runs.pull }}
+      - name: Build ${{ matrix.name }}
+        run: ${{ matrix.runs.build }}
+      - name: History ${{ matrix.name }}
+        run: ${{ matrix.runs.history }}
+      - name: Test ${{ matrix.name }}
+        run: ${{ matrix.runs.test }}
+      - name: '"docker images"'
+        run: ${{ matrix.runs.images }}
diff --git a/.github/workflows/sync.yml b/.github/workflows/sync.yml
new file mode 100644
index 00000000..99dc9860
--- /dev/null
+++ b/.github/workflows/sync.yml
@@ -0,0 +1,44 @@
+name: Sync DockerHub with AWS ECR
+
+on:
+  workflow_dispatch:
+  schedule:
+    - cron: 23 20 * * *
+
+defaults:
+  run:
+    shell: 'bash -Eeuo pipefail -x {0}'
+
+jobs:
+  sync-awsecr:
+    name: Sync Docker Hub to AWS ECR Public
+    runs-on: ubuntu-24.04
+    permissions:
+      id-token: write
+      contents: read
+    steps:
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+
+      - name: Configure AWS credentials
+        uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2
+        with:
+          role-to-assume: ${{ secrets.AWS_ROLE_PUBLIC_ECR }}
+          aws-region: us-east-1
+
+      - name: Login to Amazon ECR Public
+        id: login-ecr-public
+        uses: aws-actions/amazon-ecr-login@062b18b96a7aff071d4dc91bc00c4c1a7945b076 # v2.0.1
+        with:
+          registry-type: public
+
+      - name: Login to Docker Hub
+        uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+
+      - name: Build, tag, and push docker image to Amazon ECR Public
+        run: |
+          ./sync-awsecr.sh > sync-real.sh
+          chmod +x sync-real.sh
+          ./sync-real.sh
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 00000000..751553b3
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1 @@
+*.bak
diff --git a/.test/config.sh b/.test/config.sh
new file mode 100755
index 00000000..e371f404
--- /dev/null
+++ b/.test/config.sh
@@ -0,0 +1,11 @@
+imageTests+=(
+ [nginx]='
+ ipv6
+ static
+ templates
+ templates-resolver
+ templates-resolver-ipv6
+ workers
+ modules
+ '
+)
diff --git a/.test/tests/ipv6/expected-std-out.txt b/.test/tests/ipv6/expected-std-out.txt
new file mode 100644
index 00000000..f16a0876
--- /dev/null
+++ b/.test/tests/ipv6/expected-std-out.txt
@@ -0,0 +1,2 @@
+<title>Welcome to nginx!</title>
+10-listen-on-ipv6-by-default.sh: info: Enabled listen on IPv6 in /etc/nginx/conf.d/default.conf
diff --git a/.test/tests/ipv6/run.sh b/.test/tests/ipv6/run.sh
new file mode 100755
index 00000000..0235db6b
--- /dev/null
+++ b/.test/tests/ipv6/run.sh
@@ -0,0 +1,52 @@
+#!/bin/bash
+
+[ "$DEBUG" ] && set -x
+
+set -eo pipefail
+
+# check if we have ipv6 available
+if [ ! -f "/proc/net/if_inet6" ]; then
+ exit 0
+fi
+
+dir="$(dirname "$(readlink -f "$BASH_SOURCE")")"
+
+image="$1"
+
+clientImage='buildpack-deps:buster-curl'
+# ensure the clientImage is ready and available
+if ! docker image inspect "$clientImage" &> /dev/null; then
+ docker pull "$clientImage" > /dev/null
+fi
+
+cid="$(docker run -d "$image")"
+trap "docker rm -vf $cid > /dev/null" EXIT
+
+_request() {
+ local method="$1"
+ shift
+
+ local proto="$1"
+ shift
+
+ local url="${1#/}"
+ shift
+
+ if [ "$(docker inspect -f '{{.State.Running}}' "$cid" 2>/dev/null)" != 'true' ]; then
+ echo >&2 "$image stopped unexpectedly!"
+ ( set -x && docker logs "$cid" ) >&2 || true
+ false
+ fi
+
+ docker run --rm \
+ --link "$cid":nginx \
+ "$clientImage" \
+ curl -fsSL -X"$method" --connect-to '::nginx:' "$@" "$proto://example.com/$url"
+}
+
+. "$HOME/oi/test/retry.sh" '[ "$(_request GET / --output /dev/null || echo $?)" != 7 ]'
+
+# Check that we can request /
+_request GET http '/index.html' | grep '<title>Welcome to nginx!</title>'
+
+docker logs $cid 2>&1 | grep "Enabled listen on IPv6"
diff --git a/mainline/alpine-perl/nginx.conf b/.test/tests/modules/nginx.conf.sme
similarity index 53%
rename from mainline/alpine-perl/nginx.conf
rename to .test/tests/modules/nginx.conf.sme
index e4bad8db..dab10145 100644
--- a/mainline/alpine-perl/nginx.conf
+++ b/.test/tests/modules/nginx.conf.sme
@@ -1,16 +1,17 @@
-
user nginx;
-worker_processes 1;
+worker_processes auto;
-error_log /var/log/nginx/error.log warn;
-pid /var/run/nginx.pid;
+load_module modules/ndk_http_module.so;
+load_module modules/ngx_http_echo_module.so;
+load_module modules/ngx_http_set_misc_module.so;
+error_log /var/log/nginx/error.log notice;
+pid /var/run/nginx.pid;
events {
worker_connections 1024;
}
-
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
@@ -21,12 +22,13 @@ http {
access_log /var/log/nginx/access.log main;
- sendfile on;
- #tcp_nopush on;
-
- keepalive_timeout 65;
-
- #gzip on;
+ server {
+ listen 80 default_server;
+ location /hello {
+ set $raw "hello";
+ set_sha1 $digest $raw;
- include /etc/nginx/conf.d/*.conf;
+ echo $digest;
+ }
+ }
}
diff --git a/.test/tests/modules/run.sh b/.test/tests/modules/run.sh
new file mode 100755
index 00000000..257cdd55
--- /dev/null
+++ b/.test/tests/modules/run.sh
@@ -0,0 +1,71 @@
+#!/bin/bash
+
+set -eo pipefail
+
+dir="$(dirname "$(readlink -f "$BASH_SOURCE")")"
+
+echo $dir
+
+image="$1"
+
+case "$image" in
+ *-perl)
+ ;;
+ *)
+ echo >&2 "skipping non-leaf image: $image"
+ exit
+ ;;
+esac
+
+dockerfile="Dockerfile"
+case "$image" in
+ *alpine*)
+ dockerfile="$dockerfile.alpine"
+ ;;
+esac
+
+clientImage='buildpack-deps:buster-curl'
+# ensure the clientImage is ready and available
+if ! docker image inspect "$clientImage" &> /dev/null; then
+ docker pull "$clientImage" > /dev/null
+fi
+
+# Create an instance of the container-under-test
+modulesImage="$("$HOME/oi/test/tests/image-name.sh" librarytest/nginx-template "$image")"
+docker build --build-arg NGINX_FROM_IMAGE="$image" --build-arg ENABLED_MODULES="ndk set-misc echo" -t "$modulesImage" -f "modules/$dockerfile" "$GITHUB_WORKSPACE/modules"
+
+serverImage="${modulesImage}-sme"
+"$HOME/oi/test/tests/docker-build.sh" "$dir" "$serverImage" < /dev/null" EXIT
+
+_request() {
+ local method="$1"
+ shift
+
+ local proto="$1"
+ shift
+
+ local url="${1#/}"
+ shift
+
+ if [ "$(docker inspect -f '{{.State.Running}}' "$cid" 2>/dev/null)" != 'true' ]; then
+ echo >&2 "$image stopped unexpectedly!"
+ ( set -x && docker logs "$cid" ) >&2 || true
+ false
+ fi
+
+ docker run --rm \
+ --link "$cid":nginx \
+ "$clientImage" \
+ curl -fsSL -X"$method" --connect-to '::nginx:' "$@" "$proto://example.com/$url"
+}
+
+. "$HOME/oi/test/retry.sh" '[ "$(_request GET / --output /dev/null || echo $?)" != 7 ]'
+
+# Check that we can request /
+_request GET http '/hello' | grep 'aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d'
diff --git a/.test/tests/static/run.sh b/.test/tests/static/run.sh
new file mode 100755
index 00000000..f026bedb
--- /dev/null
+++ b/.test/tests/static/run.sh
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+[ "$DEBUG" ] && set -x
+
+set -eo pipefail
+
+dir="$(dirname "$(readlink -f "$BASH_SOURCE")")"
+
+image="$1"
+
+clientImage='buildpack-deps:buster-curl'
+# ensure the clientImage is ready and available
+if ! docker image inspect "$clientImage" &> /dev/null; then
+ docker pull "$clientImage" > /dev/null
+fi
+
+# Create an instance of the container-under-test
+cid="$(docker run -d "$image")"
+trap "docker rm -vf $cid > /dev/null" EXIT
+
+_request() {
+ local method="$1"
+ shift
+
+ local proto="$1"
+ shift
+
+ local url="${1#/}"
+ shift
+
+ if [ "$(docker inspect -f '{{.State.Running}}' "$cid" 2>/dev/null)" != 'true' ]; then
+ echo >&2 "$image stopped unexpectedly!"
+ ( set -x && docker logs "$cid" ) >&2 || true
+ false
+ fi
+
+ docker run --rm \
+ --link "$cid":nginx \
+ "$clientImage" \
+ curl -fsSL -X"$method" --connect-to '::nginx:' "$@" "$proto://example.com/$url"
+}
+
+. "$HOME/oi/test/retry.sh" '[ "$(_request GET / --output /dev/null || echo $?)" != 7 ]'
+
+# Check that we can request /
+_request GET http '/index.html' | grep '<title>Welcome to nginx!</title>'
diff --git a/.test/tests/templates-resolver-ipv6/expected-std-out.txt b/.test/tests/templates-resolver-ipv6/expected-std-out.txt
new file mode 100644
index 00000000..38bfee85
--- /dev/null
+++ b/.test/tests/templates-resolver-ipv6/expected-std-out.txt
@@ -0,0 +1 @@
+example.com - OK
diff --git a/.test/tests/templates-resolver-ipv6/run.sh b/.test/tests/templates-resolver-ipv6/run.sh
new file mode 100755
index 00000000..88476d65
--- /dev/null
+++ b/.test/tests/templates-resolver-ipv6/run.sh
@@ -0,0 +1,70 @@
+#!/bin/bash
+
+[ "$DEBUG" ] && set -x
+
+set -eo pipefail
+
+# check if we have ipv6 available
+if [ ! -f "/proc/net/if_inet6" ]; then
+ exit 0
+fi
+
+dir="$(dirname "$(readlink -f "$BASH_SOURCE")")"
+
+image="$1"
+
+clientImage='buildpack-deps:buster-curl'
+# ensure the clientImage is ready and available
+if ! docker image inspect "$clientImage" &> /dev/null; then
+ docker pull "$clientImage" > /dev/null
+fi
+
+# Create a new Docker network
+nid="$(docker network create --ipv6 --subnet fd0c:7e57::/64 nginx-test-ipv6-network)"
+
+_network_exit_handler() {
+ docker network rm -f $nid > /dev/null
+}
+
+# Create an instance of the container-under-test
+serverImage="$("$HOME/oi/test/tests/image-name.sh" librarytest/nginx-template "$image")"
+"$HOME/oi/test/tests/docker-build.sh" "$dir" "$serverImage" < /dev/null
+}
+_exit_handler() { _container_exit_handler; _network_exit_handler; }
+trap "_exit_handler" EXIT
+
+ipv6cid="$(docker inspect -f '{{range.NetworkSettings.Networks}}{{.GlobalIPv6Address}}{{end}}' $cid)"
+
+_request() {
+ local method="$1"
+ shift
+
+ local proto="$1"
+ shift
+
+ local url="${1#/}"
+ shift
+
+ if [ "$(docker inspect -f '{{.State.Running}}' "$cid" 2>/dev/null)" != 'true' ]; then
+ echo >&2 "$image stopped unexpectedly!"
+ ( set -x && docker logs "$cid" ) >&2 || true
+ false
+ fi
+
+ docker run --rm \
+ --network "$nid" \
+ "$clientImage" \
+ curl -fsSL -X"$method" --connect-to "::[$ipv6cid]:" "$@" "$proto://example.com/$url"
+}
+
+. "$HOME/oi/test/retry.sh" '[ "$(_request GET / --output /dev/null || echo $?)" != 7 ]'
+
+# Check that we can request /
+_request GET http '/resolver-templates' | grep 'example.com - OK'
diff --git a/.test/tests/templates-resolver-ipv6/server.conf.template b/.test/tests/templates-resolver-ipv6/server.conf.template
new file mode 100644
index 00000000..70835560
--- /dev/null
+++ b/.test/tests/templates-resolver-ipv6/server.conf.template
@@ -0,0 +1,10 @@
+resolver ${NGINX_LOCAL_RESOLVERS};
+
+server {
+ listen 80;
+ listen [::]:80;
+ server_name ${NGINX_MY_SERVER_NAME};
+ default_type text/plain;
+ location = / { return 200 'OK\n'; }
+ location / { return 200 "${NGINX_MY_SERVER_NAME} - OK\n"; }
+}
diff --git a/.test/tests/templates-resolver/run.sh b/.test/tests/templates-resolver/run.sh
new file mode 100755
index 00000000..041f7abd
--- /dev/null
+++ b/.test/tests/templates-resolver/run.sh
@@ -0,0 +1,51 @@
+#!/bin/bash
+
+[ "$DEBUG" ] && set -x
+
+set -eo pipefail
+
+dir="$(dirname "$(readlink -f "$BASH_SOURCE")")"
+
+image="$1"
+
+clientImage='buildpack-deps:buster-curl'
+# ensure the clientImage is ready and available
+if ! docker image inspect "$clientImage" &> /dev/null; then
+ docker pull "$clientImage" > /dev/null
+fi
+
+# Create an instance of the container-under-test
+serverImage="$("$HOME/oi/test/tests/image-name.sh" librarytest/nginx-template "$image")"
+"$HOME/oi/test/tests/docker-build.sh" "$dir" "$serverImage" < /dev/null" EXIT
+
+_request() {
+ local method="$1"
+ shift
+
+ local proto="$1"
+ shift
+
+ local url="${1#/}"
+ shift
+
+ if [ "$(docker inspect -f '{{.State.Running}}' "$cid" 2>/dev/null)" != 'true' ]; then
+ echo >&2 "$image stopped unexpectedly!"
+ ( set -x && docker logs "$cid" ) >&2 || true
+ false
+ fi
+
+ docker run --rm \
+ --link "$cid":nginx \
+ "$clientImage" \
+ curl -fsSL -X"$method" --connect-to '::nginx:' "$@" "$proto://example.com/$url"
+}
+
+. "$HOME/oi/test/retry.sh" '[ "$(_request GET / --output /dev/null || echo $?)" != 7 ]'
+
+# Check that we can request /
+_request GET http '/resolver-templates' | grep 'example.com - OK'
diff --git a/.test/tests/templates-resolver/server.conf.template b/.test/tests/templates-resolver/server.conf.template
new file mode 100644
index 00000000..04a0c085
--- /dev/null
+++ b/.test/tests/templates-resolver/server.conf.template
@@ -0,0 +1,9 @@
+resolver ${NGINX_LOCAL_RESOLVERS};
+
+server {
+ listen 80;
+ server_name ${NGINX_MY_SERVER_NAME};
+ default_type text/plain;
+ location = / { return 200 'OK\n'; }
+ location / { return 200 "${NGINX_MY_SERVER_NAME} - OK\n"; }
+}
diff --git a/.test/tests/templates/run.sh b/.test/tests/templates/run.sh
new file mode 100755
index 00000000..c43aa1db
--- /dev/null
+++ b/.test/tests/templates/run.sh
@@ -0,0 +1,51 @@
+#!/bin/bash
+
+[ "$DEBUG" ] && set -x
+
+set -eo pipefail
+
+dir="$(dirname "$(readlink -f "$BASH_SOURCE")")"
+
+image="$1"
+
+clientImage='buildpack-deps:buster-curl'
+# ensure the clientImage is ready and available
+if ! docker image inspect "$clientImage" &> /dev/null; then
+ docker pull "$clientImage" > /dev/null
+fi
+
+# Create an instance of the container-under-test
+serverImage="$("$HOME/oi/test/tests/image-name.sh" librarytest/nginx-template "$image")"
+"$HOME/oi/test/tests/docker-build.sh" "$dir" "$serverImage" < /dev/null" EXIT
+
+_request() {
+ local method="$1"
+ shift
+
+ local proto="$1"
+ shift
+
+ local url="${1#/}"
+ shift
+
+ if [ "$(docker inspect -f '{{.State.Running}}' "$cid" 2>/dev/null)" != 'true' ]; then
+ echo >&2 "$image stopped unexpectedly!"
+ ( set -x && docker logs "$cid" ) >&2 || true
+ false
+ fi
+
+ docker run --rm \
+ --link "$cid":nginx \
+ "$clientImage" \
+ curl -fsSL -X"$method" --connect-to '::nginx:' "$@" "$proto://example.com/$url"
+}
+
+. "$HOME/oi/test/retry.sh" '[ "$(_request GET / --output /dev/null || echo $?)" != 7 ]'
+
+# Check that we can request /
+_request GET http '/templates' | grep 'example.com - OK'
diff --git a/.test/tests/templates/server.conf.template b/.test/tests/templates/server.conf.template
new file mode 100644
index 00000000..6b00bed6
--- /dev/null
+++ b/.test/tests/templates/server.conf.template
@@ -0,0 +1,7 @@
+server {
+ listen 80;
+ server_name ${NGINX_MY_SERVER_NAME};
+ default_type text/plain;
+ location = / { return 200 'OK\n'; }
+ location / { return 200 "${NGINX_MY_SERVER_NAME} - OK\n"; }
+}
diff --git a/.test/tests/workers/expected-std-out.txt b/.test/tests/workers/expected-std-out.txt
new file mode 100644
index 00000000..9f1d3ac3
--- /dev/null
+++ b/.test/tests/workers/expected-std-out.txt
@@ -0,0 +1,2 @@
+example.com - OK
+# Commented out by 30-tune-worker-processes.sh
diff --git a/.test/tests/workers/run.sh b/.test/tests/workers/run.sh
new file mode 100755
index 00000000..50def70c
--- /dev/null
+++ b/.test/tests/workers/run.sh
@@ -0,0 +1,55 @@
+#!/bin/bash
+
+[ "$DEBUG" ] && set -x
+
+set -eo pipefail
+
+dir="$(dirname "$(readlink -f "$BASH_SOURCE")")"
+
+image="$1"
+
+clientImage='buildpack-deps:buster-curl'
+# ensure the clientImage is ready and available
+if ! docker image inspect "$clientImage" &> /dev/null; then
+ docker pull "$clientImage" > /dev/null
+fi
+
+# Create an instance of the container-under-test
+serverImage="$("$HOME/oi/test/tests/image-name.sh" librarytest/nginx-template "$image")"
+"$HOME/oi/test/tests/docker-build.sh" "$dir" "$serverImage" < /dev/null" EXIT
+
+_request() {
+ local method="$1"
+ shift
+
+ local proto="$1"
+ shift
+
+ local url="${1#/}"
+ shift
+
+ if [ "$(docker inspect -f '{{.State.Running}}' "$cid" 2>/dev/null)" != 'true' ]; then
+ echo >&2 "$image stopped unexpectedly!"
+ ( set -x && docker logs "$cid" ) >&2 || true
+ false
+ fi
+
+ docker run --rm \
+ --link "$cid":nginx \
+ "$clientImage" \
+ curl -fsSL -X"$method" --connect-to '::nginx:' "$@" "$proto://example.com/$url"
+}
+
+. "$HOME/oi/test/retry.sh" '[ "$(_request GET / --output /dev/null || echo $?)" != 7 ]'
+
+# Check that we can request /
+_request GET http '/worker-templates' | grep 'example.com - OK'
+
+result="$(docker exec $cid grep "Commented out by" /etc/nginx/nginx.conf)"
+
+echo "$result" | cut -d\ -f 1-5
diff --git a/.test/tests/workers/server.conf.template b/.test/tests/workers/server.conf.template
new file mode 100644
index 00000000..6b00bed6
--- /dev/null
+++ b/.test/tests/workers/server.conf.template
@@ -0,0 +1,7 @@
+server {
+ listen 80;
+ server_name ${NGINX_MY_SERVER_NAME};
+ default_type text/plain;
+ location = / { return 200 'OK\n'; }
+ location / { return 200 "${NGINX_MY_SERVER_NAME} - OK\n"; }
+}
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
new file mode 100644
index 00000000..78354a27
--- /dev/null
+++ b/CODE_OF_CONDUCT.md
@@ -0,0 +1,74 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in our project and
+our community a harassment-free experience for everyone, regardless of age, body
+size, disability, ethnicity, sex characteristics, gender identity and expression,
+level of experience, education, socio-economic status, nationality, personal
+appearance, race, religion, or sexual identity and orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment
+include:
+
+- Using welcoming and inclusive language
+- Being respectful of differing viewpoints and experiences
+- Gracefully accepting constructive criticism
+- Focusing on what is best for the community
+- Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+- The use of sexualized language or imagery and unwelcome sexual attention or
+ advances
+- Trolling, insulting/derogatory comments, and personal or political attacks
+- Public or private harassment
+- Publishing others' private information, such as a physical or electronic
+ address, without explicit permission
+- Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable
+behavior and are expected to take appropriate and fair corrective action in
+response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or
+reject comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct, or to ban temporarily or
+permanently any contributor for other behaviors that they deem inappropriate,
+threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community. Examples of
+representing a project or community include using an official project e-mail
+address, posting via an official social media account, or acting as an appointed
+representative at an online or offline event. Representation of a project may be
+further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported by contacting the moderation team at . All
+complaints will be reviewed and investigated and will result in a response that
+is deemed necessary and appropriate to the circumstances. The project team is
+obligated to maintain confidentiality with regard to the reporter of an incident.
+Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good
+faith may face temporary or permanent repercussions as determined by other
+members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant](https://www.contributor-covenant.org), version 1.4,
+available at <https://www.contributor-covenant.org/version/1/4/code-of-conduct.html>
+
+For answers to common questions about this code of conduct, see <https://www.contributor-covenant.org/faq>
+
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 00000000..b4b86358
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,53 @@
+# Contributing Guidelines
+
+The following is a set of guidelines for contributing to the Docker NGINX image. We really appreciate that you are considering contributing!
+
+#### Table Of Contents
+
+[Getting Started](#getting-started)
+
+[Contributing](#contributing)
+
+[Code Guidelines](#code-guidelines)
+
+[Code of Conduct](https://github.com/nginxinc/docker-nginx/blob/master/CODE_OF_CONDUCT.md)
+
+## Getting Started
+
+Follow our [how to use this image guide](https://hub.docker.com/_/nginx/) to get the Docker NGINX image up and running.
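+
+As a quick sanity check that the image runs on your setup, something like the following should serve the default welcome page (the `nginx:alpine` tag, container name, and host port are just examples):
+
+```shell
+docker run -d --name nginx-smoke -p 8080:80 nginx:alpine
+curl -fsS http://localhost:8080/ | grep -i 'welcome to nginx'
+docker rm -f nginx-smoke
+```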
+
+## Contributing
+
+### Report a Bug
+
+To report a bug, open an issue on GitHub with the label `bug` using the available bug report issue template. Please ensure the bug has not already been reported. **If the bug is a potential security vulnerability, please report it using our [security policy](https://github.com/nginxinc/docker-nginx/blob/master/SECURITY.md).**
+
+### Suggest a Feature or Enhancement
+
+To suggest a feature or enhancement, please create an issue on GitHub with the label `enhancement` using the available [feature request template](https://github.com/nginxinc/docker-nginx/blob/master/.github/ISSUE_TEMPLATE/feature_request.md). Please ensure the feature or enhancement has not already been suggested.
+
+### Open a Pull Request
+
+- Fork the repo, create a branch, implement your changes, and add any relevant tests. Submit a PR when your changes are **tested** and ready for review.
+- Fill in [our pull request template](https://github.com/nginxinc/docker-nginx/blob/master/.github/pull_request_template.md).
+
+Note: if you'd like to implement a new feature, please consider creating a [feature request issue](https://github.com/nginxinc/docker-nginx/blob/master/.github/ISSUE_TEMPLATE/feature_request.md) first to start a discussion about the feature.
+
+## Code Guidelines
+
+### Git Guidelines
+
+- Keep a clean, concise and meaningful git commit history on your branch (within reason), rebasing locally and squashing before submitting a PR.
+- If possible and/or relevant, use the [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/) format when writing a commit message, so that changelogs can be automatically generated.
+- Follow the guidelines for writing a good commit message, summarised in the next few points (an example follows this list):
+ - In the subject line, use the present tense ("Add feature" not "Added feature").
+ - In the subject line, use the imperative mood ("Move cursor to..." not "Moves cursor to...").
+ - Limit the subject line to 72 characters or less.
+ - Reference issues and pull requests liberally after the subject line.
+ - Add a more detailed description in the body of the git message (use `git commit -a` instead of `git commit -am` so that your editor opens and gives you the space and time to write a good message).
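+
+An example commit message in this style (subject and body here are purely illustrative):
+
+```text
+fix(entrypoint): tune worker_processes only when requested
+
+Explain the motivation and the approach in the body, and reference any
+related issues or pull requests here.
+```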
+
+### Docker Guidelines
+
+- Update any entrypoint scripts via the scripts contained in the `/entrypoint` directory.
+- Update any Dockerfiles via the Dockerfile templates in the root directory (e.g. `Dockerfile-alpine.template`).
+- Run the `./update.sh` script to apply all entrypoint/Dockerfile template changes to the relevant image entrypoints & Dockerfiles (see the example below).
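+
+For example, a typical template change might look like this (file names are illustrative; adjust to the files you actually touch):
+
+```shell
+# edit the shared sources, never the generated per-version copies
+vim entrypoint/20-envsubst-on-templates.sh Dockerfile-alpine.template
+
+# regenerate the per-version Dockerfiles and entrypoint scripts
+./update.sh
+
+# review what was regenerated before committing
+git status
+git diff
+```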
diff --git a/Dockerfile-alpine-otel.template b/Dockerfile-alpine-otel.template
new file mode 100644
index 00000000..7b4bc8f6
--- /dev/null
+++ b/Dockerfile-alpine-otel.template
@@ -0,0 +1,66 @@
+FROM nginx:%%NGINX_VERSION%%-alpine
+
+ENV OTEL_VERSION %%OTEL_VERSION%%
+
+RUN set -x \
+ && apkArch="$(cat /etc/apk/arch)" \
+ && nginxPackages="%%PACKAGES%%
+ " \
+# install prerequisites for public key and pkg-oss checks
+ && apk add --no-cache --virtual .checksum-deps \
+ openssl \
+ && case "$apkArch" in \
+ x86_64|aarch64) \
+# arches officially built by upstream
+ apk add -X "%%PACKAGEREPO%%v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" --no-cache $nginxPackages \
+ ;; \
+ *) \
+# we're on an architecture upstream doesn't officially build for
+# let's build binaries from the published packaging sources
+ set -x \
+ && tempDir="$(mktemp -d)" \
+ && chown nobody:nobody $tempDir \
+ && apk add --no-cache --virtual .build-deps \
+ gcc \
+ libc-dev \
+ make \
+ openssl-dev \
+ pcre2-dev \
+ zlib-dev \
+ linux-headers \
+ cmake \
+ bash \
+ alpine-sdk \
+ findutils \
+ curl \
+ xz \
+ re2-dev \
+ c-ares-dev \
+ && su nobody -s /bin/sh -c " \
+ export HOME=${tempDir} \
+ && cd ${tempDir} \
+ && curl -f -L -O https://github.com/nginx/pkg-oss/archive/%%REVISION%%.tar.gz \
+ && PKGOSSCHECKSUM=\"%%PKGOSSCHECKSUM%% *%%REVISION%%.tar.gz\" \
+ && if [ \"\$(openssl sha512 -r %%REVISION%%.tar.gz)\" = \"\$PKGOSSCHECKSUM\" ]; then \
+ echo \"pkg-oss tarball checksum verification succeeded!\"; \
+ else \
+ echo \"pkg-oss tarball checksum verification failed!\"; \
+ exit 1; \
+ fi \
+ && tar xzvf %%REVISION%%.tar.gz \
+ && cd pkg-oss-%%REVISION%% \
+ && cd alpine \
+ && make %%BUILDTARGET%% \
+ && apk index --allow-untrusted -o ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz ${tempDir}/packages/alpine/${apkArch}/*.apk \
+ && abuild-sign -k ${tempDir}/.abuild/abuild-key.rsa ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz \
+ " \
+ && cp ${tempDir}/.abuild/abuild-key.rsa.pub /etc/apk/keys/ \
+ && apk del --no-network .build-deps \
+ && apk add -X ${tempDir}/packages/alpine/ --no-cache $nginxPackages \
+ ;; \
+ esac \
+# remove checksum deps
+ && apk del --no-network .checksum-deps \
+# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
+ && if [ -n "$tempDir" ]; then rm -rf "$tempDir"; fi \
+ && if [ -f "/etc/apk/keys/abuild-key.rsa.pub" ]; then rm -f /etc/apk/keys/abuild-key.rsa.pub; fi
diff --git a/Dockerfile-alpine-perl.template b/Dockerfile-alpine-perl.template
new file mode 100644
index 00000000..6fc37dea
--- /dev/null
+++ b/Dockerfile-alpine-perl.template
@@ -0,0 +1,61 @@
+FROM nginx:%%NGINX_VERSION%%-alpine
+
+RUN set -x \
+ && apkArch="$(cat /etc/apk/arch)" \
+ && nginxPackages="%%PACKAGES%%
+ " \
+# install prerequisites for public key and pkg-oss checks
+ && apk add --no-cache --virtual .checksum-deps \
+ openssl \
+ && case "$apkArch" in \
+ x86_64|aarch64) \
+# arches officially built by upstream
+ apk add -X "%%PACKAGEREPO%%v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" --no-cache $nginxPackages \
+ ;; \
+ *) \
+# we're on an architecture upstream doesn't officially build for
+# let's build binaries from the published packaging sources
+ set -x \
+ && tempDir="$(mktemp -d)" \
+ && chown nobody:nobody $tempDir \
+ && apk add --no-cache --virtual .build-deps \
+ gcc \
+ libc-dev \
+ make \
+ openssl-dev \
+ pcre2-dev \
+ zlib-dev \
+ linux-headers \
+ perl-dev \
+ bash \
+ alpine-sdk \
+ findutils \
+ curl \
+ && su nobody -s /bin/sh -c " \
+ export HOME=${tempDir} \
+ && cd ${tempDir} \
+ && curl -f -L -O https://github.com/nginx/pkg-oss/archive/%%REVISION%%.tar.gz \
+ && PKGOSSCHECKSUM=\"%%PKGOSSCHECKSUM%% *%%REVISION%%.tar.gz\" \
+ && if [ \"\$(openssl sha512 -r %%REVISION%%.tar.gz)\" = \"\$PKGOSSCHECKSUM\" ]; then \
+ echo \"pkg-oss tarball checksum verification succeeded!\"; \
+ else \
+ echo \"pkg-oss tarball checksum verification failed!\"; \
+ exit 1; \
+ fi \
+ && tar xzvf %%REVISION%%.tar.gz \
+ && cd pkg-oss-%%REVISION%% \
+ && cd alpine \
+ && make %%BUILDTARGET%% \
+ && apk index --allow-untrusted -o ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz ${tempDir}/packages/alpine/${apkArch}/*.apk \
+ && abuild-sign -k ${tempDir}/.abuild/abuild-key.rsa ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz \
+ " \
+ && cp ${tempDir}/.abuild/abuild-key.rsa.pub /etc/apk/keys/ \
+ && apk del --no-network .build-deps \
+ && apk add -X ${tempDir}/packages/alpine/ --no-cache $nginxPackages \
+ ;; \
+ esac \
+# remove checksum deps
+ && apk del --no-network .checksum-deps \
+# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
+ && if [ -n "$tempDir" ]; then rm -rf "$tempDir"; fi \
+ && if [ -f "/etc/apk/keys/abuild-key.rsa.pub" ]; then rm -f /etc/apk/keys/abuild-key.rsa.pub; fi
diff --git a/Dockerfile-alpine-slim.template b/Dockerfile-alpine-slim.template
new file mode 100644
index 00000000..896b9a5c
--- /dev/null
+++ b/Dockerfile-alpine-slim.template
@@ -0,0 +1,117 @@
+FROM alpine:%%ALPINE_VERSION%%
+
+LABEL maintainer="NGINX Docker Maintainers <docker-maint@nginx.com>"
+
+ENV NGINX_VERSION %%NGINX_VERSION%%
+ENV PKG_RELEASE %%PKG_RELEASE%%
+ENV DYNPKG_RELEASE %%DYNPKG_RELEASE%%
+
+RUN set -x \
+# create nginx user/group first, to be consistent throughout docker variants
+ && addgroup -g 101 -S nginx \
+ && adduser -S -D -H -u 101 -h /var/cache/nginx -s /sbin/nologin -G nginx -g nginx nginx \
+ && apkArch="$(cat /etc/apk/arch)" \
+ && nginxPackages="%%PACKAGES%%
+ " \
+# install prerequisites for public key and pkg-oss checks
+ && apk add --no-cache --virtual .checksum-deps \
+ openssl \
+ && case "$apkArch" in \
+ x86_64|aarch64) \
+# arches officially built by upstream
+ set -x \
+ && KEY_SHA512="e09fa32f0a0eab2b879ccbbc4d0e4fb9751486eedda75e35fac65802cc9faa266425edf83e261137a2f4d16281ce2c1a5f4502930fe75154723da014214f0655" \
+ && wget -O /tmp/nginx_signing.rsa.pub https://nginx.org/keys/nginx_signing.rsa.pub \
+ && if echo "$KEY_SHA512 */tmp/nginx_signing.rsa.pub" | sha512sum -c -; then \
+ echo "key verification succeeded!"; \
+ mv /tmp/nginx_signing.rsa.pub /etc/apk/keys/; \
+ else \
+ echo "key verification failed!"; \
+ exit 1; \
+ fi \
+ && apk add -X "%%PACKAGEREPO%%v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" --no-cache $nginxPackages \
+ ;; \
+ *) \
+# we're on an architecture upstream doesn't officially build for
+# let's build binaries from the published packaging sources
+ set -x \
+ && tempDir="$(mktemp -d)" \
+ && chown nobody:nobody $tempDir \
+ && apk add --no-cache --virtual .build-deps \
+ gcc \
+ libc-dev \
+ make \
+ openssl-dev \
+ pcre2-dev \
+ zlib-dev \
+ linux-headers \
+ bash \
+ alpine-sdk \
+ findutils \
+ curl \
+ && su nobody -s /bin/sh -c " \
+ export HOME=${tempDir} \
+ && cd ${tempDir} \
+ && curl -f -L -O https://github.com/nginx/pkg-oss/archive/%%REVISION%%.tar.gz \
+ && PKGOSSCHECKSUM=\"%%PKGOSSCHECKSUM%% *%%REVISION%%.tar.gz\" \
+ && if [ \"\$(openssl sha512 -r %%REVISION%%.tar.gz)\" = \"\$PKGOSSCHECKSUM\" ]; then \
+ echo \"pkg-oss tarball checksum verification succeeded!\"; \
+ else \
+ echo \"pkg-oss tarball checksum verification failed!\"; \
+ exit 1; \
+ fi \
+ && tar xzvf %%REVISION%%.tar.gz \
+ && cd pkg-oss-%%REVISION%% \
+ && cd alpine \
+ && make %%BUILDTARGET%% \
+ && apk index --allow-untrusted -o ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz ${tempDir}/packages/alpine/${apkArch}/*.apk \
+ && abuild-sign -k ${tempDir}/.abuild/abuild-key.rsa ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz \
+ " \
+ && cp ${tempDir}/.abuild/abuild-key.rsa.pub /etc/apk/keys/ \
+ && apk del --no-network .build-deps \
+ && apk add -X ${tempDir}/packages/alpine/ --no-cache $nginxPackages \
+ ;; \
+ esac \
+# remove checksum deps
+ && apk del --no-network .checksum-deps \
+# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
+ && if [ -n "$tempDir" ]; then rm -rf "$tempDir"; fi \
+ && if [ -f "/etc/apk/keys/abuild-key.rsa.pub" ]; then rm -f /etc/apk/keys/abuild-key.rsa.pub; fi \
+# Bring in gettext so we can get `envsubst`, then throw
+# the rest away. To do this, we need to install `gettext`
+# then move `envsubst` out of the way so `gettext` can
+# be deleted completely, then move `envsubst` back.
+ && apk add --no-cache --virtual .gettext gettext \
+ && mv /usr/bin/envsubst /tmp/ \
+ \
+ && runDeps="$( \
+ scanelf --needed --nobanner /tmp/envsubst \
+ | awk '{ gsub(/,/, "\nso:", $2); print "so:" $2 }' \
+ | sort -u \
+ | xargs -r apk info --installed \
+ | sort -u \
+ )" \
+ && apk add --no-cache $runDeps \
+ && apk del --no-network .gettext \
+ && mv /tmp/envsubst /usr/local/bin/ \
+# Bring in tzdata so users could set the timezones through the environment
+# variables
+ && apk add --no-cache tzdata \
+# forward request and error logs to docker log collector
+ && ln -sf /dev/stdout /var/log/nginx/access.log \
+ && ln -sf /dev/stderr /var/log/nginx/error.log \
+# create a docker-entrypoint.d directory
+ && mkdir /docker-entrypoint.d
+
+COPY docker-entrypoint.sh /
+COPY 10-listen-on-ipv6-by-default.sh /docker-entrypoint.d
+COPY 15-local-resolvers.envsh /docker-entrypoint.d
+COPY 20-envsubst-on-templates.sh /docker-entrypoint.d
+COPY 30-tune-worker-processes.sh /docker-entrypoint.d
+ENTRYPOINT ["/docker-entrypoint.sh"]
+
+EXPOSE 80
+
+STOPSIGNAL SIGQUIT
+
+CMD ["nginx", "-g", "daemon off;"]
diff --git a/Dockerfile-alpine.template b/Dockerfile-alpine.template
new file mode 100644
index 00000000..bc77dfd6
--- /dev/null
+++ b/Dockerfile-alpine.template
@@ -0,0 +1,69 @@
+FROM nginx:%%NGINX_VERSION%%-alpine-slim
+
+ENV NJS_VERSION %%NJS_VERSION%%
+ENV NJS_RELEASE %%NJS_RELEASE%%
+
+RUN set -x \
+ && apkArch="$(cat /etc/apk/arch)" \
+ && nginxPackages="%%PACKAGES%%
+ " \
+# install prerequisites for public key and pkg-oss checks
+ && apk add --no-cache --virtual .checksum-deps \
+ openssl \
+ && case "$apkArch" in \
+ x86_64|aarch64) \
+# arches officially built by upstream
+ apk add -X "%%PACKAGEREPO%%v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" --no-cache $nginxPackages \
+ ;; \
+ *) \
+# we're on an architecture upstream doesn't officially build for
+# let's build binaries from the published packaging sources
+ set -x \
+ && tempDir="$(mktemp -d)" \
+ && chown nobody:nobody $tempDir \
+ && apk add --no-cache --virtual .build-deps \
+ gcc \
+ libc-dev \
+ make \
+ openssl-dev \
+ pcre2-dev \
+ zlib-dev \
+ linux-headers \
+ libxslt-dev \
+ gd-dev \
+ geoip-dev \
+ libedit-dev \
+ bash \
+ alpine-sdk \
+ findutils \
+ curl \
+ && su nobody -s /bin/sh -c " \
+ export HOME=${tempDir} \
+ && cd ${tempDir} \
+ && curl -f -L -O https://github.com/nginx/pkg-oss/archive/%%REVISION%%.tar.gz \
+ && PKGOSSCHECKSUM=\"%%PKGOSSCHECKSUM%% *%%REVISION%%.tar.gz\" \
+ && if [ \"\$(openssl sha512 -r %%REVISION%%.tar.gz)\" = \"\$PKGOSSCHECKSUM\" ]; then \
+ echo \"pkg-oss tarball checksum verification succeeded!\"; \
+ else \
+ echo \"pkg-oss tarball checksum verification failed!\"; \
+ exit 1; \
+ fi \
+ && tar xzvf %%REVISION%%.tar.gz \
+ && cd pkg-oss-%%REVISION%% \
+ && cd alpine \
+ && make %%BUILDTARGET%% \
+ && apk index --allow-untrusted -o ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz ${tempDir}/packages/alpine/${apkArch}/*.apk \
+ && abuild-sign -k ${tempDir}/.abuild/abuild-key.rsa ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz \
+ " \
+ && cp ${tempDir}/.abuild/abuild-key.rsa.pub /etc/apk/keys/ \
+ && apk del --no-network .build-deps \
+ && apk add -X ${tempDir}/packages/alpine/ --no-cache $nginxPackages \
+ ;; \
+ esac \
+# remove checksum deps
+ && apk del --no-network .checksum-deps \
+# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
+ && if [ -n "$tempDir" ]; then rm -rf "$tempDir"; fi \
+ && if [ -f "/etc/apk/keys/abuild-key.rsa.pub" ]; then rm -f /etc/apk/keys/abuild-key.rsa.pub; fi \
+# Bring in curl and ca-certificates to make registering on DNS SD easier
+ && apk add --no-cache curl ca-certificates
diff --git a/Dockerfile-debian-otel.template b/Dockerfile-debian-otel.template
new file mode 100644
index 00000000..709836c1
--- /dev/null
+++ b/Dockerfile-debian-otel.template
@@ -0,0 +1,89 @@
+FROM nginx:%%NGINX_VERSION%%
+
+ENV OTEL_VERSION %%OTEL_VERSION%%
+
+RUN set -x; \
+ NGINX_GPGKEY_PATH=/etc/apt/keyrings/nginx-archive-keyring.gpg; \
+ dpkgArch="$(dpkg --print-architecture)" \
+ && nginxPackages="%%PACKAGES%%
+ " \
+ && case "$dpkgArch" in \
+ amd64|arm64) \
+# arches officially built by upstream
+ echo "deb [signed-by=$NGINX_GPGKEY_PATH] %%PACKAGEREPO%% %%DEBIAN_VERSION%% nginx" >> /etc/apt/sources.list.d/nginx.list \
+ && apt-get update \
+ ;; \
+ *) \
+# we're on an architecture upstream doesn't officially build for
+# let's build binaries from the published packaging sources
+# new directory for storing sources and .deb files
+ tempDir="$(mktemp -d)" \
+ && chmod 777 "$tempDir" \
+# (777 to ensure APT's "_apt" user can access it too)
+ \
+# save list of currently-installed packages so build dependencies can be cleanly removed later
+ && savedAptMark="$(apt-mark showmanual)" \
+ \
+# build .deb files from upstream's packaging sources
+ && apt-get update \
+ && apt-get install --no-install-recommends --no-install-suggests -y \
+ curl \
+ devscripts \
+ equivs \
+ git \
+ libxml2-utils \
+ lsb-release \
+ xsltproc \
+ && ( \
+ cd "$tempDir" \
+ && REVISION="%%REVISION%%" \
+ && REVISION=${REVISION%~*} \
+ && curl -f -L -O https://github.com/nginx/pkg-oss/archive/${REVISION}.tar.gz \
+ && PKGOSSCHECKSUM="%%PKGOSSCHECKSUM%% *${REVISION}.tar.gz" \
+ && if [ "$(openssl sha512 -r ${REVISION}.tar.gz)" = "$PKGOSSCHECKSUM" ]; then \
+ echo "pkg-oss tarball checksum verification succeeded!"; \
+ else \
+ echo "pkg-oss tarball checksum verification failed!"; \
+ exit 1; \
+ fi \
+ && tar xzvf ${REVISION}.tar.gz \
+ && cd pkg-oss-${REVISION} \
+ && cd debian \
+ && for target in %%BUILDTARGET%%; do \
+ make rules-$target; \
+ mk-build-deps --install --tool="apt-get -o Debug::pkgProblemResolver=yes --no-install-recommends --yes" \
+ debuild-$target/nginx-$NGINX_VERSION/debian/control; \
+ done \
+ && make %%BUILDTARGET%% \
+ ) \
+# we don't remove APT lists here because they get re-downloaded and removed later
+ \
+# reset apt-mark's "manual" list so that "purge --auto-remove" will remove all build dependencies
+# (which is done after we install the built packages so we don't have to redownload any overlapping dependencies)
+ && apt-mark showmanual | xargs apt-mark auto > /dev/null \
+ && { [ -z "$savedAptMark" ] || apt-mark manual $savedAptMark; } \
+ \
+# create a temporary local APT repo to install from (so that dependency resolution can be handled by APT, as it should be)
+ && ls -lAFh "$tempDir" \
+ && ( cd "$tempDir" && dpkg-scanpackages . > Packages ) \
+ && grep '^Package: ' "$tempDir/Packages" \
+ && echo "deb [ trusted=yes ] file://$tempDir ./" > /etc/apt/sources.list.d/temp.list \
+# work around the following APT issue by using "Acquire::GzipIndexes=false" (overriding "/etc/apt/apt.conf.d/docker-gzip-indexes")
+# Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
+# ...
+# E: Failed to fetch store:/var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
+ && apt-get -o Acquire::GzipIndexes=false update \
+ ;; \
+ esac \
+ \
+ && apt-get install --no-install-recommends --no-install-suggests -y \
+ $nginxPackages \
+ gettext-base \
+ curl \
+ && apt-get remove --purge --auto-remove -y && rm -rf /var/lib/apt/lists/* /etc/apt/sources.list.d/nginx.list \
+ \
+# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
+ && if [ -n "$tempDir" ]; then \
+ apt-get purge -y --auto-remove \
+ && rm -rf "$tempDir" /etc/apt/sources.list.d/temp.list; \
+ fi
diff --git a/Dockerfile-debian-perl.template b/Dockerfile-debian-perl.template
new file mode 100644
index 00000000..84cf99f6
--- /dev/null
+++ b/Dockerfile-debian-perl.template
@@ -0,0 +1,87 @@
+FROM nginx:%%NGINX_VERSION%%
+
+RUN set -x; \
+ NGINX_GPGKEY_PATH=/etc/apt/keyrings/nginx-archive-keyring.gpg; \
+ dpkgArch="$(dpkg --print-architecture)" \
+ && nginxPackages="%%PACKAGES%%
+ " \
+ && case "$dpkgArch" in \
+ amd64|arm64) \
+# arches officially built by upstream
+ echo "deb [signed-by=$NGINX_GPGKEY_PATH] %%PACKAGEREPO%% %%DEBIAN_VERSION%% nginx" >> /etc/apt/sources.list.d/nginx.list \
+ && apt-get update \
+ ;; \
+ *) \
+# we're on an architecture upstream doesn't officially build for
+# let's build binaries from the published packaging sources
+# new directory for storing sources and .deb files
+ tempDir="$(mktemp -d)" \
+ && chmod 777 "$tempDir" \
+# (777 to ensure APT's "_apt" user can access it too)
+ \
+# save list of currently-installed packages so build dependencies can be cleanly removed later
+ && savedAptMark="$(apt-mark showmanual)" \
+ \
+# build .deb files from upstream's packaging sources
+ && apt-get update \
+ && apt-get install --no-install-recommends --no-install-suggests -y \
+ curl \
+ devscripts \
+ equivs \
+ git \
+ libxml2-utils \
+ lsb-release \
+ xsltproc \
+ && ( \
+ cd "$tempDir" \
+ && REVISION="%%REVISION%%" \
+ && REVISION=${REVISION%~*} \
+ && curl -f -L -O https://github.com/nginx/pkg-oss/archive/${REVISION}.tar.gz \
+ && PKGOSSCHECKSUM="%%PKGOSSCHECKSUM%% *${REVISION}.tar.gz" \
+ && if [ "$(openssl sha512 -r ${REVISION}.tar.gz)" = "$PKGOSSCHECKSUM" ]; then \
+ echo "pkg-oss tarball checksum verification succeeded!"; \
+ else \
+ echo "pkg-oss tarball checksum verification failed!"; \
+ exit 1; \
+ fi \
+ && tar xzvf ${REVISION}.tar.gz \
+ && cd pkg-oss-${REVISION} \
+ && cd debian \
+ && for target in %%BUILDTARGET%%; do \
+ make rules-$target; \
+ mk-build-deps --install --tool="apt-get -o Debug::pkgProblemResolver=yes --no-install-recommends --yes" \
+ debuild-$target/nginx-$NGINX_VERSION/debian/control; \
+ done \
+ && make %%BUILDTARGET%% \
+ ) \
+# we don't remove APT lists here because they get re-downloaded and removed later
+ \
+# reset apt-mark's "manual" list so that "purge --auto-remove" will remove all build dependencies
+# (which is done after we install the built packages so we don't have to redownload any overlapping dependencies)
+ && apt-mark showmanual | xargs apt-mark auto > /dev/null \
+ && { [ -z "$savedAptMark" ] || apt-mark manual $savedAptMark; } \
+ \
+# create a temporary local APT repo to install from (so that dependency resolution can be handled by APT, as it should be)
+ && ls -lAFh "$tempDir" \
+ && ( cd "$tempDir" && dpkg-scanpackages . > Packages ) \
+ && grep '^Package: ' "$tempDir/Packages" \
+ && echo "deb [ trusted=yes ] file://$tempDir ./" > /etc/apt/sources.list.d/temp.list \
+# work around the following APT issue by using "Acquire::GzipIndexes=false" (overriding "/etc/apt/apt.conf.d/docker-gzip-indexes")
+# Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
+# ...
+# E: Failed to fetch store:/var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
+ && apt-get -o Acquire::GzipIndexes=false update \
+ ;; \
+ esac \
+ \
+ && apt-get install --no-install-recommends --no-install-suggests -y \
+ $nginxPackages \
+ gettext-base \
+ curl \
+ && apt-get remove --purge --auto-remove -y && rm -rf /var/lib/apt/lists/* /etc/apt/sources.list.d/nginx.list \
+ \
+# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
+ && if [ -n "$tempDir" ]; then \
+ apt-get purge -y --auto-remove \
+ && rm -rf "$tempDir" /etc/apt/sources.list.d/temp.list; \
+ fi
diff --git a/Dockerfile-debian.template b/Dockerfile-debian.template
new file mode 100644
index 00000000..9138f4c9
--- /dev/null
+++ b/Dockerfile-debian.template
@@ -0,0 +1,135 @@
+FROM debian:%%DEBIAN_VERSION%%-slim
+
+LABEL maintainer="NGINX Docker Maintainers <docker-maint@nginx.com>"
+
+ENV NGINX_VERSION %%NGINX_VERSION%%
+ENV NJS_VERSION %%NJS_VERSION%%
+ENV NJS_RELEASE %%NJS_RELEASE%%
+ENV PKG_RELEASE %%PKG_RELEASE%%
+ENV DYNPKG_RELEASE %%DYNPKG_RELEASE%%
+
+RUN set -x \
+# create nginx user/group first, to be consistent throughout docker variants
+ && groupadd --system --gid 101 nginx \
+ && useradd --system --gid nginx --no-create-home --home /nonexistent --comment "nginx user" --shell /bin/false --uid 101 nginx \
+ && apt-get update \
+ && apt-get install --no-install-recommends --no-install-suggests -y gnupg1 ca-certificates \
+ && \
+ NGINX_GPGKEYS="573BFD6B3D8FBC641079A6ABABF5BD827BD9BF62 8540A6F18833A80E9C1653A42FD21310B49F6B46 9E9BE90EACBCDE69FE9B204CBCDCD8A38D88A2B3"; \
+ NGINX_GPGKEY_PATH=/etc/apt/keyrings/nginx-archive-keyring.gpg; \
+ export GNUPGHOME="$(mktemp -d)"; \
+ found=''; \
+ for NGINX_GPGKEY in $NGINX_GPGKEYS; do \
+ for server in \
+ hkp://keyserver.ubuntu.com:80 \
+ pgp.mit.edu \
+ ; do \
+ echo "Fetching GPG key $NGINX_GPGKEY from $server"; \
+ gpg1 --keyserver "$server" --keyserver-options timeout=10 --recv-keys "$NGINX_GPGKEY" && found=yes && break; \
+ done; \
+ test -z "$found" && echo >&2 "error: failed to fetch GPG key $NGINX_GPGKEY" && exit 1; \
+ done; \
+ gpg1 --export "$NGINX_GPGKEYS" > "$NGINX_GPGKEY_PATH" ; \
+ rm -rf "$GNUPGHOME"; \
+ apt-get remove --purge --auto-remove -y gnupg1 && rm -rf /var/lib/apt/lists/* \
+ && dpkgArch="$(dpkg --print-architecture)" \
+ && nginxPackages="%%PACKAGES%%
+ " \
+ && case "$dpkgArch" in \
+ amd64|arm64) \
+# arches officially built by upstream
+ echo "deb [signed-by=$NGINX_GPGKEY_PATH] %%PACKAGEREPO%% %%DEBIAN_VERSION%% nginx" >> /etc/apt/sources.list.d/nginx.list \
+ && apt-get update \
+ ;; \
+ *) \
+# we're on an architecture upstream doesn't officially build for
+# let's build binaries from the published packaging sources
+# new directory for storing sources and .deb files
+ tempDir="$(mktemp -d)" \
+ && chmod 777 "$tempDir" \
+# (777 to ensure APT's "_apt" user can access it too)
+ \
+# save list of currently-installed packages so build dependencies can be cleanly removed later
+ && savedAptMark="$(apt-mark showmanual)" \
+ \
+# build .deb files from upstream's packaging sources
+ && apt-get update \
+ && apt-get install --no-install-recommends --no-install-suggests -y \
+ curl \
+ devscripts \
+ equivs \
+ git \
+ libxml2-utils \
+ lsb-release \
+ xsltproc \
+ && ( \
+ cd "$tempDir" \
+ && REVISION="%%REVISION%%" \
+ && REVISION=${REVISION%~*} \
+ && curl -f -L -O https://github.com/nginx/pkg-oss/archive/${REVISION}.tar.gz \
+ && PKGOSSCHECKSUM="%%PKGOSSCHECKSUM%% *${REVISION}.tar.gz" \
+ && if [ "$(openssl sha512 -r ${REVISION}.tar.gz)" = "$PKGOSSCHECKSUM" ]; then \
+ echo "pkg-oss tarball checksum verification succeeded!"; \
+ else \
+ echo "pkg-oss tarball checksum verification failed!"; \
+ exit 1; \
+ fi \
+ && tar xzvf ${REVISION}.tar.gz \
+ && cd pkg-oss-${REVISION} \
+ && cd debian \
+ && for target in %%BUILDTARGET%%; do \
+ make rules-$target; \
+ mk-build-deps --install --tool="apt-get -o Debug::pkgProblemResolver=yes --no-install-recommends --yes" \
+ debuild-$target/nginx-$NGINX_VERSION/debian/control; \
+ done \
+ && make %%BUILDTARGET%% \
+ ) \
+# we don't remove APT lists here because they get re-downloaded and removed later
+ \
+# reset apt-mark's "manual" list so that "purge --auto-remove" will remove all build dependencies
+# (which is done after we install the built packages so we don't have to redownload any overlapping dependencies)
+ && apt-mark showmanual | xargs apt-mark auto > /dev/null \
+ && { [ -z "$savedAptMark" ] || apt-mark manual $savedAptMark; } \
+ \
+# create a temporary local APT repo to install from (so that dependency resolution can be handled by APT, as it should be)
+ && ls -lAFh "$tempDir" \
+ && ( cd "$tempDir" && dpkg-scanpackages . > Packages ) \
+ && grep '^Package: ' "$tempDir/Packages" \
+ && echo "deb [ trusted=yes ] file://$tempDir ./" > /etc/apt/sources.list.d/temp.list \
+# work around the following APT issue by using "Acquire::GzipIndexes=false" (overriding "/etc/apt/apt.conf.d/docker-gzip-indexes")
+# Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
+# ...
+# E: Failed to fetch store:/var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
+ && apt-get -o Acquire::GzipIndexes=false update \
+ ;; \
+ esac \
+ \
+ && apt-get install --no-install-recommends --no-install-suggests -y \
+ $nginxPackages \
+ gettext-base \
+ curl \
+ && apt-get remove --purge --auto-remove -y && rm -rf /var/lib/apt/lists/* /etc/apt/sources.list.d/nginx.list \
+ \
+# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
+ && if [ -n "$tempDir" ]; then \
+ apt-get purge -y --auto-remove \
+ && rm -rf "$tempDir" /etc/apt/sources.list.d/temp.list; \
+ fi \
+# forward request and error logs to docker log collector
+ && ln -sf /dev/stdout /var/log/nginx/access.log \
+ && ln -sf /dev/stderr /var/log/nginx/error.log \
+# create a docker-entrypoint.d directory
+ && mkdir /docker-entrypoint.d
+
+COPY docker-entrypoint.sh /
+COPY 10-listen-on-ipv6-by-default.sh /docker-entrypoint.d
+COPY 15-local-resolvers.envsh /docker-entrypoint.d
+COPY 20-envsubst-on-templates.sh /docker-entrypoint.d
+COPY 30-tune-worker-processes.sh /docker-entrypoint.d
+ENTRYPOINT ["/docker-entrypoint.sh"]
+
+EXPOSE 80
+
+STOPSIGNAL SIGQUIT
+
+CMD ["nginx", "-g", "daemon off;"]
diff --git a/LICENSE b/LICENSE
index bc1d673f..f5af4aac 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,4 +1,4 @@
-Copyright (C) 2011-2016 Nginx, Inc.
+Copyright (C) 2011-2023 F5, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/README.md b/README.md
index f0d253c2..24d8e79c 100644
--- a/README.md
+++ b/README.md
@@ -1,9 +1,38 @@
+[![Project Status: Active – The project has reached a stable, usable state and is being actively developed.](https://www.repostatus.org/badges/latest/active.svg)](https://www.repostatus.org/#active)
+[![Community Support](https://badgen.net/badge/support/community/cyan?icon=awesome)](https://github.com/nginxinc/docker-nginx/blob/master/SUPPORT.md)
+
# About this Repo
-This is the Git repo of the official Docker image for [nginx](https://registry.hub.docker.com/_/nginx/). See the
-Hub page for the full readme on how to use the Docker image and for information
-regarding contributing and issues.
+## Maintained by: [the NGINX Docker Maintainers](https://github.com/nginxinc/docker-nginx)
+
+This is the Git repo of the [Docker "Official Image"](https://github.com/docker-library/official-images#what-are-official-images) for [`nginx`](https://hub.docker.com/_/nginx/). See [the Docker Hub page](https://hub.docker.com/_/nginx/) for the full readme on how to use this Docker image and for information regarding contributing and issues.
+
+The [full image description on Docker Hub](https://hub.docker.com/_/nginx/) is generated/maintained over in [the docker-library/docs repository](https://github.com/docker-library/docs), specifically in [the `nginx` directory](https://github.com/docker-library/docs/tree/master/nginx).
+
+The changelog for NGINX releases is available at [nginx.org changes page](https://nginx.org/en/CHANGES).
+
+## See a change merged here that doesn't show up on Docker Hub yet?
+
+For more information about the full official images change lifecycle, see [the "An image's source changed in Git, now what?" FAQ entry](https://github.com/docker-library/faq#an-images-source-changed-in-git-now-what).
+
+For outstanding `nginx` image PRs, check [PRs with the "library/nginx" label on the official-images repository](https://github.com/docker-library/official-images/labels/library%2Fnginx). For the current "source of truth" for [`nginx`](https://hub.docker.com/_/nginx/), see [the `library/nginx` file in the official-images repository](https://github.com/docker-library/official-images/blob/master/library/nginx).
+
+## Contributing
+
+Please see the [contributing guide](https://github.com/nginxinc/docker-nginx/blob/master/CONTRIBUTING.md) for guidelines on how to best contribute to this project.
+
+## License
+
+[BSD 2-Clause](https://github.com/nginxinc/docker-nginx/blob/master/LICENSE)
+
+© [F5, Inc.](https://www.f5.com/) 2023
+
+---
-The full readme is generated over in [docker-library/docs](https://github.com/docker-library/docs),
-specificially in [docker-library/docs/nginx](https://github.com/docker-library/docs/tree/master/nginx).
+- [![build status badge](https://img.shields.io/github/actions/workflow/status/nginxinc/docker-nginx/ci.yml?branch=master&label=GitHub%20CI)](https://github.com/nginxinc/docker-nginx/actions?query=workflow%3A%22GitHub+CI%22+branch%3Amaster)
+| Build | Status | Badges | (per-arch) |
+|:-:|:-:|:-:|:-:|
+| [![amd64 build status badge](https://img.shields.io/jenkins/s/https/doi-janky.infosiftr.net/job/multiarch/job/amd64/job/nginx.svg?label=amd64)](https://doi-janky.infosiftr.net/job/multiarch/job/amd64/job/nginx/) | [![arm32v5 build status badge](https://img.shields.io/jenkins/s/https/doi-janky.infosiftr.net/job/multiarch/job/arm32v5/job/nginx.svg?label=arm32v5)](https://doi-janky.infosiftr.net/job/multiarch/job/arm32v5/job/nginx/) | [![arm32v6 build status badge](https://img.shields.io/jenkins/s/https/doi-janky.infosiftr.net/job/multiarch/job/arm32v6/job/nginx.svg?label=arm32v6)](https://doi-janky.infosiftr.net/job/multiarch/job/arm32v6/job/nginx/) | [![arm32v7 build status badge](https://img.shields.io/jenkins/s/https/doi-janky.infosiftr.net/job/multiarch/job/arm32v7/job/nginx.svg?label=arm32v7)](https://doi-janky.infosiftr.net/job/multiarch/job/arm32v7/job/nginx/) |
+| [![arm64v8 build status badge](https://img.shields.io/jenkins/s/https/doi-janky.infosiftr.net/job/multiarch/job/arm64v8/job/nginx.svg?label=arm64v8)](https://doi-janky.infosiftr.net/job/multiarch/job/arm64v8/job/nginx/) | [![i386 build status badge](https://img.shields.io/jenkins/s/https/doi-janky.infosiftr.net/job/multiarch/job/i386/job/nginx.svg?label=i386)](https://doi-janky.infosiftr.net/job/multiarch/job/i386/job/nginx/) | [![mips64le build status badge](https://img.shields.io/jenkins/s/https/doi-janky.infosiftr.net/job/multiarch/job/mips64le/job/nginx.svg?label=mips64le)](https://doi-janky.infosiftr.net/job/multiarch/job/mips64le/job/nginx/) | [![ppc64le build status badge](https://img.shields.io/jenkins/s/https/doi-janky.infosiftr.net/job/multiarch/job/ppc64le/job/nginx.svg?label=ppc64le)](https://doi-janky.infosiftr.net/job/multiarch/job/ppc64le/job/nginx/) |
+| [![s390x build status badge](https://img.shields.io/jenkins/s/https/doi-janky.infosiftr.net/job/multiarch/job/s390x/job/nginx.svg?label=s390x)](https://doi-janky.infosiftr.net/job/multiarch/job/s390x/job/nginx/) | [![put-shared build status badge](https://img.shields.io/jenkins/s/https/doi-janky.infosiftr.net/job/put-shared/job/light/job/nginx.svg?label=put-shared)](https://doi-janky.infosiftr.net/job/put-shared/job/light/job/nginx/) |
diff --git a/SECURITY.md b/SECURITY.md
new file mode 100644
index 00000000..47a42e26
--- /dev/null
+++ b/SECURITY.md
@@ -0,0 +1,14 @@
+# Security Policy
+
+## Latest Versions
+
+We advise users to run or update to the most recent release of the NGINX Docker image. Older versions of the NGINX Docker image may not have all enhancements and/or bug fixes applied to them.
+
+## Reporting a Vulnerability
+
+The F5 Security Incident Response Team (F5 SIRT) has an email alias that makes it easy to report potential security vulnerabilities.
+
+- If you’re an F5 customer with an active support contract, please contact [F5 Technical Support](https://www.f5.com/services/support).
+- If you aren’t an F5 customer, please report any potential or current instances of security vulnerabilities with any F5 product to the F5 Security Incident Response Team at F5SIRT@f5.com.
+
+For more information, visit [https://www.f5.com/services/support/report-a-vulnerability](https://www.f5.com/services/support/report-a-vulnerability).
diff --git a/SUPPORT.md b/SUPPORT.md
new file mode 100644
index 00000000..2a6b505d
--- /dev/null
+++ b/SUPPORT.md
@@ -0,0 +1,37 @@
+# Support
+
+## Ask a Question
+
+We use GitHub for tracking bugs and feature requests related to all the Docker NGINX images (including all variants and container registries).
+
+Don't know how something in this project works? Curious if this project can achieve your desired functionality? Please open an issue on GitHub with the label `question`.
+
+## NGINX Specific Questions and/or Issues
+
+This isn't the right place to get support for NGINX-specific questions, but the resources below may help. Thanks for your understanding!
+
+### Community Slack
+
+We have a community [Slack](https://nginxcommunity.slack.com/)!
+
+If you are not a member, click [here](https://community.nginx.org/joinslack) to sign up (and let us know if the link does not seem to be working!)
+
+Once you join, check out the `#beginner-questions` and `#nginx-users` channels :)
+
+### Documentation
+
+For a comprehensive list of all NGINX directives, check out <https://nginx.org/en/docs/>.
+
+For a comprehensive list of admin and deployment guides for all NGINX products, check out <https://docs.nginx.com>.
+
+### Mailing List
+
+Want to get in touch with the NGINX development team directly? Try using the relevant mailing list found at <https://mailman.nginx.org/mailman/listinfo>!
+
+## Contributing
+
+Please see the [contributing guide](https://github.com/nginxinc/docker-nginx/blob/master/CONTRIBUTING.md) for guidelines on how to best contribute to this project.
+
+## Commercial Support
+
+Commercial support for this project may be available. Please get in touch with [NGINX sales](https://www.nginx.com/contact-sales/) or check your contract details for more info!
diff --git a/entrypoint/10-listen-on-ipv6-by-default.sh b/entrypoint/10-listen-on-ipv6-by-default.sh
new file mode 100755
index 00000000..b90bf0c9
--- /dev/null
+++ b/entrypoint/10-listen-on-ipv6-by-default.sh
@@ -0,0 +1,67 @@
+#!/bin/sh
+# vim:sw=4:ts=4:et
+
+set -e
+
+entrypoint_log() {
+ if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then
+ echo "$@"
+ fi
+}
+
+ME=$(basename "$0")
+DEFAULT_CONF_FILE="etc/nginx/conf.d/default.conf"
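+
+# Summary: when IPv6 is available in the container and the packaged
+# default.conf has not been modified, add "listen [::]:80;" next to the
+# existing "listen 80;" directive so the default server also listens on IPv6.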
+
+# check if we have ipv6 available
+if [ ! -f "/proc/net/if_inet6" ]; then
+ entrypoint_log "$ME: info: ipv6 not available"
+ exit 0
+fi
+
+if [ ! -f "/$DEFAULT_CONF_FILE" ]; then
+ entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE is not a file or does not exist"
+ exit 0
+fi
+
+# check if the file can be modified, e.g. not on a r/o filesystem
+touch /$DEFAULT_CONF_FILE 2>/dev/null || { entrypoint_log "$ME: info: can not modify /$DEFAULT_CONF_FILE (read-only file system?)"; exit 0; }
+
+# check if the file is already modified, e.g. on a container restart
+grep -q "listen \[::]\:80;" /$DEFAULT_CONF_FILE && { entrypoint_log "$ME: info: IPv6 listen already enabled"; exit 0; }
+
+if [ -f "/etc/os-release" ]; then
+ . /etc/os-release
+else
+ entrypoint_log "$ME: info: can not guess the operating system"
+ exit 0
+fi
+
+entrypoint_log "$ME: info: Getting the checksum of /$DEFAULT_CONF_FILE"
+
+case "$ID" in
+ "debian")
+ CHECKSUM=$(dpkg-query --show --showformat='${Conffiles}\n' nginx | grep $DEFAULT_CONF_FILE | cut -d' ' -f 3)
+ echo "$CHECKSUM /$DEFAULT_CONF_FILE" | md5sum -c - >/dev/null 2>&1 || {
+ entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE differs from the packaged version"
+ exit 0
+ }
+ ;;
+ "alpine")
+ CHECKSUM=$(apk manifest nginx 2>/dev/null| grep $DEFAULT_CONF_FILE | cut -d' ' -f 1 | cut -d ':' -f 2)
+ echo "$CHECKSUM /$DEFAULT_CONF_FILE" | sha1sum -c - >/dev/null 2>&1 || {
+ entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE differs from the packaged version"
+ exit 0
+ }
+ ;;
+ *)
+ entrypoint_log "$ME: info: Unsupported distribution"
+ exit 0
+ ;;
+esac
+
+# enable ipv6 on default.conf listen sockets
+sed -i -E 's,listen 80;,listen 80;\n listen [::]:80;,' /$DEFAULT_CONF_FILE
+
+entrypoint_log "$ME: info: Enabled listen on IPv6 in /$DEFAULT_CONF_FILE"
+
+exit 0
diff --git a/entrypoint/15-local-resolvers.envsh b/entrypoint/15-local-resolvers.envsh
new file mode 100755
index 00000000..e830ddac
--- /dev/null
+++ b/entrypoint/15-local-resolvers.envsh
@@ -0,0 +1,15 @@
+#!/bin/sh
+# vim:sw=2:ts=2:sts=2:et
+
+set -eu
+
+LC_ALL=C
+PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+
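+# Summary: collect the nameserver entries from /etc/resolv.conf into
+# NGINX_LOCAL_RESOLVERS (IPv6 addresses are wrapped in brackets), so the value
+# can be substituted into a template by 20-envsubst-on-templates.sh, for
+# example in a "resolver ${NGINX_LOCAL_RESOLVERS};" directive. This only runs
+# when NGINX_ENTRYPOINT_LOCAL_RESOLVERS is set; because the file is a *.envsh
+# script it is sourced by the entrypoint, so the exported variable remains
+# visible to the scripts that run after it.
+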
+[ "${NGINX_ENTRYPOINT_LOCAL_RESOLVERS:-}" ] || return 0
+
+NGINX_LOCAL_RESOLVERS=$(awk 'BEGIN{ORS=" "} $1=="nameserver" {if ($2 ~ ":") {print "["$2"]"} else {print $2}}' /etc/resolv.conf)
+
+NGINX_LOCAL_RESOLVERS="${NGINX_LOCAL_RESOLVERS% }"
+
+export NGINX_LOCAL_RESOLVERS
diff --git a/entrypoint/20-envsubst-on-templates.sh b/entrypoint/20-envsubst-on-templates.sh
new file mode 100755
index 00000000..3804165c
--- /dev/null
+++ b/entrypoint/20-envsubst-on-templates.sh
@@ -0,0 +1,78 @@
+#!/bin/sh
+
+set -e
+
+ME=$(basename "$0")
+
+entrypoint_log() {
+ if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then
+ echo "$@"
+ fi
+}
+
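+# Summary: render each *.template file found under NGINX_ENVSUBST_TEMPLATE_DIR
+# (default /etc/nginx/templates) into NGINX_ENVSUBST_OUTPUT_DIR (default
+# /etc/nginx/conf.d) by running envsubst with the environment variables that
+# are currently defined (optionally restricted via NGINX_ENVSUBST_FILTER).
+# Files ending in the stream suffix (default .stream-template) are rendered
+# into NGINX_ENVSUBST_STREAM_OUTPUT_DIR instead, and a stream{} block that
+# includes that directory is appended to nginx.conf if one is not present.
+#
+# For example, a template /etc/nginx/templates/default.conf.template containing
+# "listen ${NGINX_PORT};" would be rendered to /etc/nginx/conf.d/default.conf
+# with ${NGINX_PORT} replaced by the value of NGINX_PORT taken from the
+# container environment (NGINX_PORT here is just an illustrative variable).
+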
+add_stream_block() {
+ local conffile="/etc/nginx/nginx.conf"
+
+ if grep -q -E "\s*stream\s*\{" "$conffile"; then
+ entrypoint_log "$ME: $conffile contains a stream block; include $stream_output_dir/*.conf to enable stream templates"
+ else
+ # check if the file can be modified, e.g. not on a r/o filesystem
+ touch "$conffile" 2>/dev/null || { entrypoint_log "$ME: info: can not modify $conffile (read-only file system?)"; exit 0; }
+ entrypoint_log "$ME: Appending stream block to $conffile to include $stream_output_dir/*.conf"
+ cat << END >> "$conffile"
+# added by "$ME" on "$(date)"
+stream {
+ include $stream_output_dir/*.conf;
+}
+END
+ fi
+}
+
+auto_envsubst() {
+ local template_dir="${NGINX_ENVSUBST_TEMPLATE_DIR:-/etc/nginx/templates}"
+ local suffix="${NGINX_ENVSUBST_TEMPLATE_SUFFIX:-.template}"
+ local output_dir="${NGINX_ENVSUBST_OUTPUT_DIR:-/etc/nginx/conf.d}"
+ local stream_suffix="${NGINX_ENVSUBST_STREAM_TEMPLATE_SUFFIX:-.stream-template}"
+ local stream_output_dir="${NGINX_ENVSUBST_STREAM_OUTPUT_DIR:-/etc/nginx/stream-conf.d}"
+ local filter="${NGINX_ENVSUBST_FILTER:-}"
+
+ local template defined_envs relative_path output_path subdir
+ defined_envs=$(printf '${%s} ' $(awk "END { for (name in ENVIRON) { print ( name ~ /${filter}/ ) ? name : \"\" } }" < /dev/null ))
+ [ -d "$template_dir" ] || return 0
+ if [ ! -w "$output_dir" ]; then
+ entrypoint_log "$ME: ERROR: $template_dir exists, but $output_dir is not writable"
+ return 0
+ fi
+ find "$template_dir" -follow -type f -name "*$suffix" -print | while read -r template; do
+ relative_path="${template#"$template_dir/"}"
+ output_path="$output_dir/${relative_path%"$suffix"}"
+ subdir=$(dirname "$relative_path")
+ # create a subdirectory where the template file exists
+ mkdir -p "$output_dir/$subdir"
+ entrypoint_log "$ME: Running envsubst on $template to $output_path"
+ envsubst "$defined_envs" < "$template" > "$output_path"
+ done
+
+  # Find the first file with the stream suffix; the check below is false if there are none
+ if test -n "$(find "$template_dir" -name "*$stream_suffix" -print -quit)"; then
+ mkdir -p "$stream_output_dir"
+ if [ ! -w "$stream_output_dir" ]; then
+ entrypoint_log "$ME: ERROR: $template_dir exists, but $stream_output_dir is not writable"
+ return 0
+ fi
+ add_stream_block
+ find "$template_dir" -follow -type f -name "*$stream_suffix" -print | while read -r template; do
+ relative_path="${template#"$template_dir/"}"
+ output_path="$stream_output_dir/${relative_path%"$stream_suffix"}"
+ subdir=$(dirname "$relative_path")
+ # create a subdirectory where the template file exists
+ mkdir -p "$stream_output_dir/$subdir"
+ entrypoint_log "$ME: Running envsubst on $template to $output_path"
+ envsubst "$defined_envs" < "$template" > "$output_path"
+ done
+ fi
+}
+
+auto_envsubst
+
+exit 0
diff --git a/entrypoint/30-tune-worker-processes.sh b/entrypoint/30-tune-worker-processes.sh
new file mode 100755
index 00000000..defb994f
--- /dev/null
+++ b/entrypoint/30-tune-worker-processes.sh
@@ -0,0 +1,188 @@
+#!/bin/sh
+# vim:sw=2:ts=2:sts=2:et
+
+set -eu
+
+LC_ALL=C
+ME=$(basename "$0")
+PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+
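+# Summary: when NGINX_ENTRYPOINT_WORKER_PROCESSES_AUTOTUNE is set, compute the
+# number of CPUs actually available to the container (the minimum of the
+# online CPU count, the cgroup v1/v2 cpuset size, and the cgroup v1/v2 CPU
+# quota, rounded up) and rewrite the worker_processes directive in
+# /etc/nginx/nginx.conf to that value, keeping the original line as a comment.
+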
+[ "${NGINX_ENTRYPOINT_WORKER_PROCESSES_AUTOTUNE:-}" ] || exit 0
+
+touch /etc/nginx/nginx.conf 2>/dev/null || { echo >&2 "$ME: error: can not modify /etc/nginx/nginx.conf (read-only file system?)"; exit 0; }
+
+ceildiv() {
+ num=$1
+ div=$2
+ echo $(( (num + div - 1) / div ))
+}
+
+get_cpuset() {
+ cpusetroot=$1
+ cpusetfile=$2
+ ncpu=0
+ [ -f "$cpusetroot/$cpusetfile" ] || return 1
+ for token in $( tr ',' ' ' < "$cpusetroot/$cpusetfile" ); do
+ case "$token" in
+ *-*)
+ count=$( seq $(echo "$token" | tr '-' ' ') | wc -l )
+ ncpu=$(( ncpu+count ))
+ ;;
+ *)
+ ncpu=$(( ncpu+1 ))
+ ;;
+ esac
+ done
+ echo "$ncpu"
+}
+
+get_quota() {
+ cpuroot=$1
+ ncpu=0
+ [ -f "$cpuroot/cpu.cfs_quota_us" ] || return 1
+ [ -f "$cpuroot/cpu.cfs_period_us" ] || return 1
+ cfs_quota=$( cat "$cpuroot/cpu.cfs_quota_us" )
+ cfs_period=$( cat "$cpuroot/cpu.cfs_period_us" )
+ [ "$cfs_quota" = "-1" ] && return 1
+ [ "$cfs_period" = "0" ] && return 1
+ ncpu=$( ceildiv "$cfs_quota" "$cfs_period" )
+ [ "$ncpu" -gt 0 ] || return 1
+ echo "$ncpu"
+}
+
+get_quota_v2() {
+ cpuroot=$1
+ ncpu=0
+ [ -f "$cpuroot/cpu.max" ] || return 1
+ cfs_quota=$( cut -d' ' -f 1 < "$cpuroot/cpu.max" )
+ cfs_period=$( cut -d' ' -f 2 < "$cpuroot/cpu.max" )
+ [ "$cfs_quota" = "max" ] && return 1
+ [ "$cfs_period" = "0" ] && return 1
+ ncpu=$( ceildiv "$cfs_quota" "$cfs_period" )
+ [ "$ncpu" -gt 0 ] || return 1
+ echo "$ncpu"
+}
+
+get_cgroup_v1_path() {
+ needle=$1
+ found=
+ foundroot=
+ mountpoint=
+
+ [ -r "/proc/self/mountinfo" ] || return 1
+ [ -r "/proc/self/cgroup" ] || return 1
+
+ while IFS= read -r line; do
+ case "$needle" in
+ "cpuset")
+ case "$line" in
+ *cpuset*)
+ found=$( echo "$line" | cut -d ' ' -f 4,5 )
+ break
+ ;;
+ esac
+ ;;
+ "cpu")
+ case "$line" in
+ *cpuset*)
+ ;;
+ *cpu,cpuacct*|*cpuacct,cpu|*cpuacct*|*cpu*)
+ found=$( echo "$line" | cut -d ' ' -f 4,5 )
+ break
+ ;;
+ esac
+ esac
+ done << __EOF__
+$( grep -F -- '- cgroup ' /proc/self/mountinfo )
+__EOF__
+
+ while IFS= read -r line; do
+ controller=$( echo "$line" | cut -d: -f 2 )
+ case "$needle" in
+ "cpuset")
+ case "$controller" in
+ cpuset)
+ mountpoint=$( echo "$line" | cut -d: -f 3 )
+ break
+ ;;
+ esac
+ ;;
+ "cpu")
+ case "$controller" in
+ cpu,cpuacct|cpuacct,cpu|cpuacct|cpu)
+ mountpoint=$( echo "$line" | cut -d: -f 3 )
+ break
+ ;;
+ esac
+ ;;
+ esac
+done << __EOF__
+$( grep -F -- 'cpu' /proc/self/cgroup )
+__EOF__
+
+ case "${found%% *}" in
+ "/")
+ foundroot="${found##* }$mountpoint"
+ ;;
+ "$mountpoint")
+ foundroot="${found##* }"
+ ;;
+ esac
+ echo "$foundroot"
+}
+
+get_cgroup_v2_path() {
+ found=
+ foundroot=
+ mountpoint=
+
+ [ -r "/proc/self/mountinfo" ] || return 1
+ [ -r "/proc/self/cgroup" ] || return 1
+
+ while IFS= read -r line; do
+ found=$( echo "$line" | cut -d ' ' -f 4,5 )
+ done << __EOF__
+$( grep -F -- '- cgroup2 ' /proc/self/mountinfo )
+__EOF__
+
+ while IFS= read -r line; do
+ mountpoint=$( echo "$line" | cut -d: -f 3 )
+done << __EOF__
+$( grep -F -- '0::' /proc/self/cgroup )
+__EOF__
+
+ case "${found%% *}" in
+ "")
+ return 1
+ ;;
+ "/")
+ foundroot="${found##* }$mountpoint"
+ ;;
+ "$mountpoint" | /../*)
+ foundroot="${found##* }"
+ ;;
+ esac
+ echo "$foundroot"
+}
+
+ncpu_online=$( getconf _NPROCESSORS_ONLN )
+ncpu_cpuset=
+ncpu_quota=
+ncpu_cpuset_v2=
+ncpu_quota_v2=
+
+cpuset=$( get_cgroup_v1_path "cpuset" ) && ncpu_cpuset=$( get_cpuset "$cpuset" "cpuset.effective_cpus" ) || ncpu_cpuset=$ncpu_online
+cpu=$( get_cgroup_v1_path "cpu" ) && ncpu_quota=$( get_quota "$cpu" ) || ncpu_quota=$ncpu_online
+cgroup_v2=$( get_cgroup_v2_path ) && ncpu_cpuset_v2=$( get_cpuset "$cgroup_v2" "cpuset.cpus.effective" ) || ncpu_cpuset_v2=$ncpu_online
+cgroup_v2=$( get_cgroup_v2_path ) && ncpu_quota_v2=$( get_quota_v2 "$cgroup_v2" ) || ncpu_quota_v2=$ncpu_online
+
+ncpu=$( printf "%s\n%s\n%s\n%s\n%s\n" \
+ "$ncpu_online" \
+ "$ncpu_cpuset" \
+ "$ncpu_quota" \
+ "$ncpu_cpuset_v2" \
+ "$ncpu_quota_v2" \
+ | sort -n \
+ | head -n 1 )
+
+sed -i.bak -r 's/^(worker_processes)(.*)$/# Commented out by '"$ME"' on '"$(date)"'\n#\1\2\n\1 '"$ncpu"';/' /etc/nginx/nginx.conf
diff --git a/entrypoint/docker-entrypoint.sh b/entrypoint/docker-entrypoint.sh
new file mode 100755
index 00000000..8ea04f21
--- /dev/null
+++ b/entrypoint/docker-entrypoint.sh
@@ -0,0 +1,47 @@
+#!/bin/sh
+# vim:sw=4:ts=4:et
+
+set -e
+
+entrypoint_log() {
+ if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then
+ echo "$@"
+ fi
+}
+
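+# Only attempt configuration when the container command is nginx or
+# nginx-debug; any other command is simply exec'd at the end of this script.
+# Scripts in /docker-entrypoint.d/ are processed in version-sort order:
+# *.envsh files are sourced (so they can export variables), *.sh files are
+# executed, anything else is ignored, and non-executable files are skipped
+# with a log message.
+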
+if [ "$1" = "nginx" ] || [ "$1" = "nginx-debug" ]; then
+ if /usr/bin/find "/docker-entrypoint.d/" -mindepth 1 -maxdepth 1 -type f -print -quit 2>/dev/null | read v; then
+ entrypoint_log "$0: /docker-entrypoint.d/ is not empty, will attempt to perform configuration"
+
+ entrypoint_log "$0: Looking for shell scripts in /docker-entrypoint.d/"
+ find "/docker-entrypoint.d/" -follow -type f -print | sort -V | while read -r f; do
+ case "$f" in
+ *.envsh)
+ if [ -x "$f" ]; then
+ entrypoint_log "$0: Sourcing $f";
+ . "$f"
+ else
+ # warn on shell scripts without exec bit
+ entrypoint_log "$0: Ignoring $f, not executable";
+ fi
+ ;;
+ *.sh)
+ if [ -x "$f" ]; then
+ entrypoint_log "$0: Launching $f";
+ "$f"
+ else
+ # warn on shell scripts without exec bit
+ entrypoint_log "$0: Ignoring $f, not executable";
+ fi
+ ;;
+ *) entrypoint_log "$0: Ignoring $f";;
+ esac
+ done
+
+ entrypoint_log "$0: Configuration complete; ready for start up"
+ else
+ entrypoint_log "$0: No files found in /docker-entrypoint.d/, skipping configuration"
+ fi
+fi
+
+exec "$@"
diff --git a/generate-stackbrew-library.sh b/generate-stackbrew-library.sh
index 4d70a428..bfb45a8a 100755
--- a/generate-stackbrew-library.sh
+++ b/generate-stackbrew-library.sh
@@ -3,16 +3,15 @@ set -eu
declare -A aliases
aliases=(
- [mainline]='1 1.15 latest'
- [stable]='1.14'
+ [mainline]='1 1.27 latest'
+ [stable]='1.26'
)
self="$(basename "$BASH_SOURCE")"
cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
-base=stretch
+base=debian
-versions=( */ )
-versions=( "${versions[@]%/}" )
+versions=( mainline stable )
# get the most recent commit which modified any of "$@"
fileCommit() {
@@ -51,10 +50,11 @@ join() {
}
for version in "${versions[@]}"; do
+ debian_otel="debian-otel"
+ alpine_otel="alpine-otel"
commit="$(dirCommit "$version/$base")"
fullVersion="$(git show "$commit":"$version/$base/Dockerfile" | awk '$1 == "ENV" && $2 == "NGINX_VERSION" { print $3; exit }')"
- fullVersion="${fullVersion%[.-]*}"
versionAliases=( $fullVersion )
if [ "$version" != "$fullVersion" ]; then
@@ -62,39 +62,81 @@ for version in "${versions[@]}"; do
fi
versionAliases+=( ${aliases[$version]:-} )
+ debianVersion="$(git show "$commit":"$version/$base/Dockerfile" | awk -F"[-:]" '$1 == "FROM debian" { print $2; exit }')"
+ debianAliases=( ${versionAliases[@]/%/-$debianVersion} )
+ debianAliases=( "${debianAliases[@]//latest-/}" )
+
echo
cat <<-EOE
- Tags: $(join ', ' "${versionAliases[@]}")
- Architectures: amd64, arm32v7, arm64v8, i386, ppc64le, s390x
+ Tags: $(join ', ' "${versionAliases[@]}"), $(join ', ' "${debianAliases[@]}")
+ Architectures: amd64, arm32v5, arm32v7, arm64v8, i386, mips64le, ppc64le, s390x
GitCommit: $commit
Directory: $version/$base
EOE
- for variant in stretch-perl; do
+ for variant in debian-perl; do
commit="$(dirCommit "$version/$variant")"
variantAliases=( "${versionAliases[@]/%/-perl}" )
+ variantAliases+=( "${versionAliases[@]/%/-${variant/debian/$debianVersion}}" )
+ variantAliases=( "${variantAliases[@]//latest-/}" )
+
+ echo
+ cat <<-EOE
+ Tags: $(join ', ' "${variantAliases[@]}")
+ Architectures: amd64, arm32v5, arm32v7, arm64v8, i386, mips64le, ppc64le, s390x
+ GitCommit: $commit
+ Directory: $version/$variant
+ EOE
+ done
+
+ for variant in $debian_otel; do
+ commit="$(dirCommit "$version/$variant")"
+
+ variantAliases=( "${versionAliases[@]/%/-otel}" )
+ variantAliases+=( "${versionAliases[@]/%/-${variant/debian/$debianVersion}}" )
+ variantAliases=( "${variantAliases[@]//latest-/}" )
+
+ echo
+ cat <<-EOE
+ Tags: $(join ', ' "${variantAliases[@]}")
+ Architectures: amd64, arm64v8
+ GitCommit: $commit
+ Directory: $version/$variant
+ EOE
+ done
+
+
+ commit="$(dirCommit "$version/alpine-slim")"
+ alpineVersion="$(git show "$commit":"$version/alpine-slim/Dockerfile" | awk -F: '$1 == "FROM alpine" { print $2; exit }')"
+
+ for variant in alpine alpine-perl alpine-slim; do
+ commit="$(dirCommit "$version/$variant")"
+
+ variantAliases=( "${versionAliases[@]/%/-$variant}" )
+ variantAliases+=( "${versionAliases[@]/%/-${variant/alpine/alpine$alpineVersion}}" )
variantAliases=( "${variantAliases[@]//latest-/}" )
echo
cat <<-EOE
Tags: $(join ', ' "${variantAliases[@]}")
- Architectures: amd64, arm32v7, arm64v8, i386, ppc64le, s390x
+ Architectures: arm64v8, arm32v6, arm32v7, ppc64le, s390x, i386, amd64, riscv64
GitCommit: $commit
Directory: $version/$variant
EOE
done
- for variant in alpine alpine-perl; do
+ for variant in $alpine_otel; do
commit="$(dirCommit "$version/$variant")"
variantAliases=( "${versionAliases[@]/%/-$variant}" )
+ variantAliases+=( "${versionAliases[@]/%/-${variant/alpine/alpine$alpineVersion}}" )
variantAliases=( "${variantAliases[@]//latest-/}" )
echo
cat <<-EOE
Tags: $(join ', ' "${variantAliases[@]}")
- Architectures: amd64, arm32v6, arm64v8, i386, ppc64le, s390x
+ Architectures: amd64, arm64v8
GitCommit: $commit
Directory: $version/$variant
EOE
diff --git a/mainline/alpine-otel/Dockerfile b/mainline/alpine-otel/Dockerfile
new file mode 100644
index 00000000..27238791
--- /dev/null
+++ b/mainline/alpine-otel/Dockerfile
@@ -0,0 +1,77 @@
+#
+# NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh"
+#
+# PLEASE DO NOT EDIT IT DIRECTLY.
+#
+FROM nginx:1.27.3-alpine
+
+ENV OTEL_VERSION 0.1.0
+
+RUN set -x \
+ && apkArch="$(cat /etc/apk/arch)" \
+ && nginxPackages=" \
+ nginx=${NGINX_VERSION}-r${PKG_RELEASE} \
+ nginx-module-xslt=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
+ nginx-module-geoip=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
+ nginx-module-image-filter=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
+ nginx-module-njs=${NGINX_VERSION}.${NJS_VERSION}-r${NJS_RELEASE} \
+ nginx-module-otel=${NGINX_VERSION}.${OTEL_VERSION}-r${PKG_RELEASE} \
+ " \
+# install prerequisites for public key and pkg-oss checks
+ && apk add --no-cache --virtual .checksum-deps \
+ openssl \
+ && case "$apkArch" in \
+ x86_64|aarch64) \
+# arches officially built by upstream
+ apk add -X "https://nginx.org/packages/mainline/alpine/v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" --no-cache $nginxPackages \
+ ;; \
+ *) \
+# we're on an architecture upstream doesn't officially build for
+# let's build binaries from the published packaging sources
+ set -x \
+ && tempDir="$(mktemp -d)" \
+ && chown nobody:nobody $tempDir \
+ && apk add --no-cache --virtual .build-deps \
+ gcc \
+ libc-dev \
+ make \
+ openssl-dev \
+ pcre2-dev \
+ zlib-dev \
+ linux-headers \
+ cmake \
+ bash \
+ alpine-sdk \
+ findutils \
+ curl \
+ xz \
+ re2-dev \
+ c-ares-dev \
+ && su nobody -s /bin/sh -c " \
+ export HOME=${tempDir} \
+ && cd ${tempDir} \
+ && curl -f -L -O https://github.com/nginx/pkg-oss/archive/0286c5190d972a49bffc9bf247885dd510ce8181.tar.gz \
+ && PKGOSSCHECKSUM=\"1e546bd15d7bc68e1772ecb6a73e29ba108ee5554a28928e57af038a9e8fc4f5cd35708ce89ad1dfaac97d870e663d32ef41045611d30b20d38b46816e3ab535 *0286c5190d972a49bffc9bf247885dd510ce8181.tar.gz\" \
+ && if [ \"\$(openssl sha512 -r 0286c5190d972a49bffc9bf247885dd510ce8181.tar.gz)\" = \"\$PKGOSSCHECKSUM\" ]; then \
+ echo \"pkg-oss tarball checksum verification succeeded!\"; \
+ else \
+ echo \"pkg-oss tarball checksum verification failed!\"; \
+ exit 1; \
+ fi \
+ && tar xzvf 0286c5190d972a49bffc9bf247885dd510ce8181.tar.gz \
+ && cd pkg-oss-0286c5190d972a49bffc9bf247885dd510ce8181 \
+ && cd alpine \
+ && make module-otel \
+ && apk index --allow-untrusted -o ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz ${tempDir}/packages/alpine/${apkArch}/*.apk \
+ && abuild-sign -k ${tempDir}/.abuild/abuild-key.rsa ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz \
+ " \
+ && cp ${tempDir}/.abuild/abuild-key.rsa.pub /etc/apk/keys/ \
+ && apk del --no-network .build-deps \
+ && apk add -X ${tempDir}/packages/alpine/ --no-cache $nginxPackages \
+ ;; \
+ esac \
+# remove checksum deps
+ && apk del --no-network .checksum-deps \
+# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
+ && if [ -n "$tempDir" ]; then rm -rf "$tempDir"; fi \
+ && if [ -f "/etc/apk/keys/abuild-key.rsa.pub" ]; then rm -f /etc/apk/keys/abuild-key.rsa.pub; fi
diff --git a/mainline/alpine-perl/Dockerfile b/mainline/alpine-perl/Dockerfile
index efeebbf6..6f02b3cc 100644
--- a/mainline/alpine-perl/Dockerfile
+++ b/mainline/alpine-perl/Dockerfile
@@ -1,151 +1,72 @@
-FROM alpine:3.7
+#
+# NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh"
+#
+# PLEASE DO NOT EDIT IT DIRECTLY.
+#
+FROM nginx:1.27.3-alpine
-LABEL maintainer="NGINX Docker Maintainers <docker-maint@nginx.com>"
-
-ENV NGINX_VERSION 1.15.2
-
-RUN GPG_KEYS=B0F4253373F8F6F510D42178520A9993A1C052F8 \
- && CONFIG="\
- --prefix=/etc/nginx \
- --sbin-path=/usr/sbin/nginx \
- --modules-path=/usr/lib/nginx/modules \
- --conf-path=/etc/nginx/nginx.conf \
- --error-log-path=/var/log/nginx/error.log \
- --http-log-path=/var/log/nginx/access.log \
- --pid-path=/var/run/nginx.pid \
- --lock-path=/var/run/nginx.lock \
- --http-client-body-temp-path=/var/cache/nginx/client_temp \
- --http-proxy-temp-path=/var/cache/nginx/proxy_temp \
- --http-fastcgi-temp-path=/var/cache/nginx/fastcgi_temp \
- --http-uwsgi-temp-path=/var/cache/nginx/uwsgi_temp \
- --http-scgi-temp-path=/var/cache/nginx/scgi_temp \
- --user=nginx \
- --group=nginx \
- --with-http_ssl_module \
- --with-http_realip_module \
- --with-http_addition_module \
- --with-http_sub_module \
- --with-http_dav_module \
- --with-http_flv_module \
- --with-http_mp4_module \
- --with-http_gunzip_module \
- --with-http_gzip_static_module \
- --with-http_random_index_module \
- --with-http_secure_link_module \
- --with-http_stub_status_module \
- --with-http_auth_request_module \
- --with-http_xslt_module=dynamic \
- --with-http_image_filter_module=dynamic \
- --with-http_geoip_module=dynamic \
- --with-http_perl_module=dynamic \
- --with-threads \
- --with-stream \
- --with-stream_ssl_module \
- --with-stream_ssl_preread_module \
- --with-stream_realip_module \
- --with-stream_geoip_module=dynamic \
- --with-http_slice_module \
- --with-mail \
- --with-mail_ssl_module \
- --with-compat \
- --with-file-aio \
- --with-http_v2_module \
- " \
- && addgroup -S nginx \
- && adduser -D -S -h /var/cache/nginx -s /sbin/nologin -G nginx nginx \
- && apk add --no-cache --virtual .build-deps \
- gcc \
- libc-dev \
- make \
- openssl-dev \
- pcre-dev \
- zlib-dev \
- linux-headers \
- curl \
- gnupg \
- libxslt-dev \
- gd-dev \
- geoip-dev \
- perl-dev \
- && curl -fSL https://nginx.org/download/nginx-$NGINX_VERSION.tar.gz -o nginx.tar.gz \
- && curl -fSL https://nginx.org/download/nginx-$NGINX_VERSION.tar.gz.asc -o nginx.tar.gz.asc \
- && export GNUPGHOME="$(mktemp -d)" \
- && found=''; \
- for server in \
- ha.pool.sks-keyservers.net \
- hkp://keyserver.ubuntu.com:80 \
- hkp://p80.pool.sks-keyservers.net:80 \
- pgp.mit.edu \
- ; do \
- echo "Fetching GPG key $GPG_KEYS from $server"; \
- gpg --keyserver "$server" --keyserver-options timeout=10 --recv-keys "$GPG_KEYS" && found=yes && break; \
- done; \
- test -z "$found" && echo >&2 "error: failed to fetch GPG key $GPG_KEYS" && exit 1; \
- gpg --batch --verify nginx.tar.gz.asc nginx.tar.gz \
- && rm -rf "$GNUPGHOME" nginx.tar.gz.asc \
- && mkdir -p /usr/src \
- && tar -zxC /usr/src -f nginx.tar.gz \
- && rm nginx.tar.gz \
- && cd /usr/src/nginx-$NGINX_VERSION \
- && ./configure $CONFIG --with-debug \
- && make -j$(getconf _NPROCESSORS_ONLN) \
- && mv objs/nginx objs/nginx-debug \
- && mv objs/ngx_http_xslt_filter_module.so objs/ngx_http_xslt_filter_module-debug.so \
- && mv objs/ngx_http_image_filter_module.so objs/ngx_http_image_filter_module-debug.so \
- && mv objs/ngx_http_geoip_module.so objs/ngx_http_geoip_module-debug.so \
- && mv objs/ngx_http_perl_module.so objs/ngx_http_perl_module-debug.so \
- && mv objs/ngx_stream_geoip_module.so objs/ngx_stream_geoip_module-debug.so \
- && ./configure $CONFIG \
- && make -j$(getconf _NPROCESSORS_ONLN) \
- && make install \
- && rm -rf /etc/nginx/html/ \
- && mkdir /etc/nginx/conf.d/ \
- && mkdir -p /usr/share/nginx/html/ \
- && install -m644 html/index.html /usr/share/nginx/html/ \
- && install -m644 html/50x.html /usr/share/nginx/html/ \
- && install -m755 objs/nginx-debug /usr/sbin/nginx-debug \
- && install -m755 objs/ngx_http_xslt_filter_module-debug.so /usr/lib/nginx/modules/ngx_http_xslt_filter_module-debug.so \
- && install -m755 objs/ngx_http_image_filter_module-debug.so /usr/lib/nginx/modules/ngx_http_image_filter_module-debug.so \
- && install -m755 objs/ngx_http_geoip_module-debug.so /usr/lib/nginx/modules/ngx_http_geoip_module-debug.so \
- && install -m755 objs/ngx_http_perl_module-debug.so /usr/lib/nginx/modules/ngx_http_perl_module-debug.so \
- && install -m755 objs/ngx_stream_geoip_module-debug.so /usr/lib/nginx/modules/ngx_stream_geoip_module-debug.so \
- && ln -s ../../usr/lib/nginx/modules /etc/nginx/modules \
- && strip /usr/sbin/nginx* \
- && strip /usr/lib/nginx/modules/*.so \
- && rm -rf /usr/src/nginx-$NGINX_VERSION \
- \
- # Bring in gettext so we can get `envsubst`, then throw
- # the rest away. To do this, we need to install `gettext`
- # then move `envsubst` out of the way so `gettext` can
- # be deleted completely, then move `envsubst` back.
- && apk add --no-cache --virtual .gettext gettext \
- && mv /usr/bin/envsubst /tmp/ \
- \
- && runDeps="$( \
- scanelf --needed --nobanner /usr/sbin/nginx /usr/lib/nginx/modules/*.so /tmp/envsubst \
- | awk '{ gsub(/,/, "\nso:", $2); print "so:" $2 }' \
- | sort -u \
- | xargs -r apk info --installed \
- | sort -u \
- )" \
- && apk add --no-cache --virtual .nginx-rundeps $runDeps \
- && apk del .build-deps \
- && apk del .gettext \
- && mv /tmp/envsubst /usr/local/bin/ \
- \
- # Bring in tzdata so users could set the timezones through the environment
- # variables
- && apk add --no-cache tzdata \
- \
- # forward request and error logs to docker log collector
- && ln -sf /dev/stdout /var/log/nginx/access.log \
- && ln -sf /dev/stderr /var/log/nginx/error.log
-
-COPY nginx.conf /etc/nginx/nginx.conf
-COPY nginx.vh.default.conf /etc/nginx/conf.d/default.conf
-
-EXPOSE 80
-
-STOPSIGNAL SIGTERM
-
-CMD ["nginx", "-g", "daemon off;"]
+RUN set -x \
+ && apkArch="$(cat /etc/apk/arch)" \
+ && nginxPackages=" \
+ nginx=${NGINX_VERSION}-r${PKG_RELEASE} \
+ nginx-module-xslt=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
+ nginx-module-geoip=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
+ nginx-module-image-filter=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
+ nginx-module-perl=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
+ nginx-module-njs=${NGINX_VERSION}.${NJS_VERSION}-r${NJS_RELEASE} \
+ " \
+# install prerequisites for public key and pkg-oss checks
+ && apk add --no-cache --virtual .checksum-deps \
+ openssl \
+ && case "$apkArch" in \
+ x86_64|aarch64) \
+# arches officially built by upstream
+ apk add -X "https://nginx.org/packages/mainline/alpine/v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" --no-cache $nginxPackages \
+ ;; \
+ *) \
+# we're on an architecture upstream doesn't officially build for
+# let's build binaries from the published packaging sources
+ set -x \
+ && tempDir="$(mktemp -d)" \
+ && chown nobody:nobody $tempDir \
+ && apk add --no-cache --virtual .build-deps \
+ gcc \
+ libc-dev \
+ make \
+ openssl-dev \
+ pcre2-dev \
+ zlib-dev \
+ linux-headers \
+ perl-dev \
+ bash \
+ alpine-sdk \
+ findutils \
+ curl \
+ && su nobody -s /bin/sh -c " \
+ export HOME=${tempDir} \
+ && cd ${tempDir} \
+ && curl -f -L -O https://github.com/nginx/pkg-oss/archive/0286c5190d972a49bffc9bf247885dd510ce8181.tar.gz \
+ && PKGOSSCHECKSUM=\"1e546bd15d7bc68e1772ecb6a73e29ba108ee5554a28928e57af038a9e8fc4f5cd35708ce89ad1dfaac97d870e663d32ef41045611d30b20d38b46816e3ab535 *0286c5190d972a49bffc9bf247885dd510ce8181.tar.gz\" \
+ && if [ \"\$(openssl sha512 -r 0286c5190d972a49bffc9bf247885dd510ce8181.tar.gz)\" = \"\$PKGOSSCHECKSUM\" ]; then \
+ echo \"pkg-oss tarball checksum verification succeeded!\"; \
+ else \
+ echo \"pkg-oss tarball checksum verification failed!\"; \
+ exit 1; \
+ fi \
+ && tar xzvf 0286c5190d972a49bffc9bf247885dd510ce8181.tar.gz \
+ && cd pkg-oss-0286c5190d972a49bffc9bf247885dd510ce8181 \
+ && cd alpine \
+ && make module-perl \
+ && apk index --allow-untrusted -o ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz ${tempDir}/packages/alpine/${apkArch}/*.apk \
+ && abuild-sign -k ${tempDir}/.abuild/abuild-key.rsa ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz \
+ " \
+ && cp ${tempDir}/.abuild/abuild-key.rsa.pub /etc/apk/keys/ \
+ && apk del --no-network .build-deps \
+ && apk add -X ${tempDir}/packages/alpine/ --no-cache $nginxPackages \
+ ;; \
+ esac \
+# remove checksum deps
+ && apk del --no-network .checksum-deps \
+# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
+ && if [ -n "$tempDir" ]; then rm -rf "$tempDir"; fi \
+ && if [ -f "/etc/apk/keys/abuild-key.rsa.pub" ]; then rm -f /etc/apk/keys/abuild-key.rsa.pub; fi
diff --git a/mainline/alpine-perl/nginx.vh.default.conf b/mainline/alpine-perl/nginx.vh.default.conf
deleted file mode 100644
index 299c622a..00000000
--- a/mainline/alpine-perl/nginx.vh.default.conf
+++ /dev/null
@@ -1,45 +0,0 @@
-server {
- listen 80;
- server_name localhost;
-
- #charset koi8-r;
- #access_log /var/log/nginx/host.access.log main;
-
- location / {
- root /usr/share/nginx/html;
- index index.html index.htm;
- }
-
- #error_page 404 /404.html;
-
- # redirect server error pages to the static page /50x.html
- #
- error_page 500 502 503 504 /50x.html;
- location = /50x.html {
- root /usr/share/nginx/html;
- }
-
- # proxy the PHP scripts to Apache listening on 127.0.0.1:80
- #
- #location ~ \.php$ {
- # proxy_pass http://127.0.0.1;
- #}
-
- # pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000
- #
- #location ~ \.php$ {
- # root html;
- # fastcgi_pass 127.0.0.1:9000;
- # fastcgi_index index.php;
- # fastcgi_param SCRIPT_FILENAME /scripts$fastcgi_script_name;
- # include fastcgi_params;
- #}
-
- # deny access to .htaccess files, if Apache's document root
- # concurs with nginx's one
- #
- #location ~ /\.ht {
- # deny all;
- #}
-}
-
diff --git a/mainline/alpine-slim/10-listen-on-ipv6-by-default.sh b/mainline/alpine-slim/10-listen-on-ipv6-by-default.sh
new file mode 100755
index 00000000..b90bf0c9
--- /dev/null
+++ b/mainline/alpine-slim/10-listen-on-ipv6-by-default.sh
@@ -0,0 +1,67 @@
+#!/bin/sh
+# vim:sw=4:ts=4:et
+
+set -e
+
+entrypoint_log() {
+ if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then
+ echo "$@"
+ fi
+}
+
+ME=$(basename "$0")
+DEFAULT_CONF_FILE="etc/nginx/conf.d/default.conf"
+
+# check if we have ipv6 available
+if [ ! -f "/proc/net/if_inet6" ]; then
+ entrypoint_log "$ME: info: ipv6 not available"
+ exit 0
+fi
+
+if [ ! -f "/$DEFAULT_CONF_FILE" ]; then
+ entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE is not a file or does not exist"
+ exit 0
+fi
+
+# check if the file can be modified, e.g. not on a r/o filesystem
+touch /$DEFAULT_CONF_FILE 2>/dev/null || { entrypoint_log "$ME: info: can not modify /$DEFAULT_CONF_FILE (read-only file system?)"; exit 0; }
+
+# check if the file is already modified, e.g. on a container restart
+grep -q "listen \[::]\:80;" /$DEFAULT_CONF_FILE && { entrypoint_log "$ME: info: IPv6 listen already enabled"; exit 0; }
+
+if [ -f "/etc/os-release" ]; then
+ . /etc/os-release
+else
+ entrypoint_log "$ME: info: can not guess the operating system"
+ exit 0
+fi
+
+entrypoint_log "$ME: info: Getting the checksum of /$DEFAULT_CONF_FILE"
+
+case "$ID" in
+ "debian")
+ CHECKSUM=$(dpkg-query --show --showformat='${Conffiles}\n' nginx | grep $DEFAULT_CONF_FILE | cut -d' ' -f 3)
+ echo "$CHECKSUM /$DEFAULT_CONF_FILE" | md5sum -c - >/dev/null 2>&1 || {
+ entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE differs from the packaged version"
+ exit 0
+ }
+ ;;
+ "alpine")
+ CHECKSUM=$(apk manifest nginx 2>/dev/null| grep $DEFAULT_CONF_FILE | cut -d' ' -f 1 | cut -d ':' -f 2)
+ echo "$CHECKSUM /$DEFAULT_CONF_FILE" | sha1sum -c - >/dev/null 2>&1 || {
+ entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE differs from the packaged version"
+ exit 0
+ }
+ ;;
+ *)
+ entrypoint_log "$ME: info: Unsupported distribution"
+ exit 0
+ ;;
+esac
+
+# enable ipv6 on default.conf listen sockets
+sed -i -E 's,listen 80;,listen 80;\n listen [::]:80;,' /$DEFAULT_CONF_FILE
+
+entrypoint_log "$ME: info: Enabled listen on IPv6 in /$DEFAULT_CONF_FILE"
+
+exit 0
diff --git a/mainline/alpine-slim/15-local-resolvers.envsh b/mainline/alpine-slim/15-local-resolvers.envsh
new file mode 100755
index 00000000..e830ddac
--- /dev/null
+++ b/mainline/alpine-slim/15-local-resolvers.envsh
@@ -0,0 +1,15 @@
+#!/bin/sh
+# vim:sw=2:ts=2:sts=2:et
+
+set -eu
+
+LC_ALL=C
+PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+
+[ "${NGINX_ENTRYPOINT_LOCAL_RESOLVERS:-}" ] || return 0
+
+NGINX_LOCAL_RESOLVERS=$(awk 'BEGIN{ORS=" "} $1=="nameserver" {if ($2 ~ ":") {print "["$2"]"} else {print $2}}' /etc/resolv.conf)
+
+NGINX_LOCAL_RESOLVERS="${NGINX_LOCAL_RESOLVERS% }"
+
+export NGINX_LOCAL_RESOLVERS
diff --git a/mainline/alpine-slim/20-envsubst-on-templates.sh b/mainline/alpine-slim/20-envsubst-on-templates.sh
new file mode 100755
index 00000000..3804165c
--- /dev/null
+++ b/mainline/alpine-slim/20-envsubst-on-templates.sh
@@ -0,0 +1,78 @@
+#!/bin/sh
+
+set -e
+
+ME=$(basename "$0")
+
+entrypoint_log() {
+ if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then
+ echo "$@"
+ fi
+}
+
+add_stream_block() {
+ local conffile="/etc/nginx/nginx.conf"
+
+ if grep -q -E "\s*stream\s*\{" "$conffile"; then
+ entrypoint_log "$ME: $conffile contains a stream block; include $stream_output_dir/*.conf to enable stream templates"
+ else
+ # check if the file can be modified, e.g. not on a r/o filesystem
+ touch "$conffile" 2>/dev/null || { entrypoint_log "$ME: info: can not modify $conffile (read-only file system?)"; exit 0; }
+ entrypoint_log "$ME: Appending stream block to $conffile to include $stream_output_dir/*.conf"
+ cat << END >> "$conffile"
+# added by "$ME" on "$(date)"
+stream {
+ include $stream_output_dir/*.conf;
+}
+END
+ fi
+}
+
+auto_envsubst() {
+ local template_dir="${NGINX_ENVSUBST_TEMPLATE_DIR:-/etc/nginx/templates}"
+ local suffix="${NGINX_ENVSUBST_TEMPLATE_SUFFIX:-.template}"
+ local output_dir="${NGINX_ENVSUBST_OUTPUT_DIR:-/etc/nginx/conf.d}"
+ local stream_suffix="${NGINX_ENVSUBST_STREAM_TEMPLATE_SUFFIX:-.stream-template}"
+ local stream_output_dir="${NGINX_ENVSUBST_STREAM_OUTPUT_DIR:-/etc/nginx/stream-conf.d}"
+ local filter="${NGINX_ENVSUBST_FILTER:-}"
+
+ local template defined_envs relative_path output_path subdir
+ defined_envs=$(printf '${%s} ' $(awk "END { for (name in ENVIRON) { print ( name ~ /${filter}/ ) ? name : \"\" } }" < /dev/null ))
+ [ -d "$template_dir" ] || return 0
+ if [ ! -w "$output_dir" ]; then
+ entrypoint_log "$ME: ERROR: $template_dir exists, but $output_dir is not writable"
+ return 0
+ fi
+ find "$template_dir" -follow -type f -name "*$suffix" -print | while read -r template; do
+ relative_path="${template#"$template_dir/"}"
+ output_path="$output_dir/${relative_path%"$suffix"}"
+ subdir=$(dirname "$relative_path")
+ # create a subdirectory where the template file exists
+ mkdir -p "$output_dir/$subdir"
+ entrypoint_log "$ME: Running envsubst on $template to $output_path"
+ envsubst "$defined_envs" < "$template" > "$output_path"
+ done
+
+  # Find the first file with the stream suffix; the check below is false if there are none
+ if test -n "$(find "$template_dir" -name "*$stream_suffix" -print -quit)"; then
+ mkdir -p "$stream_output_dir"
+ if [ ! -w "$stream_output_dir" ]; then
+ entrypoint_log "$ME: ERROR: $template_dir exists, but $stream_output_dir is not writable"
+ return 0
+ fi
+ add_stream_block
+ find "$template_dir" -follow -type f -name "*$stream_suffix" -print | while read -r template; do
+ relative_path="${template#"$template_dir/"}"
+ output_path="$stream_output_dir/${relative_path%"$stream_suffix"}"
+ subdir=$(dirname "$relative_path")
+ # create a subdirectory where the template file exists
+ mkdir -p "$stream_output_dir/$subdir"
+ entrypoint_log "$ME: Running envsubst on $template to $output_path"
+ envsubst "$defined_envs" < "$template" > "$output_path"
+ done
+ fi
+}
+
+auto_envsubst
+
+exit 0
diff --git a/mainline/alpine-slim/30-tune-worker-processes.sh b/mainline/alpine-slim/30-tune-worker-processes.sh
new file mode 100755
index 00000000..defb994f
--- /dev/null
+++ b/mainline/alpine-slim/30-tune-worker-processes.sh
@@ -0,0 +1,188 @@
+#!/bin/sh
+# vim:sw=2:ts=2:sts=2:et
+
+set -eu
+
+LC_ALL=C
+ME=$(basename "$0")
+PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+
+[ "${NGINX_ENTRYPOINT_WORKER_PROCESSES_AUTOTUNE:-}" ] || exit 0
+
+touch /etc/nginx/nginx.conf 2>/dev/null || { echo >&2 "$ME: error: can not modify /etc/nginx/nginx.conf (read-only file system?)"; exit 0; }
+
+ceildiv() {
+ num=$1
+ div=$2
+ echo $(( (num + div - 1) / div ))
+}
+
+get_cpuset() {
+ cpusetroot=$1
+ cpusetfile=$2
+ ncpu=0
+ [ -f "$cpusetroot/$cpusetfile" ] || return 1
+ for token in $( tr ',' ' ' < "$cpusetroot/$cpusetfile" ); do
+ case "$token" in
+ *-*)
+ count=$( seq $(echo "$token" | tr '-' ' ') | wc -l )
+ ncpu=$(( ncpu+count ))
+ ;;
+ *)
+ ncpu=$(( ncpu+1 ))
+ ;;
+ esac
+ done
+ echo "$ncpu"
+}
+
+get_quota() {
+ cpuroot=$1
+ ncpu=0
+ [ -f "$cpuroot/cpu.cfs_quota_us" ] || return 1
+ [ -f "$cpuroot/cpu.cfs_period_us" ] || return 1
+ cfs_quota=$( cat "$cpuroot/cpu.cfs_quota_us" )
+ cfs_period=$( cat "$cpuroot/cpu.cfs_period_us" )
+ [ "$cfs_quota" = "-1" ] && return 1
+ [ "$cfs_period" = "0" ] && return 1
+ ncpu=$( ceildiv "$cfs_quota" "$cfs_period" )
+ [ "$ncpu" -gt 0 ] || return 1
+ echo "$ncpu"
+}
+
+get_quota_v2() {
+ cpuroot=$1
+ ncpu=0
+ [ -f "$cpuroot/cpu.max" ] || return 1
+ cfs_quota=$( cut -d' ' -f 1 < "$cpuroot/cpu.max" )
+ cfs_period=$( cut -d' ' -f 2 < "$cpuroot/cpu.max" )
+ [ "$cfs_quota" = "max" ] && return 1
+ [ "$cfs_period" = "0" ] && return 1
+ ncpu=$( ceildiv "$cfs_quota" "$cfs_period" )
+ [ "$ncpu" -gt 0 ] || return 1
+ echo "$ncpu"
+}
+
+get_cgroup_v1_path() {
+ needle=$1
+ found=
+ foundroot=
+ mountpoint=
+
+ [ -r "/proc/self/mountinfo" ] || return 1
+ [ -r "/proc/self/cgroup" ] || return 1
+
+ while IFS= read -r line; do
+ case "$needle" in
+ "cpuset")
+ case "$line" in
+ *cpuset*)
+ found=$( echo "$line" | cut -d ' ' -f 4,5 )
+ break
+ ;;
+ esac
+ ;;
+ "cpu")
+ case "$line" in
+ *cpuset*)
+ ;;
+ *cpu,cpuacct*|*cpuacct,cpu|*cpuacct*|*cpu*)
+ found=$( echo "$line" | cut -d ' ' -f 4,5 )
+ break
+ ;;
+ esac
+ esac
+ done << __EOF__
+$( grep -F -- '- cgroup ' /proc/self/mountinfo )
+__EOF__
+
+ while IFS= read -r line; do
+ controller=$( echo "$line" | cut -d: -f 2 )
+ case "$needle" in
+ "cpuset")
+ case "$controller" in
+ cpuset)
+ mountpoint=$( echo "$line" | cut -d: -f 3 )
+ break
+ ;;
+ esac
+ ;;
+ "cpu")
+ case "$controller" in
+ cpu,cpuacct|cpuacct,cpu|cpuacct|cpu)
+ mountpoint=$( echo "$line" | cut -d: -f 3 )
+ break
+ ;;
+ esac
+ ;;
+ esac
+done << __EOF__
+$( grep -F -- 'cpu' /proc/self/cgroup )
+__EOF__
+
+ case "${found%% *}" in
+ "/")
+ foundroot="${found##* }$mountpoint"
+ ;;
+ "$mountpoint")
+ foundroot="${found##* }"
+ ;;
+ esac
+ echo "$foundroot"
+}
+
+get_cgroup_v2_path() {
+ found=
+ foundroot=
+ mountpoint=
+
+ [ -r "/proc/self/mountinfo" ] || return 1
+ [ -r "/proc/self/cgroup" ] || return 1
+
+ while IFS= read -r line; do
+ found=$( echo "$line" | cut -d ' ' -f 4,5 )
+ done << __EOF__
+$( grep -F -- '- cgroup2 ' /proc/self/mountinfo )
+__EOF__
+
+ while IFS= read -r line; do
+ mountpoint=$( echo "$line" | cut -d: -f 3 )
+done << __EOF__
+$( grep -F -- '0::' /proc/self/cgroup )
+__EOF__
+
+ case "${found%% *}" in
+ "")
+ return 1
+ ;;
+ "/")
+ foundroot="${found##* }$mountpoint"
+ ;;
+ "$mountpoint" | /../*)
+ foundroot="${found##* }"
+ ;;
+ esac
+ echo "$foundroot"
+}
+
+ncpu_online=$( getconf _NPROCESSORS_ONLN )
+ncpu_cpuset=
+ncpu_quota=
+ncpu_cpuset_v2=
+ncpu_quota_v2=
+
+cpuset=$( get_cgroup_v1_path "cpuset" ) && ncpu_cpuset=$( get_cpuset "$cpuset" "cpuset.effective_cpus" ) || ncpu_cpuset=$ncpu_online
+cpu=$( get_cgroup_v1_path "cpu" ) && ncpu_quota=$( get_quota "$cpu" ) || ncpu_quota=$ncpu_online
+cgroup_v2=$( get_cgroup_v2_path ) && ncpu_cpuset_v2=$( get_cpuset "$cgroup_v2" "cpuset.cpus.effective" ) || ncpu_cpuset_v2=$ncpu_online
+cgroup_v2=$( get_cgroup_v2_path ) && ncpu_quota_v2=$( get_quota_v2 "$cgroup_v2" ) || ncpu_quota_v2=$ncpu_online
+
+ncpu=$( printf "%s\n%s\n%s\n%s\n%s\n" \
+ "$ncpu_online" \
+ "$ncpu_cpuset" \
+ "$ncpu_quota" \
+ "$ncpu_cpuset_v2" \
+ "$ncpu_quota_v2" \
+ | sort -n \
+ | head -n 1 )
+
+sed -i.bak -r 's/^(worker_processes)(.*)$/# Commented out by '"$ME"' on '"$(date)"'\n#\1\2\n\1 '"$ncpu"';/' /etc/nginx/nginx.conf
diff --git a/mainline/alpine-slim/Dockerfile b/mainline/alpine-slim/Dockerfile
new file mode 100644
index 00000000..58e01411
--- /dev/null
+++ b/mainline/alpine-slim/Dockerfile
@@ -0,0 +1,123 @@
+#
+# NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh"
+#
+# PLEASE DO NOT EDIT IT DIRECTLY.
+#
+FROM alpine:3.20
+
+LABEL maintainer="NGINX Docker Maintainers <docker-maint@nginx.com>"
+
+ENV NGINX_VERSION 1.27.3
+ENV PKG_RELEASE 1
+ENV DYNPKG_RELEASE 1
+
+RUN set -x \
+# create nginx user/group first, to be consistent throughout docker variants
+ && addgroup -g 101 -S nginx \
+ && adduser -S -D -H -u 101 -h /var/cache/nginx -s /sbin/nologin -G nginx -g nginx nginx \
+ && apkArch="$(cat /etc/apk/arch)" \
+ && nginxPackages=" \
+ nginx=${NGINX_VERSION}-r${PKG_RELEASE} \
+ " \
+# install prerequisites for public key and pkg-oss checks
+ && apk add --no-cache --virtual .checksum-deps \
+ openssl \
+ && case "$apkArch" in \
+ x86_64|aarch64) \
+# arches officially built by upstream
+ set -x \
+ && KEY_SHA512="e09fa32f0a0eab2b879ccbbc4d0e4fb9751486eedda75e35fac65802cc9faa266425edf83e261137a2f4d16281ce2c1a5f4502930fe75154723da014214f0655" \
+ && wget -O /tmp/nginx_signing.rsa.pub https://nginx.org/keys/nginx_signing.rsa.pub \
+ && if echo "$KEY_SHA512 */tmp/nginx_signing.rsa.pub" | sha512sum -c -; then \
+ echo "key verification succeeded!"; \
+ mv /tmp/nginx_signing.rsa.pub /etc/apk/keys/; \
+ else \
+ echo "key verification failed!"; \
+ exit 1; \
+ fi \
+ && apk add -X "https://nginx.org/packages/mainline/alpine/v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" --no-cache $nginxPackages \
+ ;; \
+ *) \
+# we're on an architecture upstream doesn't officially build for
+# let's build binaries from the published packaging sources
+ set -x \
+ && tempDir="$(mktemp -d)" \
+ && chown nobody:nobody $tempDir \
+ && apk add --no-cache --virtual .build-deps \
+ gcc \
+ libc-dev \
+ make \
+ openssl-dev \
+ pcre2-dev \
+ zlib-dev \
+ linux-headers \
+ bash \
+ alpine-sdk \
+ findutils \
+ curl \
+ && su nobody -s /bin/sh -c " \
+ export HOME=${tempDir} \
+ && cd ${tempDir} \
+ && curl -f -L -O https://github.com/nginx/pkg-oss/archive/0286c5190d972a49bffc9bf247885dd510ce8181.tar.gz \
+ && PKGOSSCHECKSUM=\"1e546bd15d7bc68e1772ecb6a73e29ba108ee5554a28928e57af038a9e8fc4f5cd35708ce89ad1dfaac97d870e663d32ef41045611d30b20d38b46816e3ab535 *0286c5190d972a49bffc9bf247885dd510ce8181.tar.gz\" \
+ && if [ \"\$(openssl sha512 -r 0286c5190d972a49bffc9bf247885dd510ce8181.tar.gz)\" = \"\$PKGOSSCHECKSUM\" ]; then \
+ echo \"pkg-oss tarball checksum verification succeeded!\"; \
+ else \
+ echo \"pkg-oss tarball checksum verification failed!\"; \
+ exit 1; \
+ fi \
+ && tar xzvf 0286c5190d972a49bffc9bf247885dd510ce8181.tar.gz \
+ && cd pkg-oss-0286c5190d972a49bffc9bf247885dd510ce8181 \
+ && cd alpine \
+ && make base \
+ && apk index --allow-untrusted -o ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz ${tempDir}/packages/alpine/${apkArch}/*.apk \
+ && abuild-sign -k ${tempDir}/.abuild/abuild-key.rsa ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz \
+ " \
+ && cp ${tempDir}/.abuild/abuild-key.rsa.pub /etc/apk/keys/ \
+ && apk del --no-network .build-deps \
+ && apk add -X ${tempDir}/packages/alpine/ --no-cache $nginxPackages \
+ ;; \
+ esac \
+# remove checksum deps
+ && apk del --no-network .checksum-deps \
+# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
+ && if [ -n "$tempDir" ]; then rm -rf "$tempDir"; fi \
+ && if [ -f "/etc/apk/keys/abuild-key.rsa.pub" ]; then rm -f /etc/apk/keys/abuild-key.rsa.pub; fi \
+# Bring in gettext so we can get `envsubst`, then throw
+# the rest away. To do this, we need to install `gettext`
+# then move `envsubst` out of the way so `gettext` can
+# be deleted completely, then move `envsubst` back.
+ && apk add --no-cache --virtual .gettext gettext \
+ && mv /usr/bin/envsubst /tmp/ \
+ \
+ && runDeps="$( \
+ scanelf --needed --nobanner /tmp/envsubst \
+ | awk '{ gsub(/,/, "\nso:", $2); print "so:" $2 }' \
+ | sort -u \
+ | xargs -r apk info --installed \
+ | sort -u \
+ )" \
+ && apk add --no-cache $runDeps \
+ && apk del --no-network .gettext \
+ && mv /tmp/envsubst /usr/local/bin/ \
+# Bring in tzdata so users can set the timezone through environment
+# variables
+ && apk add --no-cache tzdata \
+# forward request and error logs to docker log collector
+ && ln -sf /dev/stdout /var/log/nginx/access.log \
+ && ln -sf /dev/stderr /var/log/nginx/error.log \
+# create a docker-entrypoint.d directory
+ && mkdir /docker-entrypoint.d
+
+COPY docker-entrypoint.sh /
+COPY 10-listen-on-ipv6-by-default.sh /docker-entrypoint.d
+COPY 15-local-resolvers.envsh /docker-entrypoint.d
+COPY 20-envsubst-on-templates.sh /docker-entrypoint.d
+COPY 30-tune-worker-processes.sh /docker-entrypoint.d
+ENTRYPOINT ["/docker-entrypoint.sh"]
+
+EXPOSE 80
+
+STOPSIGNAL SIGQUIT
+
+CMD ["nginx", "-g", "daemon off;"]
diff --git a/mainline/alpine-slim/docker-entrypoint.sh b/mainline/alpine-slim/docker-entrypoint.sh
new file mode 100755
index 00000000..8ea04f21
--- /dev/null
+++ b/mainline/alpine-slim/docker-entrypoint.sh
@@ -0,0 +1,47 @@
+#!/bin/sh
+# vim:sw=4:ts=4:et
+
+set -e
+
+entrypoint_log() {
+ if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then
+ echo "$@"
+ fi
+}
+
+if [ "$1" = "nginx" ] || [ "$1" = "nginx-debug" ]; then
+ if /usr/bin/find "/docker-entrypoint.d/" -mindepth 1 -maxdepth 1 -type f -print -quit 2>/dev/null | read v; then
+ entrypoint_log "$0: /docker-entrypoint.d/ is not empty, will attempt to perform configuration"
+
+ entrypoint_log "$0: Looking for shell scripts in /docker-entrypoint.d/"
+ find "/docker-entrypoint.d/" -follow -type f -print | sort -V | while read -r f; do
+ case "$f" in
+ *.envsh)
+ if [ -x "$f" ]; then
+ entrypoint_log "$0: Sourcing $f";
+ . "$f"
+ else
+ # warn on shell scripts without exec bit
+ entrypoint_log "$0: Ignoring $f, not executable";
+ fi
+ ;;
+ *.sh)
+ if [ -x "$f" ]; then
+ entrypoint_log "$0: Launching $f";
+ "$f"
+ else
+ # warn on shell scripts without exec bit
+ entrypoint_log "$0: Ignoring $f, not executable";
+ fi
+ ;;
+ *) entrypoint_log "$0: Ignoring $f";;
+ esac
+ done
+
+ entrypoint_log "$0: Configuration complete; ready for start up"
+ else
+ entrypoint_log "$0: No files found in /docker-entrypoint.d/, skipping configuration"
+ fi
+fi
+
+exec "$@"
diff --git a/mainline/alpine/Dockerfile b/mainline/alpine/Dockerfile
index c1b42a5c..6c82f99a 100644
--- a/mainline/alpine/Dockerfile
+++ b/mainline/alpine/Dockerfile
@@ -1,146 +1,79 @@
-FROM alpine:3.7
+#
+# NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh"
+#
+# PLEASE DO NOT EDIT IT DIRECTLY.
+#
+FROM nginx:1.27.3-alpine-slim
-LABEL maintainer="NGINX Docker Maintainers <docker-maint@nginx.com>"
+ENV NJS_VERSION 0.8.8
+ENV NJS_RELEASE 1
-ENV NGINX_VERSION 1.15.2
-
-RUN GPG_KEYS=B0F4253373F8F6F510D42178520A9993A1C052F8 \
- && CONFIG="\
- --prefix=/etc/nginx \
- --sbin-path=/usr/sbin/nginx \
- --modules-path=/usr/lib/nginx/modules \
- --conf-path=/etc/nginx/nginx.conf \
- --error-log-path=/var/log/nginx/error.log \
- --http-log-path=/var/log/nginx/access.log \
- --pid-path=/var/run/nginx.pid \
- --lock-path=/var/run/nginx.lock \
- --http-client-body-temp-path=/var/cache/nginx/client_temp \
- --http-proxy-temp-path=/var/cache/nginx/proxy_temp \
- --http-fastcgi-temp-path=/var/cache/nginx/fastcgi_temp \
- --http-uwsgi-temp-path=/var/cache/nginx/uwsgi_temp \
- --http-scgi-temp-path=/var/cache/nginx/scgi_temp \
- --user=nginx \
- --group=nginx \
- --with-http_ssl_module \
- --with-http_realip_module \
- --with-http_addition_module \
- --with-http_sub_module \
- --with-http_dav_module \
- --with-http_flv_module \
- --with-http_mp4_module \
- --with-http_gunzip_module \
- --with-http_gzip_static_module \
- --with-http_random_index_module \
- --with-http_secure_link_module \
- --with-http_stub_status_module \
- --with-http_auth_request_module \
- --with-http_xslt_module=dynamic \
- --with-http_image_filter_module=dynamic \
- --with-http_geoip_module=dynamic \
- --with-threads \
- --with-stream \
- --with-stream_ssl_module \
- --with-stream_ssl_preread_module \
- --with-stream_realip_module \
- --with-stream_geoip_module=dynamic \
- --with-http_slice_module \
- --with-mail \
- --with-mail_ssl_module \
- --with-compat \
- --with-file-aio \
- --with-http_v2_module \
- " \
- && addgroup -S nginx \
- && adduser -D -S -h /var/cache/nginx -s /sbin/nologin -G nginx nginx \
- && apk add --no-cache --virtual .build-deps \
- gcc \
- libc-dev \
- make \
- openssl-dev \
- pcre-dev \
- zlib-dev \
- linux-headers \
- curl \
- gnupg \
- libxslt-dev \
- gd-dev \
- geoip-dev \
- && curl -fSL https://nginx.org/download/nginx-$NGINX_VERSION.tar.gz -o nginx.tar.gz \
- && curl -fSL https://nginx.org/download/nginx-$NGINX_VERSION.tar.gz.asc -o nginx.tar.gz.asc \
- && export GNUPGHOME="$(mktemp -d)" \
- && found=''; \
- for server in \
- ha.pool.sks-keyservers.net \
- hkp://keyserver.ubuntu.com:80 \
- hkp://p80.pool.sks-keyservers.net:80 \
- pgp.mit.edu \
- ; do \
- echo "Fetching GPG key $GPG_KEYS from $server"; \
- gpg --keyserver "$server" --keyserver-options timeout=10 --recv-keys "$GPG_KEYS" && found=yes && break; \
- done; \
- test -z "$found" && echo >&2 "error: failed to fetch GPG key $GPG_KEYS" && exit 1; \
- gpg --batch --verify nginx.tar.gz.asc nginx.tar.gz \
- && rm -rf "$GNUPGHOME" nginx.tar.gz.asc \
- && mkdir -p /usr/src \
- && tar -zxC /usr/src -f nginx.tar.gz \
- && rm nginx.tar.gz \
- && cd /usr/src/nginx-$NGINX_VERSION \
- && ./configure $CONFIG --with-debug \
- && make -j$(getconf _NPROCESSORS_ONLN) \
- && mv objs/nginx objs/nginx-debug \
- && mv objs/ngx_http_xslt_filter_module.so objs/ngx_http_xslt_filter_module-debug.so \
- && mv objs/ngx_http_image_filter_module.so objs/ngx_http_image_filter_module-debug.so \
- && mv objs/ngx_http_geoip_module.so objs/ngx_http_geoip_module-debug.so \
- && mv objs/ngx_stream_geoip_module.so objs/ngx_stream_geoip_module-debug.so \
- && ./configure $CONFIG \
- && make -j$(getconf _NPROCESSORS_ONLN) \
- && make install \
- && rm -rf /etc/nginx/html/ \
- && mkdir /etc/nginx/conf.d/ \
- && mkdir -p /usr/share/nginx/html/ \
- && install -m644 html/index.html /usr/share/nginx/html/ \
- && install -m644 html/50x.html /usr/share/nginx/html/ \
- && install -m755 objs/nginx-debug /usr/sbin/nginx-debug \
- && install -m755 objs/ngx_http_xslt_filter_module-debug.so /usr/lib/nginx/modules/ngx_http_xslt_filter_module-debug.so \
- && install -m755 objs/ngx_http_image_filter_module-debug.so /usr/lib/nginx/modules/ngx_http_image_filter_module-debug.so \
- && install -m755 objs/ngx_http_geoip_module-debug.so /usr/lib/nginx/modules/ngx_http_geoip_module-debug.so \
- && install -m755 objs/ngx_stream_geoip_module-debug.so /usr/lib/nginx/modules/ngx_stream_geoip_module-debug.so \
- && ln -s ../../usr/lib/nginx/modules /etc/nginx/modules \
- && strip /usr/sbin/nginx* \
- && strip /usr/lib/nginx/modules/*.so \
- && rm -rf /usr/src/nginx-$NGINX_VERSION \
- \
- # Bring in gettext so we can get `envsubst`, then throw
- # the rest away. To do this, we need to install `gettext`
- # then move `envsubst` out of the way so `gettext` can
- # be deleted completely, then move `envsubst` back.
- && apk add --no-cache --virtual .gettext gettext \
- && mv /usr/bin/envsubst /tmp/ \
- \
- && runDeps="$( \
- scanelf --needed --nobanner --format '%n#p' /usr/sbin/nginx /usr/lib/nginx/modules/*.so /tmp/envsubst \
- | tr ',' '\n' \
- | sort -u \
- | awk 'system("[ -e /usr/local/lib/" $1 " ]") == 0 { next } { print "so:" $1 }' \
- )" \
- && apk add --no-cache --virtual .nginx-rundeps $runDeps \
- && apk del .build-deps \
- && apk del .gettext \
- && mv /tmp/envsubst /usr/local/bin/ \
- \
- # Bring in tzdata so users could set the timezones through the environment
- # variables
- && apk add --no-cache tzdata \
- \
- # forward request and error logs to docker log collector
- && ln -sf /dev/stdout /var/log/nginx/access.log \
- && ln -sf /dev/stderr /var/log/nginx/error.log
-
-COPY nginx.conf /etc/nginx/nginx.conf
-COPY nginx.vh.default.conf /etc/nginx/conf.d/default.conf
-
-EXPOSE 80
-
-STOPSIGNAL SIGTERM
-
-CMD ["nginx", "-g", "daemon off;"]
+RUN set -x \
+ && apkArch="$(cat /etc/apk/arch)" \
+ && nginxPackages=" \
+ nginx=${NGINX_VERSION}-r${PKG_RELEASE} \
+ nginx-module-xslt=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
+ nginx-module-geoip=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
+ nginx-module-image-filter=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
+ nginx-module-njs=${NGINX_VERSION}.${NJS_VERSION}-r${NJS_RELEASE} \
+ " \
+# install prerequisites for public key and pkg-oss checks
+ && apk add --no-cache --virtual .checksum-deps \
+ openssl \
+ && case "$apkArch" in \
+ x86_64|aarch64) \
+# arches officially built by upstream
+ apk add -X "https://nginx.org/packages/mainline/alpine/v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" --no-cache $nginxPackages \
+ ;; \
+ *) \
+# we're on an architecture upstream doesn't officially build for
+# let's build binaries from the published packaging sources
+ set -x \
+ && tempDir="$(mktemp -d)" \
+ && chown nobody:nobody $tempDir \
+ && apk add --no-cache --virtual .build-deps \
+ gcc \
+ libc-dev \
+ make \
+ openssl-dev \
+ pcre2-dev \
+ zlib-dev \
+ linux-headers \
+ libxslt-dev \
+ gd-dev \
+ geoip-dev \
+ libedit-dev \
+ bash \
+ alpine-sdk \
+ findutils \
+ curl \
+ && su nobody -s /bin/sh -c " \
+ export HOME=${tempDir} \
+ && cd ${tempDir} \
+ && curl -f -L -O https://github.com/nginx/pkg-oss/archive/0286c5190d972a49bffc9bf247885dd510ce8181.tar.gz \
+ && PKGOSSCHECKSUM=\"1e546bd15d7bc68e1772ecb6a73e29ba108ee5554a28928e57af038a9e8fc4f5cd35708ce89ad1dfaac97d870e663d32ef41045611d30b20d38b46816e3ab535 *0286c5190d972a49bffc9bf247885dd510ce8181.tar.gz\" \
+ && if [ \"\$(openssl sha512 -r 0286c5190d972a49bffc9bf247885dd510ce8181.tar.gz)\" = \"\$PKGOSSCHECKSUM\" ]; then \
+ echo \"pkg-oss tarball checksum verification succeeded!\"; \
+ else \
+ echo \"pkg-oss tarball checksum verification failed!\"; \
+ exit 1; \
+ fi \
+ && tar xzvf 0286c5190d972a49bffc9bf247885dd510ce8181.tar.gz \
+ && cd pkg-oss-0286c5190d972a49bffc9bf247885dd510ce8181 \
+ && cd alpine \
+ && make module-geoip module-image-filter module-njs module-xslt \
+ && apk index --allow-untrusted -o ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz ${tempDir}/packages/alpine/${apkArch}/*.apk \
+ && abuild-sign -k ${tempDir}/.abuild/abuild-key.rsa ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz \
+ " \
+ && cp ${tempDir}/.abuild/abuild-key.rsa.pub /etc/apk/keys/ \
+ && apk del --no-network .build-deps \
+ && apk add -X ${tempDir}/packages/alpine/ --no-cache $nginxPackages \
+ ;; \
+ esac \
+# remove checksum deps
+ && apk del --no-network .checksum-deps \
+# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
+ && if [ -n "$tempDir" ]; then rm -rf "$tempDir"; fi \
+ && if [ -f "/etc/apk/keys/abuild-key.rsa.pub" ]; then rm -f /etc/apk/keys/abuild-key.rsa.pub; fi \
+# Bring in curl and ca-certificates to make registering on DNS SD easier
+ && apk add --no-cache curl ca-certificates
diff --git a/mainline/alpine/nginx.conf b/mainline/alpine/nginx.conf
deleted file mode 100644
index e4bad8db..00000000
--- a/mainline/alpine/nginx.conf
+++ /dev/null
@@ -1,32 +0,0 @@
-
-user nginx;
-worker_processes 1;
-
-error_log /var/log/nginx/error.log warn;
-pid /var/run/nginx.pid;
-
-
-events {
- worker_connections 1024;
-}
-
-
-http {
- include /etc/nginx/mime.types;
- default_type application/octet-stream;
-
- log_format main '$remote_addr - $remote_user [$time_local] "$request" '
- '$status $body_bytes_sent "$http_referer" '
- '"$http_user_agent" "$http_x_forwarded_for"';
-
- access_log /var/log/nginx/access.log main;
-
- sendfile on;
- #tcp_nopush on;
-
- keepalive_timeout 65;
-
- #gzip on;
-
- include /etc/nginx/conf.d/*.conf;
-}
diff --git a/mainline/alpine/nginx.vh.default.conf b/mainline/alpine/nginx.vh.default.conf
deleted file mode 100644
index 299c622a..00000000
--- a/mainline/alpine/nginx.vh.default.conf
+++ /dev/null
@@ -1,45 +0,0 @@
-server {
- listen 80;
- server_name localhost;
-
- #charset koi8-r;
- #access_log /var/log/nginx/host.access.log main;
-
- location / {
- root /usr/share/nginx/html;
- index index.html index.htm;
- }
-
- #error_page 404 /404.html;
-
- # redirect server error pages to the static page /50x.html
- #
- error_page 500 502 503 504 /50x.html;
- location = /50x.html {
- root /usr/share/nginx/html;
- }
-
- # proxy the PHP scripts to Apache listening on 127.0.0.1:80
- #
- #location ~ \.php$ {
- # proxy_pass http://127.0.0.1;
- #}
-
- # pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000
- #
- #location ~ \.php$ {
- # root html;
- # fastcgi_pass 127.0.0.1:9000;
- # fastcgi_index index.php;
- # fastcgi_param SCRIPT_FILENAME /scripts$fastcgi_script_name;
- # include fastcgi_params;
- #}
-
- # deny access to .htaccess files, if Apache's document root
- # concurs with nginx's one
- #
- #location ~ /\.ht {
- # deny all;
- #}
-}
-
diff --git a/mainline/debian-otel/Dockerfile b/mainline/debian-otel/Dockerfile
new file mode 100644
index 00000000..30b6c041
--- /dev/null
+++ b/mainline/debian-otel/Dockerfile
@@ -0,0 +1,100 @@
+#
+# NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh"
+#
+# PLEASE DO NOT EDIT IT DIRECTLY.
+#
+FROM nginx:1.27.3
+
+ENV OTEL_VERSION 0.1.0
+
+RUN set -x; \
+ NGINX_GPGKEY_PATH=/etc/apt/keyrings/nginx-archive-keyring.gpg; \
+ dpkgArch="$(dpkg --print-architecture)" \
+ && nginxPackages=" \
+ nginx=${NGINX_VERSION}-${PKG_RELEASE} \
+ nginx-module-xslt=${NGINX_VERSION}-${DYNPKG_RELEASE} \
+ nginx-module-geoip=${NGINX_VERSION}-${DYNPKG_RELEASE} \
+ nginx-module-image-filter=${NGINX_VERSION}-${DYNPKG_RELEASE} \
+ nginx-module-njs=${NGINX_VERSION}+${NJS_VERSION}-${NJS_RELEASE} \
+ nginx-module-otel=${NGINX_VERSION}+${OTEL_VERSION}-${PKG_RELEASE} \
+ " \
+ && case "$dpkgArch" in \
+ amd64|arm64) \
+# arches officially built by upstream
+ echo "deb [signed-by=$NGINX_GPGKEY_PATH] https://nginx.org/packages/mainline/debian/ bookworm nginx" >> /etc/apt/sources.list.d/nginx.list \
+ && apt-get update \
+ ;; \
+ *) \
+# we're on an architecture upstream doesn't officially build for
+# let's build binaries from the published packaging sources
+# new directory for storing sources and .deb files
+ tempDir="$(mktemp -d)" \
+ && chmod 777 "$tempDir" \
+# (777 to ensure APT's "_apt" user can access it too)
+ \
+# save list of currently-installed packages so build dependencies can be cleanly removed later
+ && savedAptMark="$(apt-mark showmanual)" \
+ \
+# build .deb files from upstream's packaging sources
+ && apt-get update \
+ && apt-get install --no-install-recommends --no-install-suggests -y \
+ curl \
+ devscripts \
+ equivs \
+ git \
+ libxml2-utils \
+ lsb-release \
+ xsltproc \
+ && ( \
+ cd "$tempDir" \
+ && REVISION="0286c5190d972a49bffc9bf247885dd510ce8181" \
+ && REVISION=${REVISION%~*} \
+ && curl -f -L -O https://github.com/nginx/pkg-oss/archive/${REVISION}.tar.gz \
+ && PKGOSSCHECKSUM="1e546bd15d7bc68e1772ecb6a73e29ba108ee5554a28928e57af038a9e8fc4f5cd35708ce89ad1dfaac97d870e663d32ef41045611d30b20d38b46816e3ab535 *${REVISION}.tar.gz" \
+ && if [ "$(openssl sha512 -r ${REVISION}.tar.gz)" = "$PKGOSSCHECKSUM" ]; then \
+ echo "pkg-oss tarball checksum verification succeeded!"; \
+ else \
+ echo "pkg-oss tarball checksum verification failed!"; \
+ exit 1; \
+ fi \
+ && tar xzvf ${REVISION}.tar.gz \
+ && cd pkg-oss-${REVISION} \
+ && cd debian \
+ && for target in module-otel; do \
+ make rules-$target; \
+ mk-build-deps --install --tool="apt-get -o Debug::pkgProblemResolver=yes --no-install-recommends --yes" \
+ debuild-$target/nginx-$NGINX_VERSION/debian/control; \
+ done \
+ && make module-otel \
+ ) \
+# we don't remove APT lists here because they get re-downloaded and removed later
+ \
+# reset apt-mark's "manual" list so that "purge --auto-remove" will remove all build dependencies
+# (which is done after we install the built packages so we don't have to redownload any overlapping dependencies)
+ && apt-mark showmanual | xargs apt-mark auto > /dev/null \
+ && { [ -z "$savedAptMark" ] || apt-mark manual $savedAptMark; } \
+ \
+# create a temporary local APT repo to install from (so that dependency resolution can be handled by APT, as it should be)
+ && ls -lAFh "$tempDir" \
+ && ( cd "$tempDir" && dpkg-scanpackages . > Packages ) \
+ && grep '^Package: ' "$tempDir/Packages" \
+ && echo "deb [ trusted=yes ] file://$tempDir ./" > /etc/apt/sources.list.d/temp.list \
+# work around the following APT issue by using "Acquire::GzipIndexes=false" (overriding "/etc/apt/apt.conf.d/docker-gzip-indexes")
+# Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
+# ...
+# E: Failed to fetch store:/var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
+ && apt-get -o Acquire::GzipIndexes=false update \
+ ;; \
+ esac \
+ \
+ && apt-get install --no-install-recommends --no-install-suggests -y \
+ $nginxPackages \
+ gettext-base \
+ curl \
+ && apt-get remove --purge --auto-remove -y && rm -rf /var/lib/apt/lists/* /etc/apt/sources.list.d/nginx.list \
+ \
+# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
+ && if [ -n "$tempDir" ]; then \
+ apt-get purge -y --auto-remove \
+ && rm -rf "$tempDir" /etc/apt/sources.list.d/temp.list; \
+ fi
diff --git a/mainline/debian-perl/Dockerfile b/mainline/debian-perl/Dockerfile
new file mode 100644
index 00000000..d11af157
--- /dev/null
+++ b/mainline/debian-perl/Dockerfile
@@ -0,0 +1,98 @@
+#
+# NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh"
+#
+# PLEASE DO NOT EDIT IT DIRECTLY.
+#
+FROM nginx:1.27.3
+
+RUN set -x; \
+ NGINX_GPGKEY_PATH=/etc/apt/keyrings/nginx-archive-keyring.gpg; \
+ dpkgArch="$(dpkg --print-architecture)" \
+ && nginxPackages=" \
+ nginx=${NGINX_VERSION}-${PKG_RELEASE} \
+ nginx-module-xslt=${NGINX_VERSION}-${DYNPKG_RELEASE} \
+ nginx-module-geoip=${NGINX_VERSION}-${DYNPKG_RELEASE} \
+ nginx-module-image-filter=${NGINX_VERSION}-${DYNPKG_RELEASE} \
+ nginx-module-perl=${NGINX_VERSION}-${DYNPKG_RELEASE} \
+ nginx-module-njs=${NGINX_VERSION}+${NJS_VERSION}-${NJS_RELEASE} \
+ " \
+ && case "$dpkgArch" in \
+ amd64|arm64) \
+# arches officially built by upstream
+ echo "deb [signed-by=$NGINX_GPGKEY_PATH] https://nginx.org/packages/mainline/debian/ bookworm nginx" >> /etc/apt/sources.list.d/nginx.list \
+ && apt-get update \
+ ;; \
+ *) \
+# we're on an architecture upstream doesn't officially build for
+# let's build binaries from the published packaging sources
+# new directory for storing sources and .deb files
+ tempDir="$(mktemp -d)" \
+ && chmod 777 "$tempDir" \
+# (777 to ensure APT's "_apt" user can access it too)
+ \
+# save list of currently-installed packages so build dependencies can be cleanly removed later
+ && savedAptMark="$(apt-mark showmanual)" \
+ \
+# build .deb files from upstream's packaging sources
+ && apt-get update \
+ && apt-get install --no-install-recommends --no-install-suggests -y \
+ curl \
+ devscripts \
+ equivs \
+ git \
+ libxml2-utils \
+ lsb-release \
+ xsltproc \
+ && ( \
+ cd "$tempDir" \
+ && REVISION="0286c5190d972a49bffc9bf247885dd510ce8181" \
+ && REVISION=${REVISION%~*} \
+ && curl -f -L -O https://github.com/nginx/pkg-oss/archive/${REVISION}.tar.gz \
+ && PKGOSSCHECKSUM="1e546bd15d7bc68e1772ecb6a73e29ba108ee5554a28928e57af038a9e8fc4f5cd35708ce89ad1dfaac97d870e663d32ef41045611d30b20d38b46816e3ab535 *${REVISION}.tar.gz" \
+ && if [ "$(openssl sha512 -r ${REVISION}.tar.gz)" = "$PKGOSSCHECKSUM" ]; then \
+ echo "pkg-oss tarball checksum verification succeeded!"; \
+ else \
+ echo "pkg-oss tarball checksum verification failed!"; \
+ exit 1; \
+ fi \
+ && tar xzvf ${REVISION}.tar.gz \
+ && cd pkg-oss-${REVISION} \
+ && cd debian \
+ && for target in module-perl; do \
+ make rules-$target; \
+ mk-build-deps --install --tool="apt-get -o Debug::pkgProblemResolver=yes --no-install-recommends --yes" \
+ debuild-$target/nginx-$NGINX_VERSION/debian/control; \
+ done \
+ && make module-perl \
+ ) \
+# we don't remove APT lists here because they get re-downloaded and removed later
+ \
+# reset apt-mark's "manual" list so that "purge --auto-remove" will remove all build dependencies
+# (which is done after we install the built packages so we don't have to redownload any overlapping dependencies)
+ && apt-mark showmanual | xargs apt-mark auto > /dev/null \
+ && { [ -z "$savedAptMark" ] || apt-mark manual $savedAptMark; } \
+ \
+# create a temporary local APT repo to install from (so that dependency resolution can be handled by APT, as it should be)
+ && ls -lAFh "$tempDir" \
+ && ( cd "$tempDir" && dpkg-scanpackages . > Packages ) \
+ && grep '^Package: ' "$tempDir/Packages" \
+ && echo "deb [ trusted=yes ] file://$tempDir ./" > /etc/apt/sources.list.d/temp.list \
+# work around the following APT issue by using "Acquire::GzipIndexes=false" (overriding "/etc/apt/apt.conf.d/docker-gzip-indexes")
+# Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
+# ...
+# E: Failed to fetch store:/var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
+ && apt-get -o Acquire::GzipIndexes=false update \
+ ;; \
+ esac \
+ \
+ && apt-get install --no-install-recommends --no-install-suggests -y \
+ $nginxPackages \
+ gettext-base \
+ curl \
+ && apt-get remove --purge --auto-remove -y && rm -rf /var/lib/apt/lists/* /etc/apt/sources.list.d/nginx.list \
+ \
+# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
+ && if [ -n "$tempDir" ]; then \
+ apt-get purge -y --auto-remove \
+ && rm -rf "$tempDir" /etc/apt/sources.list.d/temp.list; \
+ fi
diff --git a/mainline/debian/10-listen-on-ipv6-by-default.sh b/mainline/debian/10-listen-on-ipv6-by-default.sh
new file mode 100755
index 00000000..b90bf0c9
--- /dev/null
+++ b/mainline/debian/10-listen-on-ipv6-by-default.sh
@@ -0,0 +1,67 @@
+#!/bin/sh
+# vim:sw=4:ts=4:et
+
+set -e
+
+entrypoint_log() {
+ if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then
+ echo "$@"
+ fi
+}
+
+ME=$(basename "$0")
+DEFAULT_CONF_FILE="etc/nginx/conf.d/default.conf"
+
+# check if we have ipv6 available
+if [ ! -f "/proc/net/if_inet6" ]; then
+ entrypoint_log "$ME: info: ipv6 not available"
+ exit 0
+fi
+
+if [ ! -f "/$DEFAULT_CONF_FILE" ]; then
+ entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE is not a file or does not exist"
+ exit 0
+fi
+
+# check if the file can be modified, e.g. not on a r/o filesystem
+touch /$DEFAULT_CONF_FILE 2>/dev/null || { entrypoint_log "$ME: info: can not modify /$DEFAULT_CONF_FILE (read-only file system?)"; exit 0; }
+
+# check if the file is already modified, e.g. on a container restart
+grep -q "listen \[::]\:80;" /$DEFAULT_CONF_FILE && { entrypoint_log "$ME: info: IPv6 listen already enabled"; exit 0; }
+
+if [ -f "/etc/os-release" ]; then
+ . /etc/os-release
+else
+ entrypoint_log "$ME: info: can not guess the operating system"
+ exit 0
+fi
+
+entrypoint_log "$ME: info: Getting the checksum of /$DEFAULT_CONF_FILE"
+
+case "$ID" in
+ "debian")
+ CHECKSUM=$(dpkg-query --show --showformat='${Conffiles}\n' nginx | grep $DEFAULT_CONF_FILE | cut -d' ' -f 3)
+ echo "$CHECKSUM /$DEFAULT_CONF_FILE" | md5sum -c - >/dev/null 2>&1 || {
+ entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE differs from the packaged version"
+ exit 0
+ }
+ ;;
+ "alpine")
+ CHECKSUM=$(apk manifest nginx 2>/dev/null| grep $DEFAULT_CONF_FILE | cut -d' ' -f 1 | cut -d ':' -f 2)
+ echo "$CHECKSUM /$DEFAULT_CONF_FILE" | sha1sum -c - >/dev/null 2>&1 || {
+ entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE differs from the packaged version"
+ exit 0
+ }
+ ;;
+ *)
+ entrypoint_log "$ME: info: Unsupported distribution"
+ exit 0
+ ;;
+esac
+
+# enable ipv6 on default.conf listen sockets
+sed -i -E 's,listen 80;,listen 80;\n listen [::]:80;,' /$DEFAULT_CONF_FILE
+
+entrypoint_log "$ME: info: Enabled listen on IPv6 in /$DEFAULT_CONF_FILE"
+
+exit 0
diff --git a/mainline/debian/15-local-resolvers.envsh b/mainline/debian/15-local-resolvers.envsh
new file mode 100755
index 00000000..e830ddac
--- /dev/null
+++ b/mainline/debian/15-local-resolvers.envsh
@@ -0,0 +1,15 @@
+#!/bin/sh
+# vim:sw=2:ts=2:sts=2:et
+
+set -eu
+
+LC_ALL=C
+PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+
+[ "${NGINX_ENTRYPOINT_LOCAL_RESOLVERS:-}" ] || return 0
+
+NGINX_LOCAL_RESOLVERS=$(awk 'BEGIN{ORS=" "} $1=="nameserver" {if ($2 ~ ":") {print "["$2"]"} else {print $2}}' /etc/resolv.conf)
+
+NGINX_LOCAL_RESOLVERS="${NGINX_LOCAL_RESOLVERS% }"
+
+export NGINX_LOCAL_RESOLVERS
diff --git a/mainline/debian/20-envsubst-on-templates.sh b/mainline/debian/20-envsubst-on-templates.sh
new file mode 100755
index 00000000..3804165c
--- /dev/null
+++ b/mainline/debian/20-envsubst-on-templates.sh
@@ -0,0 +1,78 @@
+#!/bin/sh
+
+set -e
+
+ME=$(basename "$0")
+
+entrypoint_log() {
+ if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then
+ echo "$@"
+ fi
+}
+
+add_stream_block() {
+ local conffile="/etc/nginx/nginx.conf"
+
+ if grep -q -E "\s*stream\s*\{" "$conffile"; then
+ entrypoint_log "$ME: $conffile contains a stream block; include $stream_output_dir/*.conf to enable stream templates"
+ else
+ # check if the file can be modified, e.g. not on a r/o filesystem
+ touch "$conffile" 2>/dev/null || { entrypoint_log "$ME: info: can not modify $conffile (read-only file system?)"; exit 0; }
+ entrypoint_log "$ME: Appending stream block to $conffile to include $stream_output_dir/*.conf"
+ cat << END >> "$conffile"
+# added by "$ME" on "$(date)"
+stream {
+ include $stream_output_dir/*.conf;
+}
+END
+ fi
+}
+
+auto_envsubst() {
+ local template_dir="${NGINX_ENVSUBST_TEMPLATE_DIR:-/etc/nginx/templates}"
+ local suffix="${NGINX_ENVSUBST_TEMPLATE_SUFFIX:-.template}"
+ local output_dir="${NGINX_ENVSUBST_OUTPUT_DIR:-/etc/nginx/conf.d}"
+ local stream_suffix="${NGINX_ENVSUBST_STREAM_TEMPLATE_SUFFIX:-.stream-template}"
+ local stream_output_dir="${NGINX_ENVSUBST_STREAM_OUTPUT_DIR:-/etc/nginx/stream-conf.d}"
+ local filter="${NGINX_ENVSUBST_FILTER:-}"
+
+ local template defined_envs relative_path output_path subdir
+ defined_envs=$(printf '${%s} ' $(awk "END { for (name in ENVIRON) { print ( name ~ /${filter}/ ) ? name : \"\" } }" < /dev/null ))
+ [ -d "$template_dir" ] || return 0
+ if [ ! -w "$output_dir" ]; then
+ entrypoint_log "$ME: ERROR: $template_dir exists, but $output_dir is not writable"
+ return 0
+ fi
+ find "$template_dir" -follow -type f -name "*$suffix" -print | while read -r template; do
+ relative_path="${template#"$template_dir/"}"
+ output_path="$output_dir/${relative_path%"$suffix"}"
+ subdir=$(dirname "$relative_path")
+ # create a subdirectory where the template file exists
+ mkdir -p "$output_dir/$subdir"
+ entrypoint_log "$ME: Running envsubst on $template to $output_path"
+ envsubst "$defined_envs" < "$template" > "$output_path"
+ done
+
+  # Print the first file with the stream suffix; this will be false if there are none
+ if test -n "$(find "$template_dir" -name "*$stream_suffix" -print -quit)"; then
+ mkdir -p "$stream_output_dir"
+ if [ ! -w "$stream_output_dir" ]; then
+ entrypoint_log "$ME: ERROR: $template_dir exists, but $stream_output_dir is not writable"
+ return 0
+ fi
+ add_stream_block
+ find "$template_dir" -follow -type f -name "*$stream_suffix" -print | while read -r template; do
+ relative_path="${template#"$template_dir/"}"
+ output_path="$stream_output_dir/${relative_path%"$stream_suffix"}"
+ subdir=$(dirname "$relative_path")
+ # create a subdirectory where the template file exists
+ mkdir -p "$stream_output_dir/$subdir"
+ entrypoint_log "$ME: Running envsubst on $template to $output_path"
+ envsubst "$defined_envs" < "$template" > "$output_path"
+ done
+ fi
+}
+
+auto_envsubst
+
+exit 0
diff --git a/mainline/debian/30-tune-worker-processes.sh b/mainline/debian/30-tune-worker-processes.sh
new file mode 100755
index 00000000..defb994f
--- /dev/null
+++ b/mainline/debian/30-tune-worker-processes.sh
@@ -0,0 +1,188 @@
+#!/bin/sh
+# vim:sw=2:ts=2:sts=2:et
+
+set -eu
+
+LC_ALL=C
+ME=$(basename "$0")
+PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+
+[ "${NGINX_ENTRYPOINT_WORKER_PROCESSES_AUTOTUNE:-}" ] || exit 0
+
+touch /etc/nginx/nginx.conf 2>/dev/null || { echo >&2 "$ME: error: can not modify /etc/nginx/nginx.conf (read-only file system?)"; exit 0; }
+
+ceildiv() {
+ num=$1
+ div=$2
+ echo $(( (num + div - 1) / div ))
+}
+
+get_cpuset() {
+ cpusetroot=$1
+ cpusetfile=$2
+ ncpu=0
+ [ -f "$cpusetroot/$cpusetfile" ] || return 1
+ for token in $( tr ',' ' ' < "$cpusetroot/$cpusetfile" ); do
+ case "$token" in
+ *-*)
+ count=$( seq $(echo "$token" | tr '-' ' ') | wc -l )
+ ncpu=$(( ncpu+count ))
+ ;;
+ *)
+ ncpu=$(( ncpu+1 ))
+ ;;
+ esac
+ done
+ echo "$ncpu"
+}
+
+get_quota() {
+ cpuroot=$1
+ ncpu=0
+ [ -f "$cpuroot/cpu.cfs_quota_us" ] || return 1
+ [ -f "$cpuroot/cpu.cfs_period_us" ] || return 1
+ cfs_quota=$( cat "$cpuroot/cpu.cfs_quota_us" )
+ cfs_period=$( cat "$cpuroot/cpu.cfs_period_us" )
+ [ "$cfs_quota" = "-1" ] && return 1
+ [ "$cfs_period" = "0" ] && return 1
+ ncpu=$( ceildiv "$cfs_quota" "$cfs_period" )
+ [ "$ncpu" -gt 0 ] || return 1
+ echo "$ncpu"
+}
+
+get_quota_v2() {
+ cpuroot=$1
+ ncpu=0
+ [ -f "$cpuroot/cpu.max" ] || return 1
+ cfs_quota=$( cut -d' ' -f 1 < "$cpuroot/cpu.max" )
+ cfs_period=$( cut -d' ' -f 2 < "$cpuroot/cpu.max" )
+ [ "$cfs_quota" = "max" ] && return 1
+ [ "$cfs_period" = "0" ] && return 1
+ ncpu=$( ceildiv "$cfs_quota" "$cfs_period" )
+ [ "$ncpu" -gt 0 ] || return 1
+ echo "$ncpu"
+}
+
+get_cgroup_v1_path() {
+ needle=$1
+ found=
+ foundroot=
+ mountpoint=
+
+ [ -r "/proc/self/mountinfo" ] || return 1
+ [ -r "/proc/self/cgroup" ] || return 1
+
+ while IFS= read -r line; do
+ case "$needle" in
+ "cpuset")
+ case "$line" in
+ *cpuset*)
+ found=$( echo "$line" | cut -d ' ' -f 4,5 )
+ break
+ ;;
+ esac
+ ;;
+ "cpu")
+ case "$line" in
+ *cpuset*)
+ ;;
+ *cpu,cpuacct*|*cpuacct,cpu|*cpuacct*|*cpu*)
+ found=$( echo "$line" | cut -d ' ' -f 4,5 )
+ break
+ ;;
+ esac
+ esac
+ done << __EOF__
+$( grep -F -- '- cgroup ' /proc/self/mountinfo )
+__EOF__
+
+ while IFS= read -r line; do
+ controller=$( echo "$line" | cut -d: -f 2 )
+ case "$needle" in
+ "cpuset")
+ case "$controller" in
+ cpuset)
+ mountpoint=$( echo "$line" | cut -d: -f 3 )
+ break
+ ;;
+ esac
+ ;;
+ "cpu")
+ case "$controller" in
+ cpu,cpuacct|cpuacct,cpu|cpuacct|cpu)
+ mountpoint=$( echo "$line" | cut -d: -f 3 )
+ break
+ ;;
+ esac
+ ;;
+ esac
+done << __EOF__
+$( grep -F -- 'cpu' /proc/self/cgroup )
+__EOF__
+
+ case "${found%% *}" in
+ "/")
+ foundroot="${found##* }$mountpoint"
+ ;;
+ "$mountpoint")
+ foundroot="${found##* }"
+ ;;
+ esac
+ echo "$foundroot"
+}
+
+get_cgroup_v2_path() {
+ found=
+ foundroot=
+ mountpoint=
+
+ [ -r "/proc/self/mountinfo" ] || return 1
+ [ -r "/proc/self/cgroup" ] || return 1
+
+ while IFS= read -r line; do
+ found=$( echo "$line" | cut -d ' ' -f 4,5 )
+ done << __EOF__
+$( grep -F -- '- cgroup2 ' /proc/self/mountinfo )
+__EOF__
+
+ while IFS= read -r line; do
+ mountpoint=$( echo "$line" | cut -d: -f 3 )
+done << __EOF__
+$( grep -F -- '0::' /proc/self/cgroup )
+__EOF__
+
+ case "${found%% *}" in
+ "")
+ return 1
+ ;;
+ "/")
+ foundroot="${found##* }$mountpoint"
+ ;;
+ "$mountpoint" | /../*)
+ foundroot="${found##* }"
+ ;;
+ esac
+ echo "$foundroot"
+}
+
+ncpu_online=$( getconf _NPROCESSORS_ONLN )
+ncpu_cpuset=
+ncpu_quota=
+ncpu_cpuset_v2=
+ncpu_quota_v2=
+
+cpuset=$( get_cgroup_v1_path "cpuset" ) && ncpu_cpuset=$( get_cpuset "$cpuset" "cpuset.effective_cpus" ) || ncpu_cpuset=$ncpu_online
+cpu=$( get_cgroup_v1_path "cpu" ) && ncpu_quota=$( get_quota "$cpu" ) || ncpu_quota=$ncpu_online
+cgroup_v2=$( get_cgroup_v2_path ) && ncpu_cpuset_v2=$( get_cpuset "$cgroup_v2" "cpuset.cpus.effective" ) || ncpu_cpuset_v2=$ncpu_online
+cgroup_v2=$( get_cgroup_v2_path ) && ncpu_quota_v2=$( get_quota_v2 "$cgroup_v2" ) || ncpu_quota_v2=$ncpu_online
+
+ncpu=$( printf "%s\n%s\n%s\n%s\n%s\n" \
+ "$ncpu_online" \
+ "$ncpu_cpuset" \
+ "$ncpu_quota" \
+ "$ncpu_cpuset_v2" \
+ "$ncpu_quota_v2" \
+ | sort -n \
+ | head -n 1 )
+
+sed -i.bak -r 's/^(worker_processes)(.*)$/# Commented out by '"$ME"' on '"$(date)"'\n#\1\2\n\1 '"$ncpu"';/' /etc/nginx/nginx.conf
diff --git a/mainline/debian/Dockerfile b/mainline/debian/Dockerfile
new file mode 100644
index 00000000..c25a780d
--- /dev/null
+++ b/mainline/debian/Dockerfile
@@ -0,0 +1,145 @@
+#
+# NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh"
+#
+# PLEASE DO NOT EDIT IT DIRECTLY.
+#
+FROM debian:bookworm-slim
+
+LABEL maintainer="NGINX Docker Maintainers <docker-maint@nginx.com>"
+
+ENV NGINX_VERSION 1.27.3
+ENV NJS_VERSION 0.8.8
+ENV NJS_RELEASE 1~bookworm
+ENV PKG_RELEASE 1~bookworm
+ENV DYNPKG_RELEASE 1~bookworm
+
+RUN set -x \
+# create nginx user/group first, to be consistent throughout docker variants
+ && groupadd --system --gid 101 nginx \
+ && useradd --system --gid nginx --no-create-home --home /nonexistent --comment "nginx user" --shell /bin/false --uid 101 nginx \
+ && apt-get update \
+ && apt-get install --no-install-recommends --no-install-suggests -y gnupg1 ca-certificates \
+ && \
+ NGINX_GPGKEYS="573BFD6B3D8FBC641079A6ABABF5BD827BD9BF62 8540A6F18833A80E9C1653A42FD21310B49F6B46 9E9BE90EACBCDE69FE9B204CBCDCD8A38D88A2B3"; \
+ NGINX_GPGKEY_PATH=/etc/apt/keyrings/nginx-archive-keyring.gpg; \
+ export GNUPGHOME="$(mktemp -d)"; \
+ found=''; \
+ for NGINX_GPGKEY in $NGINX_GPGKEYS; do \
+ for server in \
+ hkp://keyserver.ubuntu.com:80 \
+ pgp.mit.edu \
+ ; do \
+ echo "Fetching GPG key $NGINX_GPGKEY from $server"; \
+ gpg1 --keyserver "$server" --keyserver-options timeout=10 --recv-keys "$NGINX_GPGKEY" && found=yes && break; \
+ done; \
+ test -z "$found" && echo >&2 "error: failed to fetch GPG key $NGINX_GPGKEY" && exit 1; \
+ done; \
+ gpg1 --export "$NGINX_GPGKEYS" > "$NGINX_GPGKEY_PATH" ; \
+ rm -rf "$GNUPGHOME"; \
+ apt-get remove --purge --auto-remove -y gnupg1 && rm -rf /var/lib/apt/lists/* \
+ && dpkgArch="$(dpkg --print-architecture)" \
+ && nginxPackages=" \
+ nginx=${NGINX_VERSION}-${PKG_RELEASE} \
+ nginx-module-xslt=${NGINX_VERSION}-${DYNPKG_RELEASE} \
+ nginx-module-geoip=${NGINX_VERSION}-${DYNPKG_RELEASE} \
+ nginx-module-image-filter=${NGINX_VERSION}-${DYNPKG_RELEASE} \
+ nginx-module-njs=${NGINX_VERSION}+${NJS_VERSION}-${NJS_RELEASE} \
+ " \
+ && case "$dpkgArch" in \
+ amd64|arm64) \
+# arches officially built by upstream
+ echo "deb [signed-by=$NGINX_GPGKEY_PATH] https://nginx.org/packages/mainline/debian/ bookworm nginx" >> /etc/apt/sources.list.d/nginx.list \
+ && apt-get update \
+ ;; \
+ *) \
+# we're on an architecture upstream doesn't officially build for
+# let's build binaries from the published packaging sources
+# new directory for storing sources and .deb files
+ tempDir="$(mktemp -d)" \
+ && chmod 777 "$tempDir" \
+# (777 to ensure APT's "_apt" user can access it too)
+ \
+# save list of currently-installed packages so build dependencies can be cleanly removed later
+ && savedAptMark="$(apt-mark showmanual)" \
+ \
+# build .deb files from upstream's packaging sources
+ && apt-get update \
+ && apt-get install --no-install-recommends --no-install-suggests -y \
+ curl \
+ devscripts \
+ equivs \
+ git \
+ libxml2-utils \
+ lsb-release \
+ xsltproc \
+ && ( \
+ cd "$tempDir" \
+ && REVISION="0286c5190d972a49bffc9bf247885dd510ce8181" \
+ && REVISION=${REVISION%~*} \
+ && curl -f -L -O https://github.com/nginx/pkg-oss/archive/${REVISION}.tar.gz \
+ && PKGOSSCHECKSUM="1e546bd15d7bc68e1772ecb6a73e29ba108ee5554a28928e57af038a9e8fc4f5cd35708ce89ad1dfaac97d870e663d32ef41045611d30b20d38b46816e3ab535 *${REVISION}.tar.gz" \
+ && if [ "$(openssl sha512 -r ${REVISION}.tar.gz)" = "$PKGOSSCHECKSUM" ]; then \
+ echo "pkg-oss tarball checksum verification succeeded!"; \
+ else \
+ echo "pkg-oss tarball checksum verification failed!"; \
+ exit 1; \
+ fi \
+ && tar xzvf ${REVISION}.tar.gz \
+ && cd pkg-oss-${REVISION} \
+ && cd debian \
+ && for target in base module-geoip module-image-filter module-njs module-xslt; do \
+ make rules-$target; \
+ mk-build-deps --install --tool="apt-get -o Debug::pkgProblemResolver=yes --no-install-recommends --yes" \
+ debuild-$target/nginx-$NGINX_VERSION/debian/control; \
+ done \
+ && make base module-geoip module-image-filter module-njs module-xslt \
+ ) \
+# we don't remove APT lists here because they get re-downloaded and removed later
+ \
+# reset apt-mark's "manual" list so that "purge --auto-remove" will remove all build dependencies
+# (which is done after we install the built packages so we don't have to redownload any overlapping dependencies)
+ && apt-mark showmanual | xargs apt-mark auto > /dev/null \
+ && { [ -z "$savedAptMark" ] || apt-mark manual $savedAptMark; } \
+ \
+# create a temporary local APT repo to install from (so that dependency resolution can be handled by APT, as it should be)
+ && ls -lAFh "$tempDir" \
+ && ( cd "$tempDir" && dpkg-scanpackages . > Packages ) \
+ && grep '^Package: ' "$tempDir/Packages" \
+ && echo "deb [ trusted=yes ] file://$tempDir ./" > /etc/apt/sources.list.d/temp.list \
+# work around the following APT issue by using "Acquire::GzipIndexes=false" (overriding "/etc/apt/apt.conf.d/docker-gzip-indexes")
+# Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
+# ...
+# E: Failed to fetch store:/var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
+ && apt-get -o Acquire::GzipIndexes=false update \
+ ;; \
+ esac \
+ \
+ && apt-get install --no-install-recommends --no-install-suggests -y \
+ $nginxPackages \
+ gettext-base \
+ curl \
+ && apt-get remove --purge --auto-remove -y && rm -rf /var/lib/apt/lists/* /etc/apt/sources.list.d/nginx.list \
+ \
+# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
+ && if [ -n "$tempDir" ]; then \
+ apt-get purge -y --auto-remove \
+ && rm -rf "$tempDir" /etc/apt/sources.list.d/temp.list; \
+ fi \
+# forward request and error logs to docker log collector
+ && ln -sf /dev/stdout /var/log/nginx/access.log \
+ && ln -sf /dev/stderr /var/log/nginx/error.log \
+# create a docker-entrypoint.d directory
+ && mkdir /docker-entrypoint.d
+
+COPY docker-entrypoint.sh /
+COPY 10-listen-on-ipv6-by-default.sh /docker-entrypoint.d
+COPY 15-local-resolvers.envsh /docker-entrypoint.d
+COPY 20-envsubst-on-templates.sh /docker-entrypoint.d
+COPY 30-tune-worker-processes.sh /docker-entrypoint.d
+ENTRYPOINT ["/docker-entrypoint.sh"]
+
+EXPOSE 80
+
+STOPSIGNAL SIGQUIT
+
+CMD ["nginx", "-g", "daemon off;"]
diff --git a/mainline/debian/docker-entrypoint.sh b/mainline/debian/docker-entrypoint.sh
new file mode 100755
index 00000000..8ea04f21
--- /dev/null
+++ b/mainline/debian/docker-entrypoint.sh
@@ -0,0 +1,47 @@
+#!/bin/sh
+# vim:sw=4:ts=4:et
+
+set -e
+
+entrypoint_log() {
+ if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then
+ echo "$@"
+ fi
+}
+
+if [ "$1" = "nginx" ] || [ "$1" = "nginx-debug" ]; then
+ if /usr/bin/find "/docker-entrypoint.d/" -mindepth 1 -maxdepth 1 -type f -print -quit 2>/dev/null | read v; then
+ entrypoint_log "$0: /docker-entrypoint.d/ is not empty, will attempt to perform configuration"
+
+ entrypoint_log "$0: Looking for shell scripts in /docker-entrypoint.d/"
+ find "/docker-entrypoint.d/" -follow -type f -print | sort -V | while read -r f; do
+ case "$f" in
+ *.envsh)
+ if [ -x "$f" ]; then
+ entrypoint_log "$0: Sourcing $f";
+ . "$f"
+ else
+ # warn on shell scripts without exec bit
+ entrypoint_log "$0: Ignoring $f, not executable";
+ fi
+ ;;
+ *.sh)
+ if [ -x "$f" ]; then
+ entrypoint_log "$0: Launching $f";
+ "$f"
+ else
+ # warn on shell scripts without exec bit
+ entrypoint_log "$0: Ignoring $f, not executable";
+ fi
+ ;;
+ *) entrypoint_log "$0: Ignoring $f";;
+ esac
+ done
+
+ entrypoint_log "$0: Configuration complete; ready for start up"
+ else
+ entrypoint_log "$0: No files found in /docker-entrypoint.d/, skipping configuration"
+ fi
+fi
+
+exec "$@"
diff --git a/mainline/stretch-perl/Dockerfile b/mainline/stretch-perl/Dockerfile
deleted file mode 100644
index 7c34189d..00000000
--- a/mainline/stretch-perl/Dockerfile
+++ /dev/null
@@ -1,100 +0,0 @@
-FROM debian:stretch-slim
-
-LABEL maintainer="NGINX Docker Maintainers "
-
-ENV NGINX_VERSION 1.15.2-1~stretch
-ENV NJS_VERSION 1.15.2.0.2.2-1~stretch
-
-RUN set -x \
- && apt-get update \
- && apt-get install --no-install-recommends --no-install-suggests -y gnupg1 apt-transport-https ca-certificates \
- && \
- NGINX_GPGKEY=573BFD6B3D8FBC641079A6ABABF5BD827BD9BF62; \
- found=''; \
- for server in \
- ha.pool.sks-keyservers.net \
- hkp://keyserver.ubuntu.com:80 \
- hkp://p80.pool.sks-keyservers.net:80 \
- pgp.mit.edu \
- ; do \
- echo "Fetching GPG key $NGINX_GPGKEY from $server"; \
- apt-key adv --keyserver "$server" --keyserver-options timeout=10 --recv-keys "$NGINX_GPGKEY" && found=yes && break; \
- done; \
- test -z "$found" && echo >&2 "error: failed to fetch GPG key $NGINX_GPGKEY" && exit 1; \
- apt-get remove --purge --auto-remove -y gnupg1 && rm -rf /var/lib/apt/lists/* \
- && dpkgArch="$(dpkg --print-architecture)" \
- && nginxPackages=" \
- nginx=${NGINX_VERSION} \
- nginx-module-xslt=${NGINX_VERSION} \
- nginx-module-geoip=${NGINX_VERSION} \
- nginx-module-image-filter=${NGINX_VERSION} \
- nginx-module-perl=${NGINX_VERSION} \
- nginx-module-njs=${NJS_VERSION} \
- " \
- && case "$dpkgArch" in \
- amd64|i386) \
-# arches officialy built by upstream
- echo "deb https://nginx.org/packages/mainline/debian/ stretch nginx" >> /etc/apt/sources.list.d/nginx.list \
- && apt-get update \
- ;; \
- *) \
-# we're on an architecture upstream doesn't officially build for
-# let's build binaries from the published source packages
- echo "deb-src https://nginx.org/packages/mainline/debian/ stretch nginx" >> /etc/apt/sources.list.d/nginx.list \
- \
-# new directory for storing sources and .deb files
- && tempDir="$(mktemp -d)" \
- && chmod 777 "$tempDir" \
-# (777 to ensure APT's "_apt" user can access it too)
- \
-# save list of currently-installed packages so build dependencies can be cleanly removed later
- && savedAptMark="$(apt-mark showmanual)" \
- \
-# build .deb files from upstream's source packages (which are verified by apt-get)
- && apt-get update \
- && apt-get build-dep -y $nginxPackages \
- && ( \
- cd "$tempDir" \
- && DEB_BUILD_OPTIONS="nocheck parallel=$(nproc)" \
- apt-get source --compile $nginxPackages \
- ) \
-# we don't remove APT lists here because they get re-downloaded and removed later
- \
-# reset apt-mark's "manual" list so that "purge --auto-remove" will remove all build dependencies
-# (which is done after we install the built packages so we don't have to redownload any overlapping dependencies)
- && apt-mark showmanual | xargs apt-mark auto > /dev/null \
- && { [ -z "$savedAptMark" ] || apt-mark manual $savedAptMark; } \
- \
-# create a temporary local APT repo to install from (so that dependency resolution can be handled by APT, as it should be)
- && ls -lAFh "$tempDir" \
- && ( cd "$tempDir" && dpkg-scanpackages . > Packages ) \
- && grep '^Package: ' "$tempDir/Packages" \
- && echo "deb [ trusted=yes ] file://$tempDir ./" > /etc/apt/sources.list.d/temp.list \
-# work around the following APT issue by using "Acquire::GzipIndexes=false" (overriding "/etc/apt/apt.conf.d/docker-gzip-indexes")
-# Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
-# ...
-# E: Failed to fetch store:/var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
- && apt-get -o Acquire::GzipIndexes=false update \
- ;; \
- esac \
- \
- && apt-get install --no-install-recommends --no-install-suggests -y \
- $nginxPackages \
- gettext-base \
- && apt-get remove --purge --auto-remove -y apt-transport-https ca-certificates && rm -rf /var/lib/apt/lists/* /etc/apt/sources.list.d/nginx.list \
- \
-# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
- && if [ -n "$tempDir" ]; then \
- apt-get purge -y --auto-remove \
- && rm -rf "$tempDir" /etc/apt/sources.list.d/temp.list; \
- fi
-
-# forward request and error logs to docker log collector
-RUN ln -sf /dev/stdout /var/log/nginx/access.log \
- && ln -sf /dev/stderr /var/log/nginx/error.log
-
-EXPOSE 80
-
-STOPSIGNAL SIGTERM
-
-CMD ["nginx", "-g", "daemon off;"]
diff --git a/mainline/stretch/Dockerfile b/mainline/stretch/Dockerfile
deleted file mode 100644
index a7b5ce89..00000000
--- a/mainline/stretch/Dockerfile
+++ /dev/null
@@ -1,99 +0,0 @@
-FROM debian:stretch-slim
-
-LABEL maintainer="NGINX Docker Maintainers "
-
-ENV NGINX_VERSION 1.15.2-1~stretch
-ENV NJS_VERSION 1.15.2.0.2.2-1~stretch
-
-RUN set -x \
- && apt-get update \
- && apt-get install --no-install-recommends --no-install-suggests -y gnupg1 apt-transport-https ca-certificates \
- && \
- NGINX_GPGKEY=573BFD6B3D8FBC641079A6ABABF5BD827BD9BF62; \
- found=''; \
- for server in \
- ha.pool.sks-keyservers.net \
- hkp://keyserver.ubuntu.com:80 \
- hkp://p80.pool.sks-keyservers.net:80 \
- pgp.mit.edu \
- ; do \
- echo "Fetching GPG key $NGINX_GPGKEY from $server"; \
- apt-key adv --keyserver "$server" --keyserver-options timeout=10 --recv-keys "$NGINX_GPGKEY" && found=yes && break; \
- done; \
- test -z "$found" && echo >&2 "error: failed to fetch GPG key $NGINX_GPGKEY" && exit 1; \
- apt-get remove --purge --auto-remove -y gnupg1 && rm -rf /var/lib/apt/lists/* \
- && dpkgArch="$(dpkg --print-architecture)" \
- && nginxPackages=" \
- nginx=${NGINX_VERSION} \
- nginx-module-xslt=${NGINX_VERSION} \
- nginx-module-geoip=${NGINX_VERSION} \
- nginx-module-image-filter=${NGINX_VERSION} \
- nginx-module-njs=${NJS_VERSION} \
- " \
- && case "$dpkgArch" in \
- amd64|i386) \
-# arches officialy built by upstream
- echo "deb https://nginx.org/packages/mainline/debian/ stretch nginx" >> /etc/apt/sources.list.d/nginx.list \
- && apt-get update \
- ;; \
- *) \
-# we're on an architecture upstream doesn't officially build for
-# let's build binaries from the published source packages
- echo "deb-src https://nginx.org/packages/mainline/debian/ stretch nginx" >> /etc/apt/sources.list.d/nginx.list \
- \
-# new directory for storing sources and .deb files
- && tempDir="$(mktemp -d)" \
- && chmod 777 "$tempDir" \
-# (777 to ensure APT's "_apt" user can access it too)
- \
-# save list of currently-installed packages so build dependencies can be cleanly removed later
- && savedAptMark="$(apt-mark showmanual)" \
- \
-# build .deb files from upstream's source packages (which are verified by apt-get)
- && apt-get update \
- && apt-get build-dep -y $nginxPackages \
- && ( \
- cd "$tempDir" \
- && DEB_BUILD_OPTIONS="nocheck parallel=$(nproc)" \
- apt-get source --compile $nginxPackages \
- ) \
-# we don't remove APT lists here because they get re-downloaded and removed later
- \
-# reset apt-mark's "manual" list so that "purge --auto-remove" will remove all build dependencies
-# (which is done after we install the built packages so we don't have to redownload any overlapping dependencies)
- && apt-mark showmanual | xargs apt-mark auto > /dev/null \
- && { [ -z "$savedAptMark" ] || apt-mark manual $savedAptMark; } \
- \
-# create a temporary local APT repo to install from (so that dependency resolution can be handled by APT, as it should be)
- && ls -lAFh "$tempDir" \
- && ( cd "$tempDir" && dpkg-scanpackages . > Packages ) \
- && grep '^Package: ' "$tempDir/Packages" \
- && echo "deb [ trusted=yes ] file://$tempDir ./" > /etc/apt/sources.list.d/temp.list \
-# work around the following APT issue by using "Acquire::GzipIndexes=false" (overriding "/etc/apt/apt.conf.d/docker-gzip-indexes")
-# Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
-# ...
-# E: Failed to fetch store:/var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
- && apt-get -o Acquire::GzipIndexes=false update \
- ;; \
- esac \
- \
- && apt-get install --no-install-recommends --no-install-suggests -y \
- $nginxPackages \
- gettext-base \
- && apt-get remove --purge --auto-remove -y apt-transport-https ca-certificates && rm -rf /var/lib/apt/lists/* /etc/apt/sources.list.d/nginx.list \
- \
-# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
- && if [ -n "$tempDir" ]; then \
- apt-get purge -y --auto-remove \
- && rm -rf "$tempDir" /etc/apt/sources.list.d/temp.list; \
- fi
-
-# forward request and error logs to docker log collector
-RUN ln -sf /dev/stdout /var/log/nginx/access.log \
- && ln -sf /dev/stderr /var/log/nginx/error.log
-
-EXPOSE 80
-
-STOPSIGNAL SIGTERM
-
-CMD ["nginx", "-g", "daemon off;"]
diff --git a/modules/Dockerfile b/modules/Dockerfile
new file mode 100644
index 00000000..61573c17
--- /dev/null
+++ b/modules/Dockerfile
@@ -0,0 +1,79 @@
+ARG NGINX_FROM_IMAGE=nginx:mainline
+FROM ${NGINX_FROM_IMAGE} as builder
+
+ARG ENABLED_MODULES
+
+SHELL ["/bin/bash", "-exo", "pipefail", "-c"]
+
+RUN if [ "$ENABLED_MODULES" = "" ]; then \
+ echo "No additional modules enabled, exiting"; \
+ exit 1; \
+ fi
+
+COPY ./ /modules/
+
+RUN apt-get update \
+ && apt-get install -y --no-install-suggests --no-install-recommends \
+ patch make wget git devscripts debhelper dpkg-dev \
+ quilt lsb-release build-essential libxml2-utils xsltproc \
+ equivs git g++ libparse-recdescent-perl \
+ && XSLSCRIPT_SHA512="f7194c5198daeab9b3b0c3aebf006922c7df1d345d454bd8474489ff2eb6b4bf8e2ffe442489a45d1aab80da6ecebe0097759a1e12cc26b5f0613d05b7c09ffa *stdin" \
+ && wget -O /tmp/xslscript.pl https://raw.githubusercontent.com/nginx/xslscript/9204424259c343ca08a18a78915f40f28025e093/xslscript.pl \
+ && if [ "$(cat /tmp/xslscript.pl | openssl sha512 -r)" = "$XSLSCRIPT_SHA512" ]; then \
+ echo "XSLScript checksum verification succeeded!"; \
+ chmod +x /tmp/xslscript.pl; \
+ mv /tmp/xslscript.pl /usr/local/bin/; \
+ else \
+ echo "XSLScript checksum verification failed!"; \
+ exit 1; \
+ fi \
+ && git clone -b ${NGINX_VERSION}-${PKG_RELEASE%%~*} https://github.com/nginx/pkg-oss/ \
+ && cd pkg-oss \
+ && mkdir /tmp/packages \
+ && for module in $ENABLED_MODULES; do \
+ echo "Building $module for nginx-$NGINX_VERSION"; \
+ if [ -d /modules/$module ]; then \
+ echo "Building $module from user-supplied sources"; \
+ # check if module sources file is there and not empty
+ if [ ! -s /modules/$module/source ]; then \
+ echo "No source file for $module in modules/$module/source, exiting"; \
+ exit 1; \
+ fi; \
+ # some modules require build dependencies
+ if [ -f /modules/$module/build-deps ]; then \
+ echo "Installing $module build dependencies"; \
+ apt-get update && apt-get install -y --no-install-suggests --no-install-recommends $(cat /modules/$module/build-deps | xargs); \
+ fi; \
+ # if a module has a build dependency that is not in a distro, provide a
+ # shell script to fetch/build/install those
+ # note that shared libraries produced as a result of this script will
+        # not be copied from the builder image to the main one, so build them statically
+ if [ -x /modules/$module/prebuild ]; then \
+ echo "Running prebuild script for $module"; \
+ /modules/$module/prebuild; \
+ fi; \
+ /pkg-oss/build_module.sh -v $NGINX_VERSION -f -y -o /tmp/packages -n $module $(cat /modules/$module/source); \
+ BUILT_MODULES="$BUILT_MODULES $(echo $module | tr '[A-Z]' '[a-z]' | tr -d '[/_\-\.\t ]')"; \
+ elif make -C /pkg-oss/debian list | grep -P "^$module\s+\d" > /dev/null; then \
+ echo "Building $module from pkg-oss sources"; \
+ cd /pkg-oss/debian; \
+ make rules-module-$module BASE_VERSION=$NGINX_VERSION NGINX_VERSION=$NGINX_VERSION; \
+ mk-build-deps --install --tool="apt-get -o Debug::pkgProblemResolver=yes --no-install-recommends --yes" debuild-module-$module/nginx-$NGINX_VERSION/debian/control; \
+ make module-$module BASE_VERSION=$NGINX_VERSION NGINX_VERSION=$NGINX_VERSION; \
+ find ../../ -maxdepth 1 -mindepth 1 -type f -name "*.deb" -exec mv -v {} /tmp/packages/ \;; \
+ BUILT_MODULES="$BUILT_MODULES $module"; \
+ else \
+ echo "Don't know how to build $module module, exiting"; \
+ exit 1; \
+ fi; \
+ done \
+ && echo "BUILT_MODULES=\"$BUILT_MODULES\"" > /tmp/packages/modules.env
+
+FROM ${NGINX_FROM_IMAGE}
+RUN --mount=type=bind,target=/tmp/packages/,source=/tmp/packages/,from=builder \
+ apt-get update \
+ && . /tmp/packages/modules.env \
+ && for module in $BUILT_MODULES; do \
+ apt-get install --no-install-suggests --no-install-recommends -y /tmp/packages/nginx-module-${module}_${NGINX_VERSION}*.deb; \
+ done \
+ && rm -rf /var/lib/apt/lists/
diff --git a/modules/Dockerfile.alpine b/modules/Dockerfile.alpine
new file mode 100644
index 00000000..9b305fff
--- /dev/null
+++ b/modules/Dockerfile.alpine
@@ -0,0 +1,69 @@
+ARG NGINX_FROM_IMAGE=nginx:mainline-alpine
+FROM ${NGINX_FROM_IMAGE} as builder
+
+ARG ENABLED_MODULES
+
+SHELL ["/bin/ash", "-exo", "pipefail", "-c"]
+
+RUN if [ "$ENABLED_MODULES" = "" ]; then \
+ echo "No additional modules enabled, exiting"; \
+ exit 1; \
+ fi
+
+COPY ./ /modules/
+
+RUN apk update \
+ && apk add linux-headers openssl-dev pcre2-dev zlib-dev openssl abuild \
+ musl-dev libxslt libxml2-utils make mercurial gcc unzip git \
+ xz g++ coreutils curl \
+ # allow abuild as a root user \
+ && printf "#!/bin/sh\\nSETFATTR=true /usr/bin/abuild -F \"\$@\"\\n" > /usr/local/bin/abuild \
+ && chmod +x /usr/local/bin/abuild \
+ && git clone -b ${NGINX_VERSION}-${PKG_RELEASE} https://github.com/nginx/pkg-oss/ \
+ && cd pkg-oss \
+ && mkdir /tmp/packages \
+ && for module in $ENABLED_MODULES; do \
+ echo "Building $module for nginx-$NGINX_VERSION"; \
+ if [ -d /modules/$module ]; then \
+ echo "Building $module from user-supplied sources"; \
+ # check if module sources file is there and not empty
+ if [ ! -s /modules/$module/source ]; then \
+ echo "No source file for $module in modules/$module/source, exiting"; \
+ exit 1; \
+ fi; \
+ # some modules require build dependencies
+ if [ -f /modules/$module/build-deps ]; then \
+ echo "Installing $module build dependencies"; \
+ apk update && apk add $(cat /modules/$module/build-deps | xargs); \
+ fi; \
+ # if a module has a build dependency that is not in a distro, provide a
+ # shell script to fetch/build/install those
+ # note that shared libraries produced as a result of this script will
+        # not be copied from the builder image to the main one, so build them statically
+ if [ -x /modules/$module/prebuild ]; then \
+ echo "Running prebuild script for $module"; \
+ /modules/$module/prebuild; \
+ fi; \
+ /pkg-oss/build_module.sh -v $NGINX_VERSION -f -y -o /tmp/packages -n $module $(cat /modules/$module/source); \
+ BUILT_MODULES="$BUILT_MODULES $(echo $module | tr '[A-Z]' '[a-z]' | tr -d '[/_\-\.\t ]')"; \
+ elif make -C /pkg-oss/alpine list | grep -E "^$module\s+\d+" > /dev/null; then \
+ echo "Building $module from pkg-oss sources"; \
+ cd /pkg-oss/alpine; \
+ make abuild-module-$module BASE_VERSION=$NGINX_VERSION NGINX_VERSION=$NGINX_VERSION; \
+ apk add $(. ./abuild-module-$module/APKBUILD; echo $makedepends;); \
+ make module-$module BASE_VERSION=$NGINX_VERSION NGINX_VERSION=$NGINX_VERSION; \
+ find ~/packages -type f -name "*.apk" -exec mv -v {} /tmp/packages/ \;; \
+ BUILT_MODULES="$BUILT_MODULES $module"; \
+ else \
+ echo "Don't know how to build $module module, exiting"; \
+ exit 1; \
+ fi; \
+ done \
+ && echo "BUILT_MODULES=\"$BUILT_MODULES\"" > /tmp/packages/modules.env
+
+FROM ${NGINX_FROM_IMAGE}
+RUN --mount=type=bind,target=/tmp/packages/,source=/tmp/packages/,from=builder \
+ . /tmp/packages/modules.env \
+ && for module in $BUILT_MODULES; do \
+ apk add --no-cache --allow-untrusted /tmp/packages/nginx-module-${module}-${NGINX_VERSION}*.apk; \
+ done
diff --git a/modules/README.md b/modules/README.md
new file mode 100644
index 00000000..624b97cc
--- /dev/null
+++ b/modules/README.md
@@ -0,0 +1,182 @@
+# Adding third-party modules to nginx official image
+
+It's possible to extend a mainline image with third-party modules, either
+built from your own instructions following a simple filesystem layout/syntax
+and the `build_module.sh` helper script, or by falling back to the package
+sources from [pkg-oss](https://github.com/nginx/pkg-oss).
+
+## Requirements
+
+To use the Dockerfiles provided here,
+[Docker BuildKit](https://docs.docker.com/build/buildkit/) is required.
+This is enabled by default as of version 23.0; for earlier versions this can be
+enabled by setting the environment variable `DOCKER_BUILDKIT` to `1`.
+
+If you cannot or do not want to use BuildKit, you can use a previous version
+of these files; see, for example,
+https://github.com/nginxinc/docker-nginx/tree/4bf0763f4977fff7e9648add59e0540088f3ca9f/modules.
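+
+For example, on an older Docker Engine the variable can be exported before
+running the build described below (a sketch; the module list and image tag are
+placeholders):
+
+```
+# only needed on Docker Engine versions older than 23.0
+export DOCKER_BUILDKIT=1
+docker build --build-arg ENABLED_MODULES="ndk lua" -t my-nginx-with-lua .
+```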
+
+## Usage
+
+```
+$ docker build --build-arg ENABLED_MODULES="ndk lua" -t my-nginx-with-lua .
+```
+
+This command will attempt to build an image called `my-nginx-with-lua` based on
+the official nginx Docker Hub image with two additional modules: `ndk` and
+`lua`. By default, a Debian-based image is used. If you wish to use Alpine
+instead, add `-f Dockerfile.alpine` to the command line. Likewise, mainline
+images are used as a base by default, but it's possible to specify a different
+image by providing the `NGINX_FROM_IMAGE` build argument, e.g. `--build-arg
+NGINX_FROM_IMAGE=nginx:stable`.
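+
+For instance, an Alpine-based build on top of the stable image could look like
+this (a sketch; the resulting tag name is arbitrary):
+
+```
+$ docker build --build-arg ENABLED_MODULES="ndk lua" \
+    --build-arg NGINX_FROM_IMAGE=nginx:stable-alpine \
+    -f Dockerfile.alpine -t my-nginx-stable-alpine-with-lua .
+```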
+
+The build script will look for module build definition files in a filesystem
+directory with the same name as the module (and the resulting package); if
+those are not found, it will try to look up the requested modules in the
+pkg-oss repository.
+
+For well-known modules we maintain a set of build source packages in
+`pkg-oss`, so it's probably a good idea to rely on those instead of providing
+your own implementation.
+
+As of the time of writing this README, the following modules and their versions
+are available from the `pkg-oss` repository:
+
+```
+/pkg-oss $ LC_ALL=C make -C debian list-all-modules
+auth-spnego 1.1.1-1
+brotli 1.0.0-1
+encrypted-session 0.09-1
+fips-check 0.1-1
+geoip 1.25.5-1
+geoip2 3.4-1
+headers-more 0.35-1
+image-filter 1.25.5-1
+lua 0.10.26-1
+ndk 0.3.3-1
+njs 0.8.4-2
+opentracing 0.33.0-1
+otel 0.1.0-1
+passenger 6.0.19-1
+perl 1.25.5-1
+rtmp 1.2.2-1
+set-misc 0.33-1
+subs-filter 0.6.4-1
+xslt 1.25.5-1
+```
+
+If you still want to provide your own build instructions for a specific module,
+organize the build directory in the following way, e.g. for the `echo` module:
+
+```
+docker-nginx/modules $ tree echo
+echo
+├── build-deps
+├── prebuild
+└── source
+
+0 directories, 3 files
+```
+
+The scripts expect one file to always exist for a module you wish to build
+manually: `source`. It should contain a link to a zip/tarball of the source
+code of the module you want to build. In `build-deps` you can specify build
+dependencies for the module as found in the Debian or Alpine repositories.
+`prebuild` is a shell script (make it executable with `chmod +x prebuild`!)
+that will be executed prior to building the module but after installing the
+dependencies, so it can be used to install additional build dependencies if
+they are not available from Debian or Alpine. Keep in mind that those
+dependencies won't be automatically copied to the resulting image, so if
+you're building a library, build it statically.
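+
+As an illustration, a minimal module directory could be prepared like this
+(a sketch; the module name, source URL and build dependency below are
+placeholders, not real projects):
+
+```
+mkdir mymodule
+echo "https://example.com/mymodule/archive/v1.0.tar.gz" > mymodule/source
+echo "libfoo-dev" > mymodule/build-deps
+printf '#!/bin/sh\necho "nothing to prebuild"\n' > mymodule/prebuild
+chmod +x mymodule/prebuild
+```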
+
+Once the build is done in the builder image, the built packages are copied over
+to the resulting image and installed via apt/apk. The resulting image will be
+tagged and can be used the same way as an official Docker Hub image.
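+
+For example, the image built in the Usage section above can be started just
+like the stock image, and the installed dynamic modules can be inspected
+inside the container (container name and port mapping are arbitrary):
+
+```
+$ docker run -d --name nginx-lua -p 80:80 my-nginx-with-lua
+$ docker exec nginx-lua ls /etc/nginx/modules
+```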
+
+Note that we cannot provide any support for these modifications and in no way
+guarantee they will work as well as a build without third-party modules. If
+you encounter any issues running your image with the modules enabled, please
+try to reproduce them with a vanilla image first.
+
+## Examples
+
+### docker-compose with pre-packaged modules
+
+If the desired modules are already packaged in
+[pkg-oss](https://github.com/nginx/pkg-oss/) (e.g. `debian/Makefile.module-*`
+exists for a given module), you can use this example.
+
+1. Create a directory for your project:
+
+```
+mkdir myapp
+cd myapp
+```
+
+2. Populate the build context for a custom nginx image:
+
+```
+mkdir my-nginx
+curl -o my-nginx/Dockerfile https://raw.githubusercontent.com/nginxinc/docker-nginx/master/modules/Dockerfile
+```
+
+3. Create a `docker-compose.yml` file:
+
+```
+cat > docker-compose.yml << __EOF__
+version: "3.3"
+services:
+ web:
+ build:
+ context: ./my-nginx/
+ args:
+ ENABLED_MODULES: ndk lua
+ image: my-nginx-with-lua:v1
+ ports:
+ - "80:8000"
+__EOF__
+```
+
+Now, running `docker-compose up --build -d` will build the image and run the application for you.
+
+### docker-compose with a non-packaged module
+
+If a needed module is not available via `pkg-oss`, you can use this example.
+
+We're going to build the image with [ngx_cache_purge](https://github.com/FRiCKLE/ngx_cache_purge) module.
+
+The steps are similar to the previous example, the notable difference being
+that you provide a URL to fetch the module source code from.
+
+1. Create a directory for your project:
+
+```
+mkdir myapp-cache
+cd myapp-cache
+```
+
+2. Populate the build context for a custom nginx image:
+
+```
+mkdir my-nginx
+curl -o my-nginx/Dockerfile https://raw.githubusercontent.com/nginxinc/docker-nginx/master/modules/Dockerfile
+mkdir my-nginx/cachepurge
+echo "https://github.com/FRiCKLE/ngx_cache_purge/archive/2.3.tar.gz" > my-nginx/cachepurge/source
+```
+
+3. Create a `docker-compose.yml` file:
+
+```
+cat > docker-compose.yml << __EOF__
+version: "3.3"
+services:
+ web:
+ build:
+ context: ./my-nginx/
+ args:
+ ENABLED_MODULES: cachepurge
+ image: my-nginx-with-cachepurge:v1
+ ports:
+ - "80:8080"
+__EOF__
+```
+
+Now, running `docker-compose up --build -d` will build the image and run the application for you.
diff --git a/modules/echo/build-deps b/modules/echo/build-deps
new file mode 100644
index 00000000..1ccfbc2f
--- /dev/null
+++ b/modules/echo/build-deps
@@ -0,0 +1 @@
+make gcc
diff --git a/modules/echo/prebuild b/modules/echo/prebuild
new file mode 100755
index 00000000..cd2864b0
--- /dev/null
+++ b/modules/echo/prebuild
@@ -0,0 +1,12 @@
+#!/bin/sh
+
+# if a module has a build dependency that is not in debian/alpine
+# use this script to fetch/build/install them
+#
+# note that shared libraries produced as a result of this script will
+# not be copied from the builder image to the resulting one, so you need to
+# build them statically
+
+echo "No prebuild stage required - all dependencies are satisfied already!"
+
+exit 0
diff --git a/modules/echo/source b/modules/echo/source
new file mode 100644
index 00000000..3a6ad274
--- /dev/null
+++ b/modules/echo/source
@@ -0,0 +1 @@
+https://github.com/openresty/echo-nginx-module/archive/v0.62.tar.gz
diff --git a/stable/alpine-otel/Dockerfile b/stable/alpine-otel/Dockerfile
new file mode 100644
index 00000000..ccfa9139
--- /dev/null
+++ b/stable/alpine-otel/Dockerfile
@@ -0,0 +1,77 @@
+#
+# NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh"
+#
+# PLEASE DO NOT EDIT IT DIRECTLY.
+#
+FROM nginx:1.26.2-alpine
+
+ENV OTEL_VERSION 0.1.0
+
+RUN set -x \
+ && apkArch="$(cat /etc/apk/arch)" \
+ && nginxPackages=" \
+ nginx=${NGINX_VERSION}-r${PKG_RELEASE} \
+ nginx-module-xslt=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
+ nginx-module-geoip=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
+ nginx-module-image-filter=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
+ nginx-module-njs=${NGINX_VERSION}.${NJS_VERSION}-r${NJS_RELEASE} \
+ nginx-module-otel=${NGINX_VERSION}.${OTEL_VERSION}-r${PKG_RELEASE} \
+ " \
+# install prerequisites for public key and pkg-oss checks
+ && apk add --no-cache --virtual .checksum-deps \
+ openssl \
+ && case "$apkArch" in \
+ x86_64|aarch64) \
+# arches officially built by upstream
+ apk add -X "https://nginx.org/packages/alpine/v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" --no-cache $nginxPackages \
+ ;; \
+ *) \
+# we're on an architecture upstream doesn't officially build for
+# let's build binaries from the published packaging sources
+ set -x \
+ && tempDir="$(mktemp -d)" \
+ && chown nobody:nobody $tempDir \
+ && apk add --no-cache --virtual .build-deps \
+ gcc \
+ libc-dev \
+ make \
+ openssl-dev \
+ pcre2-dev \
+ zlib-dev \
+ linux-headers \
+ cmake \
+ bash \
+ alpine-sdk \
+ findutils \
+ curl \
+ xz \
+ re2-dev \
+ c-ares-dev \
+ && su nobody -s /bin/sh -c " \
+ export HOME=${tempDir} \
+ && cd ${tempDir} \
+ && curl -f -L -O https://github.com/nginx/pkg-oss/archive/f43e929dc7a6111ef5d9ecb281a75749f7934261.tar.gz \
+ && PKGOSSCHECKSUM=\"315e9e9040253396ebd9f540557e69cda7d9754a7895c3bf04fbf79d43be8d56e8efc6c22c21c87632039340080511179946456bbc4660e8faf171d130b475a6 *f43e929dc7a6111ef5d9ecb281a75749f7934261.tar.gz\" \
+ && if [ \"\$(openssl sha512 -r f43e929dc7a6111ef5d9ecb281a75749f7934261.tar.gz)\" = \"\$PKGOSSCHECKSUM\" ]; then \
+ echo \"pkg-oss tarball checksum verification succeeded!\"; \
+ else \
+ echo \"pkg-oss tarball checksum verification failed!\"; \
+ exit 1; \
+ fi \
+ && tar xzvf f43e929dc7a6111ef5d9ecb281a75749f7934261.tar.gz \
+ && cd pkg-oss-f43e929dc7a6111ef5d9ecb281a75749f7934261 \
+ && cd alpine \
+ && make module-otel \
+ && apk index --allow-untrusted -o ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz ${tempDir}/packages/alpine/${apkArch}/*.apk \
+ && abuild-sign -k ${tempDir}/.abuild/abuild-key.rsa ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz \
+ " \
+ && cp ${tempDir}/.abuild/abuild-key.rsa.pub /etc/apk/keys/ \
+ && apk del --no-network .build-deps \
+ && apk add -X ${tempDir}/packages/alpine/ --no-cache $nginxPackages \
+ ;; \
+ esac \
+# remove checksum deps
+ && apk del --no-network .checksum-deps \
+# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
+ && if [ -n "$tempDir" ]; then rm -rf "$tempDir"; fi \
+ && if [ -f "/etc/apk/keys/abuild-key.rsa.pub" ]; then rm -f /etc/apk/keys/abuild-key.rsa.pub; fi
diff --git a/stable/alpine-perl/Dockerfile b/stable/alpine-perl/Dockerfile
index f5a2d014..f64de368 100644
--- a/stable/alpine-perl/Dockerfile
+++ b/stable/alpine-perl/Dockerfile
@@ -1,151 +1,72 @@
-FROM alpine:3.7
+#
+# NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh"
+#
+# PLEASE DO NOT EDIT IT DIRECTLY.
+#
+FROM nginx:1.26.2-alpine
-LABEL maintainer="NGINX Docker Maintainers <docker-maint@nginx.com>"
-
-ENV NGINX_VERSION 1.14.0
-
-RUN GPG_KEYS=B0F4253373F8F6F510D42178520A9993A1C052F8 \
- && CONFIG="\
- --prefix=/etc/nginx \
- --sbin-path=/usr/sbin/nginx \
- --modules-path=/usr/lib/nginx/modules \
- --conf-path=/etc/nginx/nginx.conf \
- --error-log-path=/var/log/nginx/error.log \
- --http-log-path=/var/log/nginx/access.log \
- --pid-path=/var/run/nginx.pid \
- --lock-path=/var/run/nginx.lock \
- --http-client-body-temp-path=/var/cache/nginx/client_temp \
- --http-proxy-temp-path=/var/cache/nginx/proxy_temp \
- --http-fastcgi-temp-path=/var/cache/nginx/fastcgi_temp \
- --http-uwsgi-temp-path=/var/cache/nginx/uwsgi_temp \
- --http-scgi-temp-path=/var/cache/nginx/scgi_temp \
- --user=nginx \
- --group=nginx \
- --with-http_ssl_module \
- --with-http_realip_module \
- --with-http_addition_module \
- --with-http_sub_module \
- --with-http_dav_module \
- --with-http_flv_module \
- --with-http_mp4_module \
- --with-http_gunzip_module \
- --with-http_gzip_static_module \
- --with-http_random_index_module \
- --with-http_secure_link_module \
- --with-http_stub_status_module \
- --with-http_auth_request_module \
- --with-http_xslt_module=dynamic \
- --with-http_image_filter_module=dynamic \
- --with-http_geoip_module=dynamic \
- --with-http_perl_module=dynamic \
- --with-threads \
- --with-stream \
- --with-stream_ssl_module \
- --with-stream_ssl_preread_module \
- --with-stream_realip_module \
- --with-stream_geoip_module=dynamic \
- --with-http_slice_module \
- --with-mail \
- --with-mail_ssl_module \
- --with-compat \
- --with-file-aio \
- --with-http_v2_module \
- " \
- && addgroup -S nginx \
- && adduser -D -S -h /var/cache/nginx -s /sbin/nologin -G nginx nginx \
- && apk add --no-cache --virtual .build-deps \
- gcc \
- libc-dev \
- make \
- openssl-dev \
- pcre-dev \
- zlib-dev \
- linux-headers \
- curl \
- gnupg \
- libxslt-dev \
- gd-dev \
- geoip-dev \
- perl-dev \
- && curl -fSL https://nginx.org/download/nginx-$NGINX_VERSION.tar.gz -o nginx.tar.gz \
- && curl -fSL https://nginx.org/download/nginx-$NGINX_VERSION.tar.gz.asc -o nginx.tar.gz.asc \
- && export GNUPGHOME="$(mktemp -d)" \
- && found=''; \
- for server in \
- ha.pool.sks-keyservers.net \
- hkp://keyserver.ubuntu.com:80 \
- hkp://p80.pool.sks-keyservers.net:80 \
- pgp.mit.edu \
- ; do \
- echo "Fetching GPG key $GPG_KEYS from $server"; \
- gpg --keyserver "$server" --keyserver-options timeout=10 --recv-keys "$GPG_KEYS" && found=yes && break; \
- done; \
- test -z "$found" && echo >&2 "error: failed to fetch GPG key $GPG_KEYS" && exit 1; \
- gpg --batch --verify nginx.tar.gz.asc nginx.tar.gz \
- && rm -rf "$GNUPGHOME" nginx.tar.gz.asc \
- && mkdir -p /usr/src \
- && tar -zxC /usr/src -f nginx.tar.gz \
- && rm nginx.tar.gz \
- && cd /usr/src/nginx-$NGINX_VERSION \
- && ./configure $CONFIG --with-debug \
- && make -j$(getconf _NPROCESSORS_ONLN) \
- && mv objs/nginx objs/nginx-debug \
- && mv objs/ngx_http_xslt_filter_module.so objs/ngx_http_xslt_filter_module-debug.so \
- && mv objs/ngx_http_image_filter_module.so objs/ngx_http_image_filter_module-debug.so \
- && mv objs/ngx_http_geoip_module.so objs/ngx_http_geoip_module-debug.so \
- && mv objs/ngx_http_perl_module.so objs/ngx_http_perl_module-debug.so \
- && mv objs/ngx_stream_geoip_module.so objs/ngx_stream_geoip_module-debug.so \
- && ./configure $CONFIG \
- && make -j$(getconf _NPROCESSORS_ONLN) \
- && make install \
- && rm -rf /etc/nginx/html/ \
- && mkdir /etc/nginx/conf.d/ \
- && mkdir -p /usr/share/nginx/html/ \
- && install -m644 html/index.html /usr/share/nginx/html/ \
- && install -m644 html/50x.html /usr/share/nginx/html/ \
- && install -m755 objs/nginx-debug /usr/sbin/nginx-debug \
- && install -m755 objs/ngx_http_xslt_filter_module-debug.so /usr/lib/nginx/modules/ngx_http_xslt_filter_module-debug.so \
- && install -m755 objs/ngx_http_image_filter_module-debug.so /usr/lib/nginx/modules/ngx_http_image_filter_module-debug.so \
- && install -m755 objs/ngx_http_geoip_module-debug.so /usr/lib/nginx/modules/ngx_http_geoip_module-debug.so \
- && install -m755 objs/ngx_http_perl_module-debug.so /usr/lib/nginx/modules/ngx_http_perl_module-debug.so \
- && install -m755 objs/ngx_stream_geoip_module-debug.so /usr/lib/nginx/modules/ngx_stream_geoip_module-debug.so \
- && ln -s ../../usr/lib/nginx/modules /etc/nginx/modules \
- && strip /usr/sbin/nginx* \
- && strip /usr/lib/nginx/modules/*.so \
- && rm -rf /usr/src/nginx-$NGINX_VERSION \
- \
- # Bring in gettext so we can get `envsubst`, then throw
- # the rest away. To do this, we need to install `gettext`
- # then move `envsubst` out of the way so `gettext` can
- # be deleted completely, then move `envsubst` back.
- && apk add --no-cache --virtual .gettext gettext \
- && mv /usr/bin/envsubst /tmp/ \
- \
- && runDeps="$( \
- scanelf --needed --nobanner /usr/sbin/nginx /usr/lib/nginx/modules/*.so /tmp/envsubst \
- | awk '{ gsub(/,/, "\nso:", $2); print "so:" $2 }' \
- | sort -u \
- | xargs -r apk info --installed \
- | sort -u \
- )" \
- && apk add --no-cache --virtual .nginx-rundeps $runDeps \
- && apk del .build-deps \
- && apk del .gettext \
- && mv /tmp/envsubst /usr/local/bin/ \
- \
- # Bring in tzdata so users could set the timezones through the environment
- # variables
- && apk add --no-cache tzdata \
- \
- # forward request and error logs to docker log collector
- && ln -sf /dev/stdout /var/log/nginx/access.log \
- && ln -sf /dev/stderr /var/log/nginx/error.log
-
-COPY nginx.conf /etc/nginx/nginx.conf
-COPY nginx.vh.default.conf /etc/nginx/conf.d/default.conf
-
-EXPOSE 80
-
-STOPSIGNAL SIGTERM
-
-CMD ["nginx", "-g", "daemon off;"]
+RUN set -x \
+ && apkArch="$(cat /etc/apk/arch)" \
+ && nginxPackages=" \
+ nginx=${NGINX_VERSION}-r${PKG_RELEASE} \
+ nginx-module-xslt=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
+ nginx-module-geoip=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
+ nginx-module-image-filter=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
+ nginx-module-perl=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
+ nginx-module-njs=${NGINX_VERSION}.${NJS_VERSION}-r${NJS_RELEASE} \
+ " \
+# install prerequisites for public key and pkg-oss checks
+ && apk add --no-cache --virtual .checksum-deps \
+ openssl \
+ && case "$apkArch" in \
+ x86_64|aarch64) \
+# arches officially built by upstream
+ apk add -X "https://nginx.org/packages/alpine/v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" --no-cache $nginxPackages \
+ ;; \
+ *) \
+# we're on an architecture upstream doesn't officially build for
+# let's build binaries from the published packaging sources
+ set -x \
+ && tempDir="$(mktemp -d)" \
+ && chown nobody:nobody $tempDir \
+ && apk add --no-cache --virtual .build-deps \
+ gcc \
+ libc-dev \
+ make \
+ openssl-dev \
+ pcre2-dev \
+ zlib-dev \
+ linux-headers \
+ perl-dev \
+ bash \
+ alpine-sdk \
+ findutils \
+ curl \
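+# build the module packages and sign the temporary APK index as the unprivileged "nobody" user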
+ && su nobody -s /bin/sh -c " \
+ export HOME=${tempDir} \
+ && cd ${tempDir} \
+ && curl -f -L -O https://github.com/nginx/pkg-oss/archive/f43e929dc7a6111ef5d9ecb281a75749f7934261.tar.gz \
+ && PKGOSSCHECKSUM=\"315e9e9040253396ebd9f540557e69cda7d9754a7895c3bf04fbf79d43be8d56e8efc6c22c21c87632039340080511179946456bbc4660e8faf171d130b475a6 *f43e929dc7a6111ef5d9ecb281a75749f7934261.tar.gz\" \
+ && if [ \"\$(openssl sha512 -r f43e929dc7a6111ef5d9ecb281a75749f7934261.tar.gz)\" = \"\$PKGOSSCHECKSUM\" ]; then \
+ echo \"pkg-oss tarball checksum verification succeeded!\"; \
+ else \
+ echo \"pkg-oss tarball checksum verification failed!\"; \
+ exit 1; \
+ fi \
+ && tar xzvf f43e929dc7a6111ef5d9ecb281a75749f7934261.tar.gz \
+ && cd pkg-oss-f43e929dc7a6111ef5d9ecb281a75749f7934261 \
+ && cd alpine \
+ && make module-perl \
+ && apk index --allow-untrusted -o ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz ${tempDir}/packages/alpine/${apkArch}/*.apk \
+ && abuild-sign -k ${tempDir}/.abuild/abuild-key.rsa ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz \
+ " \
+ && cp ${tempDir}/.abuild/abuild-key.rsa.pub /etc/apk/keys/ \
+ && apk del --no-network .build-deps \
+ && apk add -X ${tempDir}/packages/alpine/ --no-cache $nginxPackages \
+ ;; \
+ esac \
+# remove checksum deps
+ && apk del --no-network .checksum-deps \
+# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
+ && if [ -n "$tempDir" ]; then rm -rf "$tempDir"; fi \
+ && if [ -f "/etc/apk/keys/abuild-key.rsa.pub" ]; then rm -f /etc/apk/keys/abuild-key.rsa.pub; fi
diff --git a/stable/alpine-perl/nginx.conf b/stable/alpine-perl/nginx.conf
deleted file mode 100644
index e4bad8db..00000000
--- a/stable/alpine-perl/nginx.conf
+++ /dev/null
@@ -1,32 +0,0 @@
-
-user nginx;
-worker_processes 1;
-
-error_log /var/log/nginx/error.log warn;
-pid /var/run/nginx.pid;
-
-
-events {
- worker_connections 1024;
-}
-
-
-http {
- include /etc/nginx/mime.types;
- default_type application/octet-stream;
-
- log_format main '$remote_addr - $remote_user [$time_local] "$request" '
- '$status $body_bytes_sent "$http_referer" '
- '"$http_user_agent" "$http_x_forwarded_for"';
-
- access_log /var/log/nginx/access.log main;
-
- sendfile on;
- #tcp_nopush on;
-
- keepalive_timeout 65;
-
- #gzip on;
-
- include /etc/nginx/conf.d/*.conf;
-}
diff --git a/stable/alpine-perl/nginx.vh.default.conf b/stable/alpine-perl/nginx.vh.default.conf
deleted file mode 100644
index 299c622a..00000000
--- a/stable/alpine-perl/nginx.vh.default.conf
+++ /dev/null
@@ -1,45 +0,0 @@
-server {
- listen 80;
- server_name localhost;
-
- #charset koi8-r;
- #access_log /var/log/nginx/host.access.log main;
-
- location / {
- root /usr/share/nginx/html;
- index index.html index.htm;
- }
-
- #error_page 404 /404.html;
-
- # redirect server error pages to the static page /50x.html
- #
- error_page 500 502 503 504 /50x.html;
- location = /50x.html {
- root /usr/share/nginx/html;
- }
-
- # proxy the PHP scripts to Apache listening on 127.0.0.1:80
- #
- #location ~ \.php$ {
- # proxy_pass http://127.0.0.1;
- #}
-
- # pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000
- #
- #location ~ \.php$ {
- # root html;
- # fastcgi_pass 127.0.0.1:9000;
- # fastcgi_index index.php;
- # fastcgi_param SCRIPT_FILENAME /scripts$fastcgi_script_name;
- # include fastcgi_params;
- #}
-
- # deny access to .htaccess files, if Apache's document root
- # concurs with nginx's one
- #
- #location ~ /\.ht {
- # deny all;
- #}
-}
-
diff --git a/stable/alpine-slim/10-listen-on-ipv6-by-default.sh b/stable/alpine-slim/10-listen-on-ipv6-by-default.sh
new file mode 100755
index 00000000..b90bf0c9
--- /dev/null
+++ b/stable/alpine-slim/10-listen-on-ipv6-by-default.sh
@@ -0,0 +1,67 @@
+#!/bin/sh
+# vim:sw=4:ts=4:et
+
+set -e
+
+entrypoint_log() {
+ if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then
+ echo "$@"
+ fi
+}
+
+ME=$(basename "$0")
+DEFAULT_CONF_FILE="etc/nginx/conf.d/default.conf"
+
+# check if we have ipv6 available
+if [ ! -f "/proc/net/if_inet6" ]; then
+ entrypoint_log "$ME: info: ipv6 not available"
+ exit 0
+fi
+
+if [ ! -f "/$DEFAULT_CONF_FILE" ]; then
+ entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE is not a file or does not exist"
+ exit 0
+fi
+
+# check if the file can be modified, e.g. not on a r/o filesystem
+touch /$DEFAULT_CONF_FILE 2>/dev/null || { entrypoint_log "$ME: info: can not modify /$DEFAULT_CONF_FILE (read-only file system?)"; exit 0; }
+
+# check if the file is already modified, e.g. on a container restart
+grep -q "listen \[::]\:80;" /$DEFAULT_CONF_FILE && { entrypoint_log "$ME: info: IPv6 listen already enabled"; exit 0; }
+
+if [ -f "/etc/os-release" ]; then
+ . /etc/os-release
+else
+ entrypoint_log "$ME: info: can not guess the operating system"
+ exit 0
+fi
+
+entrypoint_log "$ME: info: Getting the checksum of /$DEFAULT_CONF_FILE"
+
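+# only modify the file if it still matches the checksum of the packaged version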
+case "$ID" in
+ "debian")
+ CHECKSUM=$(dpkg-query --show --showformat='${Conffiles}\n' nginx | grep $DEFAULT_CONF_FILE | cut -d' ' -f 3)
+ echo "$CHECKSUM /$DEFAULT_CONF_FILE" | md5sum -c - >/dev/null 2>&1 || {
+ entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE differs from the packaged version"
+ exit 0
+ }
+ ;;
+ "alpine")
+ CHECKSUM=$(apk manifest nginx 2>/dev/null| grep $DEFAULT_CONF_FILE | cut -d' ' -f 1 | cut -d ':' -f 2)
+ echo "$CHECKSUM /$DEFAULT_CONF_FILE" | sha1sum -c - >/dev/null 2>&1 || {
+ entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE differs from the packaged version"
+ exit 0
+ }
+ ;;
+ *)
+ entrypoint_log "$ME: info: Unsupported distribution"
+ exit 0
+ ;;
+esac
+
+# enable ipv6 on default.conf listen sockets
+sed -i -E 's,listen 80;,listen 80;\n listen [::]:80;,' /$DEFAULT_CONF_FILE
+
+entrypoint_log "$ME: info: Enabled listen on IPv6 in /$DEFAULT_CONF_FILE"
+
+exit 0
diff --git a/stable/alpine-slim/15-local-resolvers.envsh b/stable/alpine-slim/15-local-resolvers.envsh
new file mode 100755
index 00000000..e830ddac
--- /dev/null
+++ b/stable/alpine-slim/15-local-resolvers.envsh
@@ -0,0 +1,15 @@
+#!/bin/sh
+# vim:sw=2:ts=2:sts=2:et
+
+set -eu
+
+LC_ALL=C
+PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+
+[ "${NGINX_ENTRYPOINT_LOCAL_RESOLVERS:-}" ] || return 0
+
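+# collect nameservers from /etc/resolv.conf, wrapping IPv6 addresses in brackets so they can be used in a resolver directive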
+NGINX_LOCAL_RESOLVERS=$(awk 'BEGIN{ORS=" "} $1=="nameserver" {if ($2 ~ ":") {print "["$2"]"} else {print $2}}' /etc/resolv.conf)
+
+NGINX_LOCAL_RESOLVERS="${NGINX_LOCAL_RESOLVERS% }"
+
+export NGINX_LOCAL_RESOLVERS
diff --git a/stable/alpine-slim/20-envsubst-on-templates.sh b/stable/alpine-slim/20-envsubst-on-templates.sh
new file mode 100755
index 00000000..3804165c
--- /dev/null
+++ b/stable/alpine-slim/20-envsubst-on-templates.sh
@@ -0,0 +1,78 @@
+#!/bin/sh
+
+set -e
+
+ME=$(basename "$0")
+
+entrypoint_log() {
+ if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then
+ echo "$@"
+ fi
+}
+
+add_stream_block() {
+ local conffile="/etc/nginx/nginx.conf"
+
+ if grep -q -E "\s*stream\s*\{" "$conffile"; then
+ entrypoint_log "$ME: $conffile contains a stream block; include $stream_output_dir/*.conf to enable stream templates"
+ else
+ # check if the file can be modified, e.g. not on a r/o filesystem
+ touch "$conffile" 2>/dev/null || { entrypoint_log "$ME: info: can not modify $conffile (read-only file system?)"; exit 0; }
+ entrypoint_log "$ME: Appending stream block to $conffile to include $stream_output_dir/*.conf"
+ cat << END >> "$conffile"
+# added by "$ME" on "$(date)"
+stream {
+ include $stream_output_dir/*.conf;
+}
+END
+ fi
+}
+
+auto_envsubst() {
+ local template_dir="${NGINX_ENVSUBST_TEMPLATE_DIR:-/etc/nginx/templates}"
+ local suffix="${NGINX_ENVSUBST_TEMPLATE_SUFFIX:-.template}"
+ local output_dir="${NGINX_ENVSUBST_OUTPUT_DIR:-/etc/nginx/conf.d}"
+ local stream_suffix="${NGINX_ENVSUBST_STREAM_TEMPLATE_SUFFIX:-.stream-template}"
+ local stream_output_dir="${NGINX_ENVSUBST_STREAM_OUTPUT_DIR:-/etc/nginx/stream-conf.d}"
+ local filter="${NGINX_ENVSUBST_FILTER:-}"
+
+ local template defined_envs relative_path output_path subdir
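+  # build a list like '${FOO} ${BAR} ' of exported variable names (optionally restricted by NGINX_ENVSUBST_FILTER) so envsubst only substitutes those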
+ defined_envs=$(printf '${%s} ' $(awk "END { for (name in ENVIRON) { print ( name ~ /${filter}/ ) ? name : \"\" } }" < /dev/null ))
+ [ -d "$template_dir" ] || return 0
+ if [ ! -w "$output_dir" ]; then
+ entrypoint_log "$ME: ERROR: $template_dir exists, but $output_dir is not writable"
+ return 0
+ fi
+ find "$template_dir" -follow -type f -name "*$suffix" -print | while read -r template; do
+ relative_path="${template#"$template_dir/"}"
+ output_path="$output_dir/${relative_path%"$suffix"}"
+ subdir=$(dirname "$relative_path")
+ # create a subdirectory where the template file exists
+ mkdir -p "$output_dir/$subdir"
+ entrypoint_log "$ME: Running envsubst on $template to $output_path"
+ envsubst "$defined_envs" < "$template" > "$output_path"
+ done
+
+  # Print the first file with the stream suffix; if there are none, the test is false
+ if test -n "$(find "$template_dir" -name "*$stream_suffix" -print -quit)"; then
+ mkdir -p "$stream_output_dir"
+ if [ ! -w "$stream_output_dir" ]; then
+ entrypoint_log "$ME: ERROR: $template_dir exists, but $stream_output_dir is not writable"
+ return 0
+ fi
+ add_stream_block
+ find "$template_dir" -follow -type f -name "*$stream_suffix" -print | while read -r template; do
+ relative_path="${template#"$template_dir/"}"
+ output_path="$stream_output_dir/${relative_path%"$stream_suffix"}"
+ subdir=$(dirname "$relative_path")
+ # create a subdirectory where the template file exists
+ mkdir -p "$stream_output_dir/$subdir"
+ entrypoint_log "$ME: Running envsubst on $template to $output_path"
+ envsubst "$defined_envs" < "$template" > "$output_path"
+ done
+ fi
+}
+
+auto_envsubst
+
+exit 0
diff --git a/stable/alpine-slim/30-tune-worker-processes.sh b/stable/alpine-slim/30-tune-worker-processes.sh
new file mode 100755
index 00000000..defb994f
--- /dev/null
+++ b/stable/alpine-slim/30-tune-worker-processes.sh
@@ -0,0 +1,188 @@
+#!/bin/sh
+# vim:sw=2:ts=2:sts=2:et
+
+set -eu
+
+LC_ALL=C
+ME=$(basename "$0")
+PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+
+[ "${NGINX_ENTRYPOINT_WORKER_PROCESSES_AUTOTUNE:-}" ] || exit 0
+
+touch /etc/nginx/nginx.conf 2>/dev/null || { echo >&2 "$ME: error: can not modify /etc/nginx/nginx.conf (read-only file system?)"; exit 0; }
+
+ceildiv() {
+ num=$1
+ div=$2
+ echo $(( (num + div - 1) / div ))
+}
+
+get_cpuset() {
+ cpusetroot=$1
+ cpusetfile=$2
+ ncpu=0
+ [ -f "$cpusetroot/$cpusetfile" ] || return 1
+ for token in $( tr ',' ' ' < "$cpusetroot/$cpusetfile" ); do
+ case "$token" in
+ *-*)
+ count=$( seq $(echo "$token" | tr '-' ' ') | wc -l )
+ ncpu=$(( ncpu+count ))
+ ;;
+ *)
+ ncpu=$(( ncpu+1 ))
+ ;;
+ esac
+ done
+ echo "$ncpu"
+}
+
+get_quota() {
+ cpuroot=$1
+ ncpu=0
+ [ -f "$cpuroot/cpu.cfs_quota_us" ] || return 1
+ [ -f "$cpuroot/cpu.cfs_period_us" ] || return 1
+ cfs_quota=$( cat "$cpuroot/cpu.cfs_quota_us" )
+ cfs_period=$( cat "$cpuroot/cpu.cfs_period_us" )
+ [ "$cfs_quota" = "-1" ] && return 1
+ [ "$cfs_period" = "0" ] && return 1
+ ncpu=$( ceildiv "$cfs_quota" "$cfs_period" )
+ [ "$ncpu" -gt 0 ] || return 1
+ echo "$ncpu"
+}
+
+get_quota_v2() {
+ cpuroot=$1
+ ncpu=0
+ [ -f "$cpuroot/cpu.max" ] || return 1
+ cfs_quota=$( cut -d' ' -f 1 < "$cpuroot/cpu.max" )
+ cfs_period=$( cut -d' ' -f 2 < "$cpuroot/cpu.max" )
+ [ "$cfs_quota" = "max" ] && return 1
+ [ "$cfs_period" = "0" ] && return 1
+ ncpu=$( ceildiv "$cfs_quota" "$cfs_period" )
+ [ "$ncpu" -gt 0 ] || return 1
+ echo "$ncpu"
+}
+
+get_cgroup_v1_path() {
+ needle=$1
+ found=
+ foundroot=
+ mountpoint=
+
+ [ -r "/proc/self/mountinfo" ] || return 1
+ [ -r "/proc/self/cgroup" ] || return 1
+
+ while IFS= read -r line; do
+ case "$needle" in
+ "cpuset")
+ case "$line" in
+ *cpuset*)
+ found=$( echo "$line" | cut -d ' ' -f 4,5 )
+ break
+ ;;
+ esac
+ ;;
+ "cpu")
+ case "$line" in
+ *cpuset*)
+ ;;
+ *cpu,cpuacct*|*cpuacct,cpu|*cpuacct*|*cpu*)
+ found=$( echo "$line" | cut -d ' ' -f 4,5 )
+ break
+ ;;
+ esac
+ esac
+ done << __EOF__
+$( grep -F -- '- cgroup ' /proc/self/mountinfo )
+__EOF__
+
+ while IFS= read -r line; do
+ controller=$( echo "$line" | cut -d: -f 2 )
+ case "$needle" in
+ "cpuset")
+ case "$controller" in
+ cpuset)
+ mountpoint=$( echo "$line" | cut -d: -f 3 )
+ break
+ ;;
+ esac
+ ;;
+ "cpu")
+ case "$controller" in
+ cpu,cpuacct|cpuacct,cpu|cpuacct|cpu)
+ mountpoint=$( echo "$line" | cut -d: -f 3 )
+ break
+ ;;
+ esac
+ ;;
+ esac
+done << __EOF__
+$( grep -F -- 'cpu' /proc/self/cgroup )
+__EOF__
+
+ case "${found%% *}" in
+ "/")
+ foundroot="${found##* }$mountpoint"
+ ;;
+ "$mountpoint")
+ foundroot="${found##* }"
+ ;;
+ esac
+ echo "$foundroot"
+}
+
+get_cgroup_v2_path() {
+ found=
+ foundroot=
+ mountpoint=
+
+ [ -r "/proc/self/mountinfo" ] || return 1
+ [ -r "/proc/self/cgroup" ] || return 1
+
+ while IFS= read -r line; do
+ found=$( echo "$line" | cut -d ' ' -f 4,5 )
+ done << __EOF__
+$( grep -F -- '- cgroup2 ' /proc/self/mountinfo )
+__EOF__
+
+ while IFS= read -r line; do
+ mountpoint=$( echo "$line" | cut -d: -f 3 )
+done << __EOF__
+$( grep -F -- '0::' /proc/self/cgroup )
+__EOF__
+
+ case "${found%% *}" in
+ "")
+ return 1
+ ;;
+ "/")
+ foundroot="${found##* }$mountpoint"
+ ;;
+ "$mountpoint" | /../*)
+ foundroot="${found##* }"
+ ;;
+ esac
+ echo "$foundroot"
+}
+
+ncpu_online=$( getconf _NPROCESSORS_ONLN )
+ncpu_cpuset=
+ncpu_quota=
+ncpu_cpuset_v2=
+ncpu_quota_v2=
+
+cpuset=$( get_cgroup_v1_path "cpuset" ) && ncpu_cpuset=$( get_cpuset "$cpuset" "cpuset.effective_cpus" ) || ncpu_cpuset=$ncpu_online
+cpu=$( get_cgroup_v1_path "cpu" ) && ncpu_quota=$( get_quota "$cpu" ) || ncpu_quota=$ncpu_online
+cgroup_v2=$( get_cgroup_v2_path ) && ncpu_cpuset_v2=$( get_cpuset "$cgroup_v2" "cpuset.cpus.effective" ) || ncpu_cpuset_v2=$ncpu_online
+cgroup_v2=$( get_cgroup_v2_path ) && ncpu_quota_v2=$( get_quota_v2 "$cgroup_v2" ) || ncpu_quota_v2=$ncpu_online
+
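+# use the most restrictive (smallest) value among online CPUs and the cgroup v1/v2 cpuset and quota limits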
+ncpu=$( printf "%s\n%s\n%s\n%s\n%s\n" \
+ "$ncpu_online" \
+ "$ncpu_cpuset" \
+ "$ncpu_quota" \
+ "$ncpu_cpuset_v2" \
+ "$ncpu_quota_v2" \
+ | sort -n \
+ | head -n 1 )
+
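+# comment out the original worker_processes directive and set it to the value computed above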
+sed -i.bak -r 's/^(worker_processes)(.*)$/# Commented out by '"$ME"' on '"$(date)"'\n#\1\2\n\1 '"$ncpu"';/' /etc/nginx/nginx.conf
diff --git a/stable/alpine-slim/Dockerfile b/stable/alpine-slim/Dockerfile
new file mode 100644
index 00000000..949cd4c1
--- /dev/null
+++ b/stable/alpine-slim/Dockerfile
@@ -0,0 +1,123 @@
+#
+# NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh"
+#
+# PLEASE DO NOT EDIT IT DIRECTLY.
+#
+FROM alpine:3.20
+
+LABEL maintainer="NGINX Docker Maintainers <docker-maint@nginx.com>"
+
+ENV NGINX_VERSION 1.26.2
+ENV PKG_RELEASE 1
+ENV DYNPKG_RELEASE 2
+
+RUN set -x \
+# create nginx user/group first, to be consistent throughout docker variants
+ && addgroup -g 101 -S nginx \
+ && adduser -S -D -H -u 101 -h /var/cache/nginx -s /sbin/nologin -G nginx -g nginx nginx \
+ && apkArch="$(cat /etc/apk/arch)" \
+ && nginxPackages=" \
+ nginx=${NGINX_VERSION}-r${PKG_RELEASE} \
+ " \
+# install prerequisites for public key and pkg-oss checks
+ && apk add --no-cache --virtual .checksum-deps \
+ openssl \
+ && case "$apkArch" in \
+ x86_64|aarch64) \
+# arches officially built by upstream
+ set -x \
+ && KEY_SHA512="e09fa32f0a0eab2b879ccbbc4d0e4fb9751486eedda75e35fac65802cc9faa266425edf83e261137a2f4d16281ce2c1a5f4502930fe75154723da014214f0655" \
+ && wget -O /tmp/nginx_signing.rsa.pub https://nginx.org/keys/nginx_signing.rsa.pub \
+ && if echo "$KEY_SHA512 */tmp/nginx_signing.rsa.pub" | sha512sum -c -; then \
+ echo "key verification succeeded!"; \
+ mv /tmp/nginx_signing.rsa.pub /etc/apk/keys/; \
+ else \
+ echo "key verification failed!"; \
+ exit 1; \
+ fi \
+ && apk add -X "https://nginx.org/packages/alpine/v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" --no-cache $nginxPackages \
+ ;; \
+ *) \
+# we're on an architecture upstream doesn't officially build for
+# let's build binaries from the published packaging sources
+ set -x \
+ && tempDir="$(mktemp -d)" \
+ && chown nobody:nobody $tempDir \
+ && apk add --no-cache --virtual .build-deps \
+ gcc \
+ libc-dev \
+ make \
+ openssl-dev \
+ pcre2-dev \
+ zlib-dev \
+ linux-headers \
+ bash \
+ alpine-sdk \
+ findutils \
+ curl \
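+# build the base packages and sign the temporary APK index as the unprivileged "nobody" user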
+ && su nobody -s /bin/sh -c " \
+ export HOME=${tempDir} \
+ && cd ${tempDir} \
+ && curl -f -L -O https://github.com/nginx/pkg-oss/archive/f43e929dc7a6111ef5d9ecb281a75749f7934261.tar.gz \
+ && PKGOSSCHECKSUM=\"315e9e9040253396ebd9f540557e69cda7d9754a7895c3bf04fbf79d43be8d56e8efc6c22c21c87632039340080511179946456bbc4660e8faf171d130b475a6 *f43e929dc7a6111ef5d9ecb281a75749f7934261.tar.gz\" \
+ && if [ \"\$(openssl sha512 -r f43e929dc7a6111ef5d9ecb281a75749f7934261.tar.gz)\" = \"\$PKGOSSCHECKSUM\" ]; then \
+ echo \"pkg-oss tarball checksum verification succeeded!\"; \
+ else \
+ echo \"pkg-oss tarball checksum verification failed!\"; \
+ exit 1; \
+ fi \
+ && tar xzvf f43e929dc7a6111ef5d9ecb281a75749f7934261.tar.gz \
+ && cd pkg-oss-f43e929dc7a6111ef5d9ecb281a75749f7934261 \
+ && cd alpine \
+ && make base \
+ && apk index --allow-untrusted -o ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz ${tempDir}/packages/alpine/${apkArch}/*.apk \
+ && abuild-sign -k ${tempDir}/.abuild/abuild-key.rsa ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz \
+ " \
+ && cp ${tempDir}/.abuild/abuild-key.rsa.pub /etc/apk/keys/ \
+ && apk del --no-network .build-deps \
+ && apk add -X ${tempDir}/packages/alpine/ --no-cache $nginxPackages \
+ ;; \
+ esac \
+# remove checksum deps
+ && apk del --no-network .checksum-deps \
+# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
+ && if [ -n "$tempDir" ]; then rm -rf "$tempDir"; fi \
+ && if [ -f "/etc/apk/keys/abuild-key.rsa.pub" ]; then rm -f /etc/apk/keys/abuild-key.rsa.pub; fi \
+# Bring in gettext so we can get `envsubst`, then throw
+# the rest away. To do this, we need to install `gettext`
+# then move `envsubst` out of the way so `gettext` can
+# be deleted completely, then move `envsubst` back.
+ && apk add --no-cache --virtual .gettext gettext \
+ && mv /usr/bin/envsubst /tmp/ \
+ \
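+# work out which installed packages provide the shared libraries envsubst needs so they are kept after .gettext is removed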
+ && runDeps="$( \
+ scanelf --needed --nobanner /tmp/envsubst \
+ | awk '{ gsub(/,/, "\nso:", $2); print "so:" $2 }' \
+ | sort -u \
+ | xargs -r apk info --installed \
+ | sort -u \
+ )" \
+ && apk add --no-cache $runDeps \
+ && apk del --no-network .gettext \
+ && mv /tmp/envsubst /usr/local/bin/ \
+# Bring in tzdata so users can set the timezone through environment
+# variables
+ && apk add --no-cache tzdata \
+# forward request and error logs to docker log collector
+ && ln -sf /dev/stdout /var/log/nginx/access.log \
+ && ln -sf /dev/stderr /var/log/nginx/error.log \
+# create a docker-entrypoint.d directory
+ && mkdir /docker-entrypoint.d
+
+COPY docker-entrypoint.sh /
+COPY 10-listen-on-ipv6-by-default.sh /docker-entrypoint.d
+COPY 15-local-resolvers.envsh /docker-entrypoint.d
+COPY 20-envsubst-on-templates.sh /docker-entrypoint.d
+COPY 30-tune-worker-processes.sh /docker-entrypoint.d
+ENTRYPOINT ["/docker-entrypoint.sh"]
+
+EXPOSE 80
+
+STOPSIGNAL SIGQUIT
+
+CMD ["nginx", "-g", "daemon off;"]
diff --git a/stable/alpine-slim/docker-entrypoint.sh b/stable/alpine-slim/docker-entrypoint.sh
new file mode 100755
index 00000000..8ea04f21
--- /dev/null
+++ b/stable/alpine-slim/docker-entrypoint.sh
@@ -0,0 +1,47 @@
+#!/bin/sh
+# vim:sw=4:ts=4:et
+
+set -e
+
+entrypoint_log() {
+ if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then
+ echo "$@"
+ fi
+}
+
+if [ "$1" = "nginx" ] || [ "$1" = "nginx-debug" ]; then
+ if /usr/bin/find "/docker-entrypoint.d/" -mindepth 1 -maxdepth 1 -type f -print -quit 2>/dev/null | read v; then
+ entrypoint_log "$0: /docker-entrypoint.d/ is not empty, will attempt to perform configuration"
+
+ entrypoint_log "$0: Looking for shell scripts in /docker-entrypoint.d/"
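+ # scripts run in version-sort order, so their numeric prefixes (10-, 15-, 20-, 30-) control execution order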
+ find "/docker-entrypoint.d/" -follow -type f -print | sort -V | while read -r f; do
+ case "$f" in
+ *.envsh)
+ if [ -x "$f" ]; then
+ entrypoint_log "$0: Sourcing $f";
+ . "$f"
+ else
+ # warn on shell scripts without exec bit
+ entrypoint_log "$0: Ignoring $f, not executable";
+ fi
+ ;;
+ *.sh)
+ if [ -x "$f" ]; then
+ entrypoint_log "$0: Launching $f";
+ "$f"
+ else
+ # warn on shell scripts without exec bit
+ entrypoint_log "$0: Ignoring $f, not executable";
+ fi
+ ;;
+ *) entrypoint_log "$0: Ignoring $f";;
+ esac
+ done
+
+ entrypoint_log "$0: Configuration complete; ready for start up"
+ else
+ entrypoint_log "$0: No files found in /docker-entrypoint.d/, skipping configuration"
+ fi
+fi
+
+exec "$@"
diff --git a/stable/alpine/Dockerfile b/stable/alpine/Dockerfile
index 83de7060..9a638a5d 100644
--- a/stable/alpine/Dockerfile
+++ b/stable/alpine/Dockerfile
@@ -1,146 +1,79 @@
-FROM alpine:3.7
+#
+# NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh"
+#
+# PLEASE DO NOT EDIT IT DIRECTLY.
+#
+FROM nginx:1.26.2-alpine-slim
-LABEL maintainer="NGINX Docker Maintainers <docker-maint@nginx.com>"
+ENV NJS_VERSION 0.8.8
+ENV NJS_RELEASE 1
-ENV NGINX_VERSION 1.14.0
-
-RUN GPG_KEYS=B0F4253373F8F6F510D42178520A9993A1C052F8 \
- && CONFIG="\
- --prefix=/etc/nginx \
- --sbin-path=/usr/sbin/nginx \
- --modules-path=/usr/lib/nginx/modules \
- --conf-path=/etc/nginx/nginx.conf \
- --error-log-path=/var/log/nginx/error.log \
- --http-log-path=/var/log/nginx/access.log \
- --pid-path=/var/run/nginx.pid \
- --lock-path=/var/run/nginx.lock \
- --http-client-body-temp-path=/var/cache/nginx/client_temp \
- --http-proxy-temp-path=/var/cache/nginx/proxy_temp \
- --http-fastcgi-temp-path=/var/cache/nginx/fastcgi_temp \
- --http-uwsgi-temp-path=/var/cache/nginx/uwsgi_temp \
- --http-scgi-temp-path=/var/cache/nginx/scgi_temp \
- --user=nginx \
- --group=nginx \
- --with-http_ssl_module \
- --with-http_realip_module \
- --with-http_addition_module \
- --with-http_sub_module \
- --with-http_dav_module \
- --with-http_flv_module \
- --with-http_mp4_module \
- --with-http_gunzip_module \
- --with-http_gzip_static_module \
- --with-http_random_index_module \
- --with-http_secure_link_module \
- --with-http_stub_status_module \
- --with-http_auth_request_module \
- --with-http_xslt_module=dynamic \
- --with-http_image_filter_module=dynamic \
- --with-http_geoip_module=dynamic \
- --with-threads \
- --with-stream \
- --with-stream_ssl_module \
- --with-stream_ssl_preread_module \
- --with-stream_realip_module \
- --with-stream_geoip_module=dynamic \
- --with-http_slice_module \
- --with-mail \
- --with-mail_ssl_module \
- --with-compat \
- --with-file-aio \
- --with-http_v2_module \
- " \
- && addgroup -S nginx \
- && adduser -D -S -h /var/cache/nginx -s /sbin/nologin -G nginx nginx \
- && apk add --no-cache --virtual .build-deps \
- gcc \
- libc-dev \
- make \
- openssl-dev \
- pcre-dev \
- zlib-dev \
- linux-headers \
- curl \
- gnupg \
- libxslt-dev \
- gd-dev \
- geoip-dev \
- && curl -fSL https://nginx.org/download/nginx-$NGINX_VERSION.tar.gz -o nginx.tar.gz \
- && curl -fSL https://nginx.org/download/nginx-$NGINX_VERSION.tar.gz.asc -o nginx.tar.gz.asc \
- && export GNUPGHOME="$(mktemp -d)" \
- && found=''; \
- for server in \
- ha.pool.sks-keyservers.net \
- hkp://keyserver.ubuntu.com:80 \
- hkp://p80.pool.sks-keyservers.net:80 \
- pgp.mit.edu \
- ; do \
- echo "Fetching GPG key $GPG_KEYS from $server"; \
- gpg --keyserver "$server" --keyserver-options timeout=10 --recv-keys "$GPG_KEYS" && found=yes && break; \
- done; \
- test -z "$found" && echo >&2 "error: failed to fetch GPG key $GPG_KEYS" && exit 1; \
- gpg --batch --verify nginx.tar.gz.asc nginx.tar.gz \
- && rm -rf "$GNUPGHOME" nginx.tar.gz.asc \
- && mkdir -p /usr/src \
- && tar -zxC /usr/src -f nginx.tar.gz \
- && rm nginx.tar.gz \
- && cd /usr/src/nginx-$NGINX_VERSION \
- && ./configure $CONFIG --with-debug \
- && make -j$(getconf _NPROCESSORS_ONLN) \
- && mv objs/nginx objs/nginx-debug \
- && mv objs/ngx_http_xslt_filter_module.so objs/ngx_http_xslt_filter_module-debug.so \
- && mv objs/ngx_http_image_filter_module.so objs/ngx_http_image_filter_module-debug.so \
- && mv objs/ngx_http_geoip_module.so objs/ngx_http_geoip_module-debug.so \
- && mv objs/ngx_stream_geoip_module.so objs/ngx_stream_geoip_module-debug.so \
- && ./configure $CONFIG \
- && make -j$(getconf _NPROCESSORS_ONLN) \
- && make install \
- && rm -rf /etc/nginx/html/ \
- && mkdir /etc/nginx/conf.d/ \
- && mkdir -p /usr/share/nginx/html/ \
- && install -m644 html/index.html /usr/share/nginx/html/ \
- && install -m644 html/50x.html /usr/share/nginx/html/ \
- && install -m755 objs/nginx-debug /usr/sbin/nginx-debug \
- && install -m755 objs/ngx_http_xslt_filter_module-debug.so /usr/lib/nginx/modules/ngx_http_xslt_filter_module-debug.so \
- && install -m755 objs/ngx_http_image_filter_module-debug.so /usr/lib/nginx/modules/ngx_http_image_filter_module-debug.so \
- && install -m755 objs/ngx_http_geoip_module-debug.so /usr/lib/nginx/modules/ngx_http_geoip_module-debug.so \
- && install -m755 objs/ngx_stream_geoip_module-debug.so /usr/lib/nginx/modules/ngx_stream_geoip_module-debug.so \
- && ln -s ../../usr/lib/nginx/modules /etc/nginx/modules \
- && strip /usr/sbin/nginx* \
- && strip /usr/lib/nginx/modules/*.so \
- && rm -rf /usr/src/nginx-$NGINX_VERSION \
- \
- # Bring in gettext so we can get `envsubst`, then throw
- # the rest away. To do this, we need to install `gettext`
- # then move `envsubst` out of the way so `gettext` can
- # be deleted completely, then move `envsubst` back.
- && apk add --no-cache --virtual .gettext gettext \
- && mv /usr/bin/envsubst /tmp/ \
- \
- && runDeps="$( \
- scanelf --needed --nobanner --format '%n#p' /usr/sbin/nginx /usr/lib/nginx/modules/*.so /tmp/envsubst \
- | tr ',' '\n' \
- | sort -u \
- | awk 'system("[ -e /usr/local/lib/" $1 " ]") == 0 { next } { print "so:" $1 }' \
- )" \
- && apk add --no-cache --virtual .nginx-rundeps $runDeps \
- && apk del .build-deps \
- && apk del .gettext \
- && mv /tmp/envsubst /usr/local/bin/ \
- \
- # Bring in tzdata so users could set the timezones through the environment
- # variables
- && apk add --no-cache tzdata \
- \
- # forward request and error logs to docker log collector
- && ln -sf /dev/stdout /var/log/nginx/access.log \
- && ln -sf /dev/stderr /var/log/nginx/error.log
-
-COPY nginx.conf /etc/nginx/nginx.conf
-COPY nginx.vh.default.conf /etc/nginx/conf.d/default.conf
-
-EXPOSE 80
-
-STOPSIGNAL SIGTERM
-
-CMD ["nginx", "-g", "daemon off;"]
+RUN set -x \
+ && apkArch="$(cat /etc/apk/arch)" \
+ && nginxPackages=" \
+ nginx=${NGINX_VERSION}-r${PKG_RELEASE} \
+ nginx-module-xslt=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
+ nginx-module-geoip=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
+ nginx-module-image-filter=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
+ nginx-module-njs=${NGINX_VERSION}.${NJS_VERSION}-r${NJS_RELEASE} \
+ " \
+# install prerequisites for public key and pkg-oss checks
+ && apk add --no-cache --virtual .checksum-deps \
+ openssl \
+ && case "$apkArch" in \
+ x86_64|aarch64) \
+# arches officially built by upstream
+ apk add -X "https://nginx.org/packages/alpine/v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" --no-cache $nginxPackages \
+ ;; \
+ *) \
+# we're on an architecture upstream doesn't officially build for
+# let's build binaries from the published packaging sources
+ set -x \
+ && tempDir="$(mktemp -d)" \
+ && chown nobody:nobody $tempDir \
+ && apk add --no-cache --virtual .build-deps \
+ gcc \
+ libc-dev \
+ make \
+ openssl-dev \
+ pcre2-dev \
+ zlib-dev \
+ linux-headers \
+ libxslt-dev \
+ gd-dev \
+ geoip-dev \
+ libedit-dev \
+ bash \
+ alpine-sdk \
+ findutils \
+ curl \
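+# build the dynamic module packages and sign the temporary APK index as the unprivileged "nobody" user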
+ && su nobody -s /bin/sh -c " \
+ export HOME=${tempDir} \
+ && cd ${tempDir} \
+ && curl -f -L -O https://github.com/nginx/pkg-oss/archive/f43e929dc7a6111ef5d9ecb281a75749f7934261.tar.gz \
+ && PKGOSSCHECKSUM=\"315e9e9040253396ebd9f540557e69cda7d9754a7895c3bf04fbf79d43be8d56e8efc6c22c21c87632039340080511179946456bbc4660e8faf171d130b475a6 *f43e929dc7a6111ef5d9ecb281a75749f7934261.tar.gz\" \
+ && if [ \"\$(openssl sha512 -r f43e929dc7a6111ef5d9ecb281a75749f7934261.tar.gz)\" = \"\$PKGOSSCHECKSUM\" ]; then \
+ echo \"pkg-oss tarball checksum verification succeeded!\"; \
+ else \
+ echo \"pkg-oss tarball checksum verification failed!\"; \
+ exit 1; \
+ fi \
+ && tar xzvf f43e929dc7a6111ef5d9ecb281a75749f7934261.tar.gz \
+ && cd pkg-oss-f43e929dc7a6111ef5d9ecb281a75749f7934261 \
+ && cd alpine \
+ && make module-geoip module-image-filter module-njs module-xslt \
+ && apk index --allow-untrusted -o ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz ${tempDir}/packages/alpine/${apkArch}/*.apk \
+ && abuild-sign -k ${tempDir}/.abuild/abuild-key.rsa ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz \
+ " \
+ && cp ${tempDir}/.abuild/abuild-key.rsa.pub /etc/apk/keys/ \
+ && apk del --no-network .build-deps \
+ && apk add -X ${tempDir}/packages/alpine/ --no-cache $nginxPackages \
+ ;; \
+ esac \
+# remove checksum deps
+ && apk del --no-network .checksum-deps \
+# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
+ && if [ -n "$tempDir" ]; then rm -rf "$tempDir"; fi \
+ && if [ -f "/etc/apk/keys/abuild-key.rsa.pub" ]; then rm -f /etc/apk/keys/abuild-key.rsa.pub; fi \
+# Bring in curl and ca-certificates to make registering on DNS SD easier
+ && apk add --no-cache curl ca-certificates
diff --git a/stable/alpine/nginx.conf b/stable/alpine/nginx.conf
deleted file mode 100644
index e4bad8db..00000000
--- a/stable/alpine/nginx.conf
+++ /dev/null
@@ -1,32 +0,0 @@
-
-user nginx;
-worker_processes 1;
-
-error_log /var/log/nginx/error.log warn;
-pid /var/run/nginx.pid;
-
-
-events {
- worker_connections 1024;
-}
-
-
-http {
- include /etc/nginx/mime.types;
- default_type application/octet-stream;
-
- log_format main '$remote_addr - $remote_user [$time_local] "$request" '
- '$status $body_bytes_sent "$http_referer" '
- '"$http_user_agent" "$http_x_forwarded_for"';
-
- access_log /var/log/nginx/access.log main;
-
- sendfile on;
- #tcp_nopush on;
-
- keepalive_timeout 65;
-
- #gzip on;
-
- include /etc/nginx/conf.d/*.conf;
-}
diff --git a/stable/alpine/nginx.vh.default.conf b/stable/alpine/nginx.vh.default.conf
deleted file mode 100644
index 299c622a..00000000
--- a/stable/alpine/nginx.vh.default.conf
+++ /dev/null
@@ -1,45 +0,0 @@
-server {
- listen 80;
- server_name localhost;
-
- #charset koi8-r;
- #access_log /var/log/nginx/host.access.log main;
-
- location / {
- root /usr/share/nginx/html;
- index index.html index.htm;
- }
-
- #error_page 404 /404.html;
-
- # redirect server error pages to the static page /50x.html
- #
- error_page 500 502 503 504 /50x.html;
- location = /50x.html {
- root /usr/share/nginx/html;
- }
-
- # proxy the PHP scripts to Apache listening on 127.0.0.1:80
- #
- #location ~ \.php$ {
- # proxy_pass http://127.0.0.1;
- #}
-
- # pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000
- #
- #location ~ \.php$ {
- # root html;
- # fastcgi_pass 127.0.0.1:9000;
- # fastcgi_index index.php;
- # fastcgi_param SCRIPT_FILENAME /scripts$fastcgi_script_name;
- # include fastcgi_params;
- #}
-
- # deny access to .htaccess files, if Apache's document root
- # concurs with nginx's one
- #
- #location ~ /\.ht {
- # deny all;
- #}
-}
-
diff --git a/stable/debian-otel/Dockerfile b/stable/debian-otel/Dockerfile
new file mode 100644
index 00000000..22565126
--- /dev/null
+++ b/stable/debian-otel/Dockerfile
@@ -0,0 +1,100 @@
+#
+# NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh"
+#
+# PLEASE DO NOT EDIT IT DIRECTLY.
+#
+FROM nginx:1.26.2
+
+ENV OTEL_VERSION 0.1.0
+
+RUN set -x; \
+ NGINX_GPGKEY_PATH=/etc/apt/keyrings/nginx-archive-keyring.gpg; \
+ dpkgArch="$(dpkg --print-architecture)" \
+ && nginxPackages=" \
+ nginx=${NGINX_VERSION}-${PKG_RELEASE} \
+ nginx-module-xslt=${NGINX_VERSION}-${DYNPKG_RELEASE} \
+ nginx-module-geoip=${NGINX_VERSION}-${DYNPKG_RELEASE} \
+ nginx-module-image-filter=${NGINX_VERSION}-${DYNPKG_RELEASE} \
+ nginx-module-njs=${NGINX_VERSION}+${NJS_VERSION}-${NJS_RELEASE} \
+ nginx-module-otel=${NGINX_VERSION}+${OTEL_VERSION}-${PKG_RELEASE} \
+ " \
+ && case "$dpkgArch" in \
+ amd64|arm64) \
+# arches officially built by upstream
+ echo "deb [signed-by=$NGINX_GPGKEY_PATH] https://nginx.org/packages/debian/ bookworm nginx" >> /etc/apt/sources.list.d/nginx.list \
+ && apt-get update \
+ ;; \
+ *) \
+# we're on an architecture upstream doesn't officially build for
+# let's build binaries from the published packaging sources
+# new directory for storing sources and .deb files
+ tempDir="$(mktemp -d)" \
+ && chmod 777 "$tempDir" \
+# (777 to ensure APT's "_apt" user can access it too)
+ \
+# save list of currently-installed packages so build dependencies can be cleanly removed later
+ && savedAptMark="$(apt-mark showmanual)" \
+ \
+# build .deb files from upstream's packaging sources
+ && apt-get update \
+ && apt-get install --no-install-recommends --no-install-suggests -y \
+ curl \
+ devscripts \
+ equivs \
+ git \
+ libxml2-utils \
+ lsb-release \
+ xsltproc \
+ && ( \
+ cd "$tempDir" \
+ && REVISION="f43e929dc7a6111ef5d9ecb281a75749f7934261" \
+ && REVISION=${REVISION%~*} \
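+# drop any "~suffix" from the revision so it matches the pkg-oss tarball name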
+ && curl -f -L -O https://github.com/nginx/pkg-oss/archive/${REVISION}.tar.gz \
+ && PKGOSSCHECKSUM="315e9e9040253396ebd9f540557e69cda7d9754a7895c3bf04fbf79d43be8d56e8efc6c22c21c87632039340080511179946456bbc4660e8faf171d130b475a6 *${REVISION}.tar.gz" \
+ && if [ "$(openssl sha512 -r ${REVISION}.tar.gz)" = "$PKGOSSCHECKSUM" ]; then \
+ echo "pkg-oss tarball checksum verification succeeded!"; \
+ else \
+ echo "pkg-oss tarball checksum verification failed!"; \
+ exit 1; \
+ fi \
+ && tar xzvf ${REVISION}.tar.gz \
+ && cd pkg-oss-${REVISION} \
+ && cd debian \
+ && for target in module-otel; do \
+ make rules-$target; \
+ mk-build-deps --install --tool="apt-get -o Debug::pkgProblemResolver=yes --no-install-recommends --yes" \
+ debuild-$target/nginx-$NGINX_VERSION/debian/control; \
+ done \
+ && make module-otel \
+ ) \
+# we don't remove APT lists here because they get re-downloaded and removed later
+ \
+# reset apt-mark's "manual" list so that "purge --auto-remove" will remove all build dependencies
+# (which is done after we install the built packages so we don't have to redownload any overlapping dependencies)
+ && apt-mark showmanual | xargs apt-mark auto > /dev/null \
+ && { [ -z "$savedAptMark" ] || apt-mark manual $savedAptMark; } \
+ \
+# create a temporary local APT repo to install from (so that dependency resolution can be handled by APT, as it should be)
+ && ls -lAFh "$tempDir" \
+ && ( cd "$tempDir" && dpkg-scanpackages . > Packages ) \
+ && grep '^Package: ' "$tempDir/Packages" \
+ && echo "deb [ trusted=yes ] file://$tempDir ./" > /etc/apt/sources.list.d/temp.list \
+# work around the following APT issue by using "Acquire::GzipIndexes=false" (overriding "/etc/apt/apt.conf.d/docker-gzip-indexes")
+# Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
+# ...
+# E: Failed to fetch store:/var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
+ && apt-get -o Acquire::GzipIndexes=false update \
+ ;; \
+ esac \
+ \
+ && apt-get install --no-install-recommends --no-install-suggests -y \
+ $nginxPackages \
+ gettext-base \
+ curl \
+ && apt-get remove --purge --auto-remove -y && rm -rf /var/lib/apt/lists/* /etc/apt/sources.list.d/nginx.list \
+ \
+# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
+ && if [ -n "$tempDir" ]; then \
+ apt-get purge -y --auto-remove \
+ && rm -rf "$tempDir" /etc/apt/sources.list.d/temp.list; \
+ fi
diff --git a/stable/debian-perl/Dockerfile b/stable/debian-perl/Dockerfile
new file mode 100644
index 00000000..a8d61162
--- /dev/null
+++ b/stable/debian-perl/Dockerfile
@@ -0,0 +1,98 @@
+#
+# NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh"
+#
+# PLEASE DO NOT EDIT IT DIRECTLY.
+#
+FROM nginx:1.26.2
+
+RUN set -x; \
+ NGINX_GPGKEY_PATH=/etc/apt/keyrings/nginx-archive-keyring.gpg; \
+ dpkgArch="$(dpkg --print-architecture)" \
+ && nginxPackages=" \
+ nginx=${NGINX_VERSION}-${PKG_RELEASE} \
+ nginx-module-xslt=${NGINX_VERSION}-${DYNPKG_RELEASE} \
+ nginx-module-geoip=${NGINX_VERSION}-${DYNPKG_RELEASE} \
+ nginx-module-image-filter=${NGINX_VERSION}-${DYNPKG_RELEASE} \
+ nginx-module-perl=${NGINX_VERSION}-${DYNPKG_RELEASE} \
+ nginx-module-njs=${NGINX_VERSION}+${NJS_VERSION}-${NJS_RELEASE} \
+ " \
+ && case "$dpkgArch" in \
+ amd64|arm64) \
+# arches officially built by upstream
+ echo "deb [signed-by=$NGINX_GPGKEY_PATH] https://nginx.org/packages/debian/ bookworm nginx" >> /etc/apt/sources.list.d/nginx.list \
+ && apt-get update \
+ ;; \
+ *) \
+# we're on an architecture upstream doesn't officially build for
+# let's build binaries from the published packaging sources
+# new directory for storing sources and .deb files
+ tempDir="$(mktemp -d)" \
+ && chmod 777 "$tempDir" \
+# (777 to ensure APT's "_apt" user can access it too)
+ \
+# save list of currently-installed packages so build dependencies can be cleanly removed later
+ && savedAptMark="$(apt-mark showmanual)" \
+ \
+# build .deb files from upstream's packaging sources
+ && apt-get update \
+ && apt-get install --no-install-recommends --no-install-suggests -y \
+ curl \
+ devscripts \
+ equivs \
+ git \
+ libxml2-utils \
+ lsb-release \
+ xsltproc \
+ && ( \
+ cd "$tempDir" \
+ && REVISION="f43e929dc7a6111ef5d9ecb281a75749f7934261" \
+ && REVISION=${REVISION%~*} \
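+# drop any "~suffix" from the revision so it matches the pkg-oss tarball name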
+ && curl -f -L -O https://github.com/nginx/pkg-oss/archive/${REVISION}.tar.gz \
+ && PKGOSSCHECKSUM="315e9e9040253396ebd9f540557e69cda7d9754a7895c3bf04fbf79d43be8d56e8efc6c22c21c87632039340080511179946456bbc4660e8faf171d130b475a6 *${REVISION}.tar.gz" \
+ && if [ "$(openssl sha512 -r ${REVISION}.tar.gz)" = "$PKGOSSCHECKSUM" ]; then \
+ echo "pkg-oss tarball checksum verification succeeded!"; \
+ else \
+ echo "pkg-oss tarball checksum verification failed!"; \
+ exit 1; \
+ fi \
+ && tar xzvf ${REVISION}.tar.gz \
+ && cd pkg-oss-${REVISION} \
+ && cd debian \
+ && for target in module-perl; do \
+ make rules-$target; \
+ mk-build-deps --install --tool="apt-get -o Debug::pkgProblemResolver=yes --no-install-recommends --yes" \
+ debuild-$target/nginx-$NGINX_VERSION/debian/control; \
+ done \
+ && make module-perl \
+ ) \
+# we don't remove APT lists here because they get re-downloaded and removed later
+ \
+# reset apt-mark's "manual" list so that "purge --auto-remove" will remove all build dependencies
+# (which is done after we install the built packages so we don't have to redownload any overlapping dependencies)
+ && apt-mark showmanual | xargs apt-mark auto > /dev/null \
+ && { [ -z "$savedAptMark" ] || apt-mark manual $savedAptMark; } \
+ \
+# create a temporary local APT repo to install from (so that dependency resolution can be handled by APT, as it should be)
+ && ls -lAFh "$tempDir" \
+ && ( cd "$tempDir" && dpkg-scanpackages . > Packages ) \
+ && grep '^Package: ' "$tempDir/Packages" \
+ && echo "deb [ trusted=yes ] file://$tempDir ./" > /etc/apt/sources.list.d/temp.list \
+# work around the following APT issue by using "Acquire::GzipIndexes=false" (overriding "/etc/apt/apt.conf.d/docker-gzip-indexes")
+# Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
+# ...
+# E: Failed to fetch store:/var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
+ && apt-get -o Acquire::GzipIndexes=false update \
+ ;; \
+ esac \
+ \
+ && apt-get install --no-install-recommends --no-install-suggests -y \
+ $nginxPackages \
+ gettext-base \
+ curl \
+ && apt-get remove --purge --auto-remove -y && rm -rf /var/lib/apt/lists/* /etc/apt/sources.list.d/nginx.list \
+ \
+# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
+ && if [ -n "$tempDir" ]; then \
+ apt-get purge -y --auto-remove \
+ && rm -rf "$tempDir" /etc/apt/sources.list.d/temp.list; \
+ fi
diff --git a/stable/debian/10-listen-on-ipv6-by-default.sh b/stable/debian/10-listen-on-ipv6-by-default.sh
new file mode 100755
index 00000000..b90bf0c9
--- /dev/null
+++ b/stable/debian/10-listen-on-ipv6-by-default.sh
@@ -0,0 +1,67 @@
+#!/bin/sh
+# vim:sw=4:ts=4:et
+
+set -e
+
+entrypoint_log() {
+ if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then
+ echo "$@"
+ fi
+}
+
+ME=$(basename "$0")
+DEFAULT_CONF_FILE="etc/nginx/conf.d/default.conf"
+
+# check if we have ipv6 available
+if [ ! -f "/proc/net/if_inet6" ]; then
+ entrypoint_log "$ME: info: ipv6 not available"
+ exit 0
+fi
+
+if [ ! -f "/$DEFAULT_CONF_FILE" ]; then
+ entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE is not a file or does not exist"
+ exit 0
+fi
+
+# check if the file can be modified, e.g. not on a r/o filesystem
+touch /$DEFAULT_CONF_FILE 2>/dev/null || { entrypoint_log "$ME: info: can not modify /$DEFAULT_CONF_FILE (read-only file system?)"; exit 0; }
+
+# check if the file is already modified, e.g. on a container restart
+grep -q "listen \[::]\:80;" /$DEFAULT_CONF_FILE && { entrypoint_log "$ME: info: IPv6 listen already enabled"; exit 0; }
+
+if [ -f "/etc/os-release" ]; then
+ . /etc/os-release
+else
+ entrypoint_log "$ME: info: can not guess the operating system"
+ exit 0
+fi
+
+entrypoint_log "$ME: info: Getting the checksum of /$DEFAULT_CONF_FILE"
+
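+# only modify the file if it still matches the checksum of the packaged version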
+case "$ID" in
+ "debian")
+ CHECKSUM=$(dpkg-query --show --showformat='${Conffiles}\n' nginx | grep $DEFAULT_CONF_FILE | cut -d' ' -f 3)
+ echo "$CHECKSUM /$DEFAULT_CONF_FILE" | md5sum -c - >/dev/null 2>&1 || {
+ entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE differs from the packaged version"
+ exit 0
+ }
+ ;;
+ "alpine")
+ CHECKSUM=$(apk manifest nginx 2>/dev/null| grep $DEFAULT_CONF_FILE | cut -d' ' -f 1 | cut -d ':' -f 2)
+ echo "$CHECKSUM /$DEFAULT_CONF_FILE" | sha1sum -c - >/dev/null 2>&1 || {
+ entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE differs from the packaged version"
+ exit 0
+ }
+ ;;
+ *)
+ entrypoint_log "$ME: info: Unsupported distribution"
+ exit 0
+ ;;
+esac
+
+# enable ipv6 on default.conf listen sockets
+sed -i -E 's,listen 80;,listen 80;\n listen [::]:80;,' /$DEFAULT_CONF_FILE
+
+entrypoint_log "$ME: info: Enabled listen on IPv6 in /$DEFAULT_CONF_FILE"
+
+exit 0
diff --git a/stable/debian/15-local-resolvers.envsh b/stable/debian/15-local-resolvers.envsh
new file mode 100755
index 00000000..e830ddac
--- /dev/null
+++ b/stable/debian/15-local-resolvers.envsh
@@ -0,0 +1,15 @@
+#!/bin/sh
+# vim:sw=2:ts=2:sts=2:et
+
+set -eu
+
+LC_ALL=C
+PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+
+[ "${NGINX_ENTRYPOINT_LOCAL_RESOLVERS:-}" ] || return 0
+
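+# collect nameservers from /etc/resolv.conf, wrapping IPv6 addresses in brackets so they can be used in a resolver directive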
+NGINX_LOCAL_RESOLVERS=$(awk 'BEGIN{ORS=" "} $1=="nameserver" {if ($2 ~ ":") {print "["$2"]"} else {print $2}}' /etc/resolv.conf)
+
+NGINX_LOCAL_RESOLVERS="${NGINX_LOCAL_RESOLVERS% }"
+
+export NGINX_LOCAL_RESOLVERS
diff --git a/stable/debian/20-envsubst-on-templates.sh b/stable/debian/20-envsubst-on-templates.sh
new file mode 100755
index 00000000..3804165c
--- /dev/null
+++ b/stable/debian/20-envsubst-on-templates.sh
@@ -0,0 +1,78 @@
+#!/bin/sh
+
+set -e
+
+ME=$(basename "$0")
+
+entrypoint_log() {
+ if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then
+ echo "$@"
+ fi
+}
+
+add_stream_block() {
+ local conffile="/etc/nginx/nginx.conf"
+
+ if grep -q -E "\s*stream\s*\{" "$conffile"; then
+ entrypoint_log "$ME: $conffile contains a stream block; include $stream_output_dir/*.conf to enable stream templates"
+ else
+ # check if the file can be modified, e.g. not on a r/o filesystem
+ touch "$conffile" 2>/dev/null || { entrypoint_log "$ME: info: can not modify $conffile (read-only file system?)"; exit 0; }
+ entrypoint_log "$ME: Appending stream block to $conffile to include $stream_output_dir/*.conf"
+ cat << END >> "$conffile"
+# added by "$ME" on "$(date)"
+stream {
+ include $stream_output_dir/*.conf;
+}
+END
+ fi
+}
+
+auto_envsubst() {
+ local template_dir="${NGINX_ENVSUBST_TEMPLATE_DIR:-/etc/nginx/templates}"
+ local suffix="${NGINX_ENVSUBST_TEMPLATE_SUFFIX:-.template}"
+ local output_dir="${NGINX_ENVSUBST_OUTPUT_DIR:-/etc/nginx/conf.d}"
+ local stream_suffix="${NGINX_ENVSUBST_STREAM_TEMPLATE_SUFFIX:-.stream-template}"
+ local stream_output_dir="${NGINX_ENVSUBST_STREAM_OUTPUT_DIR:-/etc/nginx/stream-conf.d}"
+ local filter="${NGINX_ENVSUBST_FILTER:-}"
+
+ local template defined_envs relative_path output_path subdir
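+  # build a list like '${FOO} ${BAR} ' of exported variable names (optionally restricted by NGINX_ENVSUBST_FILTER) so envsubst only substitutes those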
+ defined_envs=$(printf '${%s} ' $(awk "END { for (name in ENVIRON) { print ( name ~ /${filter}/ ) ? name : \"\" } }" < /dev/null ))
+ [ -d "$template_dir" ] || return 0
+ if [ ! -w "$output_dir" ]; then
+ entrypoint_log "$ME: ERROR: $template_dir exists, but $output_dir is not writable"
+ return 0
+ fi
+ find "$template_dir" -follow -type f -name "*$suffix" -print | while read -r template; do
+ relative_path="${template#"$template_dir/"}"
+ output_path="$output_dir/${relative_path%"$suffix"}"
+ subdir=$(dirname "$relative_path")
+ # create a subdirectory where the template file exists
+ mkdir -p "$output_dir/$subdir"
+ entrypoint_log "$ME: Running envsubst on $template to $output_path"
+ envsubst "$defined_envs" < "$template" > "$output_path"
+ done
+
+  # Print the first file with the stream suffix; if there are none, the test is false
+ if test -n "$(find "$template_dir" -name "*$stream_suffix" -print -quit)"; then
+ mkdir -p "$stream_output_dir"
+ if [ ! -w "$stream_output_dir" ]; then
+ entrypoint_log "$ME: ERROR: $template_dir exists, but $stream_output_dir is not writable"
+ return 0
+ fi
+ add_stream_block
+ find "$template_dir" -follow -type f -name "*$stream_suffix" -print | while read -r template; do
+ relative_path="${template#"$template_dir/"}"
+ output_path="$stream_output_dir/${relative_path%"$stream_suffix"}"
+ subdir=$(dirname "$relative_path")
+ # create a subdirectory where the template file exists
+ mkdir -p "$stream_output_dir/$subdir"
+ entrypoint_log "$ME: Running envsubst on $template to $output_path"
+ envsubst "$defined_envs" < "$template" > "$output_path"
+ done
+ fi
+}
+
+auto_envsubst
+
+exit 0
diff --git a/stable/debian/30-tune-worker-processes.sh b/stable/debian/30-tune-worker-processes.sh
new file mode 100755
index 00000000..defb994f
--- /dev/null
+++ b/stable/debian/30-tune-worker-processes.sh
@@ -0,0 +1,188 @@
+#!/bin/sh
+# vim:sw=2:ts=2:sts=2:et
+
+set -eu
+
+LC_ALL=C
+ME=$(basename "$0")
+PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+
+[ "${NGINX_ENTRYPOINT_WORKER_PROCESSES_AUTOTUNE:-}" ] || exit 0
+
+touch /etc/nginx/nginx.conf 2>/dev/null || { echo >&2 "$ME: error: can not modify /etc/nginx/nginx.conf (read-only file system?)"; exit 0; }
+
+ceildiv() {
+ num=$1
+ div=$2
+ echo $(( (num + div - 1) / div ))
+}
+
+get_cpuset() {
+ cpusetroot=$1
+ cpusetfile=$2
+ ncpu=0
+ [ -f "$cpusetroot/$cpusetfile" ] || return 1
+ for token in $( tr ',' ' ' < "$cpusetroot/$cpusetfile" ); do
+ case "$token" in
+ *-*)
+ count=$( seq $(echo "$token" | tr '-' ' ') | wc -l )
+ ncpu=$(( ncpu+count ))
+ ;;
+ *)
+ ncpu=$(( ncpu+1 ))
+ ;;
+ esac
+ done
+ echo "$ncpu"
+}
+
+get_quota() {
+ cpuroot=$1
+ ncpu=0
+ [ -f "$cpuroot/cpu.cfs_quota_us" ] || return 1
+ [ -f "$cpuroot/cpu.cfs_period_us" ] || return 1
+ cfs_quota=$( cat "$cpuroot/cpu.cfs_quota_us" )
+ cfs_period=$( cat "$cpuroot/cpu.cfs_period_us" )
+ [ "$cfs_quota" = "-1" ] && return 1
+ [ "$cfs_period" = "0" ] && return 1
+ ncpu=$( ceildiv "$cfs_quota" "$cfs_period" )
+ [ "$ncpu" -gt 0 ] || return 1
+ echo "$ncpu"
+}
+
+get_quota_v2() {
+ cpuroot=$1
+ ncpu=0
+ [ -f "$cpuroot/cpu.max" ] || return 1
+ cfs_quota=$( cut -d' ' -f 1 < "$cpuroot/cpu.max" )
+ cfs_period=$( cut -d' ' -f 2 < "$cpuroot/cpu.max" )
+ [ "$cfs_quota" = "max" ] && return 1
+ [ "$cfs_period" = "0" ] && return 1
+ ncpu=$( ceildiv "$cfs_quota" "$cfs_period" )
+ [ "$ncpu" -gt 0 ] || return 1
+ echo "$ncpu"
+}
+
+get_cgroup_v1_path() {
+ needle=$1
+ found=
+ foundroot=
+ mountpoint=
+
+ [ -r "/proc/self/mountinfo" ] || return 1
+ [ -r "/proc/self/cgroup" ] || return 1
+
+ while IFS= read -r line; do
+ case "$needle" in
+ "cpuset")
+ case "$line" in
+ *cpuset*)
+ found=$( echo "$line" | cut -d ' ' -f 4,5 )
+ break
+ ;;
+ esac
+ ;;
+ "cpu")
+ case "$line" in
+ *cpuset*)
+ ;;
+ *cpu,cpuacct*|*cpuacct,cpu|*cpuacct*|*cpu*)
+ found=$( echo "$line" | cut -d ' ' -f 4,5 )
+ break
+ ;;
+ esac
+ esac
+ done << __EOF__
+$( grep -F -- '- cgroup ' /proc/self/mountinfo )
+__EOF__
+
+ while IFS= read -r line; do
+ controller=$( echo "$line" | cut -d: -f 2 )
+ case "$needle" in
+ "cpuset")
+ case "$controller" in
+ cpuset)
+ mountpoint=$( echo "$line" | cut -d: -f 3 )
+ break
+ ;;
+ esac
+ ;;
+ "cpu")
+ case "$controller" in
+ cpu,cpuacct|cpuacct,cpu|cpuacct|cpu)
+ mountpoint=$( echo "$line" | cut -d: -f 3 )
+ break
+ ;;
+ esac
+ ;;
+ esac
+done << __EOF__
+$( grep -F -- 'cpu' /proc/self/cgroup )
+__EOF__
+
+ case "${found%% *}" in
+ "/")
+ foundroot="${found##* }$mountpoint"
+ ;;
+ "$mountpoint")
+ foundroot="${found##* }"
+ ;;
+ esac
+ echo "$foundroot"
+}
+
+get_cgroup_v2_path() {
+ found=
+ foundroot=
+ mountpoint=
+
+ [ -r "/proc/self/mountinfo" ] || return 1
+ [ -r "/proc/self/cgroup" ] || return 1
+
+ while IFS= read -r line; do
+ found=$( echo "$line" | cut -d ' ' -f 4,5 )
+ done << __EOF__
+$( grep -F -- '- cgroup2 ' /proc/self/mountinfo )
+__EOF__
+
+ while IFS= read -r line; do
+ mountpoint=$( echo "$line" | cut -d: -f 3 )
+done << __EOF__
+$( grep -F -- '0::' /proc/self/cgroup )
+__EOF__
+
+ case "${found%% *}" in
+ "")
+ return 1
+ ;;
+ "/")
+ foundroot="${found##* }$mountpoint"
+ ;;
+ "$mountpoint" | /../*)
+ foundroot="${found##* }"
+ ;;
+ esac
+ echo "$foundroot"
+}
+
+ncpu_online=$( getconf _NPROCESSORS_ONLN )
+ncpu_cpuset=
+ncpu_quota=
+ncpu_cpuset_v2=
+ncpu_quota_v2=
+
+cpuset=$( get_cgroup_v1_path "cpuset" ) && ncpu_cpuset=$( get_cpuset "$cpuset" "cpuset.effective_cpus" ) || ncpu_cpuset=$ncpu_online
+cpu=$( get_cgroup_v1_path "cpu" ) && ncpu_quota=$( get_quota "$cpu" ) || ncpu_quota=$ncpu_online
+cgroup_v2=$( get_cgroup_v2_path ) && ncpu_cpuset_v2=$( get_cpuset "$cgroup_v2" "cpuset.cpus.effective" ) || ncpu_cpuset_v2=$ncpu_online
+cgroup_v2=$( get_cgroup_v2_path ) && ncpu_quota_v2=$( get_quota_v2 "$cgroup_v2" ) || ncpu_quota_v2=$ncpu_online
+
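+# Use the most restrictive (smallest) of the detected limits.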
+ncpu=$( printf "%s\n%s\n%s\n%s\n%s\n" \
+ "$ncpu_online" \
+ "$ncpu_cpuset" \
+ "$ncpu_quota" \
+ "$ncpu_cpuset_v2" \
+ "$ncpu_quota_v2" \
+ | sort -n \
+ | head -n 1 )
+
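+# Comment out the current "worker_processes" directive and append the tuned
+# value; for example (assuming ncpu=2), "worker_processes  auto;" becomes:
+#   # Commented out by 30-tune-worker-processes.sh on <date>
+#   #worker_processes  auto;
+#   worker_processes 2;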
+sed -i.bak -r 's/^(worker_processes)(.*)$/# Commented out by '"$ME"' on '"$(date)"'\n#\1\2\n\1 '"$ncpu"';/' /etc/nginx/nginx.conf
diff --git a/stable/debian/Dockerfile b/stable/debian/Dockerfile
new file mode 100644
index 00000000..416d8338
--- /dev/null
+++ b/stable/debian/Dockerfile
@@ -0,0 +1,145 @@
+#
+# NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh"
+#
+# PLEASE DO NOT EDIT IT DIRECTLY.
+#
+FROM debian:bookworm-slim
+
+LABEL maintainer="NGINX Docker Maintainers <docker-maint@nginx.com>"
+
+ENV NGINX_VERSION 1.26.2
+ENV NJS_VERSION 0.8.8
+ENV NJS_RELEASE 1~bookworm
+ENV PKG_RELEASE 1~bookworm
+ENV DYNPKG_RELEASE 2~bookworm
+
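+# Install nginx and its dynamic modules: official upstream packages on
+# amd64/arm64, or .deb packages built locally from the pkg-oss sources pinned
+# below on every other architecture.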
+RUN set -x \
+# create nginx user/group first, to be consistent throughout docker variants
+ && groupadd --system --gid 101 nginx \
+ && useradd --system --gid nginx --no-create-home --home /nonexistent --comment "nginx user" --shell /bin/false --uid 101 nginx \
+ && apt-get update \
+ && apt-get install --no-install-recommends --no-install-suggests -y gnupg1 ca-certificates \
+ && \
+ NGINX_GPGKEYS="573BFD6B3D8FBC641079A6ABABF5BD827BD9BF62 8540A6F18833A80E9C1653A42FD21310B49F6B46 9E9BE90EACBCDE69FE9B204CBCDCD8A38D88A2B3"; \
+ NGINX_GPGKEY_PATH=/etc/apt/keyrings/nginx-archive-keyring.gpg; \
+ export GNUPGHOME="$(mktemp -d)"; \
+ found=''; \
+ for NGINX_GPGKEY in $NGINX_GPGKEYS; do \
+ for server in \
+ hkp://keyserver.ubuntu.com:80 \
+ pgp.mit.edu \
+ ; do \
+ echo "Fetching GPG key $NGINX_GPGKEY from $server"; \
+ gpg1 --keyserver "$server" --keyserver-options timeout=10 --recv-keys "$NGINX_GPGKEY" && found=yes && break; \
+ done; \
+ test -z "$found" && echo >&2 "error: failed to fetch GPG key $NGINX_GPGKEY" && exit 1; \
+ done; \
+ gpg1 --export "$NGINX_GPGKEYS" > "$NGINX_GPGKEY_PATH" ; \
+ rm -rf "$GNUPGHOME"; \
+ apt-get remove --purge --auto-remove -y gnupg1 && rm -rf /var/lib/apt/lists/* \
+ && dpkgArch="$(dpkg --print-architecture)" \
+ && nginxPackages=" \
+ nginx=${NGINX_VERSION}-${PKG_RELEASE} \
+ nginx-module-xslt=${NGINX_VERSION}-${DYNPKG_RELEASE} \
+ nginx-module-geoip=${NGINX_VERSION}-${DYNPKG_RELEASE} \
+ nginx-module-image-filter=${NGINX_VERSION}-${DYNPKG_RELEASE} \
+ nginx-module-njs=${NGINX_VERSION}+${NJS_VERSION}-${NJS_RELEASE} \
+ " \
+ && case "$dpkgArch" in \
+ amd64|arm64) \
+# arches officially built by upstream
+ echo "deb [signed-by=$NGINX_GPGKEY_PATH] https://nginx.org/packages/debian/ bookworm nginx" >> /etc/apt/sources.list.d/nginx.list \
+ && apt-get update \
+ ;; \
+ *) \
+# we're on an architecture upstream doesn't officially build for
+# let's build binaries from the published packaging sources
+# new directory for storing sources and .deb files
+ tempDir="$(mktemp -d)" \
+ && chmod 777 "$tempDir" \
+# (777 to ensure APT's "_apt" user can access it too)
+ \
+# save list of currently-installed packages so build dependencies can be cleanly removed later
+ && savedAptMark="$(apt-mark showmanual)" \
+ \
+# build .deb files from upstream's packaging sources
+ && apt-get update \
+ && apt-get install --no-install-recommends --no-install-suggests -y \
+ curl \
+ devscripts \
+ equivs \
+ git \
+ libxml2-utils \
+ lsb-release \
+ xsltproc \
+ && ( \
+ cd "$tempDir" \
+ && REVISION="f43e929dc7a6111ef5d9ecb281a75749f7934261" \
+ && REVISION=${REVISION%~*} \
+ && curl -f -L -O https://github.com/nginx/pkg-oss/archive/${REVISION}.tar.gz \
+ && PKGOSSCHECKSUM="315e9e9040253396ebd9f540557e69cda7d9754a7895c3bf04fbf79d43be8d56e8efc6c22c21c87632039340080511179946456bbc4660e8faf171d130b475a6 *${REVISION}.tar.gz" \
+ && if [ "$(openssl sha512 -r ${REVISION}.tar.gz)" = "$PKGOSSCHECKSUM" ]; then \
+ echo "pkg-oss tarball checksum verification succeeded!"; \
+ else \
+ echo "pkg-oss tarball checksum verification failed!"; \
+ exit 1; \
+ fi \
+ && tar xzvf ${REVISION}.tar.gz \
+ && cd pkg-oss-${REVISION} \
+ && cd debian \
+ && for target in base module-geoip module-image-filter module-njs module-xslt; do \
+ make rules-$target; \
+ mk-build-deps --install --tool="apt-get -o Debug::pkgProblemResolver=yes --no-install-recommends --yes" \
+ debuild-$target/nginx-$NGINX_VERSION/debian/control; \
+ done \
+ && make base module-geoip module-image-filter module-njs module-xslt \
+ ) \
+# we don't remove APT lists here because they get re-downloaded and removed later
+ \
+# reset apt-mark's "manual" list so that "purge --auto-remove" will remove all build dependencies
+# (which is done after we install the built packages so we don't have to redownload any overlapping dependencies)
+ && apt-mark showmanual | xargs apt-mark auto > /dev/null \
+ && { [ -z "$savedAptMark" ] || apt-mark manual $savedAptMark; } \
+ \
+# create a temporary local APT repo to install from (so that dependency resolution can be handled by APT, as it should be)
+ && ls -lAFh "$tempDir" \
+ && ( cd "$tempDir" && dpkg-scanpackages . > Packages ) \
+ && grep '^Package: ' "$tempDir/Packages" \
+ && echo "deb [ trusted=yes ] file://$tempDir ./" > /etc/apt/sources.list.d/temp.list \
+# work around the following APT issue by using "Acquire::GzipIndexes=false" (overriding "/etc/apt/apt.conf.d/docker-gzip-indexes")
+# Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
+# ...
+# E: Failed to fetch store:/var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
+ && apt-get -o Acquire::GzipIndexes=false update \
+ ;; \
+ esac \
+ \
+ && apt-get install --no-install-recommends --no-install-suggests -y \
+ $nginxPackages \
+ gettext-base \
+ curl \
+ && apt-get remove --purge --auto-remove -y && rm -rf /var/lib/apt/lists/* /etc/apt/sources.list.d/nginx.list \
+ \
+# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
+ && if [ -n "$tempDir" ]; then \
+ apt-get purge -y --auto-remove \
+ && rm -rf "$tempDir" /etc/apt/sources.list.d/temp.list; \
+ fi \
+# forward request and error logs to docker log collector
+ && ln -sf /dev/stdout /var/log/nginx/access.log \
+ && ln -sf /dev/stderr /var/log/nginx/error.log \
+# create a docker-entrypoint.d directory
+ && mkdir /docker-entrypoint.d
+
+COPY docker-entrypoint.sh /
+COPY 10-listen-on-ipv6-by-default.sh /docker-entrypoint.d
+COPY 15-local-resolvers.envsh /docker-entrypoint.d
+COPY 20-envsubst-on-templates.sh /docker-entrypoint.d
+COPY 30-tune-worker-processes.sh /docker-entrypoint.d
+ENTRYPOINT ["/docker-entrypoint.sh"]
+
+EXPOSE 80
+
+STOPSIGNAL SIGQUIT
+
+CMD ["nginx", "-g", "daemon off;"]
diff --git a/stable/debian/docker-entrypoint.sh b/stable/debian/docker-entrypoint.sh
new file mode 100755
index 00000000..8ea04f21
--- /dev/null
+++ b/stable/debian/docker-entrypoint.sh
@@ -0,0 +1,47 @@
+#!/bin/sh
+# vim:sw=4:ts=4:et
+
+set -e
+
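+# Log helper; silenced when NGINX_ENTRYPOINT_QUIET_LOGS is set to a non-empty value.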
+entrypoint_log() {
+ if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then
+ echo "$@"
+ fi
+}
+
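+# When the container runs nginx (or nginx-debug), execute the helper scripts in
+# /docker-entrypoint.d/ in version-sort order: executable *.envsh files are
+# sourced (so they can export variables), executable *.sh files are run, and
+# anything else is ignored.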
+if [ "$1" = "nginx" ] || [ "$1" = "nginx-debug" ]; then
+ if /usr/bin/find "/docker-entrypoint.d/" -mindepth 1 -maxdepth 1 -type f -print -quit 2>/dev/null | read v; then
+ entrypoint_log "$0: /docker-entrypoint.d/ is not empty, will attempt to perform configuration"
+
+ entrypoint_log "$0: Looking for shell scripts in /docker-entrypoint.d/"
+ find "/docker-entrypoint.d/" -follow -type f -print | sort -V | while read -r f; do
+ case "$f" in
+ *.envsh)
+ if [ -x "$f" ]; then
+ entrypoint_log "$0: Sourcing $f";
+ . "$f"
+ else
+ # warn on shell scripts without exec bit
+ entrypoint_log "$0: Ignoring $f, not executable";
+ fi
+ ;;
+ *.sh)
+ if [ -x "$f" ]; then
+ entrypoint_log "$0: Launching $f";
+ "$f"
+ else
+ # warn on shell scripts without exec bit
+ entrypoint_log "$0: Ignoring $f, not executable";
+ fi
+ ;;
+ *) entrypoint_log "$0: Ignoring $f";;
+ esac
+ done
+
+ entrypoint_log "$0: Configuration complete; ready for start up"
+ else
+ entrypoint_log "$0: No files found in /docker-entrypoint.d/, skipping configuration"
+ fi
+fi
+
+exec "$@"
diff --git a/stable/stretch-perl/Dockerfile b/stable/stretch-perl/Dockerfile
deleted file mode 100644
index 2e262ece..00000000
--- a/stable/stretch-perl/Dockerfile
+++ /dev/null
@@ -1,100 +0,0 @@
-FROM debian:stretch-slim
-
-LABEL maintainer="NGINX Docker Maintainers <docker-maint@nginx.com>"
-
-ENV NGINX_VERSION 1.14.0-1~stretch
-ENV NJS_VERSION 1.14.0.0.2.0-1~stretch
-
-RUN set -x \
- && apt-get update \
- && apt-get install --no-install-recommends --no-install-suggests -y gnupg1 apt-transport-https ca-certificates \
- && \
- NGINX_GPGKEY=573BFD6B3D8FBC641079A6ABABF5BD827BD9BF62; \
- found=''; \
- for server in \
- ha.pool.sks-keyservers.net \
- hkp://keyserver.ubuntu.com:80 \
- hkp://p80.pool.sks-keyservers.net:80 \
- pgp.mit.edu \
- ; do \
- echo "Fetching GPG key $NGINX_GPGKEY from $server"; \
- apt-key adv --keyserver "$server" --keyserver-options timeout=10 --recv-keys "$NGINX_GPGKEY" && found=yes && break; \
- done; \
- test -z "$found" && echo >&2 "error: failed to fetch GPG key $NGINX_GPGKEY" && exit 1; \
- apt-get remove --purge --auto-remove -y gnupg1 && rm -rf /var/lib/apt/lists/* \
- && dpkgArch="$(dpkg --print-architecture)" \
- && nginxPackages=" \
- nginx=${NGINX_VERSION} \
- nginx-module-xslt=${NGINX_VERSION} \
- nginx-module-geoip=${NGINX_VERSION} \
- nginx-module-image-filter=${NGINX_VERSION} \
- nginx-module-perl=${NGINX_VERSION} \
- nginx-module-njs=${NJS_VERSION} \
- " \
- && case "$dpkgArch" in \
- amd64|i386) \
-# arches officialy built by upstream
- echo "deb https://nginx.org/packages/debian/ stretch nginx" >> /etc/apt/sources.list.d/nginx.list \
- && apt-get update \
- ;; \
- *) \
-# we're on an architecture upstream doesn't officially build for
-# let's build binaries from the published source packages
- echo "deb-src https://nginx.org/packages/debian/ stretch nginx" >> /etc/apt/sources.list.d/nginx.list \
- \
-# new directory for storing sources and .deb files
- && tempDir="$(mktemp -d)" \
- && chmod 777 "$tempDir" \
-# (777 to ensure APT's "_apt" user can access it too)
- \
-# save list of currently-installed packages so build dependencies can be cleanly removed later
- && savedAptMark="$(apt-mark showmanual)" \
- \
-# build .deb files from upstream's source packages (which are verified by apt-get)
- && apt-get update \
- && apt-get build-dep -y $nginxPackages \
- && ( \
- cd "$tempDir" \
- && DEB_BUILD_OPTIONS="nocheck parallel=$(nproc)" \
- apt-get source --compile $nginxPackages \
- ) \
-# we don't remove APT lists here because they get re-downloaded and removed later
- \
-# reset apt-mark's "manual" list so that "purge --auto-remove" will remove all build dependencies
-# (which is done after we install the built packages so we don't have to redownload any overlapping dependencies)
- && apt-mark showmanual | xargs apt-mark auto > /dev/null \
- && { [ -z "$savedAptMark" ] || apt-mark manual $savedAptMark; } \
- \
-# create a temporary local APT repo to install from (so that dependency resolution can be handled by APT, as it should be)
- && ls -lAFh "$tempDir" \
- && ( cd "$tempDir" && dpkg-scanpackages . > Packages ) \
- && grep '^Package: ' "$tempDir/Packages" \
- && echo "deb [ trusted=yes ] file://$tempDir ./" > /etc/apt/sources.list.d/temp.list \
-# work around the following APT issue by using "Acquire::GzipIndexes=false" (overriding "/etc/apt/apt.conf.d/docker-gzip-indexes")
-# Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
-# ...
-# E: Failed to fetch store:/var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
- && apt-get -o Acquire::GzipIndexes=false update \
- ;; \
- esac \
- \
- && apt-get install --no-install-recommends --no-install-suggests -y \
- $nginxPackages \
- gettext-base \
- && apt-get remove --purge --auto-remove -y apt-transport-https ca-certificates && rm -rf /var/lib/apt/lists/* /etc/apt/sources.list.d/nginx.list \
- \
-# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
- && if [ -n "$tempDir" ]; then \
- apt-get purge -y --auto-remove \
- && rm -rf "$tempDir" /etc/apt/sources.list.d/temp.list; \
- fi
-
-# forward request and error logs to docker log collector
-RUN ln -sf /dev/stdout /var/log/nginx/access.log \
- && ln -sf /dev/stderr /var/log/nginx/error.log
-
-EXPOSE 80
-
-STOPSIGNAL SIGTERM
-
-CMD ["nginx", "-g", "daemon off;"]
diff --git a/stable/stretch/Dockerfile b/stable/stretch/Dockerfile
deleted file mode 100644
index a436cf47..00000000
--- a/stable/stretch/Dockerfile
+++ /dev/null
@@ -1,99 +0,0 @@
-FROM debian:stretch-slim
-
-LABEL maintainer="NGINX Docker Maintainers <docker-maint@nginx.com>"
-
-ENV NGINX_VERSION 1.14.0-1~stretch
-ENV NJS_VERSION 1.14.0.0.2.0-1~stretch
-
-RUN set -x \
- && apt-get update \
- && apt-get install --no-install-recommends --no-install-suggests -y gnupg1 apt-transport-https ca-certificates \
- && \
- NGINX_GPGKEY=573BFD6B3D8FBC641079A6ABABF5BD827BD9BF62; \
- found=''; \
- for server in \
- ha.pool.sks-keyservers.net \
- hkp://keyserver.ubuntu.com:80 \
- hkp://p80.pool.sks-keyservers.net:80 \
- pgp.mit.edu \
- ; do \
- echo "Fetching GPG key $NGINX_GPGKEY from $server"; \
- apt-key adv --keyserver "$server" --keyserver-options timeout=10 --recv-keys "$NGINX_GPGKEY" && found=yes && break; \
- done; \
- test -z "$found" && echo >&2 "error: failed to fetch GPG key $NGINX_GPGKEY" && exit 1; \
- apt-get remove --purge --auto-remove -y gnupg1 && rm -rf /var/lib/apt/lists/* \
- && dpkgArch="$(dpkg --print-architecture)" \
- && nginxPackages=" \
- nginx=${NGINX_VERSION} \
- nginx-module-xslt=${NGINX_VERSION} \
- nginx-module-geoip=${NGINX_VERSION} \
- nginx-module-image-filter=${NGINX_VERSION} \
- nginx-module-njs=${NJS_VERSION} \
- " \
- && case "$dpkgArch" in \
- amd64|i386) \
-# arches officialy built by upstream
- echo "deb https://nginx.org/packages/debian/ stretch nginx" >> /etc/apt/sources.list.d/nginx.list \
- && apt-get update \
- ;; \
- *) \
-# we're on an architecture upstream doesn't officially build for
-# let's build binaries from the published source packages
- echo "deb-src https://nginx.org/packages/debian/ stretch nginx" >> /etc/apt/sources.list.d/nginx.list \
- \
-# new directory for storing sources and .deb files
- && tempDir="$(mktemp -d)" \
- && chmod 777 "$tempDir" \
-# (777 to ensure APT's "_apt" user can access it too)
- \
-# save list of currently-installed packages so build dependencies can be cleanly removed later
- && savedAptMark="$(apt-mark showmanual)" \
- \
-# build .deb files from upstream's source packages (which are verified by apt-get)
- && apt-get update \
- && apt-get build-dep -y $nginxPackages \
- && ( \
- cd "$tempDir" \
- && DEB_BUILD_OPTIONS="nocheck parallel=$(nproc)" \
- apt-get source --compile $nginxPackages \
- ) \
-# we don't remove APT lists here because they get re-downloaded and removed later
- \
-# reset apt-mark's "manual" list so that "purge --auto-remove" will remove all build dependencies
-# (which is done after we install the built packages so we don't have to redownload any overlapping dependencies)
- && apt-mark showmanual | xargs apt-mark auto > /dev/null \
- && { [ -z "$savedAptMark" ] || apt-mark manual $savedAptMark; } \
- \
-# create a temporary local APT repo to install from (so that dependency resolution can be handled by APT, as it should be)
- && ls -lAFh "$tempDir" \
- && ( cd "$tempDir" && dpkg-scanpackages . > Packages ) \
- && grep '^Package: ' "$tempDir/Packages" \
- && echo "deb [ trusted=yes ] file://$tempDir ./" > /etc/apt/sources.list.d/temp.list \
-# work around the following APT issue by using "Acquire::GzipIndexes=false" (overriding "/etc/apt/apt.conf.d/docker-gzip-indexes")
-# Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
-# ...
-# E: Failed to fetch store:/var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
- && apt-get -o Acquire::GzipIndexes=false update \
- ;; \
- esac \
- \
- && apt-get install --no-install-recommends --no-install-suggests -y \
- $nginxPackages \
- gettext-base \
- && apt-get remove --purge --auto-remove -y apt-transport-https ca-certificates && rm -rf /var/lib/apt/lists/* /etc/apt/sources.list.d/nginx.list \
- \
-# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
- && if [ -n "$tempDir" ]; then \
- apt-get purge -y --auto-remove \
- && rm -rf "$tempDir" /etc/apt/sources.list.d/temp.list; \
- fi
-
-# forward request and error logs to docker log collector
-RUN ln -sf /dev/stdout /var/log/nginx/access.log \
- && ln -sf /dev/stderr /var/log/nginx/error.log
-
-EXPOSE 80
-
-STOPSIGNAL SIGTERM
-
-CMD ["nginx", "-g", "daemon off;"]
diff --git a/sync-awsecr.sh b/sync-awsecr.sh
new file mode 100755
index 00000000..547c83ff
--- /dev/null
+++ b/sync-awsecr.sh
@@ -0,0 +1,171 @@
+#!/bin/bash
+set -eu
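+# Emits a shell script on stdout that pulls the upstream per-arch images,
+# re-tags them for the ECR registry configured below, pushes the per-arch tags,
+# and assembles multi-arch manifests. Typical (assumed) usage:
+#   ./sync-awsecr.sh > sync.run && sh sync.run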
+
+image="nginx"
+registry="public.ecr.aws/z9d2n7e1"
+
+declare -A aliases
+aliases=(
+ [mainline]='1 1.27 latest'
+ [stable]='1.26'
+)
+
+architectures=( amd64 arm64v8 )
+
+self="$(basename "$BASH_SOURCE")"
+cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
+base=debian
+
+versions=( mainline stable )
+
+pulllist=()
+declare -A taglist
+taglist=()
+
+# get the most recent commit which modified any of "$@"
+fileCommit() {
+ git log -1 --format='format:%H' HEAD -- "$@"
+}
+
+# get the most recent commit which modified "$1/Dockerfile" or any file COPY'd from "$1/Dockerfile"
+dirCommit() {
+ local dir="$1"; shift
+ (
+ cd "$dir"
+ fileCommit \
+ Dockerfile \
+ $(git show HEAD:./Dockerfile | awk '
+ toupper($1) == "COPY" {
+ for (i = 2; i < NF; i++) {
+ print $i
+ }
+ }
+ ')
+ )
+}
+
+# prints "$2$1$3$1...$N"
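+# e.g. join ', ' a b c  ->  "a, b, c"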
+join() {
+ local sep="$1"; shift
+ local out; printf -v out "${sep//%/%%}%s" "$@"
+ echo "${out#$sep}"
+}
+
+for version in "${versions[@]}"; do
+ commit="$(dirCommit "$version/$base")"
+ fullVersion="$(git show "$commit":"$version/$base/Dockerfile" | awk '$1 == "ENV" && $2 == "NGINX_VERSION" { print $3; exit }')"
+ pulllist+=( "$image:$fullVersion" )
+ for variant in perl alpine alpine-perl alpine-slim; do
+ pulllist+=( "$image:$fullVersion-$variant" )
+ done
+done
+
+for version in "${versions[@]}"; do
+ commit="$(dirCommit "$version/$base")"
+
+ fullVersion="$(git show "$commit":"$version/$base/Dockerfile" | awk '$1 == "ENV" && $2 == "NGINX_VERSION" { print $3; exit }')"
+
+ versionAliases=( $fullVersion )
+ if [ "$version" != "$fullVersion" ]; then
+ versionAliases+=( $version )
+ fi
+ versionAliases+=( ${aliases[$version]:-} )
+
+ debianVersion="$(git show "$commit":"$version/$base/Dockerfile" | awk -F"[-:]" '$1 == "FROM debian" { print $2; exit }')"
+ debianAliases=( ${versionAliases[@]/%/-$debianVersion} )
+ debianAliases=( "${debianAliases[@]//latest-/}" )
+
+ for tag in ${versionAliases[@]:1} ${debianAliases[@]:1}; do
+ taglist["$image:$tag"]="$image:$fullVersion"
+ done
+
+ for variant in debian-perl; do
+ variantAliases=( "${versionAliases[@]/%/-perl}" )
+ variantAliases+=( "${versionAliases[@]/%/-${variant/debian/$debianVersion}}" )
+ variantAliases=( "${variantAliases[@]//latest-/}" )
+
+ for tag in ${variantAliases[@]}; do
+ if [ "$tag" != "${fullVersion}-perl" ]; then
+ taglist["$image:$tag"]="$image:$fullVersion-perl"
+ fi
+ done
+ done
+
+ commit="$(dirCommit "$version/alpine-slim")"
+ alpineVersion="$(git show "$commit":"$version/alpine-slim/Dockerfile" | awk -F: '$1 == "FROM alpine" { print $2; exit }')"
+
+ for variant in alpine alpine-perl alpine-slim; do
+ commit="$(dirCommit "$version/$variant")"
+
+ variantAliases=( "${versionAliases[@]/%/-$variant}" )
+ variantAliases+=( "${versionAliases[@]/%/-${variant/alpine/alpine$alpineVersion}}" )
+ variantAliases=( "${variantAliases[@]//latest-/}" )
+
+ for tag in ${variantAliases[@]}; do
+ if [ "$tag" != "${fullVersion}-$variant" ]; then
+ taglist["$image:$tag"]="$image:${fullVersion}-$variant"
+ fi
+ done
+ done
+
+done
+
+echo "#!/bin/sh"
+echo "set -ex"
+echo
+echo "export DOCKER_CLI_EXPERIMENTAL=enabled"
+echo
+echo "# pulling stuff"
+for arch in ${architectures[@]}; do
+ case $arch in
+ arm64v8)
+ parch="aarch64"
+ ;;
+ *)
+ parch=$arch
+ ;;
+ esac
+for tag in ${pulllist[@]}; do
+ echo "docker pull --platform linux/$parch $arch/$tag";
+done
+done
+
+echo
+
+echo "# tagging stuff"
+
+for arch in ${architectures[@]}; do
+for tag in ${pulllist[@]}; do
+ echo "docker tag $arch/$tag $registry/$tag-$arch"
+done
+for tag in ${!taglist[@]}; do
+ echo "docker tag $arch/${taglist[$tag]} $registry/$tag-$arch"
+done
+done
+
+echo "# pushing stuff"
+
+for arch in ${architectures[@]}; do
+for tag in ${pulllist[@]}; do
+ echo "docker push $registry/$tag-$arch"
+done
+for tag in ${!taglist[@]}; do
+ echo "docker push $registry/$tag-$arch"
+done
+done
+
+echo
+echo "# manifesting stuff"
+for tag in ${pulllist[@]} ${!taglist[@]}; do
+ string="docker manifest create --amend $registry/$tag"
+ for arch in ${architectures[@]}; do
+ string+=" $registry/$tag-$arch"
+ done
+ echo $string
+done
+
+echo
+echo "# pushing manifests"
+for tag in ${pulllist[@]} ${!taglist[@]}; do
+ echo "docker manifest push --purge $registry/$tag"
+done
diff --git a/update.sh b/update.sh
new file mode 100755
index 00000000..bf01741f
--- /dev/null
+++ b/update.sh
@@ -0,0 +1,264 @@
+#!/usr/bin/env bash
+set -Eeuo pipefail
+shopt -s nullglob
+
+cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
+
+declare branches=(
+ "stable"
+ "mainline"
+)
+
+# Current nginx versions
+# Remember to update pkgosschecksum when changing this.
+declare -A nginx=(
+ [mainline]='1.27.3'
+ [stable]='1.26.2'
+)
+
+# Current njs versions
+declare -A njs=(
+ [mainline]='0.8.8'
+ [stable]='0.8.8'
+)
+
+# Current njs patchlevel version
+# Remember to update pkgosschecksum when changing this.
+declare -A njspkg=(
+ [mainline]='1'
+ [stable]='1'
+)
+
+# Current otel versions
+declare -A otel=(
+ [mainline]='0.1.0'
+ [stable]='0.1.0'
+)
+
+# Current nginx package patchlevel version
+# Remember to update pkgosschecksum when changing this.
+declare -A pkg=(
+ [mainline]=1
+ [stable]=1
+)
+
+# Current built-in dynamic modules package patchlevel version
+# Remember to update pkgosschecksum when changing this.
+declare -A dynpkg=(
+ [mainline]=1
+ [stable]=2
+)
+
+declare -A debian=(
+ [mainline]='bookworm'
+ [stable]='bookworm'
+)
+
+declare -A alpine=(
+ [mainline]='3.20'
+ [stable]='3.20'
+)
+
+# When we bump njs version in a stable release we don't move the tag in the
+# pkg-oss repo. This setting allows us to specify a revision to check out
+# when building packages on architectures not supported by nginx.org
+# Remember to update pkgosschecksum when changing this.
+declare -A rev=(
+ [mainline]='0286c5190d972a49bffc9bf247885dd510ce8181'
+ [stable]='f43e929dc7a6111ef5d9ecb281a75749f7934261'
+)
+
+# SHA512 checksums for the pkg-oss tarballs produced by the source code
+# revisions/tags in the previous block.
+# Used in builds for architectures not packaged by nginx.org.
+declare -A pkgosschecksum=(
+ [mainline]='1e546bd15d7bc68e1772ecb6a73e29ba108ee5554a28928e57af038a9e8fc4f5cd35708ce89ad1dfaac97d870e663d32ef41045611d30b20d38b46816e3ab535'
+ [stable]='315e9e9040253396ebd9f540557e69cda7d9754a7895c3bf04fbf79d43be8d56e8efc6c22c21c87632039340080511179946456bbc4660e8faf171d130b475a6'
+)
+
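+# Print the package list, encoded with literal "\n" and "\\" escapes so that
+# sed expands it into backslash-continued lines when it replaces %%PACKAGES%%
+# in the Dockerfile templates.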
+get_packages() {
+ local distro="$1"
+ shift
+ local branch="$1"
+ shift
+ local bn=""
+ local otel=
+ local perl=
+ local r=
+ local sep=
+
+ case "$distro:$branch" in
+ alpine*:*)
+ r="r"
+ sep="."
+ ;;
+ debian*:*)
+ sep="+"
+ ;;
+ esac
+
+ case "$distro" in
+ *-perl)
+ perl="nginx-module-perl"
+ ;;
+ *-otel)
+ otel="nginx-module-otel"
+ bn="\n"
+ ;;
+ esac
+
+ echo -n ' \\\n'
+ case "$distro" in
+ *-slim)
+ for p in nginx; do
+ echo -n ' '"$p"'=${NGINX_VERSION}-'"$r"'${PKG_RELEASE} \\'
+ done
+ ;;
+ *)
+ for p in nginx; do
+ echo -n ' '"$p"'=${NGINX_VERSION}-'"$r"'${PKG_RELEASE} \\\n'
+ done
+ for p in nginx-module-xslt nginx-module-geoip nginx-module-image-filter $perl; do
+ echo -n ' '"$p"'=${NGINX_VERSION}-'"$r"'${DYNPKG_RELEASE} \\\n'
+ done
+ for p in nginx-module-njs; do
+ echo -n ' '"$p"'=${NGINX_VERSION}'"$sep"'${NJS_VERSION}-'"$r"'${NJS_RELEASE} \\'"$bn"
+ done
+ for p in $otel; do
+ echo -n ' '"$p"'=${NGINX_VERSION}'"$sep"'${OTEL_VERSION}-'"$r"'${PKG_RELEASE} \\'
+ done
+ ;;
+ esac
+}
+
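+# Map a distro variant and branch to its nginx.org package repository, e.g.
+# "debian-perl" + "mainline" -> https://nginx.org/packages/mainline/debian/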
+get_packagerepo() {
+ local distro="$1"
+ shift
+ distro="${distro%-perl}"
+ distro="${distro%-otel}"
+ distro="${distro%-slim}"
+ local branch="$1"
+ shift
+
+ [ "$branch" = "mainline" ] && branch="$branch/" || branch=""
+
+ echo "https://nginx.org/packages/${branch}${distro}/"
+}
+
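+# Look up the package patchlevel for a distro/branch, appending the Debian
+# release suffix when needed, e.g. "debian" + "stable" + "dyn" -> "2~bookworm"
+# (once $debianver has been set by the main loop below).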
+get_packagever() {
+ local distro="$1"
+ shift
+ distro="${distro%-perl}"
+ distro="${distro%-otel}"
+ distro="${distro%-slim}"
+ local branch="$1"
+ shift
+ local package="$1"
+ shift
+ local suffix=
+
+ [ "${distro}" = "debian" ] && suffix="~${debianver}"
+
+ case "${package}" in
+ "njs")
+ echo ${njspkg[$branch]}${suffix}
+ ;;
+ "dyn")
+ echo ${dynpkg[$branch]}${suffix}
+ ;;
+ *)
+ echo ${pkg[$branch]}${suffix}
+ ;;
+ esac
+}
+
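+# Pick the pkg-oss make targets for a variant, e.g. "alpine-slim" -> "base",
+# "debian-perl" -> "module-perl".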
+get_buildtarget() {
+ local distro="$1"
+ shift
+ case "$distro" in
+ alpine-slim)
+ echo base
+ ;;
+ alpine)
+ echo module-geoip module-image-filter module-njs module-xslt
+ ;;
+ debian)
+ echo base module-geoip module-image-filter module-njs module-xslt
+ ;;
+ *-perl)
+ echo module-perl
+ ;;
+ *-otel)
+ echo module-otel
+ ;;
+ esac
+}
+
+generated_warning() {
+ cat <<__EOF__
+#
+# NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh"
+#
+# PLEASE DO NOT EDIT IT DIRECTLY.
+#
+__EOF__
+}
+
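+# Regenerate each branch/variant Dockerfile from its template, substituting the
+# version, checksum, and package placeholders defined above, then refresh the
+# entrypoint scripts shipped with the debian and alpine-slim variants.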
+for branch in "${branches[@]}"; do
+ for variant in \
+ alpine{,-perl,-otel,-slim} \
+ debian{,-perl,-otel}; do
+ echo "$branch: $variant dockerfiles"
+ dir="$branch/$variant"
+ variant="$(basename "$variant")"
+
+ [ -d "$dir" ] || continue
+
+ template="Dockerfile-${variant}.template"
+ {
+ generated_warning
+ cat "$template"
+ } >"$dir/Dockerfile"
+
+ debianver="${debian[$branch]}"
+ alpinever="${alpine[$branch]}"
+ nginxver="${nginx[$branch]}"
+ njsver="${njs[${branch}]}"
+ otelver="${otel[${branch}]}"
+ revver="${rev[${branch}]}"
+ pkgosschecksumver="${pkgosschecksum[${branch}]}"
+
+ packagerepo=$(get_packagerepo "$variant" "$branch")
+ packages=$(get_packages "$variant" "$branch")
+ packagever=$(get_packagever "$variant" "$branch" "any")
+ njspkgver=$(get_packagever "$variant" "$branch" "njs")
+ dynpkgver=$(get_packagever "$variant" "$branch" "dyn")
+ buildtarget=$(get_buildtarget "$variant")
+
+ sed -i.bak \
+ -e 's,%%ALPINE_VERSION%%,'"$alpinever"',' \
+ -e 's,%%DEBIAN_VERSION%%,'"$debianver"',' \
+ -e 's,%%DYNPKG_RELEASE%%,'"$dynpkgver"',' \
+ -e 's,%%NGINX_VERSION%%,'"$nginxver"',' \
+ -e 's,%%NJS_VERSION%%,'"$njsver"',' \
+ -e 's,%%NJS_RELEASE%%,'"$njspkgver"',' \
+ -e 's,%%OTEL_VERSION%%,'"$otelver"',' \
+ -e 's,%%PKG_RELEASE%%,'"$packagever"',' \
+ -e 's,%%PACKAGES%%,'"$packages"',' \
+ -e 's,%%PACKAGEREPO%%,'"$packagerepo"',' \
+ -e 's,%%REVISION%%,'"$revver"',' \
+ -e 's,%%PKGOSSCHECKSUM%%,'"$pkgosschecksumver"',' \
+ -e 's,%%BUILDTARGET%%,'"$buildtarget"',' \
+ "$dir/Dockerfile"
+
+ done
+
+ for variant in \
+ alpine-slim \
+ debian; do
+ echo "$branch: $variant entrypoint scripts"
+ dir="$branch/$variant"
+ cp -a entrypoint/*.sh "$dir/"
+ cp -a entrypoint/*.envsh "$dir/"
+ done
+done