diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
new file mode 100644
index 000000000..d8b19f81d
--- /dev/null
+++ b/.github/CODEOWNERS
@@ -0,0 +1,6 @@
+#####################
+# Main global owner #
+#####################
+
+* @nginx/syseng
+
diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml
new file mode 100644
index 000000000..aa0fb9e13
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug_report.yml
@@ -0,0 +1,62 @@
+---
+name: 🐛 Bug report
+description: Create a report to help us improve
+labels: bug
+body:
+ - type: markdown
+ attributes:
+ value: |
+ Thanks for taking the time to fill out this bug report!
+
+ Before you continue filling out this report, please take a moment to check that your bug has not been [already reported on GitHub][issue search] 🙌
+
+ Remember to redact any sensitive information such as authentication credentials and/or license keys!
+
+ [issue search]: ../search?q=is%3Aissue&type=issues
+
+ - type: textarea
+ id: overview
+ attributes:
+ label: Bug Overview
+ description: A clear and concise overview of the bug.
+ placeholder: When I do "X" with the NGINX Docker image, "Y" happens instead of "Z".
+ validations:
+ required: true
+
+ - type: textarea
+ id: behavior
+ attributes:
+ label: Expected Behavior
+ description: A clear and concise description of what you expected to happen.
+ placeholder: When I do "X" with the NGINX Docker image, I expect "Z" to happen.
+ validations:
+ required: true
+
+ - type: textarea
+ id: steps
+ attributes:
+ label: Steps to Reproduce the Bug
+ description: Detail the series of steps required to reproduce the bug.
+ placeholder: When I run the Docker NGINX image using [...], the image fails with an error message. If I check the terminal outputs and/or logs, I see the following error info.
+ validations:
+ required: true
+
+ - type: textarea
+ id: environment
+ attributes:
+ label: Environment Details
+ description: Please provide details about your environment.
+ value: |
+ - Version/release of Docker and method of installation (e.g. Docker Desktop / Docker Server)
+ - Version of the Docker NGINX image or specific commit: [e.g. 1.4.3/commit hash]
+ - Target deployment platform: [e.g. OpenShift/Kubernetes/Docker Compose/local cluster/etc...]
+ - Target OS: [e.g. RHEL 9/Ubuntu 24.04/etc...]
+ validations:
+ required: true
+
+ - type: textarea
+ id: context
+ attributes:
+ label: Additional Context
+ description: Add any other context about the problem here.
+ placeholder: Feel free to add any other context/information/screenshots/etc... that you think might be relevant to this issue in here.
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
new file mode 100644
index 000000000..3f7850f70
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -0,0 +1,12 @@
+---
+blank_issues_enabled: false
+contact_links:
+ - name: 💬 Talk to the NGINX community!
+ url: https://community.nginx.org
+ about: A community forum for NGINX users, developers, and contributors
+ - name: 📝 Code of Conduct
+ url: https://www.contributor-covenant.org/version/2/1/code_of_conduct
+ about: NGINX follows the Contributor Covenant Code of Conduct to ensure a safe and inclusive community
+ - name: 💼 For commercial & enterprise users
+ url: https://www.f5.com/products/nginx
+ about: F5 offers a wide range of NGINX products for commercial & enterprise users
diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml
new file mode 100644
index 000000000..ee20eec9f
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature_request.yml
@@ -0,0 +1,41 @@
+---
+name: ✨ Feature request
+description: Suggest an idea for this project
+labels: enhancement
+body:
+ - type: markdown
+ attributes:
+ value: |
+ Thanks for taking the time to fill out this feature request!
+
+ Before you continue filling out this request, please take a moment to check that your feature has not been [already requested on GitHub][issue search] 🙌
+
+ **Note:** If you are seeking community support or have a question, please consider starting a new thread via [GitHub discussions][discussions] or the [NGINX Community forum][forum].
+
+ [issue search]: ../search?q=is%3Aissue&type=issues
+
+ [discussions]: ../discussions
+ [forum]: https://community.nginx.org
+
+ - type: textarea
+ id: overview
+ attributes:
+ label: Feature Overview
+ description: A clear and concise description of what the feature request is.
+ placeholder: I would like the Docker NGINX image to be able to do "X".
+ validations:
+ required: true
+
+ - type: textarea
+ id: alternatives
+ attributes:
+ label: Alternatives Considered
+ description: Detail any potential alternative solutions/workarounds you've used or considered.
+ placeholder: I have done/might be able to do "X" in the Docker NGINX image by doing "Y".
+
+ - type: textarea
+ id: context
+ attributes:
+ label: Additional Context
+ description: Add any other context about the problem here.
+ placeholder: Feel free to add any other context/information/screenshots/etc... that you think might be relevant to this feature request here.
diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
new file mode 100644
index 000000000..0dc5899cf
--- /dev/null
+++ b/.github/pull_request_template.md
@@ -0,0 +1,14 @@
+### Proposed changes
+
+Describe the use case and detail of the change. If this PR addresses an issue on GitHub, make sure to include a link to that issue using one of the [supported keywords](https://docs.github.com/en/github/managing-your-work-on-github/linking-a-pull-request-to-an-issue) in this PR's description or commit message.
+
+### Checklist
+
+Before creating a PR, run through this checklist and mark each as complete:
+
+- [ ] I have read the [contributing guidelines](/CONTRIBUTING.md)
+- [ ] I have signed the [F5 Contributor License Agreement (CLA)](https://github.com/f5/f5-cla/blob/main/docs/f5_cla.md)
+- [ ] I have run `./update.sh` and ensured all entrypoint/Dockerfile template changes have been applied to the relevant image entrypoint scripts & Dockerfiles
+- [ ] If applicable, I have added tests that prove my fix is effective or that my feature works
+- [ ] If applicable, I have checked that any relevant tests pass after adding my changes
+- [ ] I have updated any relevant documentation ([`README.md`](/README.md) and/or [`modules/README.md`](/modules/README.md))
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
new file mode 100644
index 000000000..37d7a6218
--- /dev/null
+++ b/.github/workflows/ci.yml
@@ -0,0 +1,49 @@
+name: GitHub CI
+
+on:
+ pull_request:
+ push:
+ schedule:
+ - cron: 0 10 * * Mon
+
+defaults:
+ run:
+ shell: 'bash -Eeuo pipefail -x {0}'
+
+jobs:
+
+ generate-jobs:
+ name: Generate Jobs
+ runs-on: ubuntu-latest
+ outputs:
+ strategy: ${{ steps.generate-jobs.outputs.strategy }}
+ steps:
+ - uses: actions/checkout@v3
+ - uses: docker-library/bashbrew@v0.1.12
+ - id: generate-jobs
+ name: Generate Jobs
+ run: |
+ strategy="$(GITHUB_REPOSITORY=nginx "$BASHBREW_SCRIPTS/github-actions/generate.sh")"
+ strategy="$(GITHUB_REPOSITORY=nginx "$BASHBREW_SCRIPTS/github-actions/munge-i386.sh" -c <<<"$strategy")"
+ echo "strategy=$strategy" >> "$GITHUB_OUTPUT"
+ jq . <<<"$strategy" # sanity check / debugging aid
+
+ test:
+ needs: generate-jobs
+ strategy: ${{ fromJson(needs.generate-jobs.outputs.strategy) }}
+ name: ${{ matrix.name }}
+ runs-on: ${{ matrix.os }}
+ steps:
+ - uses: actions/checkout@v3
+ - name: Prepare Environment
+ run: ${{ matrix.runs.prepare }}
+ - name: Pull Dependencies
+ run: ${{ matrix.runs.pull }}
+ - name: Build ${{ matrix.name }}
+ run: ${{ matrix.runs.build }}
+ - name: History ${{ matrix.name }}
+ run: ${{ matrix.runs.history }}
+ - name: Test ${{ matrix.name }}
+ run: ${{ matrix.runs.test }}
+ - name: '"docker images"'
+ run: ${{ matrix.runs.images }}
diff --git a/.github/workflows/f5_cla.yml b/.github/workflows/f5_cla.yml
new file mode 100644
index 000000000..43e473eab
--- /dev/null
+++ b/.github/workflows/f5_cla.yml
@@ -0,0 +1,41 @@
+---
+name: F5 CLA
+on:
+ issue_comment:
+ types: [created]
+ pull_request_target:
+ types: [opened, closed, synchronize]
+permissions: read-all
+jobs:
+ f5-cla:
+ name: F5 CLA
+ runs-on: ubuntu-24.04
+ permissions:
+ actions: write
+ pull-requests: write
+ statuses: write
+ steps:
+ - name: Run F5 Contributor License Agreement (CLA) assistant
+ if: (github.event.comment.body == 'recheck' || github.event.comment.body == 'I have hereby read the F5 CLA and agree to its terms') || github.event_name == 'pull_request_target'
+ uses: contributor-assistant/github-action@ca4a40a7d1004f18d9960b404b97e5f30a505a08 # v2.6.1
+ with:
+ # Path to the CLA document.
+ path-to-document: https://github.com/f5/f5-cla/blob/main/docs/f5_cla.md
+ # Custom CLA messages.
+ custom-notsigned-prcomment: '🎉 Thank you for your contribution! It appears you have not yet signed the [F5 Contributor License Agreement (CLA)](https://github.com/f5/f5-cla/blob/main/docs/f5_cla.md), which is required for your changes to be incorporated into an F5 Open Source Software (OSS) project. Please kindly read the [F5 CLA](https://github.com/f5/f5-cla/blob/main/docs/f5_cla.md) and reply on a new comment with the following text to agree:'
+ custom-pr-sign-comment: 'I have hereby read the F5 CLA and agree to its terms'
+ custom-allsigned-prcomment: '✅ All required contributors have signed the F5 CLA for this PR. Thank you!'
+ # Remote repository storing CLA signatures.
+ remote-organization-name: f5
+ remote-repository-name: f5-cla-data
+ # Branch where CLA signatures are stored.
+ branch: main
+ path-to-signatures: signatures/signatures.json
+ # Comma separated list of usernames for maintainers or any other individuals who should not be prompted for a CLA.
+ # NOTE: You will want to edit the usernames to suit your project needs.
+ allowlist: bot*
+ # Do not lock PRs after a merge.
+ lock-pullrequest-aftermerge: false
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ PERSONAL_ACCESS_TOKEN: ${{ secrets.F5_CLA_TOKEN }}
diff --git a/.github/workflows/sync.yml b/.github/workflows/sync.yml
new file mode 100644
index 000000000..99dc98601
--- /dev/null
+++ b/.github/workflows/sync.yml
@@ -0,0 +1,44 @@
+name: Sync DockerHub with AWS ECR
+
+on:
+ workflow_dispatch:
+ schedule:
+ - cron: 23 20 * * *
+
+defaults:
+ run:
+ shell: 'bash -Eeuo pipefail -x {0}'
+
+jobs:
+ sync-awsecr:
+ name: Sync Docker Hub to AWS ECR Public
+ runs-on: ubuntu-24.04
+ permissions:
+ id-token: write
+ contents: read
+ steps:
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+
+ - name: Configure AWS credentials
+ uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2
+ with:
+ role-to-assume: ${{ secrets.AWS_ROLE_PUBLIC_ECR }}
+ aws-region: us-east-1
+
+ - name: Login to Amazon ECR Public
+ id: login-ecr-public
+ uses: aws-actions/amazon-ecr-login@062b18b96a7aff071d4dc91bc00c4c1a7945b076 # v2.0.1
+ with:
+ registry-type: public
+
+ - name: Login to Docker Hub
+ uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0
+ with:
+ username: ${{ secrets.DOCKERHUB_USERNAME }}
+ password: ${{ secrets.DOCKERHUB_TOKEN }}
+
+ - name: Build, tag, and push docker image to Amazon ECR Public
+ run: |
+ ./sync-awsecr.sh > sync-real.sh
+ chmod +x sync-real.sh
+ ./sync-real.sh
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 000000000..751553b3a
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1 @@
+*.bak
diff --git a/.test/config.sh b/.test/config.sh
new file mode 100755
index 000000000..e371f4043
--- /dev/null
+++ b/.test/config.sh
@@ -0,0 +1,11 @@
+imageTests+=(
+ [nginx]='
+ ipv6
+ static
+ templates
+ templates-resolver
+ templates-resolver-ipv6
+ workers
+ modules
+ '
+)
diff --git a/.test/tests/ipv6/expected-std-out.txt b/.test/tests/ipv6/expected-std-out.txt
new file mode 100644
index 000000000..f16a08766
--- /dev/null
+++ b/.test/tests/ipv6/expected-std-out.txt
@@ -0,0 +1,2 @@
+<title>Welcome to nginx!</title>
+10-listen-on-ipv6-by-default.sh: info: Enabled listen on IPv6 in /etc/nginx/conf.d/default.conf
diff --git a/.test/tests/ipv6/run.sh b/.test/tests/ipv6/run.sh
new file mode 100755
index 000000000..0235db6b5
--- /dev/null
+++ b/.test/tests/ipv6/run.sh
@@ -0,0 +1,52 @@
+#!/bin/bash
+
+[ "$DEBUG" ] && set -x
+
+set -eo pipefail
+
+# check if we have ipv6 available
+if [ ! -f "/proc/net/if_inet6" ]; then
+ exit 0
+fi
+
+dir="$(dirname "$(readlink -f "$BASH_SOURCE")")"
+
+image="$1"
+
+clientImage='buildpack-deps:buster-curl'
+# ensure the clientImage is ready and available
+if ! docker image inspect "$clientImage" &> /dev/null; then
+ docker pull "$clientImage" > /dev/null
+fi
+
+cid="$(docker run -d "$image")"
+trap "docker rm -vf $cid > /dev/null" EXIT
+
+_request() {
+ local method="$1"
+ shift
+
+ local proto="$1"
+ shift
+
+ local url="${1#/}"
+ shift
+
+ if [ "$(docker inspect -f '{{.State.Running}}' "$cid" 2>/dev/null)" != 'true' ]; then
+ echo >&2 "$image stopped unexpectedly!"
+ ( set -x && docker logs "$cid" ) >&2 || true
+ false
+ fi
+
+ docker run --rm \
+ --link "$cid":nginx \
+ "$clientImage" \
+ curl -fsSL -X"$method" --connect-to '::nginx:' "$@" "$proto://example.com/$url"
+}
+
+. "$HOME/oi/test/retry.sh" '[ "$(_request GET / --output /dev/null || echo $?)" != 7 ]'
+
+# Check that we can request /
+_request GET http '/index.html' | grep '<title>Welcome to nginx!</title>'
+
+docker logs $cid 2>&1 | grep "Enabled listen on IPv6"
diff --git a/mainline/alpine-perl/nginx.conf b/.test/tests/modules/nginx.conf.sme
similarity index 53%
rename from mainline/alpine-perl/nginx.conf
rename to .test/tests/modules/nginx.conf.sme
index e4bad8dbc..dab101456 100644
--- a/mainline/alpine-perl/nginx.conf
+++ b/.test/tests/modules/nginx.conf.sme
@@ -1,16 +1,17 @@
-
user nginx;
-worker_processes 1;
+worker_processes auto;
-error_log /var/log/nginx/error.log warn;
-pid /var/run/nginx.pid;
+load_module modules/ndk_http_module.so;
+load_module modules/ngx_http_echo_module.so;
+load_module modules/ngx_http_set_misc_module.so;
+error_log /var/log/nginx/error.log notice;
+pid /var/run/nginx.pid;
events {
worker_connections 1024;
}
-
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
@@ -21,12 +22,13 @@ http {
access_log /var/log/nginx/access.log main;
- sendfile on;
- #tcp_nopush on;
-
- keepalive_timeout 65;
-
- #gzip on;
+ server {
+ listen 80 default_server;
+ location /hello {
+ set $raw "hello";
+ set_sha1 $digest $raw;
- include /etc/nginx/conf.d/*.conf;
+ echo $digest;
+ }
+ }
}
diff --git a/.test/tests/modules/run.sh b/.test/tests/modules/run.sh
new file mode 100755
index 000000000..257cdd551
--- /dev/null
+++ b/.test/tests/modules/run.sh
@@ -0,0 +1,71 @@
+#!/bin/bash
+
+set -eo pipefail
+
+dir="$(dirname "$(readlink -f "$BASH_SOURCE")")"
+
+echo $dir
+
+image="$1"
+
+case "$image" in
+ *-perl)
+ ;;
+ *)
+ echo >&2 "skipping non-leaf image: $image"
+ exit
+ ;;
+esac
+
+dockerfile="Dockerfile"
+case "$image" in
+ *alpine*)
+ dockerfile="$dockerfile.alpine"
+ ;;
+esac
+
+clientImage='buildpack-deps:buster-curl'
+# ensure the clientImage is ready and available
+if ! docker image inspect "$clientImage" &> /dev/null; then
+ docker pull "$clientImage" > /dev/null
+fi
+
+# Create an instance of the container-under-test
+modulesImage="$("$HOME/oi/test/tests/image-name.sh" librarytest/nginx-template "$image")"
+docker build --build-arg NGINX_FROM_IMAGE="$image" --build-arg ENABLED_MODULES="ndk set-misc echo" -t "$modulesImage" -f "modules/$dockerfile" "$GITHUB_WORKSPACE/modules"
+
+serverImage="${modulesImage}-sme"
+"$HOME/oi/test/tests/docker-build.sh" "$dir" "$serverImage" < /dev/null" EXIT
+
+_request() {
+ local method="$1"
+ shift
+
+ local proto="$1"
+ shift
+
+ local url="${1#/}"
+ shift
+
+ if [ "$(docker inspect -f '{{.State.Running}}' "$cid" 2>/dev/null)" != 'true' ]; then
+ echo >&2 "$image stopped unexpectedly!"
+ ( set -x && docker logs "$cid" ) >&2 || true
+ false
+ fi
+
+ docker run --rm \
+ --link "$cid":nginx \
+ "$clientImage" \
+ curl -fsSL -X"$method" --connect-to '::nginx:' "$@" "$proto://example.com/$url"
+}
+
+. "$HOME/oi/test/retry.sh" '[ "$(_request GET / --output /dev/null || echo $?)" != 7 ]'
+
+# Check that we can request /
+_request GET http '/hello' | grep 'aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d'
diff --git a/.test/tests/static/run.sh b/.test/tests/static/run.sh
new file mode 100755
index 000000000..f026bedb3
--- /dev/null
+++ b/.test/tests/static/run.sh
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+[ "$DEBUG" ] && set -x
+
+set -eo pipefail
+
+dir="$(dirname "$(readlink -f "$BASH_SOURCE")")"
+
+image="$1"
+
+clientImage='buildpack-deps:buster-curl'
+# ensure the clientImage is ready and available
+if ! docker image inspect "$clientImage" &> /dev/null; then
+ docker pull "$clientImage" > /dev/null
+fi
+
+# Create an instance of the container-under-test
+cid="$(docker run -d "$image")"
+trap "docker rm -vf $cid > /dev/null" EXIT
+
+_request() {
+ local method="$1"
+ shift
+
+ local proto="$1"
+ shift
+
+ local url="${1#/}"
+ shift
+
+ if [ "$(docker inspect -f '{{.State.Running}}' "$cid" 2>/dev/null)" != 'true' ]; then
+ echo >&2 "$image stopped unexpectedly!"
+ ( set -x && docker logs "$cid" ) >&2 || true
+ false
+ fi
+
+ docker run --rm \
+ --link "$cid":nginx \
+ "$clientImage" \
+ curl -fsSL -X"$method" --connect-to '::nginx:' "$@" "$proto://example.com/$url"
+}
+
+. "$HOME/oi/test/retry.sh" '[ "$(_request GET / --output /dev/null || echo $?)" != 7 ]'
+
+# Check that we can request /
+_request GET http '/index.html' | grep '<title>Welcome to nginx!</title>'
diff --git a/.test/tests/templates-resolver-ipv6/expected-std-out.txt b/.test/tests/templates-resolver-ipv6/expected-std-out.txt
new file mode 100644
index 000000000..38bfee851
--- /dev/null
+++ b/.test/tests/templates-resolver-ipv6/expected-std-out.txt
@@ -0,0 +1 @@
+example.com - OK
diff --git a/.test/tests/templates-resolver-ipv6/run.sh b/.test/tests/templates-resolver-ipv6/run.sh
new file mode 100755
index 000000000..88476d650
--- /dev/null
+++ b/.test/tests/templates-resolver-ipv6/run.sh
@@ -0,0 +1,70 @@
+#!/bin/bash
+
+[ "$DEBUG" ] && set -x
+
+set -eo pipefail
+
+# check if we have ipv6 available
+if [ ! -f "/proc/net/if_inet6" ]; then
+ exit 0
+fi
+
+dir="$(dirname "$(readlink -f "$BASH_SOURCE")")"
+
+image="$1"
+
+clientImage='buildpack-deps:buster-curl'
+# ensure the clientImage is ready and available
+if ! docker image inspect "$clientImage" &> /dev/null; then
+ docker pull "$clientImage" > /dev/null
+fi
+
+# Create a new Docker network
+nid="$(docker network create --ipv6 --subnet fd0c:7e57::/64 nginx-test-ipv6-network)"
+
+_network_exit_handler() {
+ docker network rm -f $nid > /dev/null
+}
+
+# Create an instance of the container-under-test
+serverImage="$("$HOME/oi/test/tests/image-name.sh" librarytest/nginx-template "$image")"
+"$HOME/oi/test/tests/docker-build.sh" "$dir" "$serverImage" < /dev/null
+}
+_exit_handler() { _container_exit_handler; _network_exit_handler; }
+trap "_exit_handler" EXIT
+
+ipv6cid="$(docker inspect -f '{{range.NetworkSettings.Networks}}{{.GlobalIPv6Address}}{{end}}' $cid)"
+
+_request() {
+ local method="$1"
+ shift
+
+ local proto="$1"
+ shift
+
+ local url="${1#/}"
+ shift
+
+ if [ "$(docker inspect -f '{{.State.Running}}' "$cid" 2>/dev/null)" != 'true' ]; then
+ echo >&2 "$image stopped unexpectedly!"
+ ( set -x && docker logs "$cid" ) >&2 || true
+ false
+ fi
+
+ docker run --rm \
+ --network "$nid" \
+ "$clientImage" \
+ curl -fsSL -X"$method" --connect-to "::[$ipv6cid]:" "$@" "$proto://example.com/$url"
+}
+
+. "$HOME/oi/test/retry.sh" '[ "$(_request GET / --output /dev/null || echo $?)" != 7 ]'
+
+# Check that we can request /
+_request GET http '/resolver-templates' | grep 'example.com - OK'
diff --git a/.test/tests/templates-resolver-ipv6/server.conf.template b/.test/tests/templates-resolver-ipv6/server.conf.template
new file mode 100644
index 000000000..70835560f
--- /dev/null
+++ b/.test/tests/templates-resolver-ipv6/server.conf.template
@@ -0,0 +1,10 @@
+resolver ${NGINX_LOCAL_RESOLVERS};
+
+server {
+ listen 80;
+ listen [::]:80;
+ server_name ${NGINX_MY_SERVER_NAME};
+ default_type text/plain;
+ location = / { return 200 'OK\n'; }
+ location / { return 200 "${NGINX_MY_SERVER_NAME} - OK\n"; }
+}
diff --git a/.test/tests/templates-resolver/run.sh b/.test/tests/templates-resolver/run.sh
new file mode 100755
index 000000000..041f7abd5
--- /dev/null
+++ b/.test/tests/templates-resolver/run.sh
@@ -0,0 +1,51 @@
+#!/bin/bash
+
+[ "$DEBUG" ] && set -x
+
+set -eo pipefail
+
+dir="$(dirname "$(readlink -f "$BASH_SOURCE")")"
+
+image="$1"
+
+clientImage='buildpack-deps:buster-curl'
+# ensure the clientImage is ready and available
+if ! docker image inspect "$clientImage" &> /dev/null; then
+ docker pull "$clientImage" > /dev/null
+fi
+
+# Create an instance of the container-under-test
+serverImage="$("$HOME/oi/test/tests/image-name.sh" librarytest/nginx-template "$image")"
+"$HOME/oi/test/tests/docker-build.sh" "$dir" "$serverImage" < /dev/null" EXIT
+
+_request() {
+ local method="$1"
+ shift
+
+ local proto="$1"
+ shift
+
+ local url="${1#/}"
+ shift
+
+ if [ "$(docker inspect -f '{{.State.Running}}' "$cid" 2>/dev/null)" != 'true' ]; then
+ echo >&2 "$image stopped unexpectedly!"
+ ( set -x && docker logs "$cid" ) >&2 || true
+ false
+ fi
+
+ docker run --rm \
+ --link "$cid":nginx \
+ "$clientImage" \
+ curl -fsSL -X"$method" --connect-to '::nginx:' "$@" "$proto://example.com/$url"
+}
+
+. "$HOME/oi/test/retry.sh" '[ "$(_request GET / --output /dev/null || echo $?)" != 7 ]'
+
+# Check that we can request /
+_request GET http '/resolver-templates' | grep 'example.com - OK'
diff --git a/.test/tests/templates-resolver/server.conf.template b/.test/tests/templates-resolver/server.conf.template
new file mode 100644
index 000000000..04a0c0859
--- /dev/null
+++ b/.test/tests/templates-resolver/server.conf.template
@@ -0,0 +1,9 @@
+resolver ${NGINX_LOCAL_RESOLVERS};
+
+server {
+ listen 80;
+ server_name ${NGINX_MY_SERVER_NAME};
+ default_type text/plain;
+ location = / { return 200 'OK\n'; }
+ location / { return 200 "${NGINX_MY_SERVER_NAME} - OK\n"; }
+}
diff --git a/.test/tests/templates/run.sh b/.test/tests/templates/run.sh
new file mode 100755
index 000000000..c43aa1db0
--- /dev/null
+++ b/.test/tests/templates/run.sh
@@ -0,0 +1,51 @@
+#!/bin/bash
+
+[ "$DEBUG" ] && set -x
+
+set -eo pipefail
+
+dir="$(dirname "$(readlink -f "$BASH_SOURCE")")"
+
+image="$1"
+
+clientImage='buildpack-deps:buster-curl'
+# ensure the clientImage is ready and available
+if ! docker image inspect "$clientImage" &> /dev/null; then
+ docker pull "$clientImage" > /dev/null
+fi
+
+# Create an instance of the container-under-test
+serverImage="$("$HOME/oi/test/tests/image-name.sh" librarytest/nginx-template "$image")"
+"$HOME/oi/test/tests/docker-build.sh" "$dir" "$serverImage" < /dev/null" EXIT
+
+_request() {
+ local method="$1"
+ shift
+
+ local proto="$1"
+ shift
+
+ local url="${1#/}"
+ shift
+
+ if [ "$(docker inspect -f '{{.State.Running}}' "$cid" 2>/dev/null)" != 'true' ]; then
+ echo >&2 "$image stopped unexpectedly!"
+ ( set -x && docker logs "$cid" ) >&2 || true
+ false
+ fi
+
+ docker run --rm \
+ --link "$cid":nginx \
+ "$clientImage" \
+ curl -fsSL -X"$method" --connect-to '::nginx:' "$@" "$proto://example.com/$url"
+}
+
+. "$HOME/oi/test/retry.sh" '[ "$(_request GET / --output /dev/null || echo $?)" != 7 ]'
+
+# Check that we can request /
+_request GET http '/templates' | grep 'example.com - OK'
diff --git a/.test/tests/templates/server.conf.template b/.test/tests/templates/server.conf.template
new file mode 100644
index 000000000..6b00bed6c
--- /dev/null
+++ b/.test/tests/templates/server.conf.template
@@ -0,0 +1,7 @@
+server {
+ listen 80;
+ server_name ${NGINX_MY_SERVER_NAME};
+ default_type text/plain;
+ location = / { return 200 'OK\n'; }
+ location / { return 200 "${NGINX_MY_SERVER_NAME} - OK\n"; }
+}
diff --git a/.test/tests/workers/expected-std-out.txt b/.test/tests/workers/expected-std-out.txt
new file mode 100644
index 000000000..9f1d3ac3a
--- /dev/null
+++ b/.test/tests/workers/expected-std-out.txt
@@ -0,0 +1,2 @@
+example.com - OK
+# Commented out by 30-tune-worker-processes.sh
diff --git a/.test/tests/workers/run.sh b/.test/tests/workers/run.sh
new file mode 100755
index 000000000..50def70cb
--- /dev/null
+++ b/.test/tests/workers/run.sh
@@ -0,0 +1,55 @@
+#!/bin/bash
+
+[ "$DEBUG" ] && set -x
+
+set -eo pipefail
+
+dir="$(dirname "$(readlink -f "$BASH_SOURCE")")"
+
+image="$1"
+
+clientImage='buildpack-deps:buster-curl'
+# ensure the clientImage is ready and available
+if ! docker image inspect "$clientImage" &> /dev/null; then
+ docker pull "$clientImage" > /dev/null
+fi
+
+# Create an instance of the container-under-test
+serverImage="$("$HOME/oi/test/tests/image-name.sh" librarytest/nginx-template "$image")"
+"$HOME/oi/test/tests/docker-build.sh" "$dir" "$serverImage" < /dev/null" EXIT
+
+_request() {
+ local method="$1"
+ shift
+
+ local proto="$1"
+ shift
+
+ local url="${1#/}"
+ shift
+
+ if [ "$(docker inspect -f '{{.State.Running}}' "$cid" 2>/dev/null)" != 'true' ]; then
+ echo >&2 "$image stopped unexpectedly!"
+ ( set -x && docker logs "$cid" ) >&2 || true
+ false
+ fi
+
+ docker run --rm \
+ --link "$cid":nginx \
+ "$clientImage" \
+ curl -fsSL -X"$method" --connect-to '::nginx:' "$@" "$proto://example.com/$url"
+}
+
+. "$HOME/oi/test/retry.sh" '[ "$(_request GET / --output /dev/null || echo $?)" != 7 ]'
+
+# Check that we can request /
+_request GET http '/worker-templates' | grep 'example.com - OK'
+
+result="$(docker exec $cid grep "Commented out by" /etc/nginx/nginx.conf)"
+
+echo "$result" | cut -d\ -f 1-5
diff --git a/.test/tests/workers/server.conf.template b/.test/tests/workers/server.conf.template
new file mode 100644
index 000000000..6b00bed6c
--- /dev/null
+++ b/.test/tests/workers/server.conf.template
@@ -0,0 +1,7 @@
+server {
+ listen 80;
+ server_name ${NGINX_MY_SERVER_NAME};
+ default_type text/plain;
+ location = / { return 200 'OK\n'; }
+ location / { return 200 "${NGINX_MY_SERVER_NAME} - OK\n"; }
+}
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
new file mode 100644
index 000000000..e18d3706b
--- /dev/null
+++ b/CODE_OF_CONDUCT.md
@@ -0,0 +1,78 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, caste, color, religion, or sexual identity and orientation.
+
+We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community.
+
+## Our Standards
+
+Examples of behavior that contributes to a positive environment for our community include:
+
+- Demonstrating empathy and kindness toward other people.
+- Being respectful of differing opinions, viewpoints, and experiences.
+- Giving and gracefully accepting constructive feedback.
+- Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience.
+- Focusing on what is best not just for us as individuals, but for the overall community.
+
+Examples of unacceptable behavior include:
+
+- The use of sexualized language or imagery, and sexual attention or advances of any kind.
+- Trolling, insulting or derogatory comments, and personal or political attacks.
+- Public or private harassment.
+- Publishing others' private information, such as a physical or email address, without their explicit permission.
+- Other conduct which could reasonably be considered inappropriate in a professional setting.
+
+## Enforcement Responsibilities
+
+Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful.
+
+Community leaders have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for moderation decisions when appropriate.
+
+## Scope
+
+This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. Examples of representing our community include using an official email address, posting via an official social media account, or acting as an appointed representative at an online or offline event.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at . All complaints will be reviewed and investigated promptly and fairly.
+
+All community leaders are obligated to respect the privacy and security of the reporter of any incident.
+
+## Enforcement Guidelines
+
+Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct:
+
+### 1. Correction
+
+**Community Impact**: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community.
+
+**Consequence**: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested.
+
+### 2. Warning
+
+**Community Impact**: A violation through a single incident or series of actions.
+
+**Consequence**: A warning with consequences for continued behavior. No interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. This includes avoiding interactions in community spaces as well as external channels like social media. Violating these terms may lead to a temporary or permanent ban.
+
+### 3. Temporary Ban
+
+**Community Impact**: A serious violation of community standards, including sustained inappropriate behavior.
+
+**Consequence**: A temporary ban from any sort of interaction or public communication with the community for a specified period of time. No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban.
+
+### 4. Permanent Ban
+
+**Community Impact**: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals.
+
+**Consequence**: A permanent ban from any sort of public interaction within the community.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant](https://www.contributor-covenant.org), version 2.1, available at https://www.contributor-covenant.org/version/2/1/code_of_conduct.html.
+
+Community Impact Guidelines were inspired by
+[Mozilla's code of conduct enforcement ladder](https://github.com/mozilla/inclusion).
+
+For answers to common questions about this code of conduct, see the FAQ at https://www.contributor-covenant.org/faq. Translations are available at https://www.contributor-covenant.org/translations.
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 000000000..ebdcace7c
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,56 @@
+# Contributing Guidelines
+
+The following is a set of guidelines for contributing to the Docker NGINX image. We really appreciate that you are considering contributing!
+
+#### Table Of Contents
+
+- [Getting Started](#getting-started)
+- [Contributing](#contributing)
+- [Code Guidelines](#code-guidelines)
+- [Code of Conduct](/CODE_OF_CONDUCT.md)
+
+## Getting Started
+
+Follow our [how to use this image guide](https://hub.docker.com/_/nginx/) to get the Docker NGINX image up and running.
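+
+If you just want to sanity-check an image locally, something like the following is usually enough to serve the default welcome page (the container name and host port below are arbitrary examples):
+
+```shell
+docker run -d --name my-nginx -p 8080:80 nginx
+curl http://localhost:8080/
+```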
+
+## Contributing
+
+### Report a Bug
+
+To report a bug, open an issue on GitHub with the label `bug` using the available [bug report issue form](/.github/ISSUE_TEMPLATE/bug_report.yml). Please ensure the bug has not already been reported. **If the bug is a potential security vulnerability, please report it using our [security policy](/SECURITY.md).**
+
+### Suggest a Feature or Enhancement
+
+To suggest a feature or enhancement, please create an issue on GitHub with the label `enhancement` using the available [feature request issue form](/.github/ISSUE_TEMPLATE/feature_request.yml). Please ensure the feature or enhancement has not already been suggested.
+
+### Open a Pull Request (PR)
+
+- Fork the repo, create a branch, implement your changes, add any relevant tests, and submit a PR when your changes are **tested** and ready for review.
+- Fill in the [PR template](/.github/pull_request_template.md).
+
+**Note:** If you'd like to implement a new feature, please consider creating a [feature request issue](/.github/ISSUE_TEMPLATE/feature_request.yml) first to start a discussion about the feature.
+
+#### F5 Contributor License Agreement (CLA)
+
+F5 requires all contributors to agree to the terms of the F5 CLA (available [here](https://github.com/f5/f5-cla/blob/main/docs/f5_cla.md)) before any of their changes can be incorporated into an F5 Open Source repository (even contributions to the F5 CLA itself!).
+
+If you have not yet agreed to the F5 CLA terms and submit a PR to this repository, a bot will prompt you to view and agree to the F5 CLA. You will have to agree to the F5 CLA terms through a comment in the PR before any of your changes can be merged. Your agreement signature will be safely stored by F5 and no longer be required in future PRs.
+
+## Code Guidelines
+
+### Git Guidelines
+
+- Keep a clean, concise and meaningful git commit history on your branch (within reason), rebasing locally and squashing before submitting a PR.
+- If possible and/or relevant, use the [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/) format when writing a commit message, so that changelogs can be automatically generated.
+- Follow the guidelines for writing a good commit message, summarised in the next few points (see the example after this list):
+ - In the subject line, use the present tense ("Add feature" not "Added feature").
+ - In the subject line, use the imperative mood ("Move cursor to..." not "Moves cursor to...").
+ - Limit the subject line to 72 characters or less.
+ - Reference issues and pull requests liberally after the subject line.
+ - Add a more detailed description in the body of the commit message (use `git commit -a` rather than `git commit -am` so that your editor opens and gives you room to write a good message).
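+
+As a purely illustrative example (the type, scope, and issue number below are made up), a commit message that follows these guidelines might look like:
+
+```text
+docs(contributing): clarify the Dockerfile template workflow
+
+Explain in the body what changed and why, wrapping lines at roughly
+72 characters, and reference any related issues or pull requests.
+
+Refs #1234
+```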
+
+### Docker Guidelines
+
+- Update any entrypoint scripts via the scripts contained in the `/entrypoint` directory.
+- Update any Dockerfiles via the Dockerfile templates in the root directory (e.g. `Dockerfile-alpine.template`).
+- Run the `./update.sh` script to apply all entrypoint/Dockerfile template changes to the relevant image entrypoints & Dockerfiles (a typical workflow is sketched below).
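+
+A typical change therefore looks something like the following sketch (the file names below are examples only):
+
+```shell
+# edit the shared template and/or entrypoint script rather than the generated copies
+$EDITOR Dockerfile-alpine.template
+$EDITOR entrypoint/20-envsubst-on-templates.sh
+
+# regenerate the per-variant Dockerfiles and entrypoint scripts
+./update.sh
+
+# review the regenerated files and commit them together with the template change
+git status
+```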
diff --git a/Dockerfile-alpine-otel.template b/Dockerfile-alpine-otel.template
new file mode 100644
index 000000000..b870b9544
--- /dev/null
+++ b/Dockerfile-alpine-otel.template
@@ -0,0 +1,66 @@
+FROM nginx:%%NGINX_VERSION%%-alpine
+
+ENV OTEL_VERSION %%OTEL_VERSION%%
+
+RUN set -x \
+ && apkArch="$(cat /etc/apk/arch)" \
+ && nginxPackages="%%PACKAGES%%
+ " \
+# install prerequisites for public key and pkg-oss checks
+ && apk add --no-cache --virtual .checksum-deps \
+ openssl \
+ && case "$apkArch" in \
+ x86_64|aarch64) \
+# arches officially built by upstream
+ apk add -X "%%PACKAGEREPO%%v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" --no-cache $nginxPackages \
+ ;; \
+ *) \
+# we're on an architecture upstream doesn't officially build for
+# let's build binaries from the published packaging sources
+ set -x \
+ && tempDir="$(mktemp -d)" \
+ && chown nobody:nobody $tempDir \
+ && apk add --no-cache --virtual .build-deps \
+ gcc \
+ libc-dev \
+ make \
+ openssl-dev \
+ pcre2-dev \
+ zlib-dev \
+ linux-headers \
+ cmake \
+ bash \
+ alpine-sdk \
+ findutils \
+ curl \
+ xz \
+ protobuf-dev \
+ grpc-dev \
+ && su nobody -s /bin/sh -c " \
+ export HOME=${tempDir} \
+ && cd ${tempDir} \
+ && curl -f -L -O https://github.com/nginx/pkg-oss/archive/%%REVISION%%.tar.gz \
+ && PKGOSSCHECKSUM=\"%%PKGOSSCHECKSUM%% *%%REVISION%%.tar.gz\" \
+ && if [ \"\$(openssl sha512 -r %%REVISION%%.tar.gz)\" = \"\$PKGOSSCHECKSUM\" ]; then \
+ echo \"pkg-oss tarball checksum verification succeeded!\"; \
+ else \
+ echo \"pkg-oss tarball checksum verification failed!\"; \
+ exit 1; \
+ fi \
+ && tar xzvf %%REVISION%%.tar.gz \
+ && cd pkg-oss-%%REVISION%% \
+ && cd alpine \
+ && make %%BUILDTARGET%% \
+ && apk index --allow-untrusted -o ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz ${tempDir}/packages/alpine/${apkArch}/*.apk \
+ && abuild-sign -k ${tempDir}/.abuild/abuild-key.rsa ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz \
+ " \
+ && cp ${tempDir}/.abuild/abuild-key.rsa.pub /etc/apk/keys/ \
+ && apk del --no-network .build-deps \
+ && apk add -X ${tempDir}/packages/alpine/ --no-cache $nginxPackages \
+ ;; \
+ esac \
+# remove checksum deps
+ && apk del --no-network .checksum-deps \
+# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
+ && if [ -n "$tempDir" ]; then rm -rf "$tempDir"; fi \
+ && if [ -f "/etc/apk/keys/abuild-key.rsa.pub" ]; then rm -f /etc/apk/keys/abuild-key.rsa.pub; fi
diff --git a/Dockerfile-alpine-perl.template b/Dockerfile-alpine-perl.template
new file mode 100644
index 000000000..6fc37deac
--- /dev/null
+++ b/Dockerfile-alpine-perl.template
@@ -0,0 +1,61 @@
+FROM nginx:%%NGINX_VERSION%%-alpine
+
+RUN set -x \
+ && apkArch="$(cat /etc/apk/arch)" \
+ && nginxPackages="%%PACKAGES%%
+ " \
+# install prerequisites for public key and pkg-oss checks
+ && apk add --no-cache --virtual .checksum-deps \
+ openssl \
+ && case "$apkArch" in \
+ x86_64|aarch64) \
+# arches officially built by upstream
+ apk add -X "%%PACKAGEREPO%%v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" --no-cache $nginxPackages \
+ ;; \
+ *) \
+# we're on an architecture upstream doesn't officially build for
+# let's build binaries from the published packaging sources
+ set -x \
+ && tempDir="$(mktemp -d)" \
+ && chown nobody:nobody $tempDir \
+ && apk add --no-cache --virtual .build-deps \
+ gcc \
+ libc-dev \
+ make \
+ openssl-dev \
+ pcre2-dev \
+ zlib-dev \
+ linux-headers \
+ perl-dev \
+ bash \
+ alpine-sdk \
+ findutils \
+ curl \
+ && su nobody -s /bin/sh -c " \
+ export HOME=${tempDir} \
+ && cd ${tempDir} \
+ && curl -f -L -O https://github.com/nginx/pkg-oss/archive/%%REVISION%%.tar.gz \
+ && PKGOSSCHECKSUM=\"%%PKGOSSCHECKSUM%% *%%REVISION%%.tar.gz\" \
+ && if [ \"\$(openssl sha512 -r %%REVISION%%.tar.gz)\" = \"\$PKGOSSCHECKSUM\" ]; then \
+ echo \"pkg-oss tarball checksum verification succeeded!\"; \
+ else \
+ echo \"pkg-oss tarball checksum verification failed!\"; \
+ exit 1; \
+ fi \
+ && tar xzvf %%REVISION%%.tar.gz \
+ && cd pkg-oss-%%REVISION%% \
+ && cd alpine \
+ && make %%BUILDTARGET%% \
+ && apk index --allow-untrusted -o ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz ${tempDir}/packages/alpine/${apkArch}/*.apk \
+ && abuild-sign -k ${tempDir}/.abuild/abuild-key.rsa ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz \
+ " \
+ && cp ${tempDir}/.abuild/abuild-key.rsa.pub /etc/apk/keys/ \
+ && apk del --no-network .build-deps \
+ && apk add -X ${tempDir}/packages/alpine/ --no-cache $nginxPackages \
+ ;; \
+ esac \
+# remove checksum deps
+ && apk del --no-network .checksum-deps \
+# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
+ && if [ -n "$tempDir" ]; then rm -rf "$tempDir"; fi \
+ && if [ -f "/etc/apk/keys/abuild-key.rsa.pub" ]; then rm -f /etc/apk/keys/abuild-key.rsa.pub; fi
diff --git a/Dockerfile-alpine-slim.template b/Dockerfile-alpine-slim.template
new file mode 100644
index 000000000..ff076f09c
--- /dev/null
+++ b/Dockerfile-alpine-slim.template
@@ -0,0 +1,102 @@
+FROM alpine:%%ALPINE_VERSION%%
+
+LABEL maintainer="NGINX Docker Maintainers <docker-maint@nginx.com>"
+
+ENV NGINX_VERSION %%NGINX_VERSION%%
+ENV PKG_RELEASE %%PKG_RELEASE%%
+ENV DYNPKG_RELEASE %%DYNPKG_RELEASE%%
+
+RUN set -x \
+# create nginx user/group first, to be consistent throughout docker variants
+ && addgroup -g 101 -S nginx \
+ && adduser -S -D -H -u 101 -h /var/cache/nginx -s /sbin/nologin -G nginx -g nginx nginx \
+ && apkArch="$(cat /etc/apk/arch)" \
+ && nginxPackages="%%PACKAGES%%
+ " \
+# install prerequisites for public key and pkg-oss checks
+ && apk add --no-cache --virtual .checksum-deps \
+ openssl \
+ && case "$apkArch" in \
+ x86_64|aarch64) \
+# arches officially built by upstream
+ set -x \
+ && KEY_SHA512="e09fa32f0a0eab2b879ccbbc4d0e4fb9751486eedda75e35fac65802cc9faa266425edf83e261137a2f4d16281ce2c1a5f4502930fe75154723da014214f0655" \
+ && wget -O /tmp/nginx_signing.rsa.pub https://nginx.org/keys/nginx_signing.rsa.pub \
+ && if echo "$KEY_SHA512 */tmp/nginx_signing.rsa.pub" | sha512sum -c -; then \
+ echo "key verification succeeded!"; \
+ mv /tmp/nginx_signing.rsa.pub /etc/apk/keys/; \
+ else \
+ echo "key verification failed!"; \
+ exit 1; \
+ fi \
+ && apk add -X "%%PACKAGEREPO%%v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" --no-cache $nginxPackages \
+ ;; \
+ *) \
+# we're on an architecture upstream doesn't officially build for
+# let's build binaries from the published packaging sources
+ set -x \
+ && tempDir="$(mktemp -d)" \
+ && chown nobody:nobody $tempDir \
+ && apk add --no-cache --virtual .build-deps \
+ gcc \
+ libc-dev \
+ make \
+ openssl-dev \
+ pcre2-dev \
+ zlib-dev \
+ linux-headers \
+ bash \
+ alpine-sdk \
+ findutils \
+ curl \
+ && su nobody -s /bin/sh -c " \
+ export HOME=${tempDir} \
+ && cd ${tempDir} \
+ && curl -f -L -O https://github.com/nginx/pkg-oss/archive/%%REVISION%%.tar.gz \
+ && PKGOSSCHECKSUM=\"%%PKGOSSCHECKSUM%% *%%REVISION%%.tar.gz\" \
+ && if [ \"\$(openssl sha512 -r %%REVISION%%.tar.gz)\" = \"\$PKGOSSCHECKSUM\" ]; then \
+ echo \"pkg-oss tarball checksum verification succeeded!\"; \
+ else \
+ echo \"pkg-oss tarball checksum verification failed!\"; \
+ exit 1; \
+ fi \
+ && tar xzvf %%REVISION%%.tar.gz \
+ && cd pkg-oss-%%REVISION%% \
+ && cd alpine \
+ && make %%BUILDTARGET%% \
+ && apk index --allow-untrusted -o ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz ${tempDir}/packages/alpine/${apkArch}/*.apk \
+ && abuild-sign -k ${tempDir}/.abuild/abuild-key.rsa ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz \
+ " \
+ && cp ${tempDir}/.abuild/abuild-key.rsa.pub /etc/apk/keys/ \
+ && apk del --no-network .build-deps \
+ && apk add -X ${tempDir}/packages/alpine/ --no-cache $nginxPackages \
+ ;; \
+ esac \
+# remove checksum deps
+ && apk del --no-network .checksum-deps \
+# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
+ && if [ -n "$tempDir" ]; then rm -rf "$tempDir"; fi \
+ && if [ -f "/etc/apk/keys/abuild-key.rsa.pub" ]; then rm -f /etc/apk/keys/abuild-key.rsa.pub; fi \
+# Add `envsubst` for templating environment variables
+ && apk add --no-cache gettext-envsubst \
+# Bring in tzdata so users could set the timezones through the environment
+# variables
+ && apk add --no-cache tzdata \
+# forward request and error logs to docker log collector
+ && ln -sf /dev/stdout /var/log/nginx/access.log \
+ && ln -sf /dev/stderr /var/log/nginx/error.log \
+# create a docker-entrypoint.d directory
+ && mkdir /docker-entrypoint.d
+
+COPY docker-entrypoint.sh /
+COPY 10-listen-on-ipv6-by-default.sh /docker-entrypoint.d
+COPY 15-local-resolvers.envsh /docker-entrypoint.d
+COPY 20-envsubst-on-templates.sh /docker-entrypoint.d
+COPY 30-tune-worker-processes.sh /docker-entrypoint.d
+ENTRYPOINT ["/docker-entrypoint.sh"]
+
+EXPOSE 80
+
+STOPSIGNAL SIGQUIT
+
+CMD ["nginx", "-g", "daemon off;"]
diff --git a/Dockerfile-alpine.template b/Dockerfile-alpine.template
new file mode 100644
index 000000000..bc77dfd61
--- /dev/null
+++ b/Dockerfile-alpine.template
@@ -0,0 +1,69 @@
+FROM nginx:%%NGINX_VERSION%%-alpine-slim
+
+ENV NJS_VERSION %%NJS_VERSION%%
+ENV NJS_RELEASE %%NJS_RELEASE%%
+
+RUN set -x \
+ && apkArch="$(cat /etc/apk/arch)" \
+ && nginxPackages="%%PACKAGES%%
+ " \
+# install prerequisites for public key and pkg-oss checks
+ && apk add --no-cache --virtual .checksum-deps \
+ openssl \
+ && case "$apkArch" in \
+ x86_64|aarch64) \
+# arches officially built by upstream
+ apk add -X "%%PACKAGEREPO%%v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" --no-cache $nginxPackages \
+ ;; \
+ *) \
+# we're on an architecture upstream doesn't officially build for
+# let's build binaries from the published packaging sources
+ set -x \
+ && tempDir="$(mktemp -d)" \
+ && chown nobody:nobody $tempDir \
+ && apk add --no-cache --virtual .build-deps \
+ gcc \
+ libc-dev \
+ make \
+ openssl-dev \
+ pcre2-dev \
+ zlib-dev \
+ linux-headers \
+ libxslt-dev \
+ gd-dev \
+ geoip-dev \
+ libedit-dev \
+ bash \
+ alpine-sdk \
+ findutils \
+ curl \
+ && su nobody -s /bin/sh -c " \
+ export HOME=${tempDir} \
+ && cd ${tempDir} \
+ && curl -f -L -O https://github.com/nginx/pkg-oss/archive/%%REVISION%%.tar.gz \
+ && PKGOSSCHECKSUM=\"%%PKGOSSCHECKSUM%% *%%REVISION%%.tar.gz\" \
+ && if [ \"\$(openssl sha512 -r %%REVISION%%.tar.gz)\" = \"\$PKGOSSCHECKSUM\" ]; then \
+ echo \"pkg-oss tarball checksum verification succeeded!\"; \
+ else \
+ echo \"pkg-oss tarball checksum verification failed!\"; \
+ exit 1; \
+ fi \
+ && tar xzvf %%REVISION%%.tar.gz \
+ && cd pkg-oss-%%REVISION%% \
+ && cd alpine \
+ && make %%BUILDTARGET%% \
+ && apk index --allow-untrusted -o ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz ${tempDir}/packages/alpine/${apkArch}/*.apk \
+ && abuild-sign -k ${tempDir}/.abuild/abuild-key.rsa ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz \
+ " \
+ && cp ${tempDir}/.abuild/abuild-key.rsa.pub /etc/apk/keys/ \
+ && apk del --no-network .build-deps \
+ && apk add -X ${tempDir}/packages/alpine/ --no-cache $nginxPackages \
+ ;; \
+ esac \
+# remove checksum deps
+ && apk del --no-network .checksum-deps \
+# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
+ && if [ -n "$tempDir" ]; then rm -rf "$tempDir"; fi \
+ && if [ -f "/etc/apk/keys/abuild-key.rsa.pub" ]; then rm -f /etc/apk/keys/abuild-key.rsa.pub; fi \
+# Bring in curl and ca-certificates to make registering on DNS SD easier
+ && apk add --no-cache curl ca-certificates
diff --git a/Dockerfile-debian-otel.template b/Dockerfile-debian-otel.template
new file mode 100644
index 000000000..709836c17
--- /dev/null
+++ b/Dockerfile-debian-otel.template
@@ -0,0 +1,89 @@
+FROM nginx:%%NGINX_VERSION%%
+
+ENV OTEL_VERSION %%OTEL_VERSION%%
+
+RUN set -x; \
+ NGINX_GPGKEY_PATH=/etc/apt/keyrings/nginx-archive-keyring.gpg; \
+ dpkgArch="$(dpkg --print-architecture)" \
+ && nginxPackages="%%PACKAGES%%
+ " \
+ && case "$dpkgArch" in \
+ amd64|arm64) \
+# arches officially built by upstream
+ echo "deb [signed-by=$NGINX_GPGKEY_PATH] %%PACKAGEREPO%% %%DEBIAN_VERSION%% nginx" >> /etc/apt/sources.list.d/nginx.list \
+ && apt-get update \
+ ;; \
+ *) \
+# we're on an architecture upstream doesn't officially build for
+# let's build binaries from the published packaging sources
+# new directory for storing sources and .deb files
+ tempDir="$(mktemp -d)" \
+ && chmod 777 "$tempDir" \
+# (777 to ensure APT's "_apt" user can access it too)
+ \
+# save list of currently-installed packages so build dependencies can be cleanly removed later
+ && savedAptMark="$(apt-mark showmanual)" \
+ \
+# build .deb files from upstream's packaging sources
+ && apt-get update \
+ && apt-get install --no-install-recommends --no-install-suggests -y \
+ curl \
+ devscripts \
+ equivs \
+ git \
+ libxml2-utils \
+ lsb-release \
+ xsltproc \
+ && ( \
+ cd "$tempDir" \
+ && REVISION="%%REVISION%%" \
+ && REVISION=${REVISION%~*} \
+ && curl -f -L -O https://github.com/nginx/pkg-oss/archive/${REVISION}.tar.gz \
+ && PKGOSSCHECKSUM="%%PKGOSSCHECKSUM%% *${REVISION}.tar.gz" \
+ && if [ "$(openssl sha512 -r ${REVISION}.tar.gz)" = "$PKGOSSCHECKSUM" ]; then \
+ echo "pkg-oss tarball checksum verification succeeded!"; \
+ else \
+ echo "pkg-oss tarball checksum verification failed!"; \
+ exit 1; \
+ fi \
+ && tar xzvf ${REVISION}.tar.gz \
+ && cd pkg-oss-${REVISION} \
+ && cd debian \
+ && for target in %%BUILDTARGET%%; do \
+ make rules-$target; \
+ mk-build-deps --install --tool="apt-get -o Debug::pkgProblemResolver=yes --no-install-recommends --yes" \
+ debuild-$target/nginx-$NGINX_VERSION/debian/control; \
+ done \
+ && make %%BUILDTARGET%% \
+ ) \
+# we don't remove APT lists here because they get re-downloaded and removed later
+ \
+# reset apt-mark's "manual" list so that "purge --auto-remove" will remove all build dependencies
+# (which is done after we install the built packages so we don't have to redownload any overlapping dependencies)
+ && apt-mark showmanual | xargs apt-mark auto > /dev/null \
+ && { [ -z "$savedAptMark" ] || apt-mark manual $savedAptMark; } \
+ \
+# create a temporary local APT repo to install from (so that dependency resolution can be handled by APT, as it should be)
+ && ls -lAFh "$tempDir" \
+ && ( cd "$tempDir" && dpkg-scanpackages . > Packages ) \
+ && grep '^Package: ' "$tempDir/Packages" \
+ && echo "deb [ trusted=yes ] file://$tempDir ./" > /etc/apt/sources.list.d/temp.list \
+# work around the following APT issue by using "Acquire::GzipIndexes=false" (overriding "/etc/apt/apt.conf.d/docker-gzip-indexes")
+# Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
+# ...
+# E: Failed to fetch store:/var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
+ && apt-get -o Acquire::GzipIndexes=false update \
+ ;; \
+ esac \
+ \
+ && apt-get install --no-install-recommends --no-install-suggests -y \
+ $nginxPackages \
+ gettext-base \
+ curl \
+ && apt-get remove --purge --auto-remove -y && rm -rf /var/lib/apt/lists/* /etc/apt/sources.list.d/nginx.list \
+ \
+# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
+ && if [ -n "$tempDir" ]; then \
+ apt-get purge -y --auto-remove \
+ && rm -rf "$tempDir" /etc/apt/sources.list.d/temp.list; \
+ fi
diff --git a/Dockerfile-debian-perl.template b/Dockerfile-debian-perl.template
new file mode 100644
index 000000000..84cf99f6e
--- /dev/null
+++ b/Dockerfile-debian-perl.template
@@ -0,0 +1,87 @@
+FROM nginx:%%NGINX_VERSION%%
+
+RUN set -x; \
+ NGINX_GPGKEY_PATH=/etc/apt/keyrings/nginx-archive-keyring.gpg; \
+ dpkgArch="$(dpkg --print-architecture)" \
+ && nginxPackages="%%PACKAGES%%
+ " \
+ && case "$dpkgArch" in \
+ amd64|arm64) \
+# arches officially built by upstream
+ echo "deb [signed-by=$NGINX_GPGKEY_PATH] %%PACKAGEREPO%% %%DEBIAN_VERSION%% nginx" >> /etc/apt/sources.list.d/nginx.list \
+ && apt-get update \
+ ;; \
+ *) \
+# we're on an architecture upstream doesn't officially build for
+# let's build binaries from the published packaging sources
+# new directory for storing sources and .deb files
+ tempDir="$(mktemp -d)" \
+ && chmod 777 "$tempDir" \
+# (777 to ensure APT's "_apt" user can access it too)
+ \
+# save list of currently-installed packages so build dependencies can be cleanly removed later
+ && savedAptMark="$(apt-mark showmanual)" \
+ \
+# build .deb files from upstream's packaging sources
+ && apt-get update \
+ && apt-get install --no-install-recommends --no-install-suggests -y \
+ curl \
+ devscripts \
+ equivs \
+ git \
+ libxml2-utils \
+ lsb-release \
+ xsltproc \
+ && ( \
+ cd "$tempDir" \
+ && REVISION="%%REVISION%%" \
+ && REVISION=${REVISION%~*} \
+ && curl -f -L -O https://github.com/nginx/pkg-oss/archive/${REVISION}.tar.gz \
+ && PKGOSSCHECKSUM="%%PKGOSSCHECKSUM%% *${REVISION}.tar.gz" \
+ && if [ "$(openssl sha512 -r ${REVISION}.tar.gz)" = "$PKGOSSCHECKSUM" ]; then \
+ echo "pkg-oss tarball checksum verification succeeded!"; \
+ else \
+ echo "pkg-oss tarball checksum verification failed!"; \
+ exit 1; \
+ fi \
+ && tar xzvf ${REVISION}.tar.gz \
+ && cd pkg-oss-${REVISION} \
+ && cd debian \
+ && for target in %%BUILDTARGET%%; do \
+ make rules-$target; \
+ mk-build-deps --install --tool="apt-get -o Debug::pkgProblemResolver=yes --no-install-recommends --yes" \
+ debuild-$target/nginx-$NGINX_VERSION/debian/control; \
+ done \
+ && make %%BUILDTARGET%% \
+ ) \
+# we don't remove APT lists here because they get re-downloaded and removed later
+ \
+# reset apt-mark's "manual" list so that "purge --auto-remove" will remove all build dependencies
+# (which is done after we install the built packages so we don't have to redownload any overlapping dependencies)
+ && apt-mark showmanual | xargs apt-mark auto > /dev/null \
+ && { [ -z "$savedAptMark" ] || apt-mark manual $savedAptMark; } \
+ \
+# create a temporary local APT repo to install from (so that dependency resolution can be handled by APT, as it should be)
+ && ls -lAFh "$tempDir" \
+ && ( cd "$tempDir" && dpkg-scanpackages . > Packages ) \
+ && grep '^Package: ' "$tempDir/Packages" \
+ && echo "deb [ trusted=yes ] file://$tempDir ./" > /etc/apt/sources.list.d/temp.list \
+# work around the following APT issue by using "Acquire::GzipIndexes=false" (overriding "/etc/apt/apt.conf.d/docker-gzip-indexes")
+# Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
+# ...
+# E: Failed to fetch store:/var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
+ && apt-get -o Acquire::GzipIndexes=false update \
+ ;; \
+ esac \
+ \
+ && apt-get install --no-install-recommends --no-install-suggests -y \
+ $nginxPackages \
+ gettext-base \
+ curl \
+ && apt-get remove --purge --auto-remove -y && rm -rf /var/lib/apt/lists/* /etc/apt/sources.list.d/nginx.list \
+ \
+# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
+ && if [ -n "$tempDir" ]; then \
+ apt-get purge -y --auto-remove \
+ && rm -rf "$tempDir" /etc/apt/sources.list.d/temp.list; \
+ fi
diff --git a/Dockerfile-debian.template b/Dockerfile-debian.template
new file mode 100644
index 000000000..57abba1ab
--- /dev/null
+++ b/Dockerfile-debian.template
@@ -0,0 +1,135 @@
+FROM debian:%%DEBIAN_VERSION%%-slim
+
+LABEL maintainer="NGINX Docker Maintainers <docker-maint@nginx.com>"
+
+ENV NGINX_VERSION %%NGINX_VERSION%%
+ENV NJS_VERSION %%NJS_VERSION%%
+ENV NJS_RELEASE %%NJS_RELEASE%%
+ENV PKG_RELEASE %%PKG_RELEASE%%
+ENV DYNPKG_RELEASE %%DYNPKG_RELEASE%%
+
+RUN set -x \
+# create nginx user/group first, to be consistent throughout docker variants
+ && groupadd --system --gid 101 nginx \
+ && useradd --system --gid nginx --no-create-home --home /nonexistent --comment "nginx user" --shell /bin/false --uid 101 nginx \
+ && apt-get update \
+ && apt-get install --no-install-recommends --no-install-suggests -y gnupg1 ca-certificates \
+ && \
+ NGINX_GPGKEYS="573BFD6B3D8FBC641079A6ABABF5BD827BD9BF62 8540A6F18833A80E9C1653A42FD21310B49F6B46 9E9BE90EACBCDE69FE9B204CBCDCD8A38D88A2B3"; \
+ NGINX_GPGKEY_PATH=/etc/apt/keyrings/nginx-archive-keyring.gpg; \
+ export GNUPGHOME="$(mktemp -d)"; \
+ found=''; \
+ for NGINX_GPGKEY in $NGINX_GPGKEYS; do \
+ for server in \
+ hkp://keyserver.ubuntu.com:80 \
+ pgp.mit.edu \
+ ; do \
+ echo "Fetching GPG key $NGINX_GPGKEY from $server"; \
+ gpg1 --batch --keyserver "$server" --keyserver-options timeout=10 --recv-keys "$NGINX_GPGKEY" && found=yes && break; \
+ done; \
+ test -z "$found" && echo >&2 "error: failed to fetch GPG key $NGINX_GPGKEY" && exit 1; \
+ done; \
+ gpg1 --batch --export $NGINX_GPGKEYS > "$NGINX_GPGKEY_PATH" ; \
+ rm -rf "$GNUPGHOME"; \
+ apt-get remove --purge --auto-remove -y gnupg1 && rm -rf /var/lib/apt/lists/* \
+ && dpkgArch="$(dpkg --print-architecture)" \
+ && nginxPackages="%%PACKAGES%%
+ " \
+ && case "$dpkgArch" in \
+ amd64|arm64) \
+# arches officially built by upstream
+ echo "deb [signed-by=$NGINX_GPGKEY_PATH] %%PACKAGEREPO%% %%DEBIAN_VERSION%% nginx" >> /etc/apt/sources.list.d/nginx.list \
+ && apt-get update \
+ ;; \
+ *) \
+# we're on an architecture upstream doesn't officially build for
+# let's build binaries from the published packaging sources
+# new directory for storing sources and .deb files
+ tempDir="$(mktemp -d)" \
+ && chmod 777 "$tempDir" \
+# (777 to ensure APT's "_apt" user can access it too)
+ \
+# save list of currently-installed packages so build dependencies can be cleanly removed later
+ && savedAptMark="$(apt-mark showmanual)" \
+ \
+# build .deb files from upstream's packaging sources
+ && apt-get update \
+ && apt-get install --no-install-recommends --no-install-suggests -y \
+ curl \
+ devscripts \
+ equivs \
+ git \
+ libxml2-utils \
+ lsb-release \
+ xsltproc \
+ && ( \
+ cd "$tempDir" \
+ && REVISION="%%REVISION%%" \
+ && REVISION=${REVISION%~*} \
+ && curl -f -L -O https://github.com/nginx/pkg-oss/archive/${REVISION}.tar.gz \
+ && PKGOSSCHECKSUM="%%PKGOSSCHECKSUM%% *${REVISION}.tar.gz" \
+ && if [ "$(openssl sha512 -r ${REVISION}.tar.gz)" = "$PKGOSSCHECKSUM" ]; then \
+ echo "pkg-oss tarball checksum verification succeeded!"; \
+ else \
+ echo "pkg-oss tarball checksum verification failed!"; \
+ exit 1; \
+ fi \
+ && tar xzvf ${REVISION}.tar.gz \
+ && cd pkg-oss-${REVISION} \
+ && cd debian \
+ && for target in %%BUILDTARGET%%; do \
+ make rules-$target; \
+ mk-build-deps --install --tool="apt-get -o Debug::pkgProblemResolver=yes --no-install-recommends --yes" \
+ debuild-$target/nginx-$NGINX_VERSION/debian/control; \
+ done \
+ && make %%BUILDTARGET%% \
+ ) \
+# we don't remove APT lists here because they get re-downloaded and removed later
+ \
+# reset apt-mark's "manual" list so that "purge --auto-remove" will remove all build dependencies
+# (which is done after we install the built packages so we don't have to redownload any overlapping dependencies)
+ && apt-mark showmanual | xargs apt-mark auto > /dev/null \
+ && { [ -z "$savedAptMark" ] || apt-mark manual $savedAptMark; } \
+ \
+# create a temporary local APT repo to install from (so that dependency resolution can be handled by APT, as it should be)
+ && ls -lAFh "$tempDir" \
+ && ( cd "$tempDir" && dpkg-scanpackages . > Packages ) \
+ && grep '^Package: ' "$tempDir/Packages" \
+ && echo "deb [ trusted=yes ] file://$tempDir ./" > /etc/apt/sources.list.d/temp.list \
+# work around the following APT issue by using "Acquire::GzipIndexes=false" (overriding "/etc/apt/apt.conf.d/docker-gzip-indexes")
+# Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
+# ...
+# E: Failed to fetch store:/var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
+ && apt-get -o Acquire::GzipIndexes=false update \
+ ;; \
+ esac \
+ \
+ && apt-get install --no-install-recommends --no-install-suggests -y \
+ $nginxPackages \
+ gettext-base \
+ curl \
+ && apt-get remove --purge --auto-remove -y && rm -rf /var/lib/apt/lists/* /etc/apt/sources.list.d/nginx.list \
+ \
+# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
+ && if [ -n "$tempDir" ]; then \
+ apt-get purge -y --auto-remove \
+ && rm -rf "$tempDir" /etc/apt/sources.list.d/temp.list; \
+ fi \
+# forward request and error logs to docker log collector
+ && ln -sf /dev/stdout /var/log/nginx/access.log \
+ && ln -sf /dev/stderr /var/log/nginx/error.log \
+# create a docker-entrypoint.d directory
+ && mkdir /docker-entrypoint.d
+
+COPY docker-entrypoint.sh /
+COPY 10-listen-on-ipv6-by-default.sh /docker-entrypoint.d
+COPY 15-local-resolvers.envsh /docker-entrypoint.d
+COPY 20-envsubst-on-templates.sh /docker-entrypoint.d
+COPY 30-tune-worker-processes.sh /docker-entrypoint.d
+ENTRYPOINT ["/docker-entrypoint.sh"]
+
+EXPOSE 80
+
+STOPSIGNAL SIGQUIT
+
+CMD ["nginx", "-g", "daemon off;"]
diff --git a/LICENSE b/LICENSE
index bc1d673f0..f5af4aac9 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,4 +1,4 @@
-Copyright (C) 2011-2016 Nginx, Inc.
+Copyright (C) 2011-2023 F5, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/README.md b/README.md
index f0d253c27..d23812adf 100644
--- a/README.md
+++ b/README.md
@@ -1,9 +1,41 @@
+[Project Status: Active](https://www.repostatus.org/#active)
+[Support](https://github.com/nginx/docker-nginx/blob/master/SUPPORT.md)
+[Community Forum](https://community.nginx.org)
+[License: BSD 2-Clause](https://opensource.org/license/bsd-2-clause)
+[Code of Conduct](/CODE_OF_CONDUCT.md)
+
# About this Repo
-This is the Git repo of the official Docker image for [nginx](https://registry.hub.docker.com/_/nginx/). See the
-Hub page for the full readme on how to use the Docker image and for information
-regarding contributing and issues.
+## Maintained by: [the NGINX Docker Maintainers](https://github.com/nginx/docker-nginx)
+
+This is the Git repo of the [Docker "Official Image"](https://github.com/docker-library/official-images#what-are-official-images) for [`nginx`](https://hub.docker.com/_/nginx/). See [the Docker Hub page](https://hub.docker.com/_/nginx/) for the full readme on how to use this Docker image and for information regarding contributing and issues.
+
+The [full image description on Docker Hub](https://hub.docker.com/_/nginx/) is generated/maintained over in [the docker-library/docs repository](https://github.com/docker-library/docs), specifically in [the `nginx` directory](https://github.com/docker-library/docs/tree/master/nginx).
+
+The changelog for NGINX releases is available at [nginx.org changes page](https://nginx.org/en/CHANGES).
+
+## See a change merged here that doesn't show up on Docker Hub yet?
+
+For more information about the full official images change lifecycle, see [the "An image's source changed in Git, now what?" FAQ entry](https://github.com/docker-library/faq#an-images-source-changed-in-git-now-what).
+
+For outstanding `nginx` image PRs, check [PRs with the "library/nginx" label on the official-images repository](https://github.com/docker-library/official-images/labels/library%2Fnginx). For the current "source of truth" for [`nginx`](https://hub.docker.com/_/nginx/), see [the `library/nginx` file in the official-images repository](https://github.com/docker-library/official-images/blob/master/library/nginx).
+
+## Contributing
+
+Please see the [contributing guide](/CONTRIBUTING.md) for guidelines on how to best contribute to this project.
+
+## License
+
+[BSD 2-Clause](/LICENSE)
+
+© [F5, Inc.](https://www.f5.com/) 2014-2025
+
+---
-The full readme is generated over in [docker-library/docs](https://github.com/docker-library/docs),
-specificially in [docker-library/docs/nginx](https://github.com/docker-library/docs/tree/master/nginx).
+- [GitHub CI](https://github.com/nginx/docker-nginx/actions?query=workflow%3A%22GitHub+CI%22+branch%3Amaster)
+| Build | Status | Badges | (per-arch) |
+|:-:|:-:|:-:|:-:|
+| [amd64](https://doi-janky.infosiftr.net/job/multiarch/job/amd64/job/nginx/) | [arm32v5](https://doi-janky.infosiftr.net/job/multiarch/job/arm32v5/job/nginx/) | [arm32v6](https://doi-janky.infosiftr.net/job/multiarch/job/arm32v6/job/nginx/) | [arm32v7](https://doi-janky.infosiftr.net/job/multiarch/job/arm32v7/job/nginx/) |
+| [arm64v8](https://doi-janky.infosiftr.net/job/multiarch/job/arm64v8/job/nginx/) | [i386](https://doi-janky.infosiftr.net/job/multiarch/job/i386/job/nginx/) | [mips64le](https://doi-janky.infosiftr.net/job/multiarch/job/mips64le/job/nginx/) | [ppc64le](https://doi-janky.infosiftr.net/job/multiarch/job/ppc64le/job/nginx/) |
+| [s390x](https://doi-janky.infosiftr.net/job/multiarch/job/s390x/job/nginx/) | [put-shared (light)](https://doi-janky.infosiftr.net/job/put-shared/job/light/job/nginx/) |
diff --git a/SECURITY.md b/SECURITY.md
new file mode 100644
index 000000000..bf09fe02e
--- /dev/null
+++ b/SECURITY.md
@@ -0,0 +1,14 @@
+# Security Policy
+
+## Latest Versions
+
+We advise users to run or update to the most recent release of the NGINX Docker image. Older versions of the NGINX Docker image may not have all enhancements and/or bug fixes applied to them.
+
+## Reporting a Vulnerability
+
+The F5 Security Incident Response Team (F5 SIRT) offers two methods to easily report potential security vulnerabilities:
+
+- If you’re an F5 customer with an active support contract, please contact [F5 Technical Support](https://www.f5.com/support).
+- If you aren’t an F5 customer, please report any potential or current instances of security vulnerabilities in any F5 product to the F5 Security Incident Response Team at <F5SIRT@f5.com>.
+
+For more information, please read the F5 SIRT vulnerability reporting guidelines available at [https://www.f5.com/support/report-a-vulnerability](https://www.f5.com/support/report-a-vulnerability).
diff --git a/SUPPORT.md b/SUPPORT.md
new file mode 100644
index 000000000..5e9434084
--- /dev/null
+++ b/SUPPORT.md
@@ -0,0 +1,37 @@
+# Support
+
+## Ask a Question
+
+We use GitHub for tracking bugs and feature requests related to this project.
+
+Don't know how something in this project works? Curious if this project can achieve your desired functionality? Please open an issue on GitHub with the label `question`. Alternatively, start a GitHub discussion!
+
+## NGINX Specific Questions and/or Issues
+
+This isn't the right place to get support for NGINX specific questions, but the following resources are available below. Thanks for your understanding!
+
+### Community Forum
+
+We have a community [forum](https://community.nginx.org/)! If you have any questions and/or issues, try checking out the [`Troubleshooting`](https://community.nginx.org/c/troubleshooting/8) and [`How do I...?`](https://community.nginx.org/c/how-do-i/9) categories. Both fellow community members and NGINXers might be able to help you! :)
+
+### Documentation
+
+For a comprehensive list of all NGINX directives, check out <https://nginx.org/en/docs/dirindex.html>.
+
+For a comprehensive list of administration and deployment guides for all NGINX products, check out <https://docs.nginx.com>.
+
+### Mailing List
+
+Want to get in touch with the NGINX development team directly? Try using the relevant mailing list found at <https://mailman.nginx.org/mailman/listinfo>!
+
+## Contributing
+
+Please see the [contributing guide](/CONTRIBUTING.md) for guidelines on how to best contribute to this project.
+
+## Commercial Support
+
+Commercial support for this project may be available. Please get in touch with [NGINX sales](https://www.f5.com/products/get-f5/) or check your contract details for more information!
+
+## Community Support
+
+Community support is offered on a best effort basis through either GitHub issues/PRs/discussions or through any of our active communities.
diff --git a/entrypoint/10-listen-on-ipv6-by-default.sh b/entrypoint/10-listen-on-ipv6-by-default.sh
new file mode 100755
index 000000000..61a901dee
--- /dev/null
+++ b/entrypoint/10-listen-on-ipv6-by-default.sh
@@ -0,0 +1,67 @@
+#!/bin/sh
+# vim:sw=4:ts=4:et
+
+set -e
+
+entrypoint_log() {
+ if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then
+ echo "$@"
+ fi
+}
+
+ME=$(basename "$0")
+DEFAULT_CONF_FILE="etc/nginx/conf.d/default.conf"
+
+# check if we have ipv6 available
+if [ ! -f "/proc/net/if_inet6" ]; then
+ entrypoint_log "$ME: info: ipv6 not available"
+ exit 0
+fi
+
+if [ ! -f "/$DEFAULT_CONF_FILE" ]; then
+ entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE is not a file or does not exist"
+ exit 0
+fi
+
+# check if the file can be modified, e.g. not on a r/o filesystem
+touch /$DEFAULT_CONF_FILE 2>/dev/null || { entrypoint_log "$ME: info: can not modify /$DEFAULT_CONF_FILE (read-only file system?)"; exit 0; }
+
+# check if the file is already modified, e.g. on a container restart
+grep -q "listen \[::\]:80;" /$DEFAULT_CONF_FILE && { entrypoint_log "$ME: info: IPv6 listen already enabled"; exit 0; }
+
+if [ -f "/etc/os-release" ]; then
+ . /etc/os-release
+else
+ entrypoint_log "$ME: info: can not guess the operating system"
+ exit 0
+fi
+
+entrypoint_log "$ME: info: Getting the checksum of /$DEFAULT_CONF_FILE"
+
+case "$ID" in
+ "debian")
+ CHECKSUM=$(dpkg-query --show --showformat='${Conffiles}\n' nginx | grep $DEFAULT_CONF_FILE | cut -d' ' -f 3)
+ echo "$CHECKSUM /$DEFAULT_CONF_FILE" | md5sum -c - >/dev/null 2>&1 || {
+ entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE differs from the packaged version"
+ exit 0
+ }
+ ;;
+ "alpine")
+ CHECKSUM=$(apk manifest nginx 2>/dev/null| grep $DEFAULT_CONF_FILE | cut -d' ' -f 1 | cut -d ':' -f 2)
+ echo "$CHECKSUM /$DEFAULT_CONF_FILE" | sha1sum -c - >/dev/null 2>&1 || {
+ entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE differs from the packaged version"
+ exit 0
+ }
+ ;;
+ *)
+ entrypoint_log "$ME: info: Unsupported distribution"
+ exit 0
+ ;;
+esac
+
+# enable ipv6 on default.conf listen sockets
+sed -i -E 's,listen 80;,listen 80;\n listen [::]:80;,' /$DEFAULT_CONF_FILE
+
+entrypoint_log "$ME: info: Enabled listen on IPv6 in /$DEFAULT_CONF_FILE"
+
+exit 0
diff --git a/entrypoint/15-local-resolvers.envsh b/entrypoint/15-local-resolvers.envsh
new file mode 100755
index 000000000..e830ddacd
--- /dev/null
+++ b/entrypoint/15-local-resolvers.envsh
@@ -0,0 +1,15 @@
+#!/bin/sh
+# vim:sw=2:ts=2:sts=2:et
+
+set -eu
+
+LC_ALL=C
+PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+
+[ "${NGINX_ENTRYPOINT_LOCAL_RESOLVERS:-}" ] || return 0
+
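+# build a space-separated list of nameservers from /etc/resolv.conf, wrapping IPv6 addresses in brackets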
+NGINX_LOCAL_RESOLVERS=$(awk 'BEGIN{ORS=" "} $1=="nameserver" {if ($2 ~ ":") {print "["$2"]"} else {print $2}}' /etc/resolv.conf)
+
+NGINX_LOCAL_RESOLVERS="${NGINX_LOCAL_RESOLVERS% }"
+
+export NGINX_LOCAL_RESOLVERS
diff --git a/entrypoint/20-envsubst-on-templates.sh b/entrypoint/20-envsubst-on-templates.sh
new file mode 100755
index 000000000..3804165c9
--- /dev/null
+++ b/entrypoint/20-envsubst-on-templates.sh
@@ -0,0 +1,78 @@
+#!/bin/sh
+
+set -e
+
+ME=$(basename "$0")
+
+entrypoint_log() {
+ if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then
+ echo "$@"
+ fi
+}
+
+add_stream_block() {
+ local conffile="/etc/nginx/nginx.conf"
+
+ if grep -q -E "\s*stream\s*\{" "$conffile"; then
+ entrypoint_log "$ME: $conffile contains a stream block; include $stream_output_dir/*.conf to enable stream templates"
+ else
+ # check if the file can be modified, e.g. not on a r/o filesystem
+ touch "$conffile" 2>/dev/null || { entrypoint_log "$ME: info: can not modify $conffile (read-only file system?)"; exit 0; }
+ entrypoint_log "$ME: Appending stream block to $conffile to include $stream_output_dir/*.conf"
+ cat << END >> "$conffile"
+# added by "$ME" on "$(date)"
+stream {
+ include $stream_output_dir/*.conf;
+}
+END
+ fi
+}
+
+auto_envsubst() {
+ local template_dir="${NGINX_ENVSUBST_TEMPLATE_DIR:-/etc/nginx/templates}"
+ local suffix="${NGINX_ENVSUBST_TEMPLATE_SUFFIX:-.template}"
+ local output_dir="${NGINX_ENVSUBST_OUTPUT_DIR:-/etc/nginx/conf.d}"
+ local stream_suffix="${NGINX_ENVSUBST_STREAM_TEMPLATE_SUFFIX:-.stream-template}"
+ local stream_output_dir="${NGINX_ENVSUBST_STREAM_OUTPUT_DIR:-/etc/nginx/stream-conf.d}"
+ local filter="${NGINX_ENVSUBST_FILTER:-}"
+
+ local template defined_envs relative_path output_path subdir
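+ # collect the names of currently defined environment variables (optionally restricted by NGINX_ENVSUBST_FILTER)
+ # so that envsubst substitutes only those and leaves any other $-style expressions in the templates untouched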
+ defined_envs=$(printf '${%s} ' $(awk "END { for (name in ENVIRON) { print ( name ~ /${filter}/ ) ? name : \"\" } }" < /dev/null ))
+ [ -d "$template_dir" ] || return 0
+ if [ ! -w "$output_dir" ]; then
+ entrypoint_log "$ME: ERROR: $template_dir exists, but $output_dir is not writable"
+ return 0
+ fi
+ find "$template_dir" -follow -type f -name "*$suffix" -print | while read -r template; do
+ relative_path="${template#"$template_dir/"}"
+ output_path="$output_dir/${relative_path%"$suffix"}"
+ subdir=$(dirname "$relative_path")
+ # create a subdirectory where the template file exists
+ mkdir -p "$output_dir/$subdir"
+ entrypoint_log "$ME: Running envsubst on $template to $output_path"
+ envsubst "$defined_envs" < "$template" > "$output_path"
+ done
+
+ # Print the first file with the stream suffix; this will be false if there are none
+ if test -n "$(find "$template_dir" -name "*$stream_suffix" -print -quit)"; then
+ mkdir -p "$stream_output_dir"
+ if [ ! -w "$stream_output_dir" ]; then
+ entrypoint_log "$ME: ERROR: $template_dir exists, but $stream_output_dir is not writable"
+ return 0
+ fi
+ add_stream_block
+ find "$template_dir" -follow -type f -name "*$stream_suffix" -print | while read -r template; do
+ relative_path="${template#"$template_dir/"}"
+ output_path="$stream_output_dir/${relative_path%"$stream_suffix"}"
+ subdir=$(dirname "$relative_path")
+ # create a subdirectory where the template file exists
+ mkdir -p "$stream_output_dir/$subdir"
+ entrypoint_log "$ME: Running envsubst on $template to $output_path"
+ envsubst "$defined_envs" < "$template" > "$output_path"
+ done
+ fi
+}
+
+auto_envsubst
+
+exit 0
diff --git a/entrypoint/30-tune-worker-processes.sh b/entrypoint/30-tune-worker-processes.sh
new file mode 100755
index 000000000..defb994f3
--- /dev/null
+++ b/entrypoint/30-tune-worker-processes.sh
@@ -0,0 +1,188 @@
+#!/bin/sh
+# vim:sw=2:ts=2:sts=2:et
+
+set -eu
+
+LC_ALL=C
+ME=$(basename "$0")
+PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+
+[ "${NGINX_ENTRYPOINT_WORKER_PROCESSES_AUTOTUNE:-}" ] || exit 0
+
+touch /etc/nginx/nginx.conf 2>/dev/null || { echo >&2 "$ME: error: can not modify /etc/nginx/nginx.conf (read-only file system?)"; exit 0; }
+
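+# Derive worker_processes from the most restrictive of: the number of online CPUs, the cgroup cpuset size,
+# and the cgroup CPU quota (quota divided by period, rounded up), for both cgroup v1 and v2.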
+ceildiv() {
+ num=$1
+ div=$2
+ echo $(( (num + div - 1) / div ))
+}
+
+get_cpuset() {
+ cpusetroot=$1
+ cpusetfile=$2
+ ncpu=0
+ [ -f "$cpusetroot/$cpusetfile" ] || return 1
+ for token in $( tr ',' ' ' < "$cpusetroot/$cpusetfile" ); do
+ case "$token" in
+ *-*)
+ count=$( seq $(echo "$token" | tr '-' ' ') | wc -l )
+ ncpu=$(( ncpu+count ))
+ ;;
+ *)
+ ncpu=$(( ncpu+1 ))
+ ;;
+ esac
+ done
+ echo "$ncpu"
+}
+
+get_quota() {
+ cpuroot=$1
+ ncpu=0
+ [ -f "$cpuroot/cpu.cfs_quota_us" ] || return 1
+ [ -f "$cpuroot/cpu.cfs_period_us" ] || return 1
+ cfs_quota=$( cat "$cpuroot/cpu.cfs_quota_us" )
+ cfs_period=$( cat "$cpuroot/cpu.cfs_period_us" )
+ [ "$cfs_quota" = "-1" ] && return 1
+ [ "$cfs_period" = "0" ] && return 1
+ ncpu=$( ceildiv "$cfs_quota" "$cfs_period" )
+ [ "$ncpu" -gt 0 ] || return 1
+ echo "$ncpu"
+}
+
+get_quota_v2() {
+ cpuroot=$1
+ ncpu=0
+ [ -f "$cpuroot/cpu.max" ] || return 1
+ cfs_quota=$( cut -d' ' -f 1 < "$cpuroot/cpu.max" )
+ cfs_period=$( cut -d' ' -f 2 < "$cpuroot/cpu.max" )
+ [ "$cfs_quota" = "max" ] && return 1
+ [ "$cfs_period" = "0" ] && return 1
+ ncpu=$( ceildiv "$cfs_quota" "$cfs_period" )
+ [ "$ncpu" -gt 0 ] || return 1
+ echo "$ncpu"
+}
+
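+# resolve the filesystem path of the requested cgroup v1 controller ("cpuset" or "cpu") for this process
+# by combining the controller mount point from /proc/self/mountinfo with the path from /proc/self/cgroup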
+get_cgroup_v1_path() {
+ needle=$1
+ found=
+ foundroot=
+ mountpoint=
+
+ [ -r "/proc/self/mountinfo" ] || return 1
+ [ -r "/proc/self/cgroup" ] || return 1
+
+ while IFS= read -r line; do
+ case "$needle" in
+ "cpuset")
+ case "$line" in
+ *cpuset*)
+ found=$( echo "$line" | cut -d ' ' -f 4,5 )
+ break
+ ;;
+ esac
+ ;;
+ "cpu")
+ case "$line" in
+ *cpuset*)
+ ;;
+ *cpu,cpuacct*|*cpuacct,cpu|*cpuacct*|*cpu*)
+ found=$( echo "$line" | cut -d ' ' -f 4,5 )
+ break
+ ;;
+ esac
+ esac
+ done << __EOF__
+$( grep -F -- '- cgroup ' /proc/self/mountinfo )
+__EOF__
+
+ while IFS= read -r line; do
+ controller=$( echo "$line" | cut -d: -f 2 )
+ case "$needle" in
+ "cpuset")
+ case "$controller" in
+ cpuset)
+ mountpoint=$( echo "$line" | cut -d: -f 3 )
+ break
+ ;;
+ esac
+ ;;
+ "cpu")
+ case "$controller" in
+ cpu,cpuacct|cpuacct,cpu|cpuacct|cpu)
+ mountpoint=$( echo "$line" | cut -d: -f 3 )
+ break
+ ;;
+ esac
+ ;;
+ esac
+done << __EOF__
+$( grep -F -- 'cpu' /proc/self/cgroup )
+__EOF__
+
+ case "${found%% *}" in
+ "/")
+ foundroot="${found##* }$mountpoint"
+ ;;
+ "$mountpoint")
+ foundroot="${found##* }"
+ ;;
+ esac
+ echo "$foundroot"
+}
+
+get_cgroup_v2_path() {
+ found=
+ foundroot=
+ mountpoint=
+
+ [ -r "/proc/self/mountinfo" ] || return 1
+ [ -r "/proc/self/cgroup" ] || return 1
+
+ while IFS= read -r line; do
+ found=$( echo "$line" | cut -d ' ' -f 4,5 )
+ done << __EOF__
+$( grep -F -- '- cgroup2 ' /proc/self/mountinfo )
+__EOF__
+
+ while IFS= read -r line; do
+ mountpoint=$( echo "$line" | cut -d: -f 3 )
+done << __EOF__
+$( grep -F -- '0::' /proc/self/cgroup )
+__EOF__
+
+ case "${found%% *}" in
+ "")
+ return 1
+ ;;
+ "/")
+ foundroot="${found##* }$mountpoint"
+ ;;
+ "$mountpoint" | /../*)
+ foundroot="${found##* }"
+ ;;
+ esac
+ echo "$foundroot"
+}
+
+ncpu_online=$( getconf _NPROCESSORS_ONLN )
+ncpu_cpuset=
+ncpu_quota=
+ncpu_cpuset_v2=
+ncpu_quota_v2=
+
+cpuset=$( get_cgroup_v1_path "cpuset" ) && ncpu_cpuset=$( get_cpuset "$cpuset" "cpuset.effective_cpus" ) || ncpu_cpuset=$ncpu_online
+cpu=$( get_cgroup_v1_path "cpu" ) && ncpu_quota=$( get_quota "$cpu" ) || ncpu_quota=$ncpu_online
+cgroup_v2=$( get_cgroup_v2_path ) && ncpu_cpuset_v2=$( get_cpuset "$cgroup_v2" "cpuset.cpus.effective" ) || ncpu_cpuset_v2=$ncpu_online
+cgroup_v2=$( get_cgroup_v2_path ) && ncpu_quota_v2=$( get_quota_v2 "$cgroup_v2" ) || ncpu_quota_v2=$ncpu_online
+
+ncpu=$( printf "%s\n%s\n%s\n%s\n%s\n" \
+ "$ncpu_online" \
+ "$ncpu_cpuset" \
+ "$ncpu_quota" \
+ "$ncpu_cpuset_v2" \
+ "$ncpu_quota_v2" \
+ | sort -n \
+ | head -n 1 )
+
+sed -i.bak -r 's/^(worker_processes)(.*)$/# Commented out by '"$ME"' on '"$(date)"'\n#\1\2\n\1 '"$ncpu"';/' /etc/nginx/nginx.conf
diff --git a/entrypoint/docker-entrypoint.sh b/entrypoint/docker-entrypoint.sh
new file mode 100755
index 000000000..8ea04f217
--- /dev/null
+++ b/entrypoint/docker-entrypoint.sh
@@ -0,0 +1,47 @@
+#!/bin/sh
+# vim:sw=4:ts=4:et
+
+set -e
+
+entrypoint_log() {
+ if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then
+ echo "$@"
+ fi
+}
+
+if [ "$1" = "nginx" ] || [ "$1" = "nginx-debug" ]; then
+ if /usr/bin/find "/docker-entrypoint.d/" -mindepth 1 -maxdepth 1 -type f -print -quit 2>/dev/null | read v; then
+ entrypoint_log "$0: /docker-entrypoint.d/ is not empty, will attempt to perform configuration"
+
+ entrypoint_log "$0: Looking for shell scripts in /docker-entrypoint.d/"
+ find "/docker-entrypoint.d/" -follow -type f -print | sort -V | while read -r f; do
+ case "$f" in
+ *.envsh)
+ if [ -x "$f" ]; then
+ entrypoint_log "$0: Sourcing $f";
+ . "$f"
+ else
+ # warn on shell scripts without exec bit
+ entrypoint_log "$0: Ignoring $f, not executable";
+ fi
+ ;;
+ *.sh)
+ if [ -x "$f" ]; then
+ entrypoint_log "$0: Launching $f";
+ "$f"
+ else
+ # warn on shell scripts without exec bit
+ entrypoint_log "$0: Ignoring $f, not executable";
+ fi
+ ;;
+ *) entrypoint_log "$0: Ignoring $f";;
+ esac
+ done
+
+ entrypoint_log "$0: Configuration complete; ready for start up"
+ else
+ entrypoint_log "$0: No files found in /docker-entrypoint.d/, skipping configuration"
+ fi
+fi
+
+exec "$@"
diff --git a/generate-stackbrew-library.sh b/generate-stackbrew-library.sh
index 4d70a4285..13a18cfe0 100755
--- a/generate-stackbrew-library.sh
+++ b/generate-stackbrew-library.sh
@@ -3,16 +3,21 @@ set -eu
declare -A aliases
aliases=(
- [mainline]='1 1.15 latest'
- [stable]='1.14'
+ [mainline]='1 1.29 latest'
+ [stable]='1.28'
)
self="$(basename "$BASH_SOURCE")"
cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
-base=stretch
+base=debian
-versions=( */ )
-versions=( "${versions[@]%/}" )
+versions=( mainline stable )
+
+declare -A debian_architectures
+debian_architectures=(
+ [mainline]='amd64, arm32v5, arm32v7, arm64v8, i386, ppc64le, riscv64, s390x'
+ [stable]='amd64, arm32v5, arm32v7, arm64v8, i386, mips64le, ppc64le, s390x'
+)
# get the most recent commit which modified any of "$@"
fileCommit() {
@@ -37,10 +42,10 @@ dirCommit() {
}
cat <<-EOH
-# this file is generated via https://github.com/nginxinc/docker-nginx/blob/$(fileCommit "$self")/$self
+# this file is generated via https://github.com/nginx/docker-nginx/blob/$(fileCommit "$self")/$self
-Maintainers: NGINX Docker Maintainers (@nginxinc)
-GitRepo: https://github.com/nginxinc/docker-nginx.git
+Maintainers: NGINX Docker Maintainers (@nginx)
+GitRepo: https://github.com/nginx/docker-nginx.git
EOH
# prints "$2$1$3$1...$N"
@@ -51,10 +56,11 @@ join() {
}
for version in "${versions[@]}"; do
+ debian_otel="debian-otel"
+ alpine_otel="alpine-otel"
commit="$(dirCommit "$version/$base")"
fullVersion="$(git show "$commit":"$version/$base/Dockerfile" | awk '$1 == "ENV" && $2 == "NGINX_VERSION" { print $3; exit }')"
- fullVersion="${fullVersion%[.-]*}"
versionAliases=( $fullVersion )
if [ "$version" != "$fullVersion" ]; then
@@ -62,39 +68,81 @@ for version in "${versions[@]}"; do
fi
versionAliases+=( ${aliases[$version]:-} )
+ debianVersion="$(git show "$commit":"$version/$base/Dockerfile" | awk -F"[-:]" '$1 == "FROM debian" { print $2; exit }')"
+ debianAliases=( ${versionAliases[@]/%/-$debianVersion} )
+ debianAliases=( "${debianAliases[@]//latest-/}" )
+
echo
cat <<-EOE
- Tags: $(join ', ' "${versionAliases[@]}")
- Architectures: amd64, arm32v7, arm64v8, i386, ppc64le, s390x
+ Tags: $(join ', ' "${versionAliases[@]}"), $(join ', ' "${debianAliases[@]}")
+ Architectures: ${debian_architectures[$version]}
GitCommit: $commit
Directory: $version/$base
EOE
- for variant in stretch-perl; do
+ for variant in debian-perl; do
commit="$(dirCommit "$version/$variant")"
variantAliases=( "${versionAliases[@]/%/-perl}" )
+ variantAliases+=( "${versionAliases[@]/%/-${variant/debian/$debianVersion}}" )
+ variantAliases=( "${variantAliases[@]//latest-/}" )
+
+ echo
+ cat <<-EOE
+ Tags: $(join ', ' "${variantAliases[@]}")
+ Architectures: ${debian_architectures[$version]}
+ GitCommit: $commit
+ Directory: $version/$variant
+ EOE
+ done
+
+ for variant in $debian_otel; do
+ commit="$(dirCommit "$version/$variant")"
+
+ variantAliases=( "${versionAliases[@]/%/-otel}" )
+ variantAliases+=( "${versionAliases[@]/%/-${variant/debian/$debianVersion}}" )
+ variantAliases=( "${variantAliases[@]//latest-/}" )
+
+ echo
+ cat <<-EOE
+ Tags: $(join ', ' "${variantAliases[@]}")
+ Architectures: amd64, arm64v8
+ GitCommit: $commit
+ Directory: $version/$variant
+ EOE
+ done
+
+
+ commit="$(dirCommit "$version/alpine-slim")"
+ alpineVersion="$(git show "$commit":"$version/alpine-slim/Dockerfile" | awk -F: '$1 == "FROM alpine" { print $2; exit }')"
+
+ for variant in alpine alpine-perl alpine-slim; do
+ commit="$(dirCommit "$version/$variant")"
+
+ variantAliases=( "${versionAliases[@]/%/-$variant}" )
+ variantAliases+=( "${versionAliases[@]/%/-${variant/alpine/alpine$alpineVersion}}" )
variantAliases=( "${variantAliases[@]//latest-/}" )
echo
cat <<-EOE
Tags: $(join ', ' "${variantAliases[@]}")
- Architectures: amd64, arm32v7, arm64v8, i386, ppc64le, s390x
+ Architectures: arm64v8, arm32v6, arm32v7, ppc64le, s390x, i386, amd64, riscv64
GitCommit: $commit
Directory: $version/$variant
EOE
done
- for variant in alpine alpine-perl; do
+ for variant in $alpine_otel; do
commit="$(dirCommit "$version/$variant")"
variantAliases=( "${versionAliases[@]/%/-$variant}" )
+ variantAliases+=( "${versionAliases[@]/%/-${variant/alpine/alpine$alpineVersion}}" )
variantAliases=( "${variantAliases[@]//latest-/}" )
echo
cat <<-EOE
Tags: $(join ', ' "${variantAliases[@]}")
- Architectures: amd64, arm32v6, arm64v8, i386, ppc64le, s390x
+ Architectures: amd64, arm64v8
GitCommit: $commit
Directory: $version/$variant
EOE
diff --git a/mainline/alpine-otel/Dockerfile b/mainline/alpine-otel/Dockerfile
new file mode 100644
index 000000000..52b98949e
--- /dev/null
+++ b/mainline/alpine-otel/Dockerfile
@@ -0,0 +1,77 @@
+#
+# NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh"
+#
+# PLEASE DO NOT EDIT IT DIRECTLY.
+#
+FROM nginx:1.29.2-alpine
+
+ENV OTEL_VERSION 0.1.2
+
+RUN set -x \
+ && apkArch="$(cat /etc/apk/arch)" \
+ && nginxPackages=" \
+ nginx=${NGINX_VERSION}-r${PKG_RELEASE} \
+ nginx-module-xslt=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
+ nginx-module-geoip=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
+ nginx-module-image-filter=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
+ nginx-module-njs=${NGINX_VERSION}.${NJS_VERSION}-r${NJS_RELEASE} \
+ nginx-module-otel=${NGINX_VERSION}.${OTEL_VERSION}-r${PKG_RELEASE} \
+ " \
+# install prerequisites for public key and pkg-oss checks
+ && apk add --no-cache --virtual .checksum-deps \
+ openssl \
+ && case "$apkArch" in \
+ x86_64|aarch64) \
+# arches officially built by upstream
+ apk add -X "https://nginx.org/packages/mainline/alpine/v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" --no-cache $nginxPackages \
+ ;; \
+ *) \
+# we're on an architecture upstream doesn't officially build for
+# let's build binaries from the published packaging sources
+ set -x \
+ && tempDir="$(mktemp -d)" \
+ && chown nobody:nobody $tempDir \
+ && apk add --no-cache --virtual .build-deps \
+ gcc \
+ libc-dev \
+ make \
+ openssl-dev \
+ pcre2-dev \
+ zlib-dev \
+ linux-headers \
+ cmake \
+ bash \
+ alpine-sdk \
+ findutils \
+ curl \
+ xz \
+ protobuf-dev \
+ grpc-dev \
+ && su nobody -s /bin/sh -c " \
+ export HOME=${tempDir} \
+ && cd ${tempDir} \
+ && curl -f -L -O https://github.com/nginx/pkg-oss/archive/${NGINX_VERSION}-${PKG_RELEASE}.tar.gz \
+ && PKGOSSCHECKSUM=\"633b2a8b56bd48527d7e293a255fd706dfbb5a9c47605ff18e91a2a409801043ee00ecb0da5fadf9cdf1d483c5ca848e81c1861870619523e15ca9e494b6e700 *${NGINX_VERSION}-${PKG_RELEASE}.tar.gz\" \
+ && if [ \"\$(openssl sha512 -r ${NGINX_VERSION}-${PKG_RELEASE}.tar.gz)\" = \"\$PKGOSSCHECKSUM\" ]; then \
+ echo \"pkg-oss tarball checksum verification succeeded!\"; \
+ else \
+ echo \"pkg-oss tarball checksum verification failed!\"; \
+ exit 1; \
+ fi \
+ && tar xzvf ${NGINX_VERSION}-${PKG_RELEASE}.tar.gz \
+ && cd pkg-oss-${NGINX_VERSION}-${PKG_RELEASE} \
+ && cd alpine \
+ && make module-otel \
+ && apk index --allow-untrusted -o ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz ${tempDir}/packages/alpine/${apkArch}/*.apk \
+ && abuild-sign -k ${tempDir}/.abuild/abuild-key.rsa ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz \
+ " \
+ && cp ${tempDir}/.abuild/abuild-key.rsa.pub /etc/apk/keys/ \
+ && apk del --no-network .build-deps \
+ && apk add -X ${tempDir}/packages/alpine/ --no-cache $nginxPackages \
+ ;; \
+ esac \
+# remove checksum deps
+ && apk del --no-network .checksum-deps \
+# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
+ && if [ -n "$tempDir" ]; then rm -rf "$tempDir"; fi \
+ && if [ -f "/etc/apk/keys/abuild-key.rsa.pub" ]; then rm -f /etc/apk/keys/abuild-key.rsa.pub; fi
diff --git a/mainline/alpine-perl/Dockerfile b/mainline/alpine-perl/Dockerfile
index efeebbf6e..b16c406f6 100644
--- a/mainline/alpine-perl/Dockerfile
+++ b/mainline/alpine-perl/Dockerfile
@@ -1,151 +1,72 @@
-FROM alpine:3.7
+#
+# NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh"
+#
+# PLEASE DO NOT EDIT IT DIRECTLY.
+#
+FROM nginx:1.29.2-alpine
-LABEL maintainer="NGINX Docker Maintainers <docker-maint@nginx.com>"
-
-ENV NGINX_VERSION 1.15.2
-
-RUN GPG_KEYS=B0F4253373F8F6F510D42178520A9993A1C052F8 \
- && CONFIG="\
- --prefix=/etc/nginx \
- --sbin-path=/usr/sbin/nginx \
- --modules-path=/usr/lib/nginx/modules \
- --conf-path=/etc/nginx/nginx.conf \
- --error-log-path=/var/log/nginx/error.log \
- --http-log-path=/var/log/nginx/access.log \
- --pid-path=/var/run/nginx.pid \
- --lock-path=/var/run/nginx.lock \
- --http-client-body-temp-path=/var/cache/nginx/client_temp \
- --http-proxy-temp-path=/var/cache/nginx/proxy_temp \
- --http-fastcgi-temp-path=/var/cache/nginx/fastcgi_temp \
- --http-uwsgi-temp-path=/var/cache/nginx/uwsgi_temp \
- --http-scgi-temp-path=/var/cache/nginx/scgi_temp \
- --user=nginx \
- --group=nginx \
- --with-http_ssl_module \
- --with-http_realip_module \
- --with-http_addition_module \
- --with-http_sub_module \
- --with-http_dav_module \
- --with-http_flv_module \
- --with-http_mp4_module \
- --with-http_gunzip_module \
- --with-http_gzip_static_module \
- --with-http_random_index_module \
- --with-http_secure_link_module \
- --with-http_stub_status_module \
- --with-http_auth_request_module \
- --with-http_xslt_module=dynamic \
- --with-http_image_filter_module=dynamic \
- --with-http_geoip_module=dynamic \
- --with-http_perl_module=dynamic \
- --with-threads \
- --with-stream \
- --with-stream_ssl_module \
- --with-stream_ssl_preread_module \
- --with-stream_realip_module \
- --with-stream_geoip_module=dynamic \
- --with-http_slice_module \
- --with-mail \
- --with-mail_ssl_module \
- --with-compat \
- --with-file-aio \
- --with-http_v2_module \
- " \
- && addgroup -S nginx \
- && adduser -D -S -h /var/cache/nginx -s /sbin/nologin -G nginx nginx \
- && apk add --no-cache --virtual .build-deps \
- gcc \
- libc-dev \
- make \
- openssl-dev \
- pcre-dev \
- zlib-dev \
- linux-headers \
- curl \
- gnupg \
- libxslt-dev \
- gd-dev \
- geoip-dev \
- perl-dev \
- && curl -fSL https://nginx.org/download/nginx-$NGINX_VERSION.tar.gz -o nginx.tar.gz \
- && curl -fSL https://nginx.org/download/nginx-$NGINX_VERSION.tar.gz.asc -o nginx.tar.gz.asc \
- && export GNUPGHOME="$(mktemp -d)" \
- && found=''; \
- for server in \
- ha.pool.sks-keyservers.net \
- hkp://keyserver.ubuntu.com:80 \
- hkp://p80.pool.sks-keyservers.net:80 \
- pgp.mit.edu \
- ; do \
- echo "Fetching GPG key $GPG_KEYS from $server"; \
- gpg --keyserver "$server" --keyserver-options timeout=10 --recv-keys "$GPG_KEYS" && found=yes && break; \
- done; \
- test -z "$found" && echo >&2 "error: failed to fetch GPG key $GPG_KEYS" && exit 1; \
- gpg --batch --verify nginx.tar.gz.asc nginx.tar.gz \
- && rm -rf "$GNUPGHOME" nginx.tar.gz.asc \
- && mkdir -p /usr/src \
- && tar -zxC /usr/src -f nginx.tar.gz \
- && rm nginx.tar.gz \
- && cd /usr/src/nginx-$NGINX_VERSION \
- && ./configure $CONFIG --with-debug \
- && make -j$(getconf _NPROCESSORS_ONLN) \
- && mv objs/nginx objs/nginx-debug \
- && mv objs/ngx_http_xslt_filter_module.so objs/ngx_http_xslt_filter_module-debug.so \
- && mv objs/ngx_http_image_filter_module.so objs/ngx_http_image_filter_module-debug.so \
- && mv objs/ngx_http_geoip_module.so objs/ngx_http_geoip_module-debug.so \
- && mv objs/ngx_http_perl_module.so objs/ngx_http_perl_module-debug.so \
- && mv objs/ngx_stream_geoip_module.so objs/ngx_stream_geoip_module-debug.so \
- && ./configure $CONFIG \
- && make -j$(getconf _NPROCESSORS_ONLN) \
- && make install \
- && rm -rf /etc/nginx/html/ \
- && mkdir /etc/nginx/conf.d/ \
- && mkdir -p /usr/share/nginx/html/ \
- && install -m644 html/index.html /usr/share/nginx/html/ \
- && install -m644 html/50x.html /usr/share/nginx/html/ \
- && install -m755 objs/nginx-debug /usr/sbin/nginx-debug \
- && install -m755 objs/ngx_http_xslt_filter_module-debug.so /usr/lib/nginx/modules/ngx_http_xslt_filter_module-debug.so \
- && install -m755 objs/ngx_http_image_filter_module-debug.so /usr/lib/nginx/modules/ngx_http_image_filter_module-debug.so \
- && install -m755 objs/ngx_http_geoip_module-debug.so /usr/lib/nginx/modules/ngx_http_geoip_module-debug.so \
- && install -m755 objs/ngx_http_perl_module-debug.so /usr/lib/nginx/modules/ngx_http_perl_module-debug.so \
- && install -m755 objs/ngx_stream_geoip_module-debug.so /usr/lib/nginx/modules/ngx_stream_geoip_module-debug.so \
- && ln -s ../../usr/lib/nginx/modules /etc/nginx/modules \
- && strip /usr/sbin/nginx* \
- && strip /usr/lib/nginx/modules/*.so \
- && rm -rf /usr/src/nginx-$NGINX_VERSION \
- \
- # Bring in gettext so we can get `envsubst`, then throw
- # the rest away. To do this, we need to install `gettext`
- # then move `envsubst` out of the way so `gettext` can
- # be deleted completely, then move `envsubst` back.
- && apk add --no-cache --virtual .gettext gettext \
- && mv /usr/bin/envsubst /tmp/ \
- \
- && runDeps="$( \
- scanelf --needed --nobanner /usr/sbin/nginx /usr/lib/nginx/modules/*.so /tmp/envsubst \
- | awk '{ gsub(/,/, "\nso:", $2); print "so:" $2 }' \
- | sort -u \
- | xargs -r apk info --installed \
- | sort -u \
- )" \
- && apk add --no-cache --virtual .nginx-rundeps $runDeps \
- && apk del .build-deps \
- && apk del .gettext \
- && mv /tmp/envsubst /usr/local/bin/ \
- \
- # Bring in tzdata so users could set the timezones through the environment
- # variables
- && apk add --no-cache tzdata \
- \
- # forward request and error logs to docker log collector
- && ln -sf /dev/stdout /var/log/nginx/access.log \
- && ln -sf /dev/stderr /var/log/nginx/error.log
-
-COPY nginx.conf /etc/nginx/nginx.conf
-COPY nginx.vh.default.conf /etc/nginx/conf.d/default.conf
-
-EXPOSE 80
-
-STOPSIGNAL SIGTERM
-
-CMD ["nginx", "-g", "daemon off;"]
+RUN set -x \
+ && apkArch="$(cat /etc/apk/arch)" \
+ && nginxPackages=" \
+ nginx=${NGINX_VERSION}-r${PKG_RELEASE} \
+ nginx-module-xslt=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
+ nginx-module-geoip=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
+ nginx-module-image-filter=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
+ nginx-module-perl=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
+ nginx-module-njs=${NGINX_VERSION}.${NJS_VERSION}-r${NJS_RELEASE} \
+ " \
+# install prerequisites for public key and pkg-oss checks
+ && apk add --no-cache --virtual .checksum-deps \
+ openssl \
+ && case "$apkArch" in \
+ x86_64|aarch64) \
+# arches officially built by upstream
+ apk add -X "https://nginx.org/packages/mainline/alpine/v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" --no-cache $nginxPackages \
+ ;; \
+ *) \
+# we're on an architecture upstream doesn't officially build for
+# let's build binaries from the published packaging sources
+ set -x \
+ && tempDir="$(mktemp -d)" \
+ && chown nobody:nobody $tempDir \
+ && apk add --no-cache --virtual .build-deps \
+ gcc \
+ libc-dev \
+ make \
+ openssl-dev \
+ pcre2-dev \
+ zlib-dev \
+ linux-headers \
+ perl-dev \
+ bash \
+ alpine-sdk \
+ findutils \
+ curl \
+ && su nobody -s /bin/sh -c " \
+ export HOME=${tempDir} \
+ && cd ${tempDir} \
+ && curl -f -L -O https://github.com/nginx/pkg-oss/archive/${NGINX_VERSION}-${PKG_RELEASE}.tar.gz \
+ && PKGOSSCHECKSUM=\"633b2a8b56bd48527d7e293a255fd706dfbb5a9c47605ff18e91a2a409801043ee00ecb0da5fadf9cdf1d483c5ca848e81c1861870619523e15ca9e494b6e700 *${NGINX_VERSION}-${PKG_RELEASE}.tar.gz\" \
+ && if [ \"\$(openssl sha512 -r ${NGINX_VERSION}-${PKG_RELEASE}.tar.gz)\" = \"\$PKGOSSCHECKSUM\" ]; then \
+ echo \"pkg-oss tarball checksum verification succeeded!\"; \
+ else \
+ echo \"pkg-oss tarball checksum verification failed!\"; \
+ exit 1; \
+ fi \
+ && tar xzvf ${NGINX_VERSION}-${PKG_RELEASE}.tar.gz \
+ && cd pkg-oss-${NGINX_VERSION}-${PKG_RELEASE} \
+ && cd alpine \
+ && make module-perl \
+ && apk index --allow-untrusted -o ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz ${tempDir}/packages/alpine/${apkArch}/*.apk \
+ && abuild-sign -k ${tempDir}/.abuild/abuild-key.rsa ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz \
+ " \
+ && cp ${tempDir}/.abuild/abuild-key.rsa.pub /etc/apk/keys/ \
+ && apk del --no-network .build-deps \
+ && apk add -X ${tempDir}/packages/alpine/ --no-cache $nginxPackages \
+ ;; \
+ esac \
+# remove checksum deps
+ && apk del --no-network .checksum-deps \
+# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
+ && if [ -n "$tempDir" ]; then rm -rf "$tempDir"; fi \
+ && if [ -f "/etc/apk/keys/abuild-key.rsa.pub" ]; then rm -f /etc/apk/keys/abuild-key.rsa.pub; fi
diff --git a/mainline/alpine-perl/nginx.vh.default.conf b/mainline/alpine-perl/nginx.vh.default.conf
deleted file mode 100644
index 299c622a7..000000000
--- a/mainline/alpine-perl/nginx.vh.default.conf
+++ /dev/null
@@ -1,45 +0,0 @@
-server {
- listen 80;
- server_name localhost;
-
- #charset koi8-r;
- #access_log /var/log/nginx/host.access.log main;
-
- location / {
- root /usr/share/nginx/html;
- index index.html index.htm;
- }
-
- #error_page 404 /404.html;
-
- # redirect server error pages to the static page /50x.html
- #
- error_page 500 502 503 504 /50x.html;
- location = /50x.html {
- root /usr/share/nginx/html;
- }
-
- # proxy the PHP scripts to Apache listening on 127.0.0.1:80
- #
- #location ~ \.php$ {
- # proxy_pass http://127.0.0.1;
- #}
-
- # pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000
- #
- #location ~ \.php$ {
- # root html;
- # fastcgi_pass 127.0.0.1:9000;
- # fastcgi_index index.php;
- # fastcgi_param SCRIPT_FILENAME /scripts$fastcgi_script_name;
- # include fastcgi_params;
- #}
-
- # deny access to .htaccess files, if Apache's document root
- # concurs with nginx's one
- #
- #location ~ /\.ht {
- # deny all;
- #}
-}
-
diff --git a/mainline/alpine-slim/10-listen-on-ipv6-by-default.sh b/mainline/alpine-slim/10-listen-on-ipv6-by-default.sh
new file mode 100755
index 000000000..61a901dee
--- /dev/null
+++ b/mainline/alpine-slim/10-listen-on-ipv6-by-default.sh
@@ -0,0 +1,67 @@
+#!/bin/sh
+# vim:sw=4:ts=4:et
+
+set -e
+
+entrypoint_log() {
+ if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then
+ echo "$@"
+ fi
+}
+
+ME=$(basename "$0")
+DEFAULT_CONF_FILE="etc/nginx/conf.d/default.conf"
+
+# check if we have ipv6 available
+if [ ! -f "/proc/net/if_inet6" ]; then
+ entrypoint_log "$ME: info: ipv6 not available"
+ exit 0
+fi
+
+if [ ! -f "/$DEFAULT_CONF_FILE" ]; then
+ entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE is not a file or does not exist"
+ exit 0
+fi
+
+# check if the file can be modified, e.g. not on a r/o filesystem
+touch /$DEFAULT_CONF_FILE 2>/dev/null || { entrypoint_log "$ME: info: can not modify /$DEFAULT_CONF_FILE (read-only file system?)"; exit 0; }
+
+# check if the file is already modified, e.g. on a container restart
+grep -q "listen \[::\]:80;" /$DEFAULT_CONF_FILE && { entrypoint_log "$ME: info: IPv6 listen already enabled"; exit 0; }
+
+if [ -f "/etc/os-release" ]; then
+ . /etc/os-release
+else
+ entrypoint_log "$ME: info: can not guess the operating system"
+ exit 0
+fi
+
+entrypoint_log "$ME: info: Getting the checksum of /$DEFAULT_CONF_FILE"
+
+case "$ID" in
+ "debian")
+ CHECKSUM=$(dpkg-query --show --showformat='${Conffiles}\n' nginx | grep $DEFAULT_CONF_FILE | cut -d' ' -f 3)
+ echo "$CHECKSUM /$DEFAULT_CONF_FILE" | md5sum -c - >/dev/null 2>&1 || {
+ entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE differs from the packaged version"
+ exit 0
+ }
+ ;;
+ "alpine")
+ CHECKSUM=$(apk manifest nginx 2>/dev/null| grep $DEFAULT_CONF_FILE | cut -d' ' -f 1 | cut -d ':' -f 2)
+ echo "$CHECKSUM /$DEFAULT_CONF_FILE" | sha1sum -c - >/dev/null 2>&1 || {
+ entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE differs from the packaged version"
+ exit 0
+ }
+ ;;
+ *)
+ entrypoint_log "$ME: info: Unsupported distribution"
+ exit 0
+ ;;
+esac
+
+# enable ipv6 on default.conf listen sockets
+sed -i -E 's,listen 80;,listen 80;\n listen [::]:80;,' /$DEFAULT_CONF_FILE
+
+entrypoint_log "$ME: info: Enabled listen on IPv6 in /$DEFAULT_CONF_FILE"
+
+exit 0
diff --git a/mainline/alpine-slim/15-local-resolvers.envsh b/mainline/alpine-slim/15-local-resolvers.envsh
new file mode 100755
index 000000000..e830ddacd
--- /dev/null
+++ b/mainline/alpine-slim/15-local-resolvers.envsh
@@ -0,0 +1,15 @@
+#!/bin/sh
+# vim:sw=2:ts=2:sts=2:et
+
+set -eu
+
+LC_ALL=C
+PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+
+[ "${NGINX_ENTRYPOINT_LOCAL_RESOLVERS:-}" ] || return 0
+
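+# build a space-separated list of nameservers from /etc/resolv.conf, wrapping IPv6 addresses in brackets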
+NGINX_LOCAL_RESOLVERS=$(awk 'BEGIN{ORS=" "} $1=="nameserver" {if ($2 ~ ":") {print "["$2"]"} else {print $2}}' /etc/resolv.conf)
+
+NGINX_LOCAL_RESOLVERS="${NGINX_LOCAL_RESOLVERS% }"
+
+export NGINX_LOCAL_RESOLVERS
diff --git a/mainline/alpine-slim/20-envsubst-on-templates.sh b/mainline/alpine-slim/20-envsubst-on-templates.sh
new file mode 100755
index 000000000..3804165c9
--- /dev/null
+++ b/mainline/alpine-slim/20-envsubst-on-templates.sh
@@ -0,0 +1,78 @@
+#!/bin/sh
+
+set -e
+
+ME=$(basename "$0")
+
+entrypoint_log() {
+ if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then
+ echo "$@"
+ fi
+}
+
+add_stream_block() {
+ local conffile="/etc/nginx/nginx.conf"
+
+ if grep -q -E "\s*stream\s*\{" "$conffile"; then
+ entrypoint_log "$ME: $conffile contains a stream block; include $stream_output_dir/*.conf to enable stream templates"
+ else
+ # check if the file can be modified, e.g. not on a r/o filesystem
+ touch "$conffile" 2>/dev/null || { entrypoint_log "$ME: info: can not modify $conffile (read-only file system?)"; exit 0; }
+ entrypoint_log "$ME: Appending stream block to $conffile to include $stream_output_dir/*.conf"
+ cat << END >> "$conffile"
+# added by "$ME" on "$(date)"
+stream {
+ include $stream_output_dir/*.conf;
+}
+END
+ fi
+}
+
+auto_envsubst() {
+ local template_dir="${NGINX_ENVSUBST_TEMPLATE_DIR:-/etc/nginx/templates}"
+ local suffix="${NGINX_ENVSUBST_TEMPLATE_SUFFIX:-.template}"
+ local output_dir="${NGINX_ENVSUBST_OUTPUT_DIR:-/etc/nginx/conf.d}"
+ local stream_suffix="${NGINX_ENVSUBST_STREAM_TEMPLATE_SUFFIX:-.stream-template}"
+ local stream_output_dir="${NGINX_ENVSUBST_STREAM_OUTPUT_DIR:-/etc/nginx/stream-conf.d}"
+ local filter="${NGINX_ENVSUBST_FILTER:-}"
+
+ local template defined_envs relative_path output_path subdir
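+ # collect the names of currently defined environment variables (optionally restricted by NGINX_ENVSUBST_FILTER)
+ # so that envsubst substitutes only those and leaves any other $-style expressions in the templates untouched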
+ defined_envs=$(printf '${%s} ' $(awk "END { for (name in ENVIRON) { print ( name ~ /${filter}/ ) ? name : \"\" } }" < /dev/null ))
+ [ -d "$template_dir" ] || return 0
+ if [ ! -w "$output_dir" ]; then
+ entrypoint_log "$ME: ERROR: $template_dir exists, but $output_dir is not writable"
+ return 0
+ fi
+ find "$template_dir" -follow -type f -name "*$suffix" -print | while read -r template; do
+ relative_path="${template#"$template_dir/"}"
+ output_path="$output_dir/${relative_path%"$suffix"}"
+ subdir=$(dirname "$relative_path")
+ # create a subdirectory where the template file exists
+ mkdir -p "$output_dir/$subdir"
+ entrypoint_log "$ME: Running envsubst on $template to $output_path"
+ envsubst "$defined_envs" < "$template" > "$output_path"
+ done
+
+ # Print the first file with the stream suffix; this will be false if there are none
+ if test -n "$(find "$template_dir" -name "*$stream_suffix" -print -quit)"; then
+ mkdir -p "$stream_output_dir"
+ if [ ! -w "$stream_output_dir" ]; then
+ entrypoint_log "$ME: ERROR: $template_dir exists, but $stream_output_dir is not writable"
+ return 0
+ fi
+ add_stream_block
+ find "$template_dir" -follow -type f -name "*$stream_suffix" -print | while read -r template; do
+ relative_path="${template#"$template_dir/"}"
+ output_path="$stream_output_dir/${relative_path%"$stream_suffix"}"
+ subdir=$(dirname "$relative_path")
+ # create a subdirectory where the template file exists
+ mkdir -p "$stream_output_dir/$subdir"
+ entrypoint_log "$ME: Running envsubst on $template to $output_path"
+ envsubst "$defined_envs" < "$template" > "$output_path"
+ done
+ fi
+}
+
+auto_envsubst
+
+exit 0
diff --git a/mainline/alpine-slim/30-tune-worker-processes.sh b/mainline/alpine-slim/30-tune-worker-processes.sh
new file mode 100755
index 000000000..defb994f3
--- /dev/null
+++ b/mainline/alpine-slim/30-tune-worker-processes.sh
@@ -0,0 +1,188 @@
+#!/bin/sh
+# vim:sw=2:ts=2:sts=2:et
+
+set -eu
+
+LC_ALL=C
+ME=$(basename "$0")
+PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+
+[ "${NGINX_ENTRYPOINT_WORKER_PROCESSES_AUTOTUNE:-}" ] || exit 0
+
+touch /etc/nginx/nginx.conf 2>/dev/null || { echo >&2 "$ME: error: can not modify /etc/nginx/nginx.conf (read-only file system?)"; exit 0; }
+
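+# Derive worker_processes from the most restrictive of: the number of online CPUs, the cgroup cpuset size,
+# and the cgroup CPU quota (quota divided by period, rounded up), for both cgroup v1 and v2.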
+ceildiv() {
+ num=$1
+ div=$2
+ echo $(( (num + div - 1) / div ))
+}
+
+get_cpuset() {
+ cpusetroot=$1
+ cpusetfile=$2
+ ncpu=0
+ [ -f "$cpusetroot/$cpusetfile" ] || return 1
+ for token in $( tr ',' ' ' < "$cpusetroot/$cpusetfile" ); do
+ case "$token" in
+ *-*)
+ count=$( seq $(echo "$token" | tr '-' ' ') | wc -l )
+ ncpu=$(( ncpu+count ))
+ ;;
+ *)
+ ncpu=$(( ncpu+1 ))
+ ;;
+ esac
+ done
+ echo "$ncpu"
+}
+
+get_quota() {
+ cpuroot=$1
+ ncpu=0
+ [ -f "$cpuroot/cpu.cfs_quota_us" ] || return 1
+ [ -f "$cpuroot/cpu.cfs_period_us" ] || return 1
+ cfs_quota=$( cat "$cpuroot/cpu.cfs_quota_us" )
+ cfs_period=$( cat "$cpuroot/cpu.cfs_period_us" )
+ [ "$cfs_quota" = "-1" ] && return 1
+ [ "$cfs_period" = "0" ] && return 1
+ ncpu=$( ceildiv "$cfs_quota" "$cfs_period" )
+ [ "$ncpu" -gt 0 ] || return 1
+ echo "$ncpu"
+}
+
+get_quota_v2() {
+ cpuroot=$1
+ ncpu=0
+ [ -f "$cpuroot/cpu.max" ] || return 1
+ cfs_quota=$( cut -d' ' -f 1 < "$cpuroot/cpu.max" )
+ cfs_period=$( cut -d' ' -f 2 < "$cpuroot/cpu.max" )
+ [ "$cfs_quota" = "max" ] && return 1
+ [ "$cfs_period" = "0" ] && return 1
+ ncpu=$( ceildiv "$cfs_quota" "$cfs_period" )
+ [ "$ncpu" -gt 0 ] || return 1
+ echo "$ncpu"
+}
+
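+# resolve the filesystem path of the requested cgroup v1 controller ("cpuset" or "cpu") for this process
+# by combining the controller mount point from /proc/self/mountinfo with the path from /proc/self/cgroup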
+get_cgroup_v1_path() {
+ needle=$1
+ found=
+ foundroot=
+ mountpoint=
+
+ [ -r "/proc/self/mountinfo" ] || return 1
+ [ -r "/proc/self/cgroup" ] || return 1
+
+ while IFS= read -r line; do
+ case "$needle" in
+ "cpuset")
+ case "$line" in
+ *cpuset*)
+ found=$( echo "$line" | cut -d ' ' -f 4,5 )
+ break
+ ;;
+ esac
+ ;;
+ "cpu")
+ case "$line" in
+ *cpuset*)
+ ;;
+ *cpu,cpuacct*|*cpuacct,cpu|*cpuacct*|*cpu*)
+ found=$( echo "$line" | cut -d ' ' -f 4,5 )
+ break
+ ;;
+ esac
+ esac
+ done << __EOF__
+$( grep -F -- '- cgroup ' /proc/self/mountinfo )
+__EOF__
+
+ while IFS= read -r line; do
+ controller=$( echo "$line" | cut -d: -f 2 )
+ case "$needle" in
+ "cpuset")
+ case "$controller" in
+ cpuset)
+ mountpoint=$( echo "$line" | cut -d: -f 3 )
+ break
+ ;;
+ esac
+ ;;
+ "cpu")
+ case "$controller" in
+ cpu,cpuacct|cpuacct,cpu|cpuacct|cpu)
+ mountpoint=$( echo "$line" | cut -d: -f 3 )
+ break
+ ;;
+ esac
+ ;;
+ esac
+done << __EOF__
+$( grep -F -- 'cpu' /proc/self/cgroup )
+__EOF__
+
+ case "${found%% *}" in
+ "/")
+ foundroot="${found##* }$mountpoint"
+ ;;
+ "$mountpoint")
+ foundroot="${found##* }"
+ ;;
+ esac
+ echo "$foundroot"
+}
+
+get_cgroup_v2_path() {
+ found=
+ foundroot=
+ mountpoint=
+
+ [ -r "/proc/self/mountinfo" ] || return 1
+ [ -r "/proc/self/cgroup" ] || return 1
+
+ while IFS= read -r line; do
+ found=$( echo "$line" | cut -d ' ' -f 4,5 )
+ done << __EOF__
+$( grep -F -- '- cgroup2 ' /proc/self/mountinfo )
+__EOF__
+
+ while IFS= read -r line; do
+ mountpoint=$( echo "$line" | cut -d: -f 3 )
+done << __EOF__
+$( grep -F -- '0::' /proc/self/cgroup )
+__EOF__
+
+ case "${found%% *}" in
+ "")
+ return 1
+ ;;
+ "/")
+ foundroot="${found##* }$mountpoint"
+ ;;
+ "$mountpoint" | /../*)
+ foundroot="${found##* }"
+ ;;
+ esac
+ echo "$foundroot"
+}
+
+ncpu_online=$( getconf _NPROCESSORS_ONLN )
+ncpu_cpuset=
+ncpu_quota=
+ncpu_cpuset_v2=
+ncpu_quota_v2=
+
+cpuset=$( get_cgroup_v1_path "cpuset" ) && ncpu_cpuset=$( get_cpuset "$cpuset" "cpuset.effective_cpus" ) || ncpu_cpuset=$ncpu_online
+cpu=$( get_cgroup_v1_path "cpu" ) && ncpu_quota=$( get_quota "$cpu" ) || ncpu_quota=$ncpu_online
+cgroup_v2=$( get_cgroup_v2_path ) && ncpu_cpuset_v2=$( get_cpuset "$cgroup_v2" "cpuset.cpus.effective" ) || ncpu_cpuset_v2=$ncpu_online
+cgroup_v2=$( get_cgroup_v2_path ) && ncpu_quota_v2=$( get_quota_v2 "$cgroup_v2" ) || ncpu_quota_v2=$ncpu_online
+
+ncpu=$( printf "%s\n%s\n%s\n%s\n%s\n" \
+ "$ncpu_online" \
+ "$ncpu_cpuset" \
+ "$ncpu_quota" \
+ "$ncpu_cpuset_v2" \
+ "$ncpu_quota_v2" \
+ | sort -n \
+ | head -n 1 )
+
+sed -i.bak -r 's/^(worker_processes)(.*)$/# Commented out by '"$ME"' on '"$(date)"'\n#\1\2\n\1 '"$ncpu"';/' /etc/nginx/nginx.conf
diff --git a/mainline/alpine-slim/Dockerfile b/mainline/alpine-slim/Dockerfile
new file mode 100644
index 000000000..d09211eec
--- /dev/null
+++ b/mainline/alpine-slim/Dockerfile
@@ -0,0 +1,108 @@
+#
+# NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh"
+#
+# PLEASE DO NOT EDIT IT DIRECTLY.
+#
+FROM alpine:3.22
+
+LABEL maintainer="NGINX Docker Maintainers <docker-maint@nginx.com>"
+
+ENV NGINX_VERSION 1.29.2
+ENV PKG_RELEASE 1
+ENV DYNPKG_RELEASE 1
+
+RUN set -x \
+# create nginx user/group first, to be consistent throughout docker variants
+ && addgroup -g 101 -S nginx \
+ && adduser -S -D -H -u 101 -h /var/cache/nginx -s /sbin/nologin -G nginx -g nginx nginx \
+ && apkArch="$(cat /etc/apk/arch)" \
+ && nginxPackages=" \
+ nginx=${NGINX_VERSION}-r${PKG_RELEASE} \
+ " \
+# install prerequisites for public key and pkg-oss checks
+ && apk add --no-cache --virtual .checksum-deps \
+ openssl \
+ && case "$apkArch" in \
+ x86_64|aarch64) \
+# arches officially built by upstream
+ set -x \
+ && KEY_SHA512="e09fa32f0a0eab2b879ccbbc4d0e4fb9751486eedda75e35fac65802cc9faa266425edf83e261137a2f4d16281ce2c1a5f4502930fe75154723da014214f0655" \
+ && wget -O /tmp/nginx_signing.rsa.pub https://nginx.org/keys/nginx_signing.rsa.pub \
+ && if echo "$KEY_SHA512 */tmp/nginx_signing.rsa.pub" | sha512sum -c -; then \
+ echo "key verification succeeded!"; \
+ mv /tmp/nginx_signing.rsa.pub /etc/apk/keys/; \
+ else \
+ echo "key verification failed!"; \
+ exit 1; \
+ fi \
+ && apk add -X "https://nginx.org/packages/mainline/alpine/v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" --no-cache $nginxPackages \
+ ;; \
+ *) \
+# we're on an architecture upstream doesn't officially build for
+# let's build binaries from the published packaging sources
+ set -x \
+ && tempDir="$(mktemp -d)" \
+ && chown nobody:nobody $tempDir \
+ && apk add --no-cache --virtual .build-deps \
+ gcc \
+ libc-dev \
+ make \
+ openssl-dev \
+ pcre2-dev \
+ zlib-dev \
+ linux-headers \
+ bash \
+ alpine-sdk \
+ findutils \
+ curl \
+ && su nobody -s /bin/sh -c " \
+ export HOME=${tempDir} \
+ && cd ${tempDir} \
+ && curl -f -L -O https://github.com/nginx/pkg-oss/archive/${NGINX_VERSION}-${PKG_RELEASE}.tar.gz \
+ && PKGOSSCHECKSUM=\"633b2a8b56bd48527d7e293a255fd706dfbb5a9c47605ff18e91a2a409801043ee00ecb0da5fadf9cdf1d483c5ca848e81c1861870619523e15ca9e494b6e700 *${NGINX_VERSION}-${PKG_RELEASE}.tar.gz\" \
+ && if [ \"\$(openssl sha512 -r ${NGINX_VERSION}-${PKG_RELEASE}.tar.gz)\" = \"\$PKGOSSCHECKSUM\" ]; then \
+ echo \"pkg-oss tarball checksum verification succeeded!\"; \
+ else \
+ echo \"pkg-oss tarball checksum verification failed!\"; \
+ exit 1; \
+ fi \
+ && tar xzvf ${NGINX_VERSION}-${PKG_RELEASE}.tar.gz \
+ && cd pkg-oss-${NGINX_VERSION}-${PKG_RELEASE} \
+ && cd alpine \
+ && make base \
+ && apk index --allow-untrusted -o ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz ${tempDir}/packages/alpine/${apkArch}/*.apk \
+ && abuild-sign -k ${tempDir}/.abuild/abuild-key.rsa ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz \
+ " \
+ && cp ${tempDir}/.abuild/abuild-key.rsa.pub /etc/apk/keys/ \
+ && apk del --no-network .build-deps \
+ && apk add -X ${tempDir}/packages/alpine/ --no-cache $nginxPackages \
+ ;; \
+ esac \
+# remove checksum deps
+ && apk del --no-network .checksum-deps \
+# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
+ && if [ -n "$tempDir" ]; then rm -rf "$tempDir"; fi \
+ && if [ -f "/etc/apk/keys/abuild-key.rsa.pub" ]; then rm -f /etc/apk/keys/abuild-key.rsa.pub; fi \
+# Add `envsubst` for templating environment variables
+ && apk add --no-cache gettext-envsubst \
+# Bring in tzdata so users could set the timezones through the environment
+# variables
+ && apk add --no-cache tzdata \
+# forward request and error logs to docker log collector
+ && ln -sf /dev/stdout /var/log/nginx/access.log \
+ && ln -sf /dev/stderr /var/log/nginx/error.log \
+# create a docker-entrypoint.d directory
+ && mkdir /docker-entrypoint.d
+
+COPY docker-entrypoint.sh /
+COPY 10-listen-on-ipv6-by-default.sh /docker-entrypoint.d
+COPY 15-local-resolvers.envsh /docker-entrypoint.d
+COPY 20-envsubst-on-templates.sh /docker-entrypoint.d
+COPY 30-tune-worker-processes.sh /docker-entrypoint.d
+ENTRYPOINT ["/docker-entrypoint.sh"]
+
+EXPOSE 80
+
+STOPSIGNAL SIGQUIT
+
+CMD ["nginx", "-g", "daemon off;"]
diff --git a/mainline/alpine-slim/docker-entrypoint.sh b/mainline/alpine-slim/docker-entrypoint.sh
new file mode 100755
index 000000000..8ea04f217
--- /dev/null
+++ b/mainline/alpine-slim/docker-entrypoint.sh
@@ -0,0 +1,47 @@
+#!/bin/sh
+# vim:sw=4:ts=4:et
+
+set -e
+
+entrypoint_log() {
+ if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then
+ echo "$@"
+ fi
+}
+
+if [ "$1" = "nginx" ] || [ "$1" = "nginx-debug" ]; then
+ if /usr/bin/find "/docker-entrypoint.d/" -mindepth 1 -maxdepth 1 -type f -print -quit 2>/dev/null | read v; then
+ entrypoint_log "$0: /docker-entrypoint.d/ is not empty, will attempt to perform configuration"
+
+ entrypoint_log "$0: Looking for shell scripts in /docker-entrypoint.d/"
+ find "/docker-entrypoint.d/" -follow -type f -print | sort -V | while read -r f; do
+ case "$f" in
+ *.envsh)
+ if [ -x "$f" ]; then
+ entrypoint_log "$0: Sourcing $f";
+ . "$f"
+ else
+ # warn on shell scripts without exec bit
+ entrypoint_log "$0: Ignoring $f, not executable";
+ fi
+ ;;
+ *.sh)
+ if [ -x "$f" ]; then
+ entrypoint_log "$0: Launching $f";
+ "$f"
+ else
+ # warn on shell scripts without exec bit
+ entrypoint_log "$0: Ignoring $f, not executable";
+ fi
+ ;;
+ *) entrypoint_log "$0: Ignoring $f";;
+ esac
+ done
+
+ entrypoint_log "$0: Configuration complete; ready for start up"
+ else
+ entrypoint_log "$0: No files found in /docker-entrypoint.d/, skipping configuration"
+ fi
+fi
+
+exec "$@"
diff --git a/mainline/alpine/Dockerfile b/mainline/alpine/Dockerfile
index c1b42a5ce..5d9aae8f8 100644
--- a/mainline/alpine/Dockerfile
+++ b/mainline/alpine/Dockerfile
@@ -1,146 +1,79 @@
-FROM alpine:3.7
+#
+# NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh"
+#
+# PLEASE DO NOT EDIT IT DIRECTLY.
+#
+FROM nginx:1.29.2-alpine-slim
-LABEL maintainer="NGINX Docker Maintainers <docker-maint@nginx.com>"
+ENV NJS_VERSION 0.9.3
+ENV NJS_RELEASE 1
-ENV NGINX_VERSION 1.15.2
-
-RUN GPG_KEYS=B0F4253373F8F6F510D42178520A9993A1C052F8 \
- && CONFIG="\
- --prefix=/etc/nginx \
- --sbin-path=/usr/sbin/nginx \
- --modules-path=/usr/lib/nginx/modules \
- --conf-path=/etc/nginx/nginx.conf \
- --error-log-path=/var/log/nginx/error.log \
- --http-log-path=/var/log/nginx/access.log \
- --pid-path=/var/run/nginx.pid \
- --lock-path=/var/run/nginx.lock \
- --http-client-body-temp-path=/var/cache/nginx/client_temp \
- --http-proxy-temp-path=/var/cache/nginx/proxy_temp \
- --http-fastcgi-temp-path=/var/cache/nginx/fastcgi_temp \
- --http-uwsgi-temp-path=/var/cache/nginx/uwsgi_temp \
- --http-scgi-temp-path=/var/cache/nginx/scgi_temp \
- --user=nginx \
- --group=nginx \
- --with-http_ssl_module \
- --with-http_realip_module \
- --with-http_addition_module \
- --with-http_sub_module \
- --with-http_dav_module \
- --with-http_flv_module \
- --with-http_mp4_module \
- --with-http_gunzip_module \
- --with-http_gzip_static_module \
- --with-http_random_index_module \
- --with-http_secure_link_module \
- --with-http_stub_status_module \
- --with-http_auth_request_module \
- --with-http_xslt_module=dynamic \
- --with-http_image_filter_module=dynamic \
- --with-http_geoip_module=dynamic \
- --with-threads \
- --with-stream \
- --with-stream_ssl_module \
- --with-stream_ssl_preread_module \
- --with-stream_realip_module \
- --with-stream_geoip_module=dynamic \
- --with-http_slice_module \
- --with-mail \
- --with-mail_ssl_module \
- --with-compat \
- --with-file-aio \
- --with-http_v2_module \
- " \
- && addgroup -S nginx \
- && adduser -D -S -h /var/cache/nginx -s /sbin/nologin -G nginx nginx \
- && apk add --no-cache --virtual .build-deps \
- gcc \
- libc-dev \
- make \
- openssl-dev \
- pcre-dev \
- zlib-dev \
- linux-headers \
- curl \
- gnupg \
- libxslt-dev \
- gd-dev \
- geoip-dev \
- && curl -fSL https://nginx.org/download/nginx-$NGINX_VERSION.tar.gz -o nginx.tar.gz \
- && curl -fSL https://nginx.org/download/nginx-$NGINX_VERSION.tar.gz.asc -o nginx.tar.gz.asc \
- && export GNUPGHOME="$(mktemp -d)" \
- && found=''; \
- for server in \
- ha.pool.sks-keyservers.net \
- hkp://keyserver.ubuntu.com:80 \
- hkp://p80.pool.sks-keyservers.net:80 \
- pgp.mit.edu \
- ; do \
- echo "Fetching GPG key $GPG_KEYS from $server"; \
- gpg --keyserver "$server" --keyserver-options timeout=10 --recv-keys "$GPG_KEYS" && found=yes && break; \
- done; \
- test -z "$found" && echo >&2 "error: failed to fetch GPG key $GPG_KEYS" && exit 1; \
- gpg --batch --verify nginx.tar.gz.asc nginx.tar.gz \
- && rm -rf "$GNUPGHOME" nginx.tar.gz.asc \
- && mkdir -p /usr/src \
- && tar -zxC /usr/src -f nginx.tar.gz \
- && rm nginx.tar.gz \
- && cd /usr/src/nginx-$NGINX_VERSION \
- && ./configure $CONFIG --with-debug \
- && make -j$(getconf _NPROCESSORS_ONLN) \
- && mv objs/nginx objs/nginx-debug \
- && mv objs/ngx_http_xslt_filter_module.so objs/ngx_http_xslt_filter_module-debug.so \
- && mv objs/ngx_http_image_filter_module.so objs/ngx_http_image_filter_module-debug.so \
- && mv objs/ngx_http_geoip_module.so objs/ngx_http_geoip_module-debug.so \
- && mv objs/ngx_stream_geoip_module.so objs/ngx_stream_geoip_module-debug.so \
- && ./configure $CONFIG \
- && make -j$(getconf _NPROCESSORS_ONLN) \
- && make install \
- && rm -rf /etc/nginx/html/ \
- && mkdir /etc/nginx/conf.d/ \
- && mkdir -p /usr/share/nginx/html/ \
- && install -m644 html/index.html /usr/share/nginx/html/ \
- && install -m644 html/50x.html /usr/share/nginx/html/ \
- && install -m755 objs/nginx-debug /usr/sbin/nginx-debug \
- && install -m755 objs/ngx_http_xslt_filter_module-debug.so /usr/lib/nginx/modules/ngx_http_xslt_filter_module-debug.so \
- && install -m755 objs/ngx_http_image_filter_module-debug.so /usr/lib/nginx/modules/ngx_http_image_filter_module-debug.so \
- && install -m755 objs/ngx_http_geoip_module-debug.so /usr/lib/nginx/modules/ngx_http_geoip_module-debug.so \
- && install -m755 objs/ngx_stream_geoip_module-debug.so /usr/lib/nginx/modules/ngx_stream_geoip_module-debug.so \
- && ln -s ../../usr/lib/nginx/modules /etc/nginx/modules \
- && strip /usr/sbin/nginx* \
- && strip /usr/lib/nginx/modules/*.so \
- && rm -rf /usr/src/nginx-$NGINX_VERSION \
- \
- # Bring in gettext so we can get `envsubst`, then throw
- # the rest away. To do this, we need to install `gettext`
- # then move `envsubst` out of the way so `gettext` can
- # be deleted completely, then move `envsubst` back.
- && apk add --no-cache --virtual .gettext gettext \
- && mv /usr/bin/envsubst /tmp/ \
- \
- && runDeps="$( \
- scanelf --needed --nobanner --format '%n#p' /usr/sbin/nginx /usr/lib/nginx/modules/*.so /tmp/envsubst \
- | tr ',' '\n' \
- | sort -u \
- | awk 'system("[ -e /usr/local/lib/" $1 " ]") == 0 { next } { print "so:" $1 }' \
- )" \
- && apk add --no-cache --virtual .nginx-rundeps $runDeps \
- && apk del .build-deps \
- && apk del .gettext \
- && mv /tmp/envsubst /usr/local/bin/ \
- \
- # Bring in tzdata so users could set the timezones through the environment
- # variables
- && apk add --no-cache tzdata \
- \
- # forward request and error logs to docker log collector
- && ln -sf /dev/stdout /var/log/nginx/access.log \
- && ln -sf /dev/stderr /var/log/nginx/error.log
-
-COPY nginx.conf /etc/nginx/nginx.conf
-COPY nginx.vh.default.conf /etc/nginx/conf.d/default.conf
-
-EXPOSE 80
-
-STOPSIGNAL SIGTERM
-
-CMD ["nginx", "-g", "daemon off;"]
+RUN set -x \
+ && apkArch="$(cat /etc/apk/arch)" \
+ && nginxPackages=" \
+ nginx=${NGINX_VERSION}-r${PKG_RELEASE} \
+ nginx-module-xslt=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
+ nginx-module-geoip=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
+ nginx-module-image-filter=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
+ nginx-module-njs=${NGINX_VERSION}.${NJS_VERSION}-r${NJS_RELEASE} \
+ " \
+# install prerequisites for public key and pkg-oss checks
+ && apk add --no-cache --virtual .checksum-deps \
+ openssl \
+ && case "$apkArch" in \
+ x86_64|aarch64) \
+# arches officially built by upstream
+ apk add -X "https://nginx.org/packages/mainline/alpine/v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" --no-cache $nginxPackages \
+ ;; \
+ *) \
+# we're on an architecture upstream doesn't officially build for
+# let's build binaries from the published packaging sources
+ set -x \
+ && tempDir="$(mktemp -d)" \
+ && chown nobody:nobody $tempDir \
+ && apk add --no-cache --virtual .build-deps \
+ gcc \
+ libc-dev \
+ make \
+ openssl-dev \
+ pcre2-dev \
+ zlib-dev \
+ linux-headers \
+ libxslt-dev \
+ gd-dev \
+ geoip-dev \
+ libedit-dev \
+ bash \
+ alpine-sdk \
+ findutils \
+ curl \
+ && su nobody -s /bin/sh -c " \
+ export HOME=${tempDir} \
+ && cd ${tempDir} \
+ && curl -f -L -O https://github.com/nginx/pkg-oss/archive/${NGINX_VERSION}-${PKG_RELEASE}.tar.gz \
+ && PKGOSSCHECKSUM=\"633b2a8b56bd48527d7e293a255fd706dfbb5a9c47605ff18e91a2a409801043ee00ecb0da5fadf9cdf1d483c5ca848e81c1861870619523e15ca9e494b6e700 *${NGINX_VERSION}-${PKG_RELEASE}.tar.gz\" \
+ && if [ \"\$(openssl sha512 -r ${NGINX_VERSION}-${PKG_RELEASE}.tar.gz)\" = \"\$PKGOSSCHECKSUM\" ]; then \
+ echo \"pkg-oss tarball checksum verification succeeded!\"; \
+ else \
+ echo \"pkg-oss tarball checksum verification failed!\"; \
+ exit 1; \
+ fi \
+ && tar xzvf ${NGINX_VERSION}-${PKG_RELEASE}.tar.gz \
+ && cd pkg-oss-${NGINX_VERSION}-${PKG_RELEASE} \
+ && cd alpine \
+ && make module-geoip module-image-filter module-njs module-xslt \
+ && apk index --allow-untrusted -o ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz ${tempDir}/packages/alpine/${apkArch}/*.apk \
+ && abuild-sign -k ${tempDir}/.abuild/abuild-key.rsa ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz \
+ " \
+ && cp ${tempDir}/.abuild/abuild-key.rsa.pub /etc/apk/keys/ \
+ && apk del --no-network .build-deps \
+ && apk add -X ${tempDir}/packages/alpine/ --no-cache $nginxPackages \
+ ;; \
+ esac \
+# remove checksum deps
+ && apk del --no-network .checksum-deps \
+# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
+ && if [ -n "$tempDir" ]; then rm -rf "$tempDir"; fi \
+ && if [ -f "/etc/apk/keys/abuild-key.rsa.pub" ]; then rm -f /etc/apk/keys/abuild-key.rsa.pub; fi \
+# Bring in curl and ca-certificates to make registering on DNS SD easier
+ && apk add --no-cache curl ca-certificates
diff --git a/mainline/alpine/nginx.conf b/mainline/alpine/nginx.conf
deleted file mode 100644
index e4bad8dbc..000000000
--- a/mainline/alpine/nginx.conf
+++ /dev/null
@@ -1,32 +0,0 @@
-
-user nginx;
-worker_processes 1;
-
-error_log /var/log/nginx/error.log warn;
-pid /var/run/nginx.pid;
-
-
-events {
- worker_connections 1024;
-}
-
-
-http {
- include /etc/nginx/mime.types;
- default_type application/octet-stream;
-
- log_format main '$remote_addr - $remote_user [$time_local] "$request" '
- '$status $body_bytes_sent "$http_referer" '
- '"$http_user_agent" "$http_x_forwarded_for"';
-
- access_log /var/log/nginx/access.log main;
-
- sendfile on;
- #tcp_nopush on;
-
- keepalive_timeout 65;
-
- #gzip on;
-
- include /etc/nginx/conf.d/*.conf;
-}
diff --git a/mainline/alpine/nginx.vh.default.conf b/mainline/alpine/nginx.vh.default.conf
deleted file mode 100644
index 299c622a7..000000000
--- a/mainline/alpine/nginx.vh.default.conf
+++ /dev/null
@@ -1,45 +0,0 @@
-server {
- listen 80;
- server_name localhost;
-
- #charset koi8-r;
- #access_log /var/log/nginx/host.access.log main;
-
- location / {
- root /usr/share/nginx/html;
- index index.html index.htm;
- }
-
- #error_page 404 /404.html;
-
- # redirect server error pages to the static page /50x.html
- #
- error_page 500 502 503 504 /50x.html;
- location = /50x.html {
- root /usr/share/nginx/html;
- }
-
- # proxy the PHP scripts to Apache listening on 127.0.0.1:80
- #
- #location ~ \.php$ {
- # proxy_pass http://127.0.0.1;
- #}
-
- # pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000
- #
- #location ~ \.php$ {
- # root html;
- # fastcgi_pass 127.0.0.1:9000;
- # fastcgi_index index.php;
- # fastcgi_param SCRIPT_FILENAME /scripts$fastcgi_script_name;
- # include fastcgi_params;
- #}
-
- # deny access to .htaccess files, if Apache's document root
- # concurs with nginx's one
- #
- #location ~ /\.ht {
- # deny all;
- #}
-}
-
diff --git a/mainline/debian-otel/Dockerfile b/mainline/debian-otel/Dockerfile
new file mode 100644
index 000000000..075e43f54
--- /dev/null
+++ b/mainline/debian-otel/Dockerfile
@@ -0,0 +1,100 @@
+#
+# NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh"
+#
+# PLEASE DO NOT EDIT IT DIRECTLY.
+#
+FROM nginx:1.29.2
+
+ENV OTEL_VERSION 0.1.2
+
+RUN set -x; \
+ NGINX_GPGKEY_PATH=/etc/apt/keyrings/nginx-archive-keyring.gpg; \
+ dpkgArch="$(dpkg --print-architecture)" \
+ && nginxPackages=" \
+ nginx=${NGINX_VERSION}-${PKG_RELEASE} \
+ nginx-module-xslt=${NGINX_VERSION}-${DYNPKG_RELEASE} \
+ nginx-module-geoip=${NGINX_VERSION}-${DYNPKG_RELEASE} \
+ nginx-module-image-filter=${NGINX_VERSION}-${DYNPKG_RELEASE} \
+ nginx-module-njs=${NGINX_VERSION}+${NJS_VERSION}-${NJS_RELEASE} \
+ nginx-module-otel=${NGINX_VERSION}+${OTEL_VERSION}-${PKG_RELEASE} \
+ " \
+ && case "$dpkgArch" in \
+ amd64|arm64) \
+# arches officially built by upstream
+ echo "deb [signed-by=$NGINX_GPGKEY_PATH] https://nginx.org/packages/mainline/debian/ trixie nginx" >> /etc/apt/sources.list.d/nginx.list \
+ && apt-get update \
+ ;; \
+ *) \
+# we're on an architecture upstream doesn't officially build for
+# let's build binaries from the published packaging sources
+# new directory for storing sources and .deb files
+ tempDir="$(mktemp -d)" \
+ && chmod 777 "$tempDir" \
+# (777 to ensure APT's "_apt" user can access it too)
+ \
+# save list of currently-installed packages so build dependencies can be cleanly removed later
+ && savedAptMark="$(apt-mark showmanual)" \
+ \
+# build .deb files from upstream's packaging sources
+ && apt-get update \
+ && apt-get install --no-install-recommends --no-install-suggests -y \
+ curl \
+ devscripts \
+ equivs \
+ git \
+ libxml2-utils \
+ lsb-release \
+ xsltproc \
+ && ( \
+ cd "$tempDir" \
+ && REVISION="${NGINX_VERSION}-${PKG_RELEASE}" \
+ && REVISION=${REVISION%~*} \
+ && curl -f -L -O https://github.com/nginx/pkg-oss/archive/${REVISION}.tar.gz \
+ && PKGOSSCHECKSUM="633b2a8b56bd48527d7e293a255fd706dfbb5a9c47605ff18e91a2a409801043ee00ecb0da5fadf9cdf1d483c5ca848e81c1861870619523e15ca9e494b6e700 *${REVISION}.tar.gz" \
+ && if [ "$(openssl sha512 -r ${REVISION}.tar.gz)" = "$PKGOSSCHECKSUM" ]; then \
+ echo "pkg-oss tarball checksum verification succeeded!"; \
+ else \
+ echo "pkg-oss tarball checksum verification failed!"; \
+ exit 1; \
+ fi \
+ && tar xzvf ${REVISION}.tar.gz \
+ && cd pkg-oss-${REVISION} \
+ && cd debian \
+ && for target in module-otel; do \
+ make rules-$target; \
+ mk-build-deps --install --tool="apt-get -o Debug::pkgProblemResolver=yes --no-install-recommends --yes" \
+ debuild-$target/nginx-$NGINX_VERSION/debian/control; \
+ done \
+ && make module-otel \
+ ) \
+# we don't remove APT lists here because they get re-downloaded and removed later
+ \
+# reset apt-mark's "manual" list so that "purge --auto-remove" will remove all build dependencies
+# (which is done after we install the built packages so we don't have to redownload any overlapping dependencies)
+ && apt-mark showmanual | xargs apt-mark auto > /dev/null \
+ && { [ -z "$savedAptMark" ] || apt-mark manual $savedAptMark; } \
+ \
+# create a temporary local APT repo to install from (so that dependency resolution can be handled by APT, as it should be)
+ && ls -lAFh "$tempDir" \
+ && ( cd "$tempDir" && dpkg-scanpackages . > Packages ) \
+ && grep '^Package: ' "$tempDir/Packages" \
+ && echo "deb [ trusted=yes ] file://$tempDir ./" > /etc/apt/sources.list.d/temp.list \
+# work around the following APT issue by using "Acquire::GzipIndexes=false" (overriding "/etc/apt/apt.conf.d/docker-gzip-indexes")
+# Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
+# ...
+# E: Failed to fetch store:/var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
+ && apt-get -o Acquire::GzipIndexes=false update \
+ ;; \
+ esac \
+ \
+ && apt-get install --no-install-recommends --no-install-suggests -y \
+ $nginxPackages \
+ gettext-base \
+ curl \
+ && apt-get remove --purge --auto-remove -y && rm -rf /var/lib/apt/lists/* /etc/apt/sources.list.d/nginx.list \
+ \
+# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
+ && if [ -n "$tempDir" ]; then \
+ apt-get purge -y --auto-remove \
+ && rm -rf "$tempDir" /etc/apt/sources.list.d/temp.list; \
+ fi
diff --git a/mainline/debian-perl/Dockerfile b/mainline/debian-perl/Dockerfile
new file mode 100644
index 000000000..258db626d
--- /dev/null
+++ b/mainline/debian-perl/Dockerfile
@@ -0,0 +1,98 @@
+#
+# NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh"
+#
+# PLEASE DO NOT EDIT IT DIRECTLY.
+#
+FROM nginx:1.29.2
+
+RUN set -x; \
+ NGINX_GPGKEY_PATH=/etc/apt/keyrings/nginx-archive-keyring.gpg; \
+ dpkgArch="$(dpkg --print-architecture)" \
+ && nginxPackages=" \
+ nginx=${NGINX_VERSION}-${PKG_RELEASE} \
+ nginx-module-xslt=${NGINX_VERSION}-${DYNPKG_RELEASE} \
+ nginx-module-geoip=${NGINX_VERSION}-${DYNPKG_RELEASE} \
+ nginx-module-image-filter=${NGINX_VERSION}-${DYNPKG_RELEASE} \
+ nginx-module-perl=${NGINX_VERSION}-${DYNPKG_RELEASE} \
+ nginx-module-njs=${NGINX_VERSION}+${NJS_VERSION}-${NJS_RELEASE} \
+ " \
+ && case "$dpkgArch" in \
+ amd64|arm64) \
+# arches officially built by upstream
+ echo "deb [signed-by=$NGINX_GPGKEY_PATH] https://nginx.org/packages/mainline/debian/ trixie nginx" >> /etc/apt/sources.list.d/nginx.list \
+ && apt-get update \
+ ;; \
+ *) \
+# we're on an architecture upstream doesn't officially build for
+# let's build binaries from the published packaging sources
+# new directory for storing sources and .deb files
+ tempDir="$(mktemp -d)" \
+ && chmod 777 "$tempDir" \
+# (777 to ensure APT's "_apt" user can access it too)
+ \
+# save list of currently-installed packages so build dependencies can be cleanly removed later
+ && savedAptMark="$(apt-mark showmanual)" \
+ \
+# build .deb files from upstream's packaging sources
+ && apt-get update \
+ && apt-get install --no-install-recommends --no-install-suggests -y \
+ curl \
+ devscripts \
+ equivs \
+ git \
+ libxml2-utils \
+ lsb-release \
+ xsltproc \
+ && ( \
+ cd "$tempDir" \
+ && REVISION="${NGINX_VERSION}-${PKG_RELEASE}" \
+ && REVISION=${REVISION%~*} \
+ && curl -f -L -O https://github.com/nginx/pkg-oss/archive/${REVISION}.tar.gz \
+ && PKGOSSCHECKSUM="633b2a8b56bd48527d7e293a255fd706dfbb5a9c47605ff18e91a2a409801043ee00ecb0da5fadf9cdf1d483c5ca848e81c1861870619523e15ca9e494b6e700 *${REVISION}.tar.gz" \
+ && if [ "$(openssl sha512 -r ${REVISION}.tar.gz)" = "$PKGOSSCHECKSUM" ]; then \
+ echo "pkg-oss tarball checksum verification succeeded!"; \
+ else \
+ echo "pkg-oss tarball checksum verification failed!"; \
+ exit 1; \
+ fi \
+ && tar xzvf ${REVISION}.tar.gz \
+ && cd pkg-oss-${REVISION} \
+ && cd debian \
+ && for target in module-perl; do \
+ make rules-$target; \
+ mk-build-deps --install --tool="apt-get -o Debug::pkgProblemResolver=yes --no-install-recommends --yes" \
+ debuild-$target/nginx-$NGINX_VERSION/debian/control; \
+ done \
+ && make module-perl \
+ ) \
+# we don't remove APT lists here because they get re-downloaded and removed later
+ \
+# reset apt-mark's "manual" list so that "purge --auto-remove" will remove all build dependencies
+# (which is done after we install the built packages so we don't have to redownload any overlapping dependencies)
+ && apt-mark showmanual | xargs apt-mark auto > /dev/null \
+ && { [ -z "$savedAptMark" ] || apt-mark manual $savedAptMark; } \
+ \
+# create a temporary local APT repo to install from (so that dependency resolution can be handled by APT, as it should be)
+ && ls -lAFh "$tempDir" \
+ && ( cd "$tempDir" && dpkg-scanpackages . > Packages ) \
+ && grep '^Package: ' "$tempDir/Packages" \
+ && echo "deb [ trusted=yes ] file://$tempDir ./" > /etc/apt/sources.list.d/temp.list \
+# work around the following APT issue by using "Acquire::GzipIndexes=false" (overriding "/etc/apt/apt.conf.d/docker-gzip-indexes")
+# Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
+# ...
+# E: Failed to fetch store:/var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
+ && apt-get -o Acquire::GzipIndexes=false update \
+ ;; \
+ esac \
+ \
+ && apt-get install --no-install-recommends --no-install-suggests -y \
+ $nginxPackages \
+ gettext-base \
+ curl \
+ && apt-get remove --purge --auto-remove -y && rm -rf /var/lib/apt/lists/* /etc/apt/sources.list.d/nginx.list \
+ \
+# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
+ && if [ -n "$tempDir" ]; then \
+ apt-get purge -y --auto-remove \
+ && rm -rf "$tempDir" /etc/apt/sources.list.d/temp.list; \
+ fi
diff --git a/mainline/debian/10-listen-on-ipv6-by-default.sh b/mainline/debian/10-listen-on-ipv6-by-default.sh
new file mode 100755
index 000000000..61a901dee
--- /dev/null
+++ b/mainline/debian/10-listen-on-ipv6-by-default.sh
@@ -0,0 +1,67 @@
+#!/bin/sh
+# vim:sw=4:ts=4:et
+
+set -e
+
+entrypoint_log() {
+ if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then
+ echo "$@"
+ fi
+}
+
+ME=$(basename "$0")
+DEFAULT_CONF_FILE="etc/nginx/conf.d/default.conf"
+
+# check if we have ipv6 available
+if [ ! -f "/proc/net/if_inet6" ]; then
+ entrypoint_log "$ME: info: ipv6 not available"
+ exit 0
+fi
+
+if [ ! -f "/$DEFAULT_CONF_FILE" ]; then
+ entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE is not a file or does not exist"
+ exit 0
+fi
+
+# check if the file can be modified, e.g. not on a r/o filesystem
+touch /$DEFAULT_CONF_FILE 2>/dev/null || { entrypoint_log "$ME: info: can not modify /$DEFAULT_CONF_FILE (read-only file system?)"; exit 0; }
+
+# check if the file is already modified, e.g. on a container restart
+grep -q "listen \[::\]:80;" /$DEFAULT_CONF_FILE && { entrypoint_log "$ME: info: IPv6 listen already enabled"; exit 0; }
+
+if [ -f "/etc/os-release" ]; then
+ . /etc/os-release
+else
+ entrypoint_log "$ME: info: can not guess the operating system"
+ exit 0
+fi
+
+entrypoint_log "$ME: info: Getting the checksum of /$DEFAULT_CONF_FILE"
+
+case "$ID" in
+ "debian")
+ CHECKSUM=$(dpkg-query --show --showformat='${Conffiles}\n' nginx | grep $DEFAULT_CONF_FILE | cut -d' ' -f 3)
+ echo "$CHECKSUM /$DEFAULT_CONF_FILE" | md5sum -c - >/dev/null 2>&1 || {
+ entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE differs from the packaged version"
+ exit 0
+ }
+ ;;
+ "alpine")
+ CHECKSUM=$(apk manifest nginx 2>/dev/null| grep $DEFAULT_CONF_FILE | cut -d' ' -f 1 | cut -d ':' -f 2)
+ echo "$CHECKSUM /$DEFAULT_CONF_FILE" | sha1sum -c - >/dev/null 2>&1 || {
+ entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE differs from the packaged version"
+ exit 0
+ }
+ ;;
+ *)
+ entrypoint_log "$ME: info: Unsupported distribution"
+ exit 0
+ ;;
+esac
+
+# enable ipv6 on default.conf listen sockets
+sed -i -E 's,listen 80;,listen 80;\n listen [::]:80;,' /$DEFAULT_CONF_FILE
+
+entrypoint_log "$ME: info: Enabled listen on IPv6 in /$DEFAULT_CONF_FILE"
+
+exit 0
diff --git a/mainline/debian/15-local-resolvers.envsh b/mainline/debian/15-local-resolvers.envsh
new file mode 100755
index 000000000..e830ddacd
--- /dev/null
+++ b/mainline/debian/15-local-resolvers.envsh
@@ -0,0 +1,15 @@
+#!/bin/sh
+# vim:sw=2:ts=2:sts=2:et
+
+set -eu
+
+LC_ALL=C
+PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+
+[ "${NGINX_ENTRYPOINT_LOCAL_RESOLVERS:-}" ] || return 0
+
+NGINX_LOCAL_RESOLVERS=$(awk 'BEGIN{ORS=" "} $1=="nameserver" {if ($2 ~ ":") {print "["$2"]"} else {print $2}}' /etc/resolv.conf)
+
+NGINX_LOCAL_RESOLVERS="${NGINX_LOCAL_RESOLVERS% }"
+
+export NGINX_LOCAL_RESOLVERS
diff --git a/mainline/debian/20-envsubst-on-templates.sh b/mainline/debian/20-envsubst-on-templates.sh
new file mode 100755
index 000000000..3804165c9
--- /dev/null
+++ b/mainline/debian/20-envsubst-on-templates.sh
@@ -0,0 +1,78 @@
+#!/bin/sh
+
+set -e
+
+ME=$(basename "$0")
+
+entrypoint_log() {
+ if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then
+ echo "$@"
+ fi
+}
+
+add_stream_block() {
+ local conffile="/etc/nginx/nginx.conf"
+
+ if grep -q -E "\s*stream\s*\{" "$conffile"; then
+ entrypoint_log "$ME: $conffile contains a stream block; include $stream_output_dir/*.conf to enable stream templates"
+ else
+ # check if the file can be modified, e.g. not on a r/o filesystem
+ touch "$conffile" 2>/dev/null || { entrypoint_log "$ME: info: can not modify $conffile (read-only file system?)"; exit 0; }
+ entrypoint_log "$ME: Appending stream block to $conffile to include $stream_output_dir/*.conf"
+ cat << END >> "$conffile"
+# added by "$ME" on "$(date)"
+stream {
+ include $stream_output_dir/*.conf;
+}
+END
+ fi
+}
+
+auto_envsubst() {
+ local template_dir="${NGINX_ENVSUBST_TEMPLATE_DIR:-/etc/nginx/templates}"
+ local suffix="${NGINX_ENVSUBST_TEMPLATE_SUFFIX:-.template}"
+ local output_dir="${NGINX_ENVSUBST_OUTPUT_DIR:-/etc/nginx/conf.d}"
+ local stream_suffix="${NGINX_ENVSUBST_STREAM_TEMPLATE_SUFFIX:-.stream-template}"
+ local stream_output_dir="${NGINX_ENVSUBST_STREAM_OUTPUT_DIR:-/etc/nginx/stream-conf.d}"
+ local filter="${NGINX_ENVSUBST_FILTER:-}"
+
+ local template defined_envs relative_path output_path subdir
+ defined_envs=$(printf '${%s} ' $(awk "END { for (name in ENVIRON) { print ( name ~ /${filter}/ ) ? name : \"\" } }" < /dev/null ))
+ [ -d "$template_dir" ] || return 0
+ if [ ! -w "$output_dir" ]; then
+ entrypoint_log "$ME: ERROR: $template_dir exists, but $output_dir is not writable"
+ return 0
+ fi
+ find "$template_dir" -follow -type f -name "*$suffix" -print | while read -r template; do
+ relative_path="${template#"$template_dir/"}"
+ output_path="$output_dir/${relative_path%"$suffix"}"
+ subdir=$(dirname "$relative_path")
+ # create a subdirectory where the template file exists
+ mkdir -p "$output_dir/$subdir"
+ entrypoint_log "$ME: Running envsubst on $template to $output_path"
+ envsubst "$defined_envs" < "$template" > "$output_path"
+ done
+
+ # Print the first file with the stream suffix, this will be false if there are none
+ if test -n "$(find "$template_dir" -name "*$stream_suffix" -print -quit)"; then
+ mkdir -p "$stream_output_dir"
+ if [ ! -w "$stream_output_dir" ]; then
+ entrypoint_log "$ME: ERROR: $template_dir exists, but $stream_output_dir is not writable"
+ return 0
+ fi
+ add_stream_block
+ find "$template_dir" -follow -type f -name "*$stream_suffix" -print | while read -r template; do
+ relative_path="${template#"$template_dir/"}"
+ output_path="$stream_output_dir/${relative_path%"$stream_suffix"}"
+ subdir=$(dirname "$relative_path")
+ # create a subdirectory where the template file exists
+ mkdir -p "$stream_output_dir/$subdir"
+ entrypoint_log "$ME: Running envsubst on $template to $output_path"
+ envsubst "$defined_envs" < "$template" > "$output_path"
+ done
+ fi
+}
+
+auto_envsubst
+
+exit 0
diff --git a/mainline/debian/30-tune-worker-processes.sh b/mainline/debian/30-tune-worker-processes.sh
new file mode 100755
index 000000000..defb994f3
--- /dev/null
+++ b/mainline/debian/30-tune-worker-processes.sh
@@ -0,0 +1,188 @@
+#!/bin/sh
+# vim:sw=2:ts=2:sts=2:et
+
+set -eu
+
+LC_ALL=C
+ME=$(basename "$0")
+PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+
+[ "${NGINX_ENTRYPOINT_WORKER_PROCESSES_AUTOTUNE:-}" ] || exit 0
+
+touch /etc/nginx/nginx.conf 2>/dev/null || { echo >&2 "$ME: error: can not modify /etc/nginx/nginx.conf (read-only file system?)"; exit 0; }
+
+ceildiv() {
+ num=$1
+ div=$2
+ echo $(( (num + div - 1) / div ))
+}
+
+get_cpuset() {
+ cpusetroot=$1
+ cpusetfile=$2
+ ncpu=0
+ [ -f "$cpusetroot/$cpusetfile" ] || return 1
+ for token in $( tr ',' ' ' < "$cpusetroot/$cpusetfile" ); do
+ case "$token" in
+ *-*)
+ count=$( seq $(echo "$token" | tr '-' ' ') | wc -l )
+ ncpu=$(( ncpu+count ))
+ ;;
+ *)
+ ncpu=$(( ncpu+1 ))
+ ;;
+ esac
+ done
+ echo "$ncpu"
+}
+
+get_quota() {
+ cpuroot=$1
+ ncpu=0
+ [ -f "$cpuroot/cpu.cfs_quota_us" ] || return 1
+ [ -f "$cpuroot/cpu.cfs_period_us" ] || return 1
+ cfs_quota=$( cat "$cpuroot/cpu.cfs_quota_us" )
+ cfs_period=$( cat "$cpuroot/cpu.cfs_period_us" )
+ [ "$cfs_quota" = "-1" ] && return 1
+ [ "$cfs_period" = "0" ] && return 1
+ ncpu=$( ceildiv "$cfs_quota" "$cfs_period" )
+ [ "$ncpu" -gt 0 ] || return 1
+ echo "$ncpu"
+}
+
+get_quota_v2() {
+ cpuroot=$1
+ ncpu=0
+ [ -f "$cpuroot/cpu.max" ] || return 1
+ cfs_quota=$( cut -d' ' -f 1 < "$cpuroot/cpu.max" )
+ cfs_period=$( cut -d' ' -f 2 < "$cpuroot/cpu.max" )
+ [ "$cfs_quota" = "max" ] && return 1
+ [ "$cfs_period" = "0" ] && return 1
+ ncpu=$( ceildiv "$cfs_quota" "$cfs_period" )
+ [ "$ncpu" -gt 0 ] || return 1
+ echo "$ncpu"
+}
+
+get_cgroup_v1_path() {
+ needle=$1
+ found=
+ foundroot=
+ mountpoint=
+
+ [ -r "/proc/self/mountinfo" ] || return 1
+ [ -r "/proc/self/cgroup" ] || return 1
+
+ while IFS= read -r line; do
+ case "$needle" in
+ "cpuset")
+ case "$line" in
+ *cpuset*)
+ found=$( echo "$line" | cut -d ' ' -f 4,5 )
+ break
+ ;;
+ esac
+ ;;
+ "cpu")
+ case "$line" in
+ *cpuset*)
+ ;;
+ *cpu,cpuacct*|*cpuacct,cpu|*cpuacct*|*cpu*)
+ found=$( echo "$line" | cut -d ' ' -f 4,5 )
+ break
+ ;;
+ esac
+ esac
+ done << __EOF__
+$( grep -F -- '- cgroup ' /proc/self/mountinfo )
+__EOF__
+
+ while IFS= read -r line; do
+ controller=$( echo "$line" | cut -d: -f 2 )
+ case "$needle" in
+ "cpuset")
+ case "$controller" in
+ cpuset)
+ mountpoint=$( echo "$line" | cut -d: -f 3 )
+ break
+ ;;
+ esac
+ ;;
+ "cpu")
+ case "$controller" in
+ cpu,cpuacct|cpuacct,cpu|cpuacct|cpu)
+ mountpoint=$( echo "$line" | cut -d: -f 3 )
+ break
+ ;;
+ esac
+ ;;
+ esac
+done << __EOF__
+$( grep -F -- 'cpu' /proc/self/cgroup )
+__EOF__
+
+ case "${found%% *}" in
+ "/")
+ foundroot="${found##* }$mountpoint"
+ ;;
+ "$mountpoint")
+ foundroot="${found##* }"
+ ;;
+ esac
+ echo "$foundroot"
+}
+
+get_cgroup_v2_path() {
+ found=
+ foundroot=
+ mountpoint=
+
+ [ -r "/proc/self/mountinfo" ] || return 1
+ [ -r "/proc/self/cgroup" ] || return 1
+
+ while IFS= read -r line; do
+ found=$( echo "$line" | cut -d ' ' -f 4,5 )
+ done << __EOF__
+$( grep -F -- '- cgroup2 ' /proc/self/mountinfo )
+__EOF__
+
+ while IFS= read -r line; do
+ mountpoint=$( echo "$line" | cut -d: -f 3 )
+done << __EOF__
+$( grep -F -- '0::' /proc/self/cgroup )
+__EOF__
+
+ case "${found%% *}" in
+ "")
+ return 1
+ ;;
+ "/")
+ foundroot="${found##* }$mountpoint"
+ ;;
+ "$mountpoint" | /../*)
+ foundroot="${found##* }"
+ ;;
+ esac
+ echo "$foundroot"
+}
+
+ncpu_online=$( getconf _NPROCESSORS_ONLN )
+ncpu_cpuset=
+ncpu_quota=
+ncpu_cpuset_v2=
+ncpu_quota_v2=
+
+cpuset=$( get_cgroup_v1_path "cpuset" ) && ncpu_cpuset=$( get_cpuset "$cpuset" "cpuset.effective_cpus" ) || ncpu_cpuset=$ncpu_online
+cpu=$( get_cgroup_v1_path "cpu" ) && ncpu_quota=$( get_quota "$cpu" ) || ncpu_quota=$ncpu_online
+cgroup_v2=$( get_cgroup_v2_path ) && ncpu_cpuset_v2=$( get_cpuset "$cgroup_v2" "cpuset.cpus.effective" ) || ncpu_cpuset_v2=$ncpu_online
+cgroup_v2=$( get_cgroup_v2_path ) && ncpu_quota_v2=$( get_quota_v2 "$cgroup_v2" ) || ncpu_quota_v2=$ncpu_online
+
+ncpu=$( printf "%s\n%s\n%s\n%s\n%s\n" \
+ "$ncpu_online" \
+ "$ncpu_cpuset" \
+ "$ncpu_quota" \
+ "$ncpu_cpuset_v2" \
+ "$ncpu_quota_v2" \
+ | sort -n \
+ | head -n 1 )
+
+sed -i.bak -r 's/^(worker_processes)(.*)$/# Commented out by '"$ME"' on '"$(date)"'\n#\1\2\n\1 '"$ncpu"';/' /etc/nginx/nginx.conf
diff --git a/mainline/debian/Dockerfile b/mainline/debian/Dockerfile
new file mode 100644
index 000000000..b2eea0ea8
--- /dev/null
+++ b/mainline/debian/Dockerfile
@@ -0,0 +1,145 @@
+#
+# NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh"
+#
+# PLEASE DO NOT EDIT IT DIRECTLY.
+#
+FROM debian:trixie-slim
+
+LABEL maintainer="NGINX Docker Maintainers <docker-maint@nginx.com>"
+
+ENV NGINX_VERSION 1.29.2
+ENV NJS_VERSION 0.9.3
+ENV NJS_RELEASE 1~trixie
+ENV PKG_RELEASE 1~trixie
+ENV DYNPKG_RELEASE 1~trixie
+
+RUN set -x \
+# create nginx user/group first, to be consistent throughout docker variants
+ && groupadd --system --gid 101 nginx \
+ && useradd --system --gid nginx --no-create-home --home /nonexistent --comment "nginx user" --shell /bin/false --uid 101 nginx \
+ && apt-get update \
+ && apt-get install --no-install-recommends --no-install-suggests -y gnupg1 ca-certificates \
+ && \
+ NGINX_GPGKEYS="573BFD6B3D8FBC641079A6ABABF5BD827BD9BF62 8540A6F18833A80E9C1653A42FD21310B49F6B46 9E9BE90EACBCDE69FE9B204CBCDCD8A38D88A2B3"; \
+ NGINX_GPGKEY_PATH=/etc/apt/keyrings/nginx-archive-keyring.gpg; \
+ export GNUPGHOME="$(mktemp -d)"; \
+ found=''; \
+ for NGINX_GPGKEY in $NGINX_GPGKEYS; do \
+ for server in \
+ hkp://keyserver.ubuntu.com:80 \
+ pgp.mit.edu \
+ ; do \
+ echo "Fetching GPG key $NGINX_GPGKEY from $server"; \
+ gpg1 --batch --keyserver "$server" --keyserver-options timeout=10 --recv-keys "$NGINX_GPGKEY" && found=yes && break; \
+ done; \
+ test -z "$found" && echo >&2 "error: failed to fetch GPG key $NGINX_GPGKEY" && exit 1; \
+ done; \
+ gpg1 --batch --export $NGINX_GPGKEYS > "$NGINX_GPGKEY_PATH" ; \
+ rm -rf "$GNUPGHOME"; \
+ apt-get remove --purge --auto-remove -y gnupg1 && rm -rf /var/lib/apt/lists/* \
+ && dpkgArch="$(dpkg --print-architecture)" \
+ && nginxPackages=" \
+ nginx=${NGINX_VERSION}-${PKG_RELEASE} \
+ nginx-module-xslt=${NGINX_VERSION}-${DYNPKG_RELEASE} \
+ nginx-module-geoip=${NGINX_VERSION}-${DYNPKG_RELEASE} \
+ nginx-module-image-filter=${NGINX_VERSION}-${DYNPKG_RELEASE} \
+ nginx-module-njs=${NGINX_VERSION}+${NJS_VERSION}-${NJS_RELEASE} \
+ " \
+ && case "$dpkgArch" in \
+ amd64|arm64) \
+# arches officially built by upstream
+ echo "deb [signed-by=$NGINX_GPGKEY_PATH] https://nginx.org/packages/mainline/debian/ trixie nginx" >> /etc/apt/sources.list.d/nginx.list \
+ && apt-get update \
+ ;; \
+ *) \
+# we're on an architecture upstream doesn't officially build for
+# let's build binaries from the published packaging sources
+# new directory for storing sources and .deb files
+ tempDir="$(mktemp -d)" \
+ && chmod 777 "$tempDir" \
+# (777 to ensure APT's "_apt" user can access it too)
+ \
+# save list of currently-installed packages so build dependencies can be cleanly removed later
+ && savedAptMark="$(apt-mark showmanual)" \
+ \
+# build .deb files from upstream's packaging sources
+ && apt-get update \
+ && apt-get install --no-install-recommends --no-install-suggests -y \
+ curl \
+ devscripts \
+ equivs \
+ git \
+ libxml2-utils \
+ lsb-release \
+ xsltproc \
+ && ( \
+ cd "$tempDir" \
+ && REVISION="${NGINX_VERSION}-${PKG_RELEASE}" \
+ && REVISION=${REVISION%~*} \
+ && curl -f -L -O https://github.com/nginx/pkg-oss/archive/${REVISION}.tar.gz \
+ && PKGOSSCHECKSUM="633b2a8b56bd48527d7e293a255fd706dfbb5a9c47605ff18e91a2a409801043ee00ecb0da5fadf9cdf1d483c5ca848e81c1861870619523e15ca9e494b6e700 *${REVISION}.tar.gz" \
+ && if [ "$(openssl sha512 -r ${REVISION}.tar.gz)" = "$PKGOSSCHECKSUM" ]; then \
+ echo "pkg-oss tarball checksum verification succeeded!"; \
+ else \
+ echo "pkg-oss tarball checksum verification failed!"; \
+ exit 1; \
+ fi \
+ && tar xzvf ${REVISION}.tar.gz \
+ && cd pkg-oss-${REVISION} \
+ && cd debian \
+ && for target in base module-geoip module-image-filter module-njs module-xslt; do \
+ make rules-$target; \
+ mk-build-deps --install --tool="apt-get -o Debug::pkgProblemResolver=yes --no-install-recommends --yes" \
+ debuild-$target/nginx-$NGINX_VERSION/debian/control; \
+ done \
+ && make base module-geoip module-image-filter module-njs module-xslt \
+ ) \
+# we don't remove APT lists here because they get re-downloaded and removed later
+ \
+# reset apt-mark's "manual" list so that "purge --auto-remove" will remove all build dependencies
+# (which is done after we install the built packages so we don't have to redownload any overlapping dependencies)
+ && apt-mark showmanual | xargs apt-mark auto > /dev/null \
+ && { [ -z "$savedAptMark" ] || apt-mark manual $savedAptMark; } \
+ \
+# create a temporary local APT repo to install from (so that dependency resolution can be handled by APT, as it should be)
+ && ls -lAFh "$tempDir" \
+ && ( cd "$tempDir" && dpkg-scanpackages . > Packages ) \
+ && grep '^Package: ' "$tempDir/Packages" \
+ && echo "deb [ trusted=yes ] file://$tempDir ./" > /etc/apt/sources.list.d/temp.list \
+# work around the following APT issue by using "Acquire::GzipIndexes=false" (overriding "/etc/apt/apt.conf.d/docker-gzip-indexes")
+# Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
+# ...
+# E: Failed to fetch store:/var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
+ && apt-get -o Acquire::GzipIndexes=false update \
+ ;; \
+ esac \
+ \
+ && apt-get install --no-install-recommends --no-install-suggests -y \
+ $nginxPackages \
+ gettext-base \
+ curl \
+ && apt-get remove --purge --auto-remove -y && rm -rf /var/lib/apt/lists/* /etc/apt/sources.list.d/nginx.list \
+ \
+# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
+ && if [ -n "$tempDir" ]; then \
+ apt-get purge -y --auto-remove \
+ && rm -rf "$tempDir" /etc/apt/sources.list.d/temp.list; \
+ fi \
+# forward request and error logs to docker log collector
+ && ln -sf /dev/stdout /var/log/nginx/access.log \
+ && ln -sf /dev/stderr /var/log/nginx/error.log \
+# create a docker-entrypoint.d directory
+ && mkdir /docker-entrypoint.d
+
+COPY docker-entrypoint.sh /
+COPY 10-listen-on-ipv6-by-default.sh /docker-entrypoint.d
+COPY 15-local-resolvers.envsh /docker-entrypoint.d
+COPY 20-envsubst-on-templates.sh /docker-entrypoint.d
+COPY 30-tune-worker-processes.sh /docker-entrypoint.d
+ENTRYPOINT ["/docker-entrypoint.sh"]
+
+EXPOSE 80
+
+STOPSIGNAL SIGQUIT
+
+CMD ["nginx", "-g", "daemon off;"]
diff --git a/mainline/debian/docker-entrypoint.sh b/mainline/debian/docker-entrypoint.sh
new file mode 100755
index 000000000..8ea04f217
--- /dev/null
+++ b/mainline/debian/docker-entrypoint.sh
@@ -0,0 +1,47 @@
+#!/bin/sh
+# vim:sw=4:ts=4:et
+
+set -e
+
+entrypoint_log() {
+ if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then
+ echo "$@"
+ fi
+}
+
+if [ "$1" = "nginx" ] || [ "$1" = "nginx-debug" ]; then
+ if /usr/bin/find "/docker-entrypoint.d/" -mindepth 1 -maxdepth 1 -type f -print -quit 2>/dev/null | read v; then
+ entrypoint_log "$0: /docker-entrypoint.d/ is not empty, will attempt to perform configuration"
+
+ entrypoint_log "$0: Looking for shell scripts in /docker-entrypoint.d/"
+ find "/docker-entrypoint.d/" -follow -type f -print | sort -V | while read -r f; do
+ case "$f" in
+ *.envsh)
+ if [ -x "$f" ]; then
+ entrypoint_log "$0: Sourcing $f";
+ . "$f"
+ else
+ # warn on shell scripts without exec bit
+ entrypoint_log "$0: Ignoring $f, not executable";
+ fi
+ ;;
+ *.sh)
+ if [ -x "$f" ]; then
+ entrypoint_log "$0: Launching $f";
+ "$f"
+ else
+ # warn on shell scripts without exec bit
+ entrypoint_log "$0: Ignoring $f, not executable";
+ fi
+ ;;
+ *) entrypoint_log "$0: Ignoring $f";;
+ esac
+ done
+
+ entrypoint_log "$0: Configuration complete; ready for start up"
+ else
+ entrypoint_log "$0: No files found in /docker-entrypoint.d/, skipping configuration"
+ fi
+fi
+
+exec "$@"
diff --git a/mainline/stretch-perl/Dockerfile b/mainline/stretch-perl/Dockerfile
deleted file mode 100644
index 7c34189d3..000000000
--- a/mainline/stretch-perl/Dockerfile
+++ /dev/null
@@ -1,100 +0,0 @@
-FROM debian:stretch-slim
-
-LABEL maintainer="NGINX Docker Maintainers "
-
-ENV NGINX_VERSION 1.15.2-1~stretch
-ENV NJS_VERSION 1.15.2.0.2.2-1~stretch
-
-RUN set -x \
- && apt-get update \
- && apt-get install --no-install-recommends --no-install-suggests -y gnupg1 apt-transport-https ca-certificates \
- && \
- NGINX_GPGKEY=573BFD6B3D8FBC641079A6ABABF5BD827BD9BF62; \
- found=''; \
- for server in \
- ha.pool.sks-keyservers.net \
- hkp://keyserver.ubuntu.com:80 \
- hkp://p80.pool.sks-keyservers.net:80 \
- pgp.mit.edu \
- ; do \
- echo "Fetching GPG key $NGINX_GPGKEY from $server"; \
- apt-key adv --keyserver "$server" --keyserver-options timeout=10 --recv-keys "$NGINX_GPGKEY" && found=yes && break; \
- done; \
- test -z "$found" && echo >&2 "error: failed to fetch GPG key $NGINX_GPGKEY" && exit 1; \
- apt-get remove --purge --auto-remove -y gnupg1 && rm -rf /var/lib/apt/lists/* \
- && dpkgArch="$(dpkg --print-architecture)" \
- && nginxPackages=" \
- nginx=${NGINX_VERSION} \
- nginx-module-xslt=${NGINX_VERSION} \
- nginx-module-geoip=${NGINX_VERSION} \
- nginx-module-image-filter=${NGINX_VERSION} \
- nginx-module-perl=${NGINX_VERSION} \
- nginx-module-njs=${NJS_VERSION} \
- " \
- && case "$dpkgArch" in \
- amd64|i386) \
-# arches officialy built by upstream
- echo "deb https://nginx.org/packages/mainline/debian/ stretch nginx" >> /etc/apt/sources.list.d/nginx.list \
- && apt-get update \
- ;; \
- *) \
-# we're on an architecture upstream doesn't officially build for
-# let's build binaries from the published source packages
- echo "deb-src https://nginx.org/packages/mainline/debian/ stretch nginx" >> /etc/apt/sources.list.d/nginx.list \
- \
-# new directory for storing sources and .deb files
- && tempDir="$(mktemp -d)" \
- && chmod 777 "$tempDir" \
-# (777 to ensure APT's "_apt" user can access it too)
- \
-# save list of currently-installed packages so build dependencies can be cleanly removed later
- && savedAptMark="$(apt-mark showmanual)" \
- \
-# build .deb files from upstream's source packages (which are verified by apt-get)
- && apt-get update \
- && apt-get build-dep -y $nginxPackages \
- && ( \
- cd "$tempDir" \
- && DEB_BUILD_OPTIONS="nocheck parallel=$(nproc)" \
- apt-get source --compile $nginxPackages \
- ) \
-# we don't remove APT lists here because they get re-downloaded and removed later
- \
-# reset apt-mark's "manual" list so that "purge --auto-remove" will remove all build dependencies
-# (which is done after we install the built packages so we don't have to redownload any overlapping dependencies)
- && apt-mark showmanual | xargs apt-mark auto > /dev/null \
- && { [ -z "$savedAptMark" ] || apt-mark manual $savedAptMark; } \
- \
-# create a temporary local APT repo to install from (so that dependency resolution can be handled by APT, as it should be)
- && ls -lAFh "$tempDir" \
- && ( cd "$tempDir" && dpkg-scanpackages . > Packages ) \
- && grep '^Package: ' "$tempDir/Packages" \
- && echo "deb [ trusted=yes ] file://$tempDir ./" > /etc/apt/sources.list.d/temp.list \
-# work around the following APT issue by using "Acquire::GzipIndexes=false" (overriding "/etc/apt/apt.conf.d/docker-gzip-indexes")
-# Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
-# ...
-# E: Failed to fetch store:/var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
- && apt-get -o Acquire::GzipIndexes=false update \
- ;; \
- esac \
- \
- && apt-get install --no-install-recommends --no-install-suggests -y \
- $nginxPackages \
- gettext-base \
- && apt-get remove --purge --auto-remove -y apt-transport-https ca-certificates && rm -rf /var/lib/apt/lists/* /etc/apt/sources.list.d/nginx.list \
- \
-# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
- && if [ -n "$tempDir" ]; then \
- apt-get purge -y --auto-remove \
- && rm -rf "$tempDir" /etc/apt/sources.list.d/temp.list; \
- fi
-
-# forward request and error logs to docker log collector
-RUN ln -sf /dev/stdout /var/log/nginx/access.log \
- && ln -sf /dev/stderr /var/log/nginx/error.log
-
-EXPOSE 80
-
-STOPSIGNAL SIGTERM
-
-CMD ["nginx", "-g", "daemon off;"]
diff --git a/mainline/stretch/Dockerfile b/mainline/stretch/Dockerfile
deleted file mode 100644
index a7b5ce892..000000000
--- a/mainline/stretch/Dockerfile
+++ /dev/null
@@ -1,99 +0,0 @@
-FROM debian:stretch-slim
-
-LABEL maintainer="NGINX Docker Maintainers "
-
-ENV NGINX_VERSION 1.15.2-1~stretch
-ENV NJS_VERSION 1.15.2.0.2.2-1~stretch
-
-RUN set -x \
- && apt-get update \
- && apt-get install --no-install-recommends --no-install-suggests -y gnupg1 apt-transport-https ca-certificates \
- && \
- NGINX_GPGKEY=573BFD6B3D8FBC641079A6ABABF5BD827BD9BF62; \
- found=''; \
- for server in \
- ha.pool.sks-keyservers.net \
- hkp://keyserver.ubuntu.com:80 \
- hkp://p80.pool.sks-keyservers.net:80 \
- pgp.mit.edu \
- ; do \
- echo "Fetching GPG key $NGINX_GPGKEY from $server"; \
- apt-key adv --keyserver "$server" --keyserver-options timeout=10 --recv-keys "$NGINX_GPGKEY" && found=yes && break; \
- done; \
- test -z "$found" && echo >&2 "error: failed to fetch GPG key $NGINX_GPGKEY" && exit 1; \
- apt-get remove --purge --auto-remove -y gnupg1 && rm -rf /var/lib/apt/lists/* \
- && dpkgArch="$(dpkg --print-architecture)" \
- && nginxPackages=" \
- nginx=${NGINX_VERSION} \
- nginx-module-xslt=${NGINX_VERSION} \
- nginx-module-geoip=${NGINX_VERSION} \
- nginx-module-image-filter=${NGINX_VERSION} \
- nginx-module-njs=${NJS_VERSION} \
- " \
- && case "$dpkgArch" in \
- amd64|i386) \
-# arches officialy built by upstream
- echo "deb https://nginx.org/packages/mainline/debian/ stretch nginx" >> /etc/apt/sources.list.d/nginx.list \
- && apt-get update \
- ;; \
- *) \
-# we're on an architecture upstream doesn't officially build for
-# let's build binaries from the published source packages
- echo "deb-src https://nginx.org/packages/mainline/debian/ stretch nginx" >> /etc/apt/sources.list.d/nginx.list \
- \
-# new directory for storing sources and .deb files
- && tempDir="$(mktemp -d)" \
- && chmod 777 "$tempDir" \
-# (777 to ensure APT's "_apt" user can access it too)
- \
-# save list of currently-installed packages so build dependencies can be cleanly removed later
- && savedAptMark="$(apt-mark showmanual)" \
- \
-# build .deb files from upstream's source packages (which are verified by apt-get)
- && apt-get update \
- && apt-get build-dep -y $nginxPackages \
- && ( \
- cd "$tempDir" \
- && DEB_BUILD_OPTIONS="nocheck parallel=$(nproc)" \
- apt-get source --compile $nginxPackages \
- ) \
-# we don't remove APT lists here because they get re-downloaded and removed later
- \
-# reset apt-mark's "manual" list so that "purge --auto-remove" will remove all build dependencies
-# (which is done after we install the built packages so we don't have to redownload any overlapping dependencies)
- && apt-mark showmanual | xargs apt-mark auto > /dev/null \
- && { [ -z "$savedAptMark" ] || apt-mark manual $savedAptMark; } \
- \
-# create a temporary local APT repo to install from (so that dependency resolution can be handled by APT, as it should be)
- && ls -lAFh "$tempDir" \
- && ( cd "$tempDir" && dpkg-scanpackages . > Packages ) \
- && grep '^Package: ' "$tempDir/Packages" \
- && echo "deb [ trusted=yes ] file://$tempDir ./" > /etc/apt/sources.list.d/temp.list \
-# work around the following APT issue by using "Acquire::GzipIndexes=false" (overriding "/etc/apt/apt.conf.d/docker-gzip-indexes")
-# Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
-# ...
-# E: Failed to fetch store:/var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
- && apt-get -o Acquire::GzipIndexes=false update \
- ;; \
- esac \
- \
- && apt-get install --no-install-recommends --no-install-suggests -y \
- $nginxPackages \
- gettext-base \
- && apt-get remove --purge --auto-remove -y apt-transport-https ca-certificates && rm -rf /var/lib/apt/lists/* /etc/apt/sources.list.d/nginx.list \
- \
-# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
- && if [ -n "$tempDir" ]; then \
- apt-get purge -y --auto-remove \
- && rm -rf "$tempDir" /etc/apt/sources.list.d/temp.list; \
- fi
-
-# forward request and error logs to docker log collector
-RUN ln -sf /dev/stdout /var/log/nginx/access.log \
- && ln -sf /dev/stderr /var/log/nginx/error.log
-
-EXPOSE 80
-
-STOPSIGNAL SIGTERM
-
-CMD ["nginx", "-g", "daemon off;"]
diff --git a/modules/Dockerfile b/modules/Dockerfile
new file mode 100644
index 000000000..2e77a0405
--- /dev/null
+++ b/modules/Dockerfile
@@ -0,0 +1,79 @@
+ARG NGINX_FROM_IMAGE=nginx:mainline
+FROM ${NGINX_FROM_IMAGE} AS builder
+
+ARG ENABLED_MODULES
+
+SHELL ["/bin/bash", "-exo", "pipefail", "-c"]
+
+RUN if [ "$ENABLED_MODULES" = "" ]; then \
+ echo "No additional modules enabled, exiting"; \
+ exit 1; \
+ fi
+
+COPY ./ /modules/
+
+RUN apt-get update \
+ && apt-get install -y --no-install-suggests --no-install-recommends \
+ patch make wget git devscripts debhelper dpkg-dev \
+ quilt lsb-release build-essential libxml2-utils xsltproc \
+ equivs git g++ libparse-recdescent-perl \
+ && XSLSCRIPT_SHA512="f7194c5198daeab9b3b0c3aebf006922c7df1d345d454bd8474489ff2eb6b4bf8e2ffe442489a45d1aab80da6ecebe0097759a1e12cc26b5f0613d05b7c09ffa *stdin" \
+ && wget -O /tmp/xslscript.pl https://raw.githubusercontent.com/nginx/xslscript/9204424259c343ca08a18a78915f40f28025e093/xslscript.pl \
+ && if [ "$(cat /tmp/xslscript.pl | openssl sha512 -r)" = "$XSLSCRIPT_SHA512" ]; then \
+ echo "XSLScript checksum verification succeeded!"; \
+ chmod +x /tmp/xslscript.pl; \
+ mv /tmp/xslscript.pl /usr/local/bin/; \
+ else \
+ echo "XSLScript checksum verification failed!"; \
+ exit 1; \
+ fi \
+ && git clone -b ${NGINX_VERSION}-${PKG_RELEASE%%~*} https://github.com/nginx/pkg-oss/ \
+ && cd pkg-oss \
+ && mkdir /tmp/packages \
+ && for module in $ENABLED_MODULES; do \
+ echo "Building $module for nginx-$NGINX_VERSION"; \
+ if [ -d /modules/$module ]; then \
+ echo "Building $module from user-supplied sources"; \
+ # check if module sources file is there and not empty
+ if [ ! -s /modules/$module/source ]; then \
+ echo "No source file for $module in modules/$module/source, exiting"; \
+ exit 1; \
+ fi; \
+ # some modules require build dependencies
+ if [ -f /modules/$module/build-deps ]; then \
+ echo "Installing $module build dependencies"; \
+ apt-get update && apt-get install -y --no-install-suggests --no-install-recommends $(cat /modules/$module/build-deps | xargs); \
+ fi; \
+ # if a module has a build dependency that is not in a distro, provide a
+ # shell script to fetch/build/install those
+ # note that shared libraries produced as a result of this script will
+ # not be copied from the builder image to the main one so build static
+ if [ -x /modules/$module/prebuild ]; then \
+ echo "Running prebuild script for $module"; \
+ /modules/$module/prebuild; \
+ fi; \
+ /pkg-oss/build_module.sh -v $NGINX_VERSION -f -y -o /tmp/packages -n $module $(cat /modules/$module/source); \
+ BUILT_MODULES="$BUILT_MODULES $(echo $module | tr '[A-Z]' '[a-z]' | tr -d '[/_\-\.\t ]')"; \
+ elif make -C /pkg-oss/debian list | grep -P "^$module\s+\d" > /dev/null; then \
+ echo "Building $module from pkg-oss sources"; \
+ cd /pkg-oss/debian; \
+ make rules-module-$module BASE_VERSION=$NGINX_VERSION NGINX_VERSION=$NGINX_VERSION; \
+ mk-build-deps --install --tool="apt-get -o Debug::pkgProblemResolver=yes --no-install-recommends --yes" debuild-module-$module/nginx-$NGINX_VERSION/debian/control; \
+ make module-$module BASE_VERSION=$NGINX_VERSION NGINX_VERSION=$NGINX_VERSION; \
+ find ../../ -maxdepth 1 -mindepth 1 -type f -name "*.deb" -exec mv -v {} /tmp/packages/ \;; \
+ BUILT_MODULES="$BUILT_MODULES $module"; \
+ else \
+ echo "Don't know how to build $module module, exiting"; \
+ exit 1; \
+ fi; \
+ done \
+ && echo "BUILT_MODULES=\"$BUILT_MODULES\"" > /tmp/packages/modules.env
+
+FROM ${NGINX_FROM_IMAGE}
+RUN --mount=type=bind,target=/tmp/packages/,source=/tmp/packages/,from=builder \
+ apt-get update \
+ && . /tmp/packages/modules.env \
+ && for module in $BUILT_MODULES; do \
+ apt-get install --no-install-suggests --no-install-recommends -y /tmp/packages/nginx-module-${module}_${NGINX_VERSION}*.deb; \
+ done \
+ && rm -rf /var/lib/apt/lists/
diff --git a/modules/Dockerfile.alpine b/modules/Dockerfile.alpine
new file mode 100644
index 000000000..2cdb29366
--- /dev/null
+++ b/modules/Dockerfile.alpine
@@ -0,0 +1,69 @@
+ARG NGINX_FROM_IMAGE=nginx:mainline-alpine
+FROM ${NGINX_FROM_IMAGE} AS builder
+
+ARG ENABLED_MODULES
+
+SHELL ["/bin/ash", "-exo", "pipefail", "-c"]
+
+RUN if [ "$ENABLED_MODULES" = "" ]; then \
+ echo "No additional modules enabled, exiting"; \
+ exit 1; \
+ fi
+
+COPY ./ /modules/
+
+RUN apk update \
+ && apk add linux-headers openssl-dev pcre2-dev zlib-dev openssl abuild \
+ musl-dev libxslt libxml2-utils make gcc unzip git \
+ xz g++ coreutils curl \
+ # allow abuild as a root user \
+ && printf "#!/bin/sh\\nSETFATTR=true /usr/bin/abuild -F \"\$@\"\\n" > /usr/local/bin/abuild \
+ && chmod +x /usr/local/bin/abuild \
+ && git clone -b ${NGINX_VERSION}-${PKG_RELEASE} https://github.com/nginx/pkg-oss/ \
+ && cd pkg-oss \
+ && mkdir /tmp/packages \
+ && for module in $ENABLED_MODULES; do \
+ echo "Building $module for nginx-$NGINX_VERSION"; \
+ if [ -d /modules/$module ]; then \
+ echo "Building $module from user-supplied sources"; \
+ # check if module sources file is there and not empty
+ if [ ! -s /modules/$module/source ]; then \
+ echo "No source file for $module in modules/$module/source, exiting"; \
+ exit 1; \
+ fi; \
+ # some modules require build dependencies
+ if [ -f /modules/$module/build-deps ]; then \
+ echo "Installing $module build dependencies"; \
+ apk update && apk add $(cat /modules/$module/build-deps | xargs); \
+ fi; \
+ # if a module has a build dependency that is not in a distro, provide a
+ # shell script to fetch/build/install those
+ # note that shared libraries produced as a result of this script will
+ # not be copied from the builder image to the main one so build static
+ if [ -x /modules/$module/prebuild ]; then \
+ echo "Running prebuild script for $module"; \
+ /modules/$module/prebuild; \
+ fi; \
+ /pkg-oss/build_module.sh -v $NGINX_VERSION -f -y -o /tmp/packages -n $module $(cat /modules/$module/source); \
+ BUILT_MODULES="$BUILT_MODULES $(echo $module | tr '[A-Z]' '[a-z]' | tr -d '[/_\-\.\t ]')"; \
+ elif make -C /pkg-oss/alpine list | grep -E "^$module\s+\d+" > /dev/null; then \
+ echo "Building $module from pkg-oss sources"; \
+ cd /pkg-oss/alpine; \
+ make abuild-module-$module BASE_VERSION=$NGINX_VERSION NGINX_VERSION=$NGINX_VERSION; \
+ apk add $(. ./abuild-module-$module/APKBUILD; echo $makedepends;); \
+ make module-$module BASE_VERSION=$NGINX_VERSION NGINX_VERSION=$NGINX_VERSION; \
+ find ~/packages -type f -name "*.apk" -exec mv -v {} /tmp/packages/ \;; \
+ BUILT_MODULES="$BUILT_MODULES $module"; \
+ else \
+ echo "Don't know how to build $module module, exiting"; \
+ exit 1; \
+ fi; \
+ done \
+ && echo "BUILT_MODULES=\"$BUILT_MODULES\"" > /tmp/packages/modules.env
+
+FROM ${NGINX_FROM_IMAGE}
+RUN --mount=type=bind,target=/tmp/packages/,source=/tmp/packages/,from=builder \
+ . /tmp/packages/modules.env \
+ && for module in $BUILT_MODULES; do \
+ apk add --no-cache --allow-untrusted /tmp/packages/nginx-module-${module}-${NGINX_VERSION}*.apk; \
+ done
diff --git a/modules/README.md b/modules/README.md
new file mode 100644
index 000000000..93620e450
--- /dev/null
+++ b/modules/README.md
@@ -0,0 +1,181 @@
+# Adding third-party modules to nginx official image
+
+It's possible to extend a mainline image with third-party modules, either by
+providing your own build instructions via a simple filesystem layout/syntax and
+the `build_module.sh` helper script, or by falling back to the packaging
+sources from [pkg-oss](https://github.com/nginx/pkg-oss).
+
+## Requirements
+
+To use the Dockerfiles provided here,
+[Docker BuildKit](https://docs.docker.com/build/buildkit/) is required.
+This is enabled by default as of version 23.0; for earlier versions this can be
+enabled by setting the environment variable `DOCKER_BUILDKIT` to `1`.
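+
+As a minimal sketch (the module name and image tag here are illustrative), on
+an older Docker version you could enable BuildKit for a single build like this:
+
+```
+$ DOCKER_BUILDKIT=1 docker build --build-arg ENABLED_MODULES="njs" -t my-nginx-with-njs .
+```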
+
+If you cannot or do not want to use BuildKit, you can use a previous version
+of these files; see, for example,
+https://github.com/nginx/docker-nginx/tree/4bf0763f4977fff7e9648add59e0540088f3ca9f/modules.
+
+## Usage
+
+```
+$ docker build --build-arg ENABLED_MODULES="ndk lua" -t my-nginx-with-lua .
+```
+This command will attempt to build an image called `my-nginx-with-lua` based on
+the official nginx Docker Hub image with two modules: `ndk` and `lua`.
+By default, a Debian-based image will be used. If you wish to use Alpine
+instead, add `-f Dockerfile.alpine` to the command line. By default, mainline
+images are used as a base, but it's possible to specify a different image by
+providing the `NGINX_FROM_IMAGE` build argument, e.g. `--build-arg
+NGINX_FROM_IMAGE=nginx:stable`.
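+
+For example, to build an Alpine-based image on top of the stable base instead
+(the resulting image name is illustrative), you could combine those options:
+
+```
+$ docker build --build-arg ENABLED_MODULES="ndk lua" \
+    --build-arg NGINX_FROM_IMAGE=nginx:stable-alpine \
+    -f Dockerfile.alpine -t my-nginx-with-lua-alpine .
+```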
+
+The build script looks for module build definition files in a filesystem
+directory with the same name as the module (and the resulting package); if
+those are not found, it tries to look up the requested modules in the pkg-oss
+repository.
+
+For well-known modules we maintain a set of packaging sources in `pkg-oss`, so
+it's probably a good idea to rely on those instead of providing your own build
+instructions.
+
+At the time of writing this README, the following modules and versions are
+available from the `pkg-oss` repository:
+
+```
+/pkg-oss $ LC_ALL=C make -C debian list-all-modules
+auth-spnego 1.1.2-1
+brotli 1.0.0-1
+encrypted-session 0.09-1
+fips-check 0.1-1
+geoip 1.27.4-1
+geoip2 3.4-1
+headers-more 0.37-1
+image-filter 1.27.4-1
+lua 0.10.28-1
+ndk 0.3.3-1
+njs 0.8.9-1
+otel 0.1.1-1
+passenger 6.0.26-1
+perl 1.27.4-1
+rtmp 1.2.2-1
+set-misc 0.33-1
+subs-filter 0.6.4-1
+xslt 1.27.4-1
+```
+
+If you still want to provide your own build instructions for a specific module,
+organize the build directory in the following way, e.g. for the `echo` module:
+
+```
+docker-nginx/modules $ tree echo
+echo
+├── build-deps
+├── prebuild
+└── source
+
+0 directories, 3 files
+```
+
+The scripts expect one file to always exist for a module you wish to build
+manually: `source`. It should contain a link to a zip/tarball of the source
+code of the module you want to build. In `build-deps` you can specify build
+dependencies for the module as found in the Debian or Alpine repositories.
+`prebuild` is a shell script (make it executable with `chmod +x prebuild`!)
+that will be executed prior to building the module but after installing the
+dependencies, so it can be used to install additional build dependencies if
+they are not available from Debian or Alpine. Keep in mind that those
+dependencies won't be automatically copied to the resulting image, so if you're
+building a library, build it statically.
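+
+As an illustration, the files for a hypothetical `hello` module (the module
+name, URL, and dependency below are placeholders, not a real module) could look
+like this:
+
+```
+docker-nginx/modules $ cat hello/source
+https://example.com/hello-nginx-module/archive/v1.0.tar.gz
+docker-nginx/modules $ cat hello/build-deps
+libpcre2-dev
+```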
+
+Once the build is done in the builder image, the built packages are copied over
+to the resulting image and installed via apt/apk. The resulting image will be
+tagged and can be used the same way as an official Docker Hub image.
+
+Note that we cannot provide any support for those modifications and in no way
+guarantee they will work as well as a build without third-party modules. If
+you encounter any issues running your image with the modules enabled, please
+try to reproduce them with a vanilla image first.
+
+## Examples
+
+### docker-compose with pre-packaged modules
+
+If the desired modules are already packaged in
+[pkg-oss](https://github.com/nginx/pkg-oss/) - i.e. `debian/Makefile.module-*`
+exists for a given module - you can use this example.
+
+1. Create a directory for your project:
+
+```
+mkdir myapp
+cd myapp
+````
+
+2. Populate the build context for a custom nginx image:
+
+```
+mkdir my-nginx
+curl -o my-nginx/Dockerfile https://raw.githubusercontent.com/nginx/docker-nginx/master/modules/Dockerfile
+```
+
+3. Create a `docker-compose.yml` file:
+
+```
+cat > docker-compose.yml << __EOF__
+version: "3.3"
+services:
+ web:
+ build:
+ context: ./my-nginx/
+ args:
+ ENABLED_MODULES: ndk lua
+ image: my-nginx-with-lua:v1
+ ports:
+      - "8000:80"
+__EOF__
+```
+
+Now, running `docker-compose up --build -d` will build the image and run the application for you.
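+
+Once the service is up, you can check that nginx answers on the mapped host
+port (assuming the `8000:80` mapping above):
+
+```
+$ curl -I http://localhost:8000/
+```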
+
+### docker-compose with a non-packaged module
+
+If a needed module is not available via `pkg-oss`, you can use this example.
+
+We're going to build the image with the
+[ngx_cache_purge](https://github.com/FRiCKLE/ngx_cache_purge) module.
+
+The steps are similar to the previous example, with the notable difference of
+providing a URL to fetch the module source code from.
+
+1. Create a directory for your project:
+
+```
+mkdir myapp-cache
+cd myapp-cache
+````
+
+2. Populate the build context for a custom nginx image:
+
+```
+mkdir my-nginx
+curl -o my-nginx/Dockerfile https://raw.githubusercontent.com/nginx/docker-nginx/master/modules/Dockerfile
+mkdir my-nginx/cachepurge
+echo "https://github.com/FRiCKLE/ngx_cache_purge/archive/2.3.tar.gz" > my-nginx/cachepurge/source
+```
+
+3. Create a `docker-compose.yml` file:
+
+```
+cat > docker-compose.yml << __EOF__
+version: "3.3"
+services:
+ web:
+ build:
+ context: ./my-nginx/
+ args:
+ ENABLED_MODULES: cachepurge
+ image: my-nginx-with-cachepurge:v1
+ ports:
+      - "8080:80"
+__EOF__
+```
+
+Now, running `docker-compose up --build -d` will build the image and run the application for you.
diff --git a/modules/echo/build-deps b/modules/echo/build-deps
new file mode 100644
index 000000000..1ccfbc2f4
--- /dev/null
+++ b/modules/echo/build-deps
@@ -0,0 +1 @@
+make gcc
diff --git a/modules/echo/prebuild b/modules/echo/prebuild
new file mode 100755
index 000000000..cd2864b05
--- /dev/null
+++ b/modules/echo/prebuild
@@ -0,0 +1,12 @@
+#!/bin/sh
+
+# if a module has a build dependency that is not in debian/alpine
+# use this script to fetch/build/install them
+#
+# note that shared libraries produced as a result of this script will
+# not be copied from the builder image to the resulting one, so you need to
+# build them statically
+
+echo "No prebuild stage required - all dependencies are satisfied already!"
+
+exit 0
diff --git a/modules/echo/source b/modules/echo/source
new file mode 100644
index 000000000..78cb376e9
--- /dev/null
+++ b/modules/echo/source
@@ -0,0 +1 @@
+https://github.com/openresty/echo-nginx-module/archive/v0.63.tar.gz
diff --git a/stable/alpine-otel/Dockerfile b/stable/alpine-otel/Dockerfile
new file mode 100644
index 000000000..7ca1a94b4
--- /dev/null
+++ b/stable/alpine-otel/Dockerfile
@@ -0,0 +1,77 @@
+#
+# NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh"
+#
+# PLEASE DO NOT EDIT IT DIRECTLY.
+#
+FROM nginx:1.28.0-alpine
+
+ENV OTEL_VERSION 0.1.2
+
+RUN set -x \
+ && apkArch="$(cat /etc/apk/arch)" \
+ && nginxPackages=" \
+ nginx=${NGINX_VERSION}-r${PKG_RELEASE} \
+ nginx-module-xslt=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
+ nginx-module-geoip=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
+ nginx-module-image-filter=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
+ nginx-module-njs=${NGINX_VERSION}.${NJS_VERSION}-r${NJS_RELEASE} \
+ nginx-module-otel=${NGINX_VERSION}.${OTEL_VERSION}-r${PKG_RELEASE} \
+ " \
+# install prerequisites for public key and pkg-oss checks
+ && apk add --no-cache --virtual .checksum-deps \
+ openssl \
+ && case "$apkArch" in \
+ x86_64|aarch64) \
+# arches officially built by upstream
+ apk add -X "https://nginx.org/packages/alpine/v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" --no-cache $nginxPackages \
+ ;; \
+ *) \
+# we're on an architecture upstream doesn't officially build for
+# let's build binaries from the published packaging sources
+ set -x \
+ && tempDir="$(mktemp -d)" \
+ && chown nobody:nobody $tempDir \
+ && apk add --no-cache --virtual .build-deps \
+ gcc \
+ libc-dev \
+ make \
+ openssl-dev \
+ pcre2-dev \
+ zlib-dev \
+ linux-headers \
+ cmake \
+ bash \
+ alpine-sdk \
+ findutils \
+ curl \
+ xz \
+ protobuf-dev \
+ grpc-dev \
+ && su nobody -s /bin/sh -c " \
+ export HOME=${tempDir} \
+ && cd ${tempDir} \
+ && curl -f -L -O https://github.com/nginx/pkg-oss/archive/${NGINX_VERSION}-${PKG_RELEASE}.tar.gz \
+ && PKGOSSCHECKSUM=\"517bc18954ccf4efddd51986584ca1f37966833ad342a297e1fe58fd0faf14c5a4dabcb23519dca433878a2927a95d6bea05a6749ee2fa67a33bf24cdc41b1e4 *${NGINX_VERSION}-${PKG_RELEASE}.tar.gz\" \
+ && if [ \"\$(openssl sha512 -r ${NGINX_VERSION}-${PKG_RELEASE}.tar.gz)\" = \"\$PKGOSSCHECKSUM\" ]; then \
+ echo \"pkg-oss tarball checksum verification succeeded!\"; \
+ else \
+ echo \"pkg-oss tarball checksum verification failed!\"; \
+ exit 1; \
+ fi \
+ && tar xzvf ${NGINX_VERSION}-${PKG_RELEASE}.tar.gz \
+ && cd pkg-oss-${NGINX_VERSION}-${PKG_RELEASE} \
+ && cd alpine \
+ && make module-otel \
+ && apk index --allow-untrusted -o ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz ${tempDir}/packages/alpine/${apkArch}/*.apk \
+ && abuild-sign -k ${tempDir}/.abuild/abuild-key.rsa ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz \
+ " \
+ && cp ${tempDir}/.abuild/abuild-key.rsa.pub /etc/apk/keys/ \
+ && apk del --no-network .build-deps \
+ && apk add -X ${tempDir}/packages/alpine/ --no-cache $nginxPackages \
+ ;; \
+ esac \
+# remove checksum deps
+ && apk del --no-network .checksum-deps \
+# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
+ && if [ -n "$tempDir" ]; then rm -rf "$tempDir"; fi \
+ && if [ -f "/etc/apk/keys/abuild-key.rsa.pub" ]; then rm -f /etc/apk/keys/abuild-key.rsa.pub; fi
diff --git a/stable/alpine-perl/Dockerfile b/stable/alpine-perl/Dockerfile
index f5a2d0148..9676d8bc5 100644
--- a/stable/alpine-perl/Dockerfile
+++ b/stable/alpine-perl/Dockerfile
@@ -1,151 +1,72 @@
-FROM alpine:3.7
+#
+# NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh"
+#
+# PLEASE DO NOT EDIT IT DIRECTLY.
+#
+FROM nginx:1.28.0-alpine
-LABEL maintainer="NGINX Docker Maintainers "
-
-ENV NGINX_VERSION 1.14.0
-
-RUN GPG_KEYS=B0F4253373F8F6F510D42178520A9993A1C052F8 \
- && CONFIG="\
- --prefix=/etc/nginx \
- --sbin-path=/usr/sbin/nginx \
- --modules-path=/usr/lib/nginx/modules \
- --conf-path=/etc/nginx/nginx.conf \
- --error-log-path=/var/log/nginx/error.log \
- --http-log-path=/var/log/nginx/access.log \
- --pid-path=/var/run/nginx.pid \
- --lock-path=/var/run/nginx.lock \
- --http-client-body-temp-path=/var/cache/nginx/client_temp \
- --http-proxy-temp-path=/var/cache/nginx/proxy_temp \
- --http-fastcgi-temp-path=/var/cache/nginx/fastcgi_temp \
- --http-uwsgi-temp-path=/var/cache/nginx/uwsgi_temp \
- --http-scgi-temp-path=/var/cache/nginx/scgi_temp \
- --user=nginx \
- --group=nginx \
- --with-http_ssl_module \
- --with-http_realip_module \
- --with-http_addition_module \
- --with-http_sub_module \
- --with-http_dav_module \
- --with-http_flv_module \
- --with-http_mp4_module \
- --with-http_gunzip_module \
- --with-http_gzip_static_module \
- --with-http_random_index_module \
- --with-http_secure_link_module \
- --with-http_stub_status_module \
- --with-http_auth_request_module \
- --with-http_xslt_module=dynamic \
- --with-http_image_filter_module=dynamic \
- --with-http_geoip_module=dynamic \
- --with-http_perl_module=dynamic \
- --with-threads \
- --with-stream \
- --with-stream_ssl_module \
- --with-stream_ssl_preread_module \
- --with-stream_realip_module \
- --with-stream_geoip_module=dynamic \
- --with-http_slice_module \
- --with-mail \
- --with-mail_ssl_module \
- --with-compat \
- --with-file-aio \
- --with-http_v2_module \
- " \
- && addgroup -S nginx \
- && adduser -D -S -h /var/cache/nginx -s /sbin/nologin -G nginx nginx \
- && apk add --no-cache --virtual .build-deps \
- gcc \
- libc-dev \
- make \
- openssl-dev \
- pcre-dev \
- zlib-dev \
- linux-headers \
- curl \
- gnupg \
- libxslt-dev \
- gd-dev \
- geoip-dev \
- perl-dev \
- && curl -fSL https://nginx.org/download/nginx-$NGINX_VERSION.tar.gz -o nginx.tar.gz \
- && curl -fSL https://nginx.org/download/nginx-$NGINX_VERSION.tar.gz.asc -o nginx.tar.gz.asc \
- && export GNUPGHOME="$(mktemp -d)" \
- && found=''; \
- for server in \
- ha.pool.sks-keyservers.net \
- hkp://keyserver.ubuntu.com:80 \
- hkp://p80.pool.sks-keyservers.net:80 \
- pgp.mit.edu \
- ; do \
- echo "Fetching GPG key $GPG_KEYS from $server"; \
- gpg --keyserver "$server" --keyserver-options timeout=10 --recv-keys "$GPG_KEYS" && found=yes && break; \
- done; \
- test -z "$found" && echo >&2 "error: failed to fetch GPG key $GPG_KEYS" && exit 1; \
- gpg --batch --verify nginx.tar.gz.asc nginx.tar.gz \
- && rm -rf "$GNUPGHOME" nginx.tar.gz.asc \
- && mkdir -p /usr/src \
- && tar -zxC /usr/src -f nginx.tar.gz \
- && rm nginx.tar.gz \
- && cd /usr/src/nginx-$NGINX_VERSION \
- && ./configure $CONFIG --with-debug \
- && make -j$(getconf _NPROCESSORS_ONLN) \
- && mv objs/nginx objs/nginx-debug \
- && mv objs/ngx_http_xslt_filter_module.so objs/ngx_http_xslt_filter_module-debug.so \
- && mv objs/ngx_http_image_filter_module.so objs/ngx_http_image_filter_module-debug.so \
- && mv objs/ngx_http_geoip_module.so objs/ngx_http_geoip_module-debug.so \
- && mv objs/ngx_http_perl_module.so objs/ngx_http_perl_module-debug.so \
- && mv objs/ngx_stream_geoip_module.so objs/ngx_stream_geoip_module-debug.so \
- && ./configure $CONFIG \
- && make -j$(getconf _NPROCESSORS_ONLN) \
- && make install \
- && rm -rf /etc/nginx/html/ \
- && mkdir /etc/nginx/conf.d/ \
- && mkdir -p /usr/share/nginx/html/ \
- && install -m644 html/index.html /usr/share/nginx/html/ \
- && install -m644 html/50x.html /usr/share/nginx/html/ \
- && install -m755 objs/nginx-debug /usr/sbin/nginx-debug \
- && install -m755 objs/ngx_http_xslt_filter_module-debug.so /usr/lib/nginx/modules/ngx_http_xslt_filter_module-debug.so \
- && install -m755 objs/ngx_http_image_filter_module-debug.so /usr/lib/nginx/modules/ngx_http_image_filter_module-debug.so \
- && install -m755 objs/ngx_http_geoip_module-debug.so /usr/lib/nginx/modules/ngx_http_geoip_module-debug.so \
- && install -m755 objs/ngx_http_perl_module-debug.so /usr/lib/nginx/modules/ngx_http_perl_module-debug.so \
- && install -m755 objs/ngx_stream_geoip_module-debug.so /usr/lib/nginx/modules/ngx_stream_geoip_module-debug.so \
- && ln -s ../../usr/lib/nginx/modules /etc/nginx/modules \
- && strip /usr/sbin/nginx* \
- && strip /usr/lib/nginx/modules/*.so \
- && rm -rf /usr/src/nginx-$NGINX_VERSION \
- \
- # Bring in gettext so we can get `envsubst`, then throw
- # the rest away. To do this, we need to install `gettext`
- # then move `envsubst` out of the way so `gettext` can
- # be deleted completely, then move `envsubst` back.
- && apk add --no-cache --virtual .gettext gettext \
- && mv /usr/bin/envsubst /tmp/ \
- \
- && runDeps="$( \
- scanelf --needed --nobanner /usr/sbin/nginx /usr/lib/nginx/modules/*.so /tmp/envsubst \
- | awk '{ gsub(/,/, "\nso:", $2); print "so:" $2 }' \
- | sort -u \
- | xargs -r apk info --installed \
- | sort -u \
- )" \
- && apk add --no-cache --virtual .nginx-rundeps $runDeps \
- && apk del .build-deps \
- && apk del .gettext \
- && mv /tmp/envsubst /usr/local/bin/ \
- \
- # Bring in tzdata so users could set the timezones through the environment
- # variables
- && apk add --no-cache tzdata \
- \
- # forward request and error logs to docker log collector
- && ln -sf /dev/stdout /var/log/nginx/access.log \
- && ln -sf /dev/stderr /var/log/nginx/error.log
-
-COPY nginx.conf /etc/nginx/nginx.conf
-COPY nginx.vh.default.conf /etc/nginx/conf.d/default.conf
-
-EXPOSE 80
-
-STOPSIGNAL SIGTERM
-
-CMD ["nginx", "-g", "daemon off;"]
+RUN set -x \
+ && apkArch="$(cat /etc/apk/arch)" \
+ && nginxPackages=" \
+ nginx=${NGINX_VERSION}-r${PKG_RELEASE} \
+ nginx-module-xslt=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
+ nginx-module-geoip=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
+ nginx-module-image-filter=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
+ nginx-module-perl=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
+ nginx-module-njs=${NGINX_VERSION}.${NJS_VERSION}-r${NJS_RELEASE} \
+ " \
+# install prerequisites for public key and pkg-oss checks
+ && apk add --no-cache --virtual .checksum-deps \
+ openssl \
+ && case "$apkArch" in \
+ x86_64|aarch64) \
+# arches officially built by upstream
+ apk add -X "https://nginx.org/packages/alpine/v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" --no-cache $nginxPackages \
+ ;; \
+ *) \
+# we're on an architecture upstream doesn't officially build for
+# let's build binaries from the published packaging sources
+ set -x \
+ && tempDir="$(mktemp -d)" \
+ && chown nobody:nobody $tempDir \
+ && apk add --no-cache --virtual .build-deps \
+ gcc \
+ libc-dev \
+ make \
+ openssl-dev \
+ pcre2-dev \
+ zlib-dev \
+ linux-headers \
+ perl-dev \
+ bash \
+ alpine-sdk \
+ findutils \
+ curl \
+ && su nobody -s /bin/sh -c " \
+ export HOME=${tempDir} \
+ && cd ${tempDir} \
+ && curl -f -L -O https://github.com/nginx/pkg-oss/archive/${NGINX_VERSION}-${PKG_RELEASE}.tar.gz \
+ && PKGOSSCHECKSUM=\"517bc18954ccf4efddd51986584ca1f37966833ad342a297e1fe58fd0faf14c5a4dabcb23519dca433878a2927a95d6bea05a6749ee2fa67a33bf24cdc41b1e4 *${NGINX_VERSION}-${PKG_RELEASE}.tar.gz\" \
+ && if [ \"\$(openssl sha512 -r ${NGINX_VERSION}-${PKG_RELEASE}.tar.gz)\" = \"\$PKGOSSCHECKSUM\" ]; then \
+ echo \"pkg-oss tarball checksum verification succeeded!\"; \
+ else \
+ echo \"pkg-oss tarball checksum verification failed!\"; \
+ exit 1; \
+ fi \
+ && tar xzvf ${NGINX_VERSION}-${PKG_RELEASE}.tar.gz \
+ && cd pkg-oss-${NGINX_VERSION}-${PKG_RELEASE} \
+ && cd alpine \
+ && make module-perl \
+ && apk index --allow-untrusted -o ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz ${tempDir}/packages/alpine/${apkArch}/*.apk \
+ && abuild-sign -k ${tempDir}/.abuild/abuild-key.rsa ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz \
+ " \
+ && cp ${tempDir}/.abuild/abuild-key.rsa.pub /etc/apk/keys/ \
+ && apk del --no-network .build-deps \
+ && apk add -X ${tempDir}/packages/alpine/ --no-cache $nginxPackages \
+ ;; \
+ esac \
+# remove checksum deps
+ && apk del --no-network .checksum-deps \
+# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
+ && if [ -n "$tempDir" ]; then rm -rf "$tempDir"; fi \
+ && if [ -f "/etc/apk/keys/abuild-key.rsa.pub" ]; then rm -f /etc/apk/keys/abuild-key.rsa.pub; fi
diff --git a/stable/alpine-perl/nginx.conf b/stable/alpine-perl/nginx.conf
deleted file mode 100644
index e4bad8dbc..000000000
--- a/stable/alpine-perl/nginx.conf
+++ /dev/null
@@ -1,32 +0,0 @@
-
-user nginx;
-worker_processes 1;
-
-error_log /var/log/nginx/error.log warn;
-pid /var/run/nginx.pid;
-
-
-events {
- worker_connections 1024;
-}
-
-
-http {
- include /etc/nginx/mime.types;
- default_type application/octet-stream;
-
- log_format main '$remote_addr - $remote_user [$time_local] "$request" '
- '$status $body_bytes_sent "$http_referer" '
- '"$http_user_agent" "$http_x_forwarded_for"';
-
- access_log /var/log/nginx/access.log main;
-
- sendfile on;
- #tcp_nopush on;
-
- keepalive_timeout 65;
-
- #gzip on;
-
- include /etc/nginx/conf.d/*.conf;
-}
diff --git a/stable/alpine-perl/nginx.vh.default.conf b/stable/alpine-perl/nginx.vh.default.conf
deleted file mode 100644
index 299c622a7..000000000
--- a/stable/alpine-perl/nginx.vh.default.conf
+++ /dev/null
@@ -1,45 +0,0 @@
-server {
- listen 80;
- server_name localhost;
-
- #charset koi8-r;
- #access_log /var/log/nginx/host.access.log main;
-
- location / {
- root /usr/share/nginx/html;
- index index.html index.htm;
- }
-
- #error_page 404 /404.html;
-
- # redirect server error pages to the static page /50x.html
- #
- error_page 500 502 503 504 /50x.html;
- location = /50x.html {
- root /usr/share/nginx/html;
- }
-
- # proxy the PHP scripts to Apache listening on 127.0.0.1:80
- #
- #location ~ \.php$ {
- # proxy_pass http://127.0.0.1;
- #}
-
- # pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000
- #
- #location ~ \.php$ {
- # root html;
- # fastcgi_pass 127.0.0.1:9000;
- # fastcgi_index index.php;
- # fastcgi_param SCRIPT_FILENAME /scripts$fastcgi_script_name;
- # include fastcgi_params;
- #}
-
- # deny access to .htaccess files, if Apache's document root
- # concurs with nginx's one
- #
- #location ~ /\.ht {
- # deny all;
- #}
-}
-
diff --git a/stable/alpine-slim/10-listen-on-ipv6-by-default.sh b/stable/alpine-slim/10-listen-on-ipv6-by-default.sh
new file mode 100755
index 000000000..61a901dee
--- /dev/null
+++ b/stable/alpine-slim/10-listen-on-ipv6-by-default.sh
@@ -0,0 +1,67 @@
+#!/bin/sh
+# vim:sw=4:ts=4:et
+
+set -e
+
+entrypoint_log() {
+ if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then
+ echo "$@"
+ fi
+}
+
+ME=$(basename "$0")
+DEFAULT_CONF_FILE="etc/nginx/conf.d/default.conf"
+
+# check if we have ipv6 available
+if [ ! -f "/proc/net/if_inet6" ]; then
+ entrypoint_log "$ME: info: ipv6 not available"
+ exit 0
+fi
+
+if [ ! -f "/$DEFAULT_CONF_FILE" ]; then
+ entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE is not a file or does not exist"
+ exit 0
+fi
+
+# check if the file can be modified, e.g. not on a r/o filesystem
+touch /$DEFAULT_CONF_FILE 2>/dev/null || { entrypoint_log "$ME: info: can not modify /$DEFAULT_CONF_FILE (read-only file system?)"; exit 0; }
+
+# check if the file is already modified, e.g. on a container restart
+grep -q "listen \[::\]:80;" /$DEFAULT_CONF_FILE && { entrypoint_log "$ME: info: IPv6 listen already enabled"; exit 0; }
+
+if [ -f "/etc/os-release" ]; then
+ . /etc/os-release
+else
+ entrypoint_log "$ME: info: can not guess the operating system"
+ exit 0
+fi
+
+entrypoint_log "$ME: info: Getting the checksum of /$DEFAULT_CONF_FILE"
+
+case "$ID" in
+ "debian")
+ CHECKSUM=$(dpkg-query --show --showformat='${Conffiles}\n' nginx | grep $DEFAULT_CONF_FILE | cut -d' ' -f 3)
+ echo "$CHECKSUM /$DEFAULT_CONF_FILE" | md5sum -c - >/dev/null 2>&1 || {
+ entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE differs from the packaged version"
+ exit 0
+ }
+ ;;
+ "alpine")
+ CHECKSUM=$(apk manifest nginx 2>/dev/null| grep $DEFAULT_CONF_FILE | cut -d' ' -f 1 | cut -d ':' -f 2)
+ echo "$CHECKSUM /$DEFAULT_CONF_FILE" | sha1sum -c - >/dev/null 2>&1 || {
+ entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE differs from the packaged version"
+ exit 0
+ }
+ ;;
+ *)
+ entrypoint_log "$ME: info: Unsupported distribution"
+ exit 0
+ ;;
+esac
+
+# enable ipv6 on default.conf listen sockets
+sed -i -E 's,listen 80;,listen 80;\n listen [::]:80;,' /$DEFAULT_CONF_FILE
+
+entrypoint_log "$ME: info: Enabled listen on IPv6 in /$DEFAULT_CONF_FILE"
+
+exit 0
diff --git a/stable/alpine-slim/15-local-resolvers.envsh b/stable/alpine-slim/15-local-resolvers.envsh
new file mode 100755
index 000000000..e830ddacd
--- /dev/null
+++ b/stable/alpine-slim/15-local-resolvers.envsh
@@ -0,0 +1,15 @@
+#!/bin/sh
+# vim:sw=2:ts=2:sts=2:et
+
+set -eu
+
+LC_ALL=C
+PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+
+[ "${NGINX_ENTRYPOINT_LOCAL_RESOLVERS:-}" ] || return 0
+
+NGINX_LOCAL_RESOLVERS=$(awk 'BEGIN{ORS=" "} $1=="nameserver" {if ($2 ~ ":") {print "["$2"]"} else {print $2}}' /etc/resolv.conf)
+
+NGINX_LOCAL_RESOLVERS="${NGINX_LOCAL_RESOLVERS% }"
+
+export NGINX_LOCAL_RESOLVERS
diff --git a/stable/alpine-slim/20-envsubst-on-templates.sh b/stable/alpine-slim/20-envsubst-on-templates.sh
new file mode 100755
index 000000000..3804165c9
--- /dev/null
+++ b/stable/alpine-slim/20-envsubst-on-templates.sh
@@ -0,0 +1,78 @@
+#!/bin/sh
+
+set -e
+
+ME=$(basename "$0")
+
+entrypoint_log() {
+ if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then
+ echo "$@"
+ fi
+}
+
+add_stream_block() {
+ local conffile="/etc/nginx/nginx.conf"
+
+ if grep -q -E "\s*stream\s*\{" "$conffile"; then
+ entrypoint_log "$ME: $conffile contains a stream block; include $stream_output_dir/*.conf to enable stream templates"
+ else
+ # check if the file can be modified, e.g. not on a r/o filesystem
+ touch "$conffile" 2>/dev/null || { entrypoint_log "$ME: info: can not modify $conffile (read-only file system?)"; exit 0; }
+ entrypoint_log "$ME: Appending stream block to $conffile to include $stream_output_dir/*.conf"
+ cat << END >> "$conffile"
+# added by "$ME" on "$(date)"
+stream {
+ include $stream_output_dir/*.conf;
+}
+END
+ fi
+}
+
+auto_envsubst() {
+ local template_dir="${NGINX_ENVSUBST_TEMPLATE_DIR:-/etc/nginx/templates}"
+ local suffix="${NGINX_ENVSUBST_TEMPLATE_SUFFIX:-.template}"
+ local output_dir="${NGINX_ENVSUBST_OUTPUT_DIR:-/etc/nginx/conf.d}"
+ local stream_suffix="${NGINX_ENVSUBST_STREAM_TEMPLATE_SUFFIX:-.stream-template}"
+ local stream_output_dir="${NGINX_ENVSUBST_STREAM_OUTPUT_DIR:-/etc/nginx/stream-conf.d}"
+ local filter="${NGINX_ENVSUBST_FILTER:-}"
+
+ local template defined_envs relative_path output_path subdir
+ defined_envs=$(printf '${%s} ' $(awk "END { for (name in ENVIRON) { print ( name ~ /${filter}/ ) ? name : \"\" } }" < /dev/null ))
+ [ -d "$template_dir" ] || return 0
+ if [ ! -w "$output_dir" ]; then
+ entrypoint_log "$ME: ERROR: $template_dir exists, but $output_dir is not writable"
+ return 0
+ fi
+ find "$template_dir" -follow -type f -name "*$suffix" -print | while read -r template; do
+ relative_path="${template#"$template_dir/"}"
+ output_path="$output_dir/${relative_path%"$suffix"}"
+ subdir=$(dirname "$relative_path")
+ # create a subdirectory where the template file exists
+ mkdir -p "$output_dir/$subdir"
+ entrypoint_log "$ME: Running envsubst on $template to $output_path"
+ envsubst "$defined_envs" < "$template" > "$output_path"
+ done
+
+ # Print the first file with the stream suffix, this will be false if there are none
+ if test -n "$(find "$template_dir" -name "*$stream_suffix" -print -quit)"; then
+ mkdir -p "$stream_output_dir"
+ if [ ! -w "$stream_output_dir" ]; then
+ entrypoint_log "$ME: ERROR: $template_dir exists, but $stream_output_dir is not writable"
+ return 0
+ fi
+ add_stream_block
+ find "$template_dir" -follow -type f -name "*$stream_suffix" -print | while read -r template; do
+ relative_path="${template#"$template_dir/"}"
+ output_path="$stream_output_dir/${relative_path%"$stream_suffix"}"
+ subdir=$(dirname "$relative_path")
+ # create a subdirectory where the template file exists
+ mkdir -p "$stream_output_dir/$subdir"
+ entrypoint_log "$ME: Running envsubst on $template to $output_path"
+ envsubst "$defined_envs" < "$template" > "$output_path"
+ done
+ fi
+}
+
+auto_envsubst
+
+exit 0
diff --git a/stable/alpine-slim/30-tune-worker-processes.sh b/stable/alpine-slim/30-tune-worker-processes.sh
new file mode 100755
index 000000000..defb994f3
--- /dev/null
+++ b/stable/alpine-slim/30-tune-worker-processes.sh
@@ -0,0 +1,188 @@
+#!/bin/sh
+# vim:sw=2:ts=2:sts=2:et
+
+set -eu
+
+LC_ALL=C
+ME=$(basename "$0")
+PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+
+[ "${NGINX_ENTRYPOINT_WORKER_PROCESSES_AUTOTUNE:-}" ] || exit 0
+
+touch /etc/nginx/nginx.conf 2>/dev/null || { echo >&2 "$ME: error: can not modify /etc/nginx/nginx.conf (read-only file system?)"; exit 0; }
+
+ceildiv() {
+ num=$1
+ div=$2
+ echo $(( (num + div - 1) / div ))
+}
+
+get_cpuset() {
+ cpusetroot=$1
+ cpusetfile=$2
+ ncpu=0
+ [ -f "$cpusetroot/$cpusetfile" ] || return 1
+ for token in $( tr ',' ' ' < "$cpusetroot/$cpusetfile" ); do
+ case "$token" in
+ *-*)
+ count=$( seq $(echo "$token" | tr '-' ' ') | wc -l )
+ ncpu=$(( ncpu+count ))
+ ;;
+ *)
+ ncpu=$(( ncpu+1 ))
+ ;;
+ esac
+ done
+ echo "$ncpu"
+}
+
+get_quota() {
+ cpuroot=$1
+ ncpu=0
+ [ -f "$cpuroot/cpu.cfs_quota_us" ] || return 1
+ [ -f "$cpuroot/cpu.cfs_period_us" ] || return 1
+ cfs_quota=$( cat "$cpuroot/cpu.cfs_quota_us" )
+ cfs_period=$( cat "$cpuroot/cpu.cfs_period_us" )
+ [ "$cfs_quota" = "-1" ] && return 1
+ [ "$cfs_period" = "0" ] && return 1
+ ncpu=$( ceildiv "$cfs_quota" "$cfs_period" )
+ [ "$ncpu" -gt 0 ] || return 1
+ echo "$ncpu"
+}
+
+get_quota_v2() {
+ cpuroot=$1
+ ncpu=0
+ [ -f "$cpuroot/cpu.max" ] || return 1
+ cfs_quota=$( cut -d' ' -f 1 < "$cpuroot/cpu.max" )
+ cfs_period=$( cut -d' ' -f 2 < "$cpuroot/cpu.max" )
+ [ "$cfs_quota" = "max" ] && return 1
+ [ "$cfs_period" = "0" ] && return 1
+ ncpu=$( ceildiv "$cfs_quota" "$cfs_period" )
+ [ "$ncpu" -gt 0 ] || return 1
+ echo "$ncpu"
+}
+
+get_cgroup_v1_path() {
+ needle=$1
+ found=
+ foundroot=
+ mountpoint=
+
+ [ -r "/proc/self/mountinfo" ] || return 1
+ [ -r "/proc/self/cgroup" ] || return 1
+
+ while IFS= read -r line; do
+ case "$needle" in
+ "cpuset")
+ case "$line" in
+ *cpuset*)
+ found=$( echo "$line" | cut -d ' ' -f 4,5 )
+ break
+ ;;
+ esac
+ ;;
+ "cpu")
+ case "$line" in
+ *cpuset*)
+ ;;
+ *cpu,cpuacct*|*cpuacct,cpu|*cpuacct*|*cpu*)
+ found=$( echo "$line" | cut -d ' ' -f 4,5 )
+ break
+ ;;
+ esac
+ esac
+ done << __EOF__
+$( grep -F -- '- cgroup ' /proc/self/mountinfo )
+__EOF__
+
+ while IFS= read -r line; do
+ controller=$( echo "$line" | cut -d: -f 2 )
+ case "$needle" in
+ "cpuset")
+ case "$controller" in
+ cpuset)
+ mountpoint=$( echo "$line" | cut -d: -f 3 )
+ break
+ ;;
+ esac
+ ;;
+ "cpu")
+ case "$controller" in
+ cpu,cpuacct|cpuacct,cpu|cpuacct|cpu)
+ mountpoint=$( echo "$line" | cut -d: -f 3 )
+ break
+ ;;
+ esac
+ ;;
+ esac
+done << __EOF__
+$( grep -F -- 'cpu' /proc/self/cgroup )
+__EOF__
+
+ case "${found%% *}" in
+ "/")
+ foundroot="${found##* }$mountpoint"
+ ;;
+ "$mountpoint")
+ foundroot="${found##* }"
+ ;;
+ esac
+ echo "$foundroot"
+}
+
+get_cgroup_v2_path() {
+ found=
+ foundroot=
+ mountpoint=
+
+ [ -r "/proc/self/mountinfo" ] || return 1
+ [ -r "/proc/self/cgroup" ] || return 1
+
+ while IFS= read -r line; do
+ found=$( echo "$line" | cut -d ' ' -f 4,5 )
+ done << __EOF__
+$( grep -F -- '- cgroup2 ' /proc/self/mountinfo )
+__EOF__
+
+ while IFS= read -r line; do
+ mountpoint=$( echo "$line" | cut -d: -f 3 )
+done << __EOF__
+$( grep -F -- '0::' /proc/self/cgroup )
+__EOF__
+
+ case "${found%% *}" in
+ "")
+ return 1
+ ;;
+ "/")
+ foundroot="${found##* }$mountpoint"
+ ;;
+ "$mountpoint" | /../*)
+ foundroot="${found##* }"
+ ;;
+ esac
+ echo "$foundroot"
+}
+
+ncpu_online=$( getconf _NPROCESSORS_ONLN )
+ncpu_cpuset=
+ncpu_quota=
+ncpu_cpuset_v2=
+ncpu_quota_v2=
+
+cpuset=$( get_cgroup_v1_path "cpuset" ) && ncpu_cpuset=$( get_cpuset "$cpuset" "cpuset.effective_cpus" ) || ncpu_cpuset=$ncpu_online
+cpu=$( get_cgroup_v1_path "cpu" ) && ncpu_quota=$( get_quota "$cpu" ) || ncpu_quota=$ncpu_online
+cgroup_v2=$( get_cgroup_v2_path ) && ncpu_cpuset_v2=$( get_cpuset "$cgroup_v2" "cpuset.cpus.effective" ) || ncpu_cpuset_v2=$ncpu_online
+cgroup_v2=$( get_cgroup_v2_path ) && ncpu_quota_v2=$( get_quota_v2 "$cgroup_v2" ) || ncpu_quota_v2=$ncpu_online
+
+ncpu=$( printf "%s\n%s\n%s\n%s\n%s\n" \
+ "$ncpu_online" \
+ "$ncpu_cpuset" \
+ "$ncpu_quota" \
+ "$ncpu_cpuset_v2" \
+ "$ncpu_quota_v2" \
+ | sort -n \
+ | head -n 1 )
+
+sed -i.bak -r 's/^(worker_processes)(.*)$/# Commented out by '"$ME"' on '"$(date)"'\n#\1\2\n\1 '"$ncpu"';/' /etc/nginx/nginx.conf
diff --git a/stable/alpine-slim/Dockerfile b/stable/alpine-slim/Dockerfile
new file mode 100644
index 000000000..29bf5ba2a
--- /dev/null
+++ b/stable/alpine-slim/Dockerfile
@@ -0,0 +1,108 @@
+#
+# NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh"
+#
+# PLEASE DO NOT EDIT IT DIRECTLY.
+#
+FROM alpine:3.21
+
+LABEL maintainer="NGINX Docker Maintainers <docker-maint@nginx.com>"
+
+ENV NGINX_VERSION 1.28.0
+ENV PKG_RELEASE 1
+ENV DYNPKG_RELEASE 1
+
+RUN set -x \
+# create nginx user/group first, to be consistent throughout docker variants
+ && addgroup -g 101 -S nginx \
+ && adduser -S -D -H -u 101 -h /var/cache/nginx -s /sbin/nologin -G nginx -g nginx nginx \
+ && apkArch="$(cat /etc/apk/arch)" \
+ && nginxPackages=" \
+ nginx=${NGINX_VERSION}-r${PKG_RELEASE} \
+ " \
+# install prerequisites for public key and pkg-oss checks
+ && apk add --no-cache --virtual .checksum-deps \
+ openssl \
+ && case "$apkArch" in \
+ x86_64|aarch64) \
+# arches officially built by upstream
+ set -x \
+ && KEY_SHA512="e09fa32f0a0eab2b879ccbbc4d0e4fb9751486eedda75e35fac65802cc9faa266425edf83e261137a2f4d16281ce2c1a5f4502930fe75154723da014214f0655" \
+ && wget -O /tmp/nginx_signing.rsa.pub https://nginx.org/keys/nginx_signing.rsa.pub \
+ && if echo "$KEY_SHA512 */tmp/nginx_signing.rsa.pub" | sha512sum -c -; then \
+ echo "key verification succeeded!"; \
+ mv /tmp/nginx_signing.rsa.pub /etc/apk/keys/; \
+ else \
+ echo "key verification failed!"; \
+ exit 1; \
+ fi \
+ && apk add -X "https://nginx.org/packages/alpine/v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" --no-cache $nginxPackages \
+ ;; \
+ *) \
+# we're on an architecture upstream doesn't officially build for
+# let's build binaries from the published packaging sources
+ set -x \
+ && tempDir="$(mktemp -d)" \
+ && chown nobody:nobody $tempDir \
+ && apk add --no-cache --virtual .build-deps \
+ gcc \
+ libc-dev \
+ make \
+ openssl-dev \
+ pcre2-dev \
+ zlib-dev \
+ linux-headers \
+ bash \
+ alpine-sdk \
+ findutils \
+ curl \
+ && su nobody -s /bin/sh -c " \
+ export HOME=${tempDir} \
+ && cd ${tempDir} \
+ && curl -f -L -O https://github.com/nginx/pkg-oss/archive/${NGINX_VERSION}-${PKG_RELEASE}.tar.gz \
+ && PKGOSSCHECKSUM=\"517bc18954ccf4efddd51986584ca1f37966833ad342a297e1fe58fd0faf14c5a4dabcb23519dca433878a2927a95d6bea05a6749ee2fa67a33bf24cdc41b1e4 *${NGINX_VERSION}-${PKG_RELEASE}.tar.gz\" \
+ && if [ \"\$(openssl sha512 -r ${NGINX_VERSION}-${PKG_RELEASE}.tar.gz)\" = \"\$PKGOSSCHECKSUM\" ]; then \
+ echo \"pkg-oss tarball checksum verification succeeded!\"; \
+ else \
+ echo \"pkg-oss tarball checksum verification failed!\"; \
+ exit 1; \
+ fi \
+ && tar xzvf ${NGINX_VERSION}-${PKG_RELEASE}.tar.gz \
+ && cd pkg-oss-${NGINX_VERSION}-${PKG_RELEASE} \
+ && cd alpine \
+ && make base \
+ && apk index --allow-untrusted -o ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz ${tempDir}/packages/alpine/${apkArch}/*.apk \
+ && abuild-sign -k ${tempDir}/.abuild/abuild-key.rsa ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz \
+ " \
+ && cp ${tempDir}/.abuild/abuild-key.rsa.pub /etc/apk/keys/ \
+ && apk del --no-network .build-deps \
+ && apk add -X ${tempDir}/packages/alpine/ --no-cache $nginxPackages \
+ ;; \
+ esac \
+# remove checksum deps
+ && apk del --no-network .checksum-deps \
+# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
+ && if [ -n "$tempDir" ]; then rm -rf "$tempDir"; fi \
+ && if [ -f "/etc/apk/keys/abuild-key.rsa.pub" ]; then rm -f /etc/apk/keys/abuild-key.rsa.pub; fi \
+# Add `envsubst` for templating environment variables
+ && apk add --no-cache gettext-envsubst \
+# Bring in tzdata so users could set the timezones through the environment
+# variables
+ && apk add --no-cache tzdata \
+# forward request and error logs to docker log collector
+ && ln -sf /dev/stdout /var/log/nginx/access.log \
+ && ln -sf /dev/stderr /var/log/nginx/error.log \
+# create a docker-entrypoint.d directory
+ && mkdir /docker-entrypoint.d
+
+COPY docker-entrypoint.sh /
+COPY 10-listen-on-ipv6-by-default.sh /docker-entrypoint.d
+COPY 15-local-resolvers.envsh /docker-entrypoint.d
+COPY 20-envsubst-on-templates.sh /docker-entrypoint.d
+COPY 30-tune-worker-processes.sh /docker-entrypoint.d
+ENTRYPOINT ["/docker-entrypoint.sh"]
+
+EXPOSE 80
+
+STOPSIGNAL SIGQUIT
+
+CMD ["nginx", "-g", "daemon off;"]
diff --git a/stable/alpine-slim/docker-entrypoint.sh b/stable/alpine-slim/docker-entrypoint.sh
new file mode 100755
index 000000000..8ea04f217
--- /dev/null
+++ b/stable/alpine-slim/docker-entrypoint.sh
@@ -0,0 +1,47 @@
+#!/bin/sh
+# vim:sw=4:ts=4:et
+
+set -e
+
+entrypoint_log() {
+ if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then
+ echo "$@"
+ fi
+}
+
+if [ "$1" = "nginx" ] || [ "$1" = "nginx-debug" ]; then
+ if /usr/bin/find "/docker-entrypoint.d/" -mindepth 1 -maxdepth 1 -type f -print -quit 2>/dev/null | read v; then
+ entrypoint_log "$0: /docker-entrypoint.d/ is not empty, will attempt to perform configuration"
+
+ entrypoint_log "$0: Looking for shell scripts in /docker-entrypoint.d/"
+ find "/docker-entrypoint.d/" -follow -type f -print | sort -V | while read -r f; do
+ case "$f" in
+ *.envsh)
+ if [ -x "$f" ]; then
+ entrypoint_log "$0: Sourcing $f";
+ . "$f"
+ else
+ # warn on shell scripts without exec bit
+ entrypoint_log "$0: Ignoring $f, not executable";
+ fi
+ ;;
+ *.sh)
+ if [ -x "$f" ]; then
+ entrypoint_log "$0: Launching $f";
+ "$f"
+ else
+ # warn on shell scripts without exec bit
+ entrypoint_log "$0: Ignoring $f, not executable";
+ fi
+ ;;
+ *) entrypoint_log "$0: Ignoring $f";;
+ esac
+ done
+
+ entrypoint_log "$0: Configuration complete; ready for start up"
+ else
+ entrypoint_log "$0: No files found in /docker-entrypoint.d/, skipping configuration"
+ fi
+fi
+
+exec "$@"
diff --git a/stable/alpine/Dockerfile b/stable/alpine/Dockerfile
index 83de70601..fb0c900a9 100644
--- a/stable/alpine/Dockerfile
+++ b/stable/alpine/Dockerfile
@@ -1,146 +1,79 @@
-FROM alpine:3.7
+#
+# NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh"
+#
+# PLEASE DO NOT EDIT IT DIRECTLY.
+#
+FROM nginx:1.28.0-alpine-slim
-LABEL maintainer="NGINX Docker Maintainers "
+ENV NJS_VERSION 0.8.10
+ENV NJS_RELEASE 1
-ENV NGINX_VERSION 1.14.0
-
-RUN GPG_KEYS=B0F4253373F8F6F510D42178520A9993A1C052F8 \
- && CONFIG="\
- --prefix=/etc/nginx \
- --sbin-path=/usr/sbin/nginx \
- --modules-path=/usr/lib/nginx/modules \
- --conf-path=/etc/nginx/nginx.conf \
- --error-log-path=/var/log/nginx/error.log \
- --http-log-path=/var/log/nginx/access.log \
- --pid-path=/var/run/nginx.pid \
- --lock-path=/var/run/nginx.lock \
- --http-client-body-temp-path=/var/cache/nginx/client_temp \
- --http-proxy-temp-path=/var/cache/nginx/proxy_temp \
- --http-fastcgi-temp-path=/var/cache/nginx/fastcgi_temp \
- --http-uwsgi-temp-path=/var/cache/nginx/uwsgi_temp \
- --http-scgi-temp-path=/var/cache/nginx/scgi_temp \
- --user=nginx \
- --group=nginx \
- --with-http_ssl_module \
- --with-http_realip_module \
- --with-http_addition_module \
- --with-http_sub_module \
- --with-http_dav_module \
- --with-http_flv_module \
- --with-http_mp4_module \
- --with-http_gunzip_module \
- --with-http_gzip_static_module \
- --with-http_random_index_module \
- --with-http_secure_link_module \
- --with-http_stub_status_module \
- --with-http_auth_request_module \
- --with-http_xslt_module=dynamic \
- --with-http_image_filter_module=dynamic \
- --with-http_geoip_module=dynamic \
- --with-threads \
- --with-stream \
- --with-stream_ssl_module \
- --with-stream_ssl_preread_module \
- --with-stream_realip_module \
- --with-stream_geoip_module=dynamic \
- --with-http_slice_module \
- --with-mail \
- --with-mail_ssl_module \
- --with-compat \
- --with-file-aio \
- --with-http_v2_module \
- " \
- && addgroup -S nginx \
- && adduser -D -S -h /var/cache/nginx -s /sbin/nologin -G nginx nginx \
- && apk add --no-cache --virtual .build-deps \
- gcc \
- libc-dev \
- make \
- openssl-dev \
- pcre-dev \
- zlib-dev \
- linux-headers \
- curl \
- gnupg \
- libxslt-dev \
- gd-dev \
- geoip-dev \
- && curl -fSL https://nginx.org/download/nginx-$NGINX_VERSION.tar.gz -o nginx.tar.gz \
- && curl -fSL https://nginx.org/download/nginx-$NGINX_VERSION.tar.gz.asc -o nginx.tar.gz.asc \
- && export GNUPGHOME="$(mktemp -d)" \
- && found=''; \
- for server in \
- ha.pool.sks-keyservers.net \
- hkp://keyserver.ubuntu.com:80 \
- hkp://p80.pool.sks-keyservers.net:80 \
- pgp.mit.edu \
- ; do \
- echo "Fetching GPG key $GPG_KEYS from $server"; \
- gpg --keyserver "$server" --keyserver-options timeout=10 --recv-keys "$GPG_KEYS" && found=yes && break; \
- done; \
- test -z "$found" && echo >&2 "error: failed to fetch GPG key $GPG_KEYS" && exit 1; \
- gpg --batch --verify nginx.tar.gz.asc nginx.tar.gz \
- && rm -rf "$GNUPGHOME" nginx.tar.gz.asc \
- && mkdir -p /usr/src \
- && tar -zxC /usr/src -f nginx.tar.gz \
- && rm nginx.tar.gz \
- && cd /usr/src/nginx-$NGINX_VERSION \
- && ./configure $CONFIG --with-debug \
- && make -j$(getconf _NPROCESSORS_ONLN) \
- && mv objs/nginx objs/nginx-debug \
- && mv objs/ngx_http_xslt_filter_module.so objs/ngx_http_xslt_filter_module-debug.so \
- && mv objs/ngx_http_image_filter_module.so objs/ngx_http_image_filter_module-debug.so \
- && mv objs/ngx_http_geoip_module.so objs/ngx_http_geoip_module-debug.so \
- && mv objs/ngx_stream_geoip_module.so objs/ngx_stream_geoip_module-debug.so \
- && ./configure $CONFIG \
- && make -j$(getconf _NPROCESSORS_ONLN) \
- && make install \
- && rm -rf /etc/nginx/html/ \
- && mkdir /etc/nginx/conf.d/ \
- && mkdir -p /usr/share/nginx/html/ \
- && install -m644 html/index.html /usr/share/nginx/html/ \
- && install -m644 html/50x.html /usr/share/nginx/html/ \
- && install -m755 objs/nginx-debug /usr/sbin/nginx-debug \
- && install -m755 objs/ngx_http_xslt_filter_module-debug.so /usr/lib/nginx/modules/ngx_http_xslt_filter_module-debug.so \
- && install -m755 objs/ngx_http_image_filter_module-debug.so /usr/lib/nginx/modules/ngx_http_image_filter_module-debug.so \
- && install -m755 objs/ngx_http_geoip_module-debug.so /usr/lib/nginx/modules/ngx_http_geoip_module-debug.so \
- && install -m755 objs/ngx_stream_geoip_module-debug.so /usr/lib/nginx/modules/ngx_stream_geoip_module-debug.so \
- && ln -s ../../usr/lib/nginx/modules /etc/nginx/modules \
- && strip /usr/sbin/nginx* \
- && strip /usr/lib/nginx/modules/*.so \
- && rm -rf /usr/src/nginx-$NGINX_VERSION \
- \
- # Bring in gettext so we can get `envsubst`, then throw
- # the rest away. To do this, we need to install `gettext`
- # then move `envsubst` out of the way so `gettext` can
- # be deleted completely, then move `envsubst` back.
- && apk add --no-cache --virtual .gettext gettext \
- && mv /usr/bin/envsubst /tmp/ \
- \
- && runDeps="$( \
- scanelf --needed --nobanner --format '%n#p' /usr/sbin/nginx /usr/lib/nginx/modules/*.so /tmp/envsubst \
- | tr ',' '\n' \
- | sort -u \
- | awk 'system("[ -e /usr/local/lib/" $1 " ]") == 0 { next } { print "so:" $1 }' \
- )" \
- && apk add --no-cache --virtual .nginx-rundeps $runDeps \
- && apk del .build-deps \
- && apk del .gettext \
- && mv /tmp/envsubst /usr/local/bin/ \
- \
- # Bring in tzdata so users could set the timezones through the environment
- # variables
- && apk add --no-cache tzdata \
- \
- # forward request and error logs to docker log collector
- && ln -sf /dev/stdout /var/log/nginx/access.log \
- && ln -sf /dev/stderr /var/log/nginx/error.log
-
-COPY nginx.conf /etc/nginx/nginx.conf
-COPY nginx.vh.default.conf /etc/nginx/conf.d/default.conf
-
-EXPOSE 80
-
-STOPSIGNAL SIGTERM
-
-CMD ["nginx", "-g", "daemon off;"]
+RUN set -x \
+ && apkArch="$(cat /etc/apk/arch)" \
+ && nginxPackages=" \
+ nginx=${NGINX_VERSION}-r${PKG_RELEASE} \
+ nginx-module-xslt=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
+ nginx-module-geoip=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
+ nginx-module-image-filter=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
+ nginx-module-njs=${NGINX_VERSION}.${NJS_VERSION}-r${NJS_RELEASE} \
+ " \
+# install prerequisites for public key and pkg-oss checks
+ && apk add --no-cache --virtual .checksum-deps \
+ openssl \
+ && case "$apkArch" in \
+ x86_64|aarch64) \
+# arches officially built by upstream
+ apk add -X "https://nginx.org/packages/alpine/v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" --no-cache $nginxPackages \
+ ;; \
+ *) \
+# we're on an architecture upstream doesn't officially build for
+# let's build binaries from the published packaging sources
+ set -x \
+ && tempDir="$(mktemp -d)" \
+ && chown nobody:nobody $tempDir \
+ && apk add --no-cache --virtual .build-deps \
+ gcc \
+ libc-dev \
+ make \
+ openssl-dev \
+ pcre2-dev \
+ zlib-dev \
+ linux-headers \
+ libxslt-dev \
+ gd-dev \
+ geoip-dev \
+ libedit-dev \
+ bash \
+ alpine-sdk \
+ findutils \
+ curl \
+ && su nobody -s /bin/sh -c " \
+ export HOME=${tempDir} \
+ && cd ${tempDir} \
+ && curl -f -L -O https://github.com/nginx/pkg-oss/archive/${NGINX_VERSION}-${PKG_RELEASE}.tar.gz \
+ && PKGOSSCHECKSUM=\"517bc18954ccf4efddd51986584ca1f37966833ad342a297e1fe58fd0faf14c5a4dabcb23519dca433878a2927a95d6bea05a6749ee2fa67a33bf24cdc41b1e4 *${NGINX_VERSION}-${PKG_RELEASE}.tar.gz\" \
+ && if [ \"\$(openssl sha512 -r ${NGINX_VERSION}-${PKG_RELEASE}.tar.gz)\" = \"\$PKGOSSCHECKSUM\" ]; then \
+ echo \"pkg-oss tarball checksum verification succeeded!\"; \
+ else \
+ echo \"pkg-oss tarball checksum verification failed!\"; \
+ exit 1; \
+ fi \
+ && tar xzvf ${NGINX_VERSION}-${PKG_RELEASE}.tar.gz \
+ && cd pkg-oss-${NGINX_VERSION}-${PKG_RELEASE} \
+ && cd alpine \
+ && make module-geoip module-image-filter module-njs module-xslt \
+ && apk index --allow-untrusted -o ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz ${tempDir}/packages/alpine/${apkArch}/*.apk \
+ && abuild-sign -k ${tempDir}/.abuild/abuild-key.rsa ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz \
+ " \
+ && cp ${tempDir}/.abuild/abuild-key.rsa.pub /etc/apk/keys/ \
+ && apk del --no-network .build-deps \
+ && apk add -X ${tempDir}/packages/alpine/ --no-cache $nginxPackages \
+ ;; \
+ esac \
+# remove checksum deps
+ && apk del --no-network .checksum-deps \
+# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
+ && if [ -n "$tempDir" ]; then rm -rf "$tempDir"; fi \
+ && if [ -f "/etc/apk/keys/abuild-key.rsa.pub" ]; then rm -f /etc/apk/keys/abuild-key.rsa.pub; fi \
+# Bring in curl and ca-certificates to make registering on DNS SD easier
+ && apk add --no-cache curl ca-certificates
diff --git a/stable/alpine/nginx.conf b/stable/alpine/nginx.conf
deleted file mode 100644
index e4bad8dbc..000000000
--- a/stable/alpine/nginx.conf
+++ /dev/null
@@ -1,32 +0,0 @@
-
-user nginx;
-worker_processes 1;
-
-error_log /var/log/nginx/error.log warn;
-pid /var/run/nginx.pid;
-
-
-events {
- worker_connections 1024;
-}
-
-
-http {
- include /etc/nginx/mime.types;
- default_type application/octet-stream;
-
- log_format main '$remote_addr - $remote_user [$time_local] "$request" '
- '$status $body_bytes_sent "$http_referer" '
- '"$http_user_agent" "$http_x_forwarded_for"';
-
- access_log /var/log/nginx/access.log main;
-
- sendfile on;
- #tcp_nopush on;
-
- keepalive_timeout 65;
-
- #gzip on;
-
- include /etc/nginx/conf.d/*.conf;
-}
diff --git a/stable/alpine/nginx.vh.default.conf b/stable/alpine/nginx.vh.default.conf
deleted file mode 100644
index 299c622a7..000000000
--- a/stable/alpine/nginx.vh.default.conf
+++ /dev/null
@@ -1,45 +0,0 @@
-server {
- listen 80;
- server_name localhost;
-
- #charset koi8-r;
- #access_log /var/log/nginx/host.access.log main;
-
- location / {
- root /usr/share/nginx/html;
- index index.html index.htm;
- }
-
- #error_page 404 /404.html;
-
- # redirect server error pages to the static page /50x.html
- #
- error_page 500 502 503 504 /50x.html;
- location = /50x.html {
- root /usr/share/nginx/html;
- }
-
- # proxy the PHP scripts to Apache listening on 127.0.0.1:80
- #
- #location ~ \.php$ {
- # proxy_pass http://127.0.0.1;
- #}
-
- # pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000
- #
- #location ~ \.php$ {
- # root html;
- # fastcgi_pass 127.0.0.1:9000;
- # fastcgi_index index.php;
- # fastcgi_param SCRIPT_FILENAME /scripts$fastcgi_script_name;
- # include fastcgi_params;
- #}
-
- # deny access to .htaccess files, if Apache's document root
- # concurs with nginx's one
- #
- #location ~ /\.ht {
- # deny all;
- #}
-}
-
diff --git a/stable/debian-otel/Dockerfile b/stable/debian-otel/Dockerfile
new file mode 100644
index 000000000..e4129a437
--- /dev/null
+++ b/stable/debian-otel/Dockerfile
@@ -0,0 +1,100 @@
+#
+# NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh"
+#
+# PLEASE DO NOT EDIT IT DIRECTLY.
+#
+FROM nginx:1.28.0
+
+ENV OTEL_VERSION 0.1.2
+
+RUN set -x; \
+ NGINX_GPGKEY_PATH=/etc/apt/keyrings/nginx-archive-keyring.gpg; \
+ dpkgArch="$(dpkg --print-architecture)" \
+ && nginxPackages=" \
+ nginx=${NGINX_VERSION}-${PKG_RELEASE} \
+ nginx-module-xslt=${NGINX_VERSION}-${DYNPKG_RELEASE} \
+ nginx-module-geoip=${NGINX_VERSION}-${DYNPKG_RELEASE} \
+ nginx-module-image-filter=${NGINX_VERSION}-${DYNPKG_RELEASE} \
+ nginx-module-njs=${NGINX_VERSION}+${NJS_VERSION}-${NJS_RELEASE} \
+ nginx-module-otel=${NGINX_VERSION}+${OTEL_VERSION}-${PKG_RELEASE} \
+ " \
+ && case "$dpkgArch" in \
+ amd64|arm64) \
+# arches officially built by upstream
+ echo "deb [signed-by=$NGINX_GPGKEY_PATH] https://nginx.org/packages/debian/ bookworm nginx" >> /etc/apt/sources.list.d/nginx.list \
+ && apt-get update \
+ ;; \
+ *) \
+# we're on an architecture upstream doesn't officially build for
+# let's build binaries from the published packaging sources
+# new directory for storing sources and .deb files
+ tempDir="$(mktemp -d)" \
+ && chmod 777 "$tempDir" \
+# (777 to ensure APT's "_apt" user can access it too)
+ \
+# save list of currently-installed packages so build dependencies can be cleanly removed later
+ && savedAptMark="$(apt-mark showmanual)" \
+ \
+# build .deb files from upstream's packaging sources
+ && apt-get update \
+ && apt-get install --no-install-recommends --no-install-suggests -y \
+ curl \
+ devscripts \
+ equivs \
+ git \
+ libxml2-utils \
+ lsb-release \
+ xsltproc \
+ && ( \
+ cd "$tempDir" \
+ && REVISION="${NGINX_VERSION}-${PKG_RELEASE}" \
+ && REVISION=${REVISION%~*} \
+ && curl -f -L -O https://github.com/nginx/pkg-oss/archive/${REVISION}.tar.gz \
+ && PKGOSSCHECKSUM="517bc18954ccf4efddd51986584ca1f37966833ad342a297e1fe58fd0faf14c5a4dabcb23519dca433878a2927a95d6bea05a6749ee2fa67a33bf24cdc41b1e4 *${REVISION}.tar.gz" \
+ && if [ "$(openssl sha512 -r ${REVISION}.tar.gz)" = "$PKGOSSCHECKSUM" ]; then \
+ echo "pkg-oss tarball checksum verification succeeded!"; \
+ else \
+ echo "pkg-oss tarball checksum verification failed!"; \
+ exit 1; \
+ fi \
+ && tar xzvf ${REVISION}.tar.gz \
+ && cd pkg-oss-${REVISION} \
+ && cd debian \
+ && for target in module-otel; do \
+ make rules-$target; \
+ mk-build-deps --install --tool="apt-get -o Debug::pkgProblemResolver=yes --no-install-recommends --yes" \
+ debuild-$target/nginx-$NGINX_VERSION/debian/control; \
+ done \
+ && make module-otel \
+ ) \
+# we don't remove APT lists here because they get re-downloaded and removed later
+ \
+# reset apt-mark's "manual" list so that "purge --auto-remove" will remove all build dependencies
+# (which is done after we install the built packages so we don't have to redownload any overlapping dependencies)
+ && apt-mark showmanual | xargs apt-mark auto > /dev/null \
+ && { [ -z "$savedAptMark" ] || apt-mark manual $savedAptMark; } \
+ \
+# create a temporary local APT repo to install from (so that dependency resolution can be handled by APT, as it should be)
+ && ls -lAFh "$tempDir" \
+ && ( cd "$tempDir" && dpkg-scanpackages . > Packages ) \
+ && grep '^Package: ' "$tempDir/Packages" \
+ && echo "deb [ trusted=yes ] file://$tempDir ./" > /etc/apt/sources.list.d/temp.list \
+# work around the following APT issue by using "Acquire::GzipIndexes=false" (overriding "/etc/apt/apt.conf.d/docker-gzip-indexes")
+# Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
+# ...
+# E: Failed to fetch store:/var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
+ && apt-get -o Acquire::GzipIndexes=false update \
+ ;; \
+ esac \
+ \
+ && apt-get install --no-install-recommends --no-install-suggests -y \
+ $nginxPackages \
+ gettext-base \
+ curl \
+ && apt-get remove --purge --auto-remove -y && rm -rf /var/lib/apt/lists/* /etc/apt/sources.list.d/nginx.list \
+ \
+# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
+ && if [ -n "$tempDir" ]; then \
+ apt-get purge -y --auto-remove \
+ && rm -rf "$tempDir" /etc/apt/sources.list.d/temp.list; \
+ fi
diff --git a/stable/debian-perl/Dockerfile b/stable/debian-perl/Dockerfile
new file mode 100644
index 000000000..bda0e1b84
--- /dev/null
+++ b/stable/debian-perl/Dockerfile
@@ -0,0 +1,98 @@
+#
+# NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh"
+#
+# PLEASE DO NOT EDIT IT DIRECTLY.
+#
+FROM nginx:1.28.0
+
+RUN set -x; \
+ NGINX_GPGKEY_PATH=/etc/apt/keyrings/nginx-archive-keyring.gpg; \
+ dpkgArch="$(dpkg --print-architecture)" \
+ && nginxPackages=" \
+ nginx=${NGINX_VERSION}-${PKG_RELEASE} \
+ nginx-module-xslt=${NGINX_VERSION}-${DYNPKG_RELEASE} \
+ nginx-module-geoip=${NGINX_VERSION}-${DYNPKG_RELEASE} \
+ nginx-module-image-filter=${NGINX_VERSION}-${DYNPKG_RELEASE} \
+ nginx-module-perl=${NGINX_VERSION}-${DYNPKG_RELEASE} \
+ nginx-module-njs=${NGINX_VERSION}+${NJS_VERSION}-${NJS_RELEASE} \
+ " \
+ && case "$dpkgArch" in \
+ amd64|arm64) \
+# arches officially built by upstream
+ echo "deb [signed-by=$NGINX_GPGKEY_PATH] https://nginx.org/packages/debian/ bookworm nginx" >> /etc/apt/sources.list.d/nginx.list \
+ && apt-get update \
+ ;; \
+ *) \
+# we're on an architecture upstream doesn't officially build for
+# let's build binaries from the published packaging sources
+# new directory for storing sources and .deb files
+ tempDir="$(mktemp -d)" \
+ && chmod 777 "$tempDir" \
+# (777 to ensure APT's "_apt" user can access it too)
+ \
+# save list of currently-installed packages so build dependencies can be cleanly removed later
+ && savedAptMark="$(apt-mark showmanual)" \
+ \
+# build .deb files from upstream's packaging sources
+ && apt-get update \
+ && apt-get install --no-install-recommends --no-install-suggests -y \
+ curl \
+ devscripts \
+ equivs \
+ git \
+ libxml2-utils \
+ lsb-release \
+ xsltproc \
+ && ( \
+ cd "$tempDir" \
+ && REVISION="${NGINX_VERSION}-${PKG_RELEASE}" \
+ && REVISION=${REVISION%~*} \
+ && curl -f -L -O https://github.com/nginx/pkg-oss/archive/${REVISION}.tar.gz \
+ && PKGOSSCHECKSUM="517bc18954ccf4efddd51986584ca1f37966833ad342a297e1fe58fd0faf14c5a4dabcb23519dca433878a2927a95d6bea05a6749ee2fa67a33bf24cdc41b1e4 *${REVISION}.tar.gz" \
+ && if [ "$(openssl sha512 -r ${REVISION}.tar.gz)" = "$PKGOSSCHECKSUM" ]; then \
+ echo "pkg-oss tarball checksum verification succeeded!"; \
+ else \
+ echo "pkg-oss tarball checksum verification failed!"; \
+ exit 1; \
+ fi \
+ && tar xzvf ${REVISION}.tar.gz \
+ && cd pkg-oss-${REVISION} \
+ && cd debian \
+ && for target in module-perl; do \
+ make rules-$target; \
+ mk-build-deps --install --tool="apt-get -o Debug::pkgProblemResolver=yes --no-install-recommends --yes" \
+ debuild-$target/nginx-$NGINX_VERSION/debian/control; \
+ done \
+ && make module-perl \
+ ) \
+# we don't remove APT lists here because they get re-downloaded and removed later
+ \
+# reset apt-mark's "manual" list so that "purge --auto-remove" will remove all build dependencies
+# (which is done after we install the built packages so we don't have to redownload any overlapping dependencies)
+ && apt-mark showmanual | xargs apt-mark auto > /dev/null \
+ && { [ -z "$savedAptMark" ] || apt-mark manual $savedAptMark; } \
+ \
+# create a temporary local APT repo to install from (so that dependency resolution can be handled by APT, as it should be)
+ && ls -lAFh "$tempDir" \
+ && ( cd "$tempDir" && dpkg-scanpackages . > Packages ) \
+ && grep '^Package: ' "$tempDir/Packages" \
+ && echo "deb [ trusted=yes ] file://$tempDir ./" > /etc/apt/sources.list.d/temp.list \
+# work around the following APT issue by using "Acquire::GzipIndexes=false" (overriding "/etc/apt/apt.conf.d/docker-gzip-indexes")
+# Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
+# ...
+# E: Failed to fetch store:/var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
+ && apt-get -o Acquire::GzipIndexes=false update \
+ ;; \
+ esac \
+ \
+ && apt-get install --no-install-recommends --no-install-suggests -y \
+ $nginxPackages \
+ gettext-base \
+ curl \
+ && apt-get remove --purge --auto-remove -y && rm -rf /var/lib/apt/lists/* /etc/apt/sources.list.d/nginx.list \
+ \
+# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
+ && if [ -n "$tempDir" ]; then \
+ apt-get purge -y --auto-remove \
+ && rm -rf "$tempDir" /etc/apt/sources.list.d/temp.list; \
+ fi
diff --git a/stable/debian/10-listen-on-ipv6-by-default.sh b/stable/debian/10-listen-on-ipv6-by-default.sh
new file mode 100755
index 000000000..61a901dee
--- /dev/null
+++ b/stable/debian/10-listen-on-ipv6-by-default.sh
@@ -0,0 +1,67 @@
+#!/bin/sh
+# vim:sw=4:ts=4:et
+
+set -e
+
+entrypoint_log() {
+ if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then
+ echo "$@"
+ fi
+}
+
+ME=$(basename "$0")
+DEFAULT_CONF_FILE="etc/nginx/conf.d/default.conf"
+
+# check if we have ipv6 available
+if [ ! -f "/proc/net/if_inet6" ]; then
+ entrypoint_log "$ME: info: ipv6 not available"
+ exit 0
+fi
+
+if [ ! -f "/$DEFAULT_CONF_FILE" ]; then
+ entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE is not a file or does not exist"
+ exit 0
+fi
+
+# check if the file can be modified, e.g. not on a r/o filesystem
+touch /$DEFAULT_CONF_FILE 2>/dev/null || { entrypoint_log "$ME: info: can not modify /$DEFAULT_CONF_FILE (read-only file system?)"; exit 0; }
+
+# check if the file is already modified, e.g. on a container restart
+grep -q "listen \[::\]:80;" /$DEFAULT_CONF_FILE && { entrypoint_log "$ME: info: IPv6 listen already enabled"; exit 0; }
+
+if [ -f "/etc/os-release" ]; then
+ . /etc/os-release
+else
+ entrypoint_log "$ME: info: can not guess the operating system"
+ exit 0
+fi
+
+entrypoint_log "$ME: info: Getting the checksum of /$DEFAULT_CONF_FILE"
+
+case "$ID" in
+ "debian")
+ CHECKSUM=$(dpkg-query --show --showformat='${Conffiles}\n' nginx | grep $DEFAULT_CONF_FILE | cut -d' ' -f 3)
+ echo "$CHECKSUM /$DEFAULT_CONF_FILE" | md5sum -c - >/dev/null 2>&1 || {
+ entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE differs from the packaged version"
+ exit 0
+ }
+ ;;
+ "alpine")
+ CHECKSUM=$(apk manifest nginx 2>/dev/null| grep $DEFAULT_CONF_FILE | cut -d' ' -f 1 | cut -d ':' -f 2)
+ echo "$CHECKSUM /$DEFAULT_CONF_FILE" | sha1sum -c - >/dev/null 2>&1 || {
+ entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE differs from the packaged version"
+ exit 0
+ }
+ ;;
+ *)
+ entrypoint_log "$ME: info: Unsupported distribution"
+ exit 0
+ ;;
+esac
+
+# enable ipv6 on default.conf listen sockets
+sed -i -E 's,listen 80;,listen 80;\n listen [::]:80;,' /$DEFAULT_CONF_FILE
+
+entrypoint_log "$ME: info: Enabled listen on IPv6 in /$DEFAULT_CONF_FILE"
+
+exit 0
diff --git a/stable/debian/15-local-resolvers.envsh b/stable/debian/15-local-resolvers.envsh
new file mode 100755
index 000000000..e830ddacd
--- /dev/null
+++ b/stable/debian/15-local-resolvers.envsh
@@ -0,0 +1,15 @@
+#!/bin/sh
+# vim:sw=2:ts=2:sts=2:et
+
+set -eu
+
+LC_ALL=C
+PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+
+[ "${NGINX_ENTRYPOINT_LOCAL_RESOLVERS:-}" ] || return 0
+
+NGINX_LOCAL_RESOLVERS=$(awk 'BEGIN{ORS=" "} $1=="nameserver" {if ($2 ~ ":") {print "["$2"]"} else {print $2}}' /etc/resolv.conf)
+
+NGINX_LOCAL_RESOLVERS="${NGINX_LOCAL_RESOLVERS% }"
+
+export NGINX_LOCAL_RESOLVERS
diff --git a/stable/debian/20-envsubst-on-templates.sh b/stable/debian/20-envsubst-on-templates.sh
new file mode 100755
index 000000000..3804165c9
--- /dev/null
+++ b/stable/debian/20-envsubst-on-templates.sh
@@ -0,0 +1,78 @@
+#!/bin/sh
+
+set -e
+
+ME=$(basename "$0")
+
+entrypoint_log() {
+ if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then
+ echo "$@"
+ fi
+}
+
+add_stream_block() {
+ local conffile="/etc/nginx/nginx.conf"
+
+ if grep -q -E "\s*stream\s*\{" "$conffile"; then
+ entrypoint_log "$ME: $conffile contains a stream block; include $stream_output_dir/*.conf to enable stream templates"
+ else
+ # check if the file can be modified, e.g. not on a r/o filesystem
+ touch "$conffile" 2>/dev/null || { entrypoint_log "$ME: info: can not modify $conffile (read-only file system?)"; exit 0; }
+ entrypoint_log "$ME: Appending stream block to $conffile to include $stream_output_dir/*.conf"
+ cat << END >> "$conffile"
+# added by "$ME" on "$(date)"
+stream {
+ include $stream_output_dir/*.conf;
+}
+END
+ fi
+}
+
+auto_envsubst() {
+ local template_dir="${NGINX_ENVSUBST_TEMPLATE_DIR:-/etc/nginx/templates}"
+ local suffix="${NGINX_ENVSUBST_TEMPLATE_SUFFIX:-.template}"
+ local output_dir="${NGINX_ENVSUBST_OUTPUT_DIR:-/etc/nginx/conf.d}"
+ local stream_suffix="${NGINX_ENVSUBST_STREAM_TEMPLATE_SUFFIX:-.stream-template}"
+ local stream_output_dir="${NGINX_ENVSUBST_STREAM_OUTPUT_DIR:-/etc/nginx/stream-conf.d}"
+ local filter="${NGINX_ENVSUBST_FILTER:-}"
+
+ local template defined_envs relative_path output_path subdir
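+ # build the list of variables envsubst may expand, e.g. '${NGINX_HOST} ${NGINX_PORT}'
+ # (hypothetical names); only variables set in the environment, and matching $filter if one
+ # is given, are included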
+ defined_envs=$(printf '${%s} ' $(awk "END { for (name in ENVIRON) { print ( name ~ /${filter}/ ) ? name : \"\" } }" < /dev/null ))
+ [ -d "$template_dir" ] || return 0
+ if [ ! -w "$output_dir" ]; then
+ entrypoint_log "$ME: ERROR: $template_dir exists, but $output_dir is not writable"
+ return 0
+ fi
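+ # each template maps to the same relative path under the output dir, e.g.
+ # /etc/nginx/templates/default.conf.template renders to /etc/nginx/conf.d/default.conf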
+ find "$template_dir" -follow -type f -name "*$suffix" -print | while read -r template; do
+ relative_path="${template#"$template_dir/"}"
+ output_path="$output_dir/${relative_path%"$suffix"}"
+ subdir=$(dirname "$relative_path")
+ # mirror the template's subdirectory structure under the output directory
+ mkdir -p "$output_dir/$subdir"
+ entrypoint_log "$ME: Running envsubst on $template to $output_path"
+ envsubst "$defined_envs" < "$template" > "$output_path"
+ done
+
+ # Print the first file with the stream suffix; the check is false if there are none
+ if test -n "$(find "$template_dir" -name "*$stream_suffix" -print -quit)"; then
+ mkdir -p "$stream_output_dir"
+ if [ ! -w "$stream_output_dir" ]; then
+ entrypoint_log "$ME: ERROR: $template_dir exists, but $stream_output_dir is not writable"
+ return 0
+ fi
+ add_stream_block
+ find "$template_dir" -follow -type f -name "*$stream_suffix" -print | while read -r template; do
+ relative_path="${template#"$template_dir/"}"
+ output_path="$stream_output_dir/${relative_path%"$stream_suffix"}"
+ subdir=$(dirname "$relative_path")
+ # mirror the template's subdirectory structure under the stream output directory
+ mkdir -p "$stream_output_dir/$subdir"
+ entrypoint_log "$ME: Running envsubst on $template to $output_path"
+ envsubst "$defined_envs" < "$template" > "$output_path"
+ done
+ fi
+}
+
+auto_envsubst
+
+exit 0
diff --git a/stable/debian/30-tune-worker-processes.sh b/stable/debian/30-tune-worker-processes.sh
new file mode 100755
index 000000000..defb994f3
--- /dev/null
+++ b/stable/debian/30-tune-worker-processes.sh
@@ -0,0 +1,188 @@
+#!/bin/sh
+# vim:sw=2:ts=2:sts=2:et
+
+set -eu
+
+LC_ALL=C
+ME=$(basename "$0")
+PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+
+[ "${NGINX_ENTRYPOINT_WORKER_PROCESSES_AUTOTUNE:-}" ] || exit 0
+
+touch /etc/nginx/nginx.conf 2>/dev/null || { echo >&2 "$ME: error: can not modify /etc/nginx/nginx.conf (read-only file system?)"; exit 0; }
+
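+# integer ceiling division, e.g. "ceildiv 150000 100000" prints 2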
+ceildiv() {
+ num=$1
+ div=$2
+ echo $(( (num + div - 1) / div ))
+}
+
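+# count the CPUs in a cgroup cpuset list, e.g. a cpuset of "0-3,8" counts as 5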
+get_cpuset() {
+ cpusetroot=$1
+ cpusetfile=$2
+ ncpu=0
+ [ -f "$cpusetroot/$cpusetfile" ] || return 1
+ for token in $( tr ',' ' ' < "$cpusetroot/$cpusetfile" ); do
+ case "$token" in
+ *-*)
+ count=$( seq $(echo "$token" | tr '-' ' ') | wc -l )
+ ncpu=$(( ncpu+count ))
+ ;;
+ *)
+ ncpu=$(( ncpu+1 ))
+ ;;
+ esac
+ done
+ echo "$ncpu"
+}
+
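+# derive a CPU count from the cgroup v1 CFS quota, e.g. cpu.cfs_quota_us=150000 with
+# cpu.cfs_period_us=100000 yields 2; fails when no quota is set (quota of -1)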
+get_quota() {
+ cpuroot=$1
+ ncpu=0
+ [ -f "$cpuroot/cpu.cfs_quota_us" ] || return 1
+ [ -f "$cpuroot/cpu.cfs_period_us" ] || return 1
+ cfs_quota=$( cat "$cpuroot/cpu.cfs_quota_us" )
+ cfs_period=$( cat "$cpuroot/cpu.cfs_period_us" )
+ [ "$cfs_quota" = "-1" ] && return 1
+ [ "$cfs_period" = "0" ] && return 1
+ ncpu=$( ceildiv "$cfs_quota" "$cfs_period" )
+ [ "$ncpu" -gt 0 ] || return 1
+ echo "$ncpu"
+}
+
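+# same idea for cgroup v2, where cpu.max holds "quota period", e.g. "150000 100000" yields 2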
+get_quota_v2() {
+ cpuroot=$1
+ ncpu=0
+ [ -f "$cpuroot/cpu.max" ] || return 1
+ cfs_quota=$( cut -d' ' -f 1 < "$cpuroot/cpu.max" )
+ cfs_period=$( cut -d' ' -f 2 < "$cpuroot/cpu.max" )
+ [ "$cfs_quota" = "max" ] && return 1
+ [ "$cfs_period" = "0" ] && return 1
+ ncpu=$( ceildiv "$cfs_quota" "$cfs_period" )
+ [ "$ncpu" -gt 0 ] || return 1
+ echo "$ncpu"
+}
+
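+# resolve the filesystem path of the cgroup v1 hierarchy for a controller: combine the cgroup
+# root and mount point (fields 4 and 5 of /proc/self/mountinfo) with the controller's path
+# from /proc/self/cgroup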
+get_cgroup_v1_path() {
+ needle=$1
+ found=
+ foundroot=
+ mountpoint=
+
+ [ -r "/proc/self/mountinfo" ] || return 1
+ [ -r "/proc/self/cgroup" ] || return 1
+
+ while IFS= read -r line; do
+ case "$needle" in
+ "cpuset")
+ case "$line" in
+ *cpuset*)
+ found=$( echo "$line" | cut -d ' ' -f 4,5 )
+ break
+ ;;
+ esac
+ ;;
+ "cpu")
+ case "$line" in
+ *cpuset*)
+ ;;
+ *cpu,cpuacct*|*cpuacct,cpu|*cpuacct*|*cpu*)
+ found=$( echo "$line" | cut -d ' ' -f 4,5 )
+ break
+ ;;
+ esac
+ esac
+ done << __EOF__
+$( grep -F -- '- cgroup ' /proc/self/mountinfo )
+__EOF__
+
+ while IFS= read -r line; do
+ controller=$( echo "$line" | cut -d: -f 2 )
+ case "$needle" in
+ "cpuset")
+ case "$controller" in
+ cpuset)
+ mountpoint=$( echo "$line" | cut -d: -f 3 )
+ break
+ ;;
+ esac
+ ;;
+ "cpu")
+ case "$controller" in
+ cpu,cpuacct|cpuacct,cpu|cpuacct|cpu)
+ mountpoint=$( echo "$line" | cut -d: -f 3 )
+ break
+ ;;
+ esac
+ ;;
+ esac
+done << __EOF__
+$( grep -F -- 'cpu' /proc/self/cgroup )
+__EOF__
+
+ case "${found%% *}" in
+ "/")
+ foundroot="${found##* }$mountpoint"
+ ;;
+ "$mountpoint")
+ foundroot="${found##* }"
+ ;;
+ esac
+ echo "$foundroot"
+}
+
+get_cgroup_v2_path() {
+ found=
+ foundroot=
+ mountpoint=
+
+ [ -r "/proc/self/mountinfo" ] || return 1
+ [ -r "/proc/self/cgroup" ] || return 1
+
+ while IFS= read -r line; do
+ found=$( echo "$line" | cut -d ' ' -f 4,5 )
+ done << __EOF__
+$( grep -F -- '- cgroup2 ' /proc/self/mountinfo )
+__EOF__
+
+ while IFS= read -r line; do
+ mountpoint=$( echo "$line" | cut -d: -f 3 )
+done << __EOF__
+$( grep -F -- '0::' /proc/self/cgroup )
+__EOF__
+
+ case "${found%% *}" in
+ "")
+ return 1
+ ;;
+ "/")
+ foundroot="${found##* }$mountpoint"
+ ;;
+ "$mountpoint" | /../*)
+ foundroot="${found##* }"
+ ;;
+ esac
+ echo "$foundroot"
+}
+
+ncpu_online=$( getconf _NPROCESSORS_ONLN )
+ncpu_cpuset=
+ncpu_quota=
+ncpu_cpuset_v2=
+ncpu_quota_v2=
+
+cpuset=$( get_cgroup_v1_path "cpuset" ) && ncpu_cpuset=$( get_cpuset "$cpuset" "cpuset.effective_cpus" ) || ncpu_cpuset=$ncpu_online
+cpu=$( get_cgroup_v1_path "cpu" ) && ncpu_quota=$( get_quota "$cpu" ) || ncpu_quota=$ncpu_online
+cgroup_v2=$( get_cgroup_v2_path ) && ncpu_cpuset_v2=$( get_cpuset "$cgroup_v2" "cpuset.cpus.effective" ) || ncpu_cpuset_v2=$ncpu_online
+cgroup_v2=$( get_cgroup_v2_path ) && ncpu_quota_v2=$( get_quota_v2 "$cgroup_v2" ) || ncpu_quota_v2=$ncpu_online
+
+ncpu=$( printf "%s\n%s\n%s\n%s\n%s\n" \
+ "$ncpu_online" \
+ "$ncpu_cpuset" \
+ "$ncpu_quota" \
+ "$ncpu_cpuset_v2" \
+ "$ncpu_quota_v2" \
+ | sort -n \
+ | head -n 1 )
+
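+# rewrite the worker_processes directive to the smallest CPU count found above, keeping the
+# original directive as a comment; e.g. "worker_processes  auto;" becomes "worker_processes 2;"
+# when ncpu resolves to 2, and the previous file is preserved as nginx.conf.bak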
+sed -i.bak -r 's/^(worker_processes)(.*)$/# Commented out by '"$ME"' on '"$(date)"'\n#\1\2\n\1 '"$ncpu"';/' /etc/nginx/nginx.conf
diff --git a/stable/debian/Dockerfile b/stable/debian/Dockerfile
new file mode 100644
index 000000000..99f34952d
--- /dev/null
+++ b/stable/debian/Dockerfile
@@ -0,0 +1,145 @@
+#
+# NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh"
+#
+# PLEASE DO NOT EDIT IT DIRECTLY.
+#
+FROM debian:bookworm-slim
+
+LABEL maintainer="NGINX Docker Maintainers <docker-maint@nginx.com>"
+
+ENV NGINX_VERSION 1.28.0
+ENV NJS_VERSION 0.8.10
+ENV NJS_RELEASE 1~bookworm
+ENV PKG_RELEASE 1~bookworm
+ENV DYNPKG_RELEASE 1~bookworm
+
+RUN set -x \
+# create nginx user/group first, to be consistent throughout docker variants
+ && groupadd --system --gid 101 nginx \
+ && useradd --system --gid nginx --no-create-home --home /nonexistent --comment "nginx user" --shell /bin/false --uid 101 nginx \
+ && apt-get update \
+ && apt-get install --no-install-recommends --no-install-suggests -y gnupg1 ca-certificates \
+ && \
+ NGINX_GPGKEYS="573BFD6B3D8FBC641079A6ABABF5BD827BD9BF62 8540A6F18833A80E9C1653A42FD21310B49F6B46 9E9BE90EACBCDE69FE9B204CBCDCD8A38D88A2B3"; \
+ NGINX_GPGKEY_PATH=/etc/apt/keyrings/nginx-archive-keyring.gpg; \
+ export GNUPGHOME="$(mktemp -d)"; \
+ found=''; \
+ for NGINX_GPGKEY in $NGINX_GPGKEYS; do \
+ for server in \
+ hkp://keyserver.ubuntu.com:80 \
+ pgp.mit.edu \
+ ; do \
+ echo "Fetching GPG key $NGINX_GPGKEY from $server"; \
+ gpg1 --batch --keyserver "$server" --keyserver-options timeout=10 --recv-keys "$NGINX_GPGKEY" && found=yes && break; \
+ done; \
+ test -z "$found" && echo >&2 "error: failed to fetch GPG key $NGINX_GPGKEY" && exit 1; \
+ done; \
+ gpg1 --batch --export $NGINX_GPGKEYS > "$NGINX_GPGKEY_PATH" ; \
+ rm -rf "$GNUPGHOME"; \
+ apt-get remove --purge --auto-remove -y gnupg1 && rm -rf /var/lib/apt/lists/* \
+ && dpkgArch="$(dpkg --print-architecture)" \
+ && nginxPackages=" \
+ nginx=${NGINX_VERSION}-${PKG_RELEASE} \
+ nginx-module-xslt=${NGINX_VERSION}-${DYNPKG_RELEASE} \
+ nginx-module-geoip=${NGINX_VERSION}-${DYNPKG_RELEASE} \
+ nginx-module-image-filter=${NGINX_VERSION}-${DYNPKG_RELEASE} \
+ nginx-module-njs=${NGINX_VERSION}+${NJS_VERSION}-${NJS_RELEASE} \
+ " \
+ && case "$dpkgArch" in \
+ amd64|arm64) \
+# arches officially built by upstream
+ echo "deb [signed-by=$NGINX_GPGKEY_PATH] https://nginx.org/packages/debian/ bookworm nginx" >> /etc/apt/sources.list.d/nginx.list \
+ && apt-get update \
+ ;; \
+ *) \
+# we're on an architecture upstream doesn't officially build for
+# let's build binaries from the published packaging sources
+# new directory for storing sources and .deb files
+ tempDir="$(mktemp -d)" \
+ && chmod 777 "$tempDir" \
+# (777 to ensure APT's "_apt" user can access it too)
+ \
+# save list of currently-installed packages so build dependencies can be cleanly removed later
+ && savedAptMark="$(apt-mark showmanual)" \
+ \
+# build .deb files from upstream's packaging sources
+ && apt-get update \
+ && apt-get install --no-install-recommends --no-install-suggests -y \
+ curl \
+ devscripts \
+ equivs \
+ git \
+ libxml2-utils \
+ lsb-release \
+ xsltproc \
+ && ( \
+ cd "$tempDir" \
+ && REVISION="${NGINX_VERSION}-${PKG_RELEASE}" \
+ && REVISION=${REVISION%~*} \
+ && curl -f -L -O https://github.com/nginx/pkg-oss/archive/${REVISION}.tar.gz \
+ && PKGOSSCHECKSUM="517bc18954ccf4efddd51986584ca1f37966833ad342a297e1fe58fd0faf14c5a4dabcb23519dca433878a2927a95d6bea05a6749ee2fa67a33bf24cdc41b1e4 *${REVISION}.tar.gz" \
+ && if [ "$(openssl sha512 -r ${REVISION}.tar.gz)" = "$PKGOSSCHECKSUM" ]; then \
+ echo "pkg-oss tarball checksum verification succeeded!"; \
+ else \
+ echo "pkg-oss tarball checksum verification failed!"; \
+ exit 1; \
+ fi \
+ && tar xzvf ${REVISION}.tar.gz \
+ && cd pkg-oss-${REVISION} \
+ && cd debian \
+ && for target in base module-geoip module-image-filter module-njs module-xslt; do \
+ make rules-$target; \
+ mk-build-deps --install --tool="apt-get -o Debug::pkgProblemResolver=yes --no-install-recommends --yes" \
+ debuild-$target/nginx-$NGINX_VERSION/debian/control; \
+ done \
+ && make base module-geoip module-image-filter module-njs module-xslt \
+ ) \
+# we don't remove APT lists here because they get re-downloaded and removed later
+ \
+# reset apt-mark's "manual" list so that "purge --auto-remove" will remove all build dependencies
+# (which is done after we install the built packages so we don't have to redownload any overlapping dependencies)
+ && apt-mark showmanual | xargs apt-mark auto > /dev/null \
+ && { [ -z "$savedAptMark" ] || apt-mark manual $savedAptMark; } \
+ \
+# create a temporary local APT repo to install from (so that dependency resolution can be handled by APT, as it should be)
+ && ls -lAFh "$tempDir" \
+ && ( cd "$tempDir" && dpkg-scanpackages . > Packages ) \
+ && grep '^Package: ' "$tempDir/Packages" \
+ && echo "deb [ trusted=yes ] file://$tempDir ./" > /etc/apt/sources.list.d/temp.list \
+# work around the following APT issue by using "Acquire::GzipIndexes=false" (overriding "/etc/apt/apt.conf.d/docker-gzip-indexes")
+# Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
+# ...
+# E: Failed to fetch store:/var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
+ && apt-get -o Acquire::GzipIndexes=false update \
+ ;; \
+ esac \
+ \
+ && apt-get install --no-install-recommends --no-install-suggests -y \
+ $nginxPackages \
+ gettext-base \
+ curl \
+ && apt-get remove --purge --auto-remove -y && rm -rf /var/lib/apt/lists/* /etc/apt/sources.list.d/nginx.list \
+ \
+# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
+ && if [ -n "$tempDir" ]; then \
+ apt-get purge -y --auto-remove \
+ && rm -rf "$tempDir" /etc/apt/sources.list.d/temp.list; \
+ fi \
+# forward request and error logs to docker log collector
+ && ln -sf /dev/stdout /var/log/nginx/access.log \
+ && ln -sf /dev/stderr /var/log/nginx/error.log \
+# create a docker-entrypoint.d directory
+ && mkdir /docker-entrypoint.d
+
+COPY docker-entrypoint.sh /
+COPY 10-listen-on-ipv6-by-default.sh /docker-entrypoint.d
+COPY 15-local-resolvers.envsh /docker-entrypoint.d
+COPY 20-envsubst-on-templates.sh /docker-entrypoint.d
+COPY 30-tune-worker-processes.sh /docker-entrypoint.d
+ENTRYPOINT ["/docker-entrypoint.sh"]
+
+EXPOSE 80
+
+STOPSIGNAL SIGQUIT
+
+CMD ["nginx", "-g", "daemon off;"]
diff --git a/stable/debian/docker-entrypoint.sh b/stable/debian/docker-entrypoint.sh
new file mode 100755
index 000000000..8ea04f217
--- /dev/null
+++ b/stable/debian/docker-entrypoint.sh
@@ -0,0 +1,47 @@
+#!/bin/sh
+# vim:sw=4:ts=4:et
+
+set -e
+
+entrypoint_log() {
+ if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then
+ echo "$@"
+ fi
+}
+
+if [ "$1" = "nginx" ] || [ "$1" = "nginx-debug" ]; then
+ if /usr/bin/find "/docker-entrypoint.d/" -mindepth 1 -maxdepth 1 -type f -print -quit 2>/dev/null | read v; then
+ entrypoint_log "$0: /docker-entrypoint.d/ is not empty, will attempt to perform configuration"
+
+ entrypoint_log "$0: Looking for shell scripts in /docker-entrypoint.d/"
+ find "/docker-entrypoint.d/" -follow -type f -print | sort -V | while read -r f; do
+ case "$f" in
+ *.envsh)
+ if [ -x "$f" ]; then
+ entrypoint_log "$0: Sourcing $f";
+ . "$f"
+ else
+ # warn on shell scripts without exec bit
+ entrypoint_log "$0: Ignoring $f, not executable";
+ fi
+ ;;
+ *.sh)
+ if [ -x "$f" ]; then
+ entrypoint_log "$0: Launching $f";
+ "$f"
+ else
+ # warn on shell scripts without exec bit
+ entrypoint_log "$0: Ignoring $f, not executable";
+ fi
+ ;;
+ *) entrypoint_log "$0: Ignoring $f";;
+ esac
+ done
+
+ entrypoint_log "$0: Configuration complete; ready for start up"
+ else
+ entrypoint_log "$0: No files found in /docker-entrypoint.d/, skipping configuration"
+ fi
+fi
+
+exec "$@"
diff --git a/stable/stretch-perl/Dockerfile b/stable/stretch-perl/Dockerfile
deleted file mode 100644
index 2e262ece1..000000000
--- a/stable/stretch-perl/Dockerfile
+++ /dev/null
@@ -1,100 +0,0 @@
-FROM debian:stretch-slim
-
-LABEL maintainer="NGINX Docker Maintainers <docker-maint@nginx.com>"
-
-ENV NGINX_VERSION 1.14.0-1~stretch
-ENV NJS_VERSION 1.14.0.0.2.0-1~stretch
-
-RUN set -x \
- && apt-get update \
- && apt-get install --no-install-recommends --no-install-suggests -y gnupg1 apt-transport-https ca-certificates \
- && \
- NGINX_GPGKEY=573BFD6B3D8FBC641079A6ABABF5BD827BD9BF62; \
- found=''; \
- for server in \
- ha.pool.sks-keyservers.net \
- hkp://keyserver.ubuntu.com:80 \
- hkp://p80.pool.sks-keyservers.net:80 \
- pgp.mit.edu \
- ; do \
- echo "Fetching GPG key $NGINX_GPGKEY from $server"; \
- apt-key adv --keyserver "$server" --keyserver-options timeout=10 --recv-keys "$NGINX_GPGKEY" && found=yes && break; \
- done; \
- test -z "$found" && echo >&2 "error: failed to fetch GPG key $NGINX_GPGKEY" && exit 1; \
- apt-get remove --purge --auto-remove -y gnupg1 && rm -rf /var/lib/apt/lists/* \
- && dpkgArch="$(dpkg --print-architecture)" \
- && nginxPackages=" \
- nginx=${NGINX_VERSION} \
- nginx-module-xslt=${NGINX_VERSION} \
- nginx-module-geoip=${NGINX_VERSION} \
- nginx-module-image-filter=${NGINX_VERSION} \
- nginx-module-perl=${NGINX_VERSION} \
- nginx-module-njs=${NJS_VERSION} \
- " \
- && case "$dpkgArch" in \
- amd64|i386) \
-# arches officialy built by upstream
- echo "deb https://nginx.org/packages/debian/ stretch nginx" >> /etc/apt/sources.list.d/nginx.list \
- && apt-get update \
- ;; \
- *) \
-# we're on an architecture upstream doesn't officially build for
-# let's build binaries from the published source packages
- echo "deb-src https://nginx.org/packages/debian/ stretch nginx" >> /etc/apt/sources.list.d/nginx.list \
- \
-# new directory for storing sources and .deb files
- && tempDir="$(mktemp -d)" \
- && chmod 777 "$tempDir" \
-# (777 to ensure APT's "_apt" user can access it too)
- \
-# save list of currently-installed packages so build dependencies can be cleanly removed later
- && savedAptMark="$(apt-mark showmanual)" \
- \
-# build .deb files from upstream's source packages (which are verified by apt-get)
- && apt-get update \
- && apt-get build-dep -y $nginxPackages \
- && ( \
- cd "$tempDir" \
- && DEB_BUILD_OPTIONS="nocheck parallel=$(nproc)" \
- apt-get source --compile $nginxPackages \
- ) \
-# we don't remove APT lists here because they get re-downloaded and removed later
- \
-# reset apt-mark's "manual" list so that "purge --auto-remove" will remove all build dependencies
-# (which is done after we install the built packages so we don't have to redownload any overlapping dependencies)
- && apt-mark showmanual | xargs apt-mark auto > /dev/null \
- && { [ -z "$savedAptMark" ] || apt-mark manual $savedAptMark; } \
- \
-# create a temporary local APT repo to install from (so that dependency resolution can be handled by APT, as it should be)
- && ls -lAFh "$tempDir" \
- && ( cd "$tempDir" && dpkg-scanpackages . > Packages ) \
- && grep '^Package: ' "$tempDir/Packages" \
- && echo "deb [ trusted=yes ] file://$tempDir ./" > /etc/apt/sources.list.d/temp.list \
-# work around the following APT issue by using "Acquire::GzipIndexes=false" (overriding "/etc/apt/apt.conf.d/docker-gzip-indexes")
-# Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
-# ...
-# E: Failed to fetch store:/var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
- && apt-get -o Acquire::GzipIndexes=false update \
- ;; \
- esac \
- \
- && apt-get install --no-install-recommends --no-install-suggests -y \
- $nginxPackages \
- gettext-base \
- && apt-get remove --purge --auto-remove -y apt-transport-https ca-certificates && rm -rf /var/lib/apt/lists/* /etc/apt/sources.list.d/nginx.list \
- \
-# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
- && if [ -n "$tempDir" ]; then \
- apt-get purge -y --auto-remove \
- && rm -rf "$tempDir" /etc/apt/sources.list.d/temp.list; \
- fi
-
-# forward request and error logs to docker log collector
-RUN ln -sf /dev/stdout /var/log/nginx/access.log \
- && ln -sf /dev/stderr /var/log/nginx/error.log
-
-EXPOSE 80
-
-STOPSIGNAL SIGTERM
-
-CMD ["nginx", "-g", "daemon off;"]
diff --git a/stable/stretch/Dockerfile b/stable/stretch/Dockerfile
deleted file mode 100644
index a436cf47d..000000000
--- a/stable/stretch/Dockerfile
+++ /dev/null
@@ -1,99 +0,0 @@
-FROM debian:stretch-slim
-
-LABEL maintainer="NGINX Docker Maintainers <docker-maint@nginx.com>"
-
-ENV NGINX_VERSION 1.14.0-1~stretch
-ENV NJS_VERSION 1.14.0.0.2.0-1~stretch
-
-RUN set -x \
- && apt-get update \
- && apt-get install --no-install-recommends --no-install-suggests -y gnupg1 apt-transport-https ca-certificates \
- && \
- NGINX_GPGKEY=573BFD6B3D8FBC641079A6ABABF5BD827BD9BF62; \
- found=''; \
- for server in \
- ha.pool.sks-keyservers.net \
- hkp://keyserver.ubuntu.com:80 \
- hkp://p80.pool.sks-keyservers.net:80 \
- pgp.mit.edu \
- ; do \
- echo "Fetching GPG key $NGINX_GPGKEY from $server"; \
- apt-key adv --keyserver "$server" --keyserver-options timeout=10 --recv-keys "$NGINX_GPGKEY" && found=yes && break; \
- done; \
- test -z "$found" && echo >&2 "error: failed to fetch GPG key $NGINX_GPGKEY" && exit 1; \
- apt-get remove --purge --auto-remove -y gnupg1 && rm -rf /var/lib/apt/lists/* \
- && dpkgArch="$(dpkg --print-architecture)" \
- && nginxPackages=" \
- nginx=${NGINX_VERSION} \
- nginx-module-xslt=${NGINX_VERSION} \
- nginx-module-geoip=${NGINX_VERSION} \
- nginx-module-image-filter=${NGINX_VERSION} \
- nginx-module-njs=${NJS_VERSION} \
- " \
- && case "$dpkgArch" in \
- amd64|i386) \
-# arches officialy built by upstream
- echo "deb https://nginx.org/packages/debian/ stretch nginx" >> /etc/apt/sources.list.d/nginx.list \
- && apt-get update \
- ;; \
- *) \
-# we're on an architecture upstream doesn't officially build for
-# let's build binaries from the published source packages
- echo "deb-src https://nginx.org/packages/debian/ stretch nginx" >> /etc/apt/sources.list.d/nginx.list \
- \
-# new directory for storing sources and .deb files
- && tempDir="$(mktemp -d)" \
- && chmod 777 "$tempDir" \
-# (777 to ensure APT's "_apt" user can access it too)
- \
-# save list of currently-installed packages so build dependencies can be cleanly removed later
- && savedAptMark="$(apt-mark showmanual)" \
- \
-# build .deb files from upstream's source packages (which are verified by apt-get)
- && apt-get update \
- && apt-get build-dep -y $nginxPackages \
- && ( \
- cd "$tempDir" \
- && DEB_BUILD_OPTIONS="nocheck parallel=$(nproc)" \
- apt-get source --compile $nginxPackages \
- ) \
-# we don't remove APT lists here because they get re-downloaded and removed later
- \
-# reset apt-mark's "manual" list so that "purge --auto-remove" will remove all build dependencies
-# (which is done after we install the built packages so we don't have to redownload any overlapping dependencies)
- && apt-mark showmanual | xargs apt-mark auto > /dev/null \
- && { [ -z "$savedAptMark" ] || apt-mark manual $savedAptMark; } \
- \
-# create a temporary local APT repo to install from (so that dependency resolution can be handled by APT, as it should be)
- && ls -lAFh "$tempDir" \
- && ( cd "$tempDir" && dpkg-scanpackages . > Packages ) \
- && grep '^Package: ' "$tempDir/Packages" \
- && echo "deb [ trusted=yes ] file://$tempDir ./" > /etc/apt/sources.list.d/temp.list \
-# work around the following APT issue by using "Acquire::GzipIndexes=false" (overriding "/etc/apt/apt.conf.d/docker-gzip-indexes")
-# Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
-# ...
-# E: Failed to fetch store:/var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
- && apt-get -o Acquire::GzipIndexes=false update \
- ;; \
- esac \
- \
- && apt-get install --no-install-recommends --no-install-suggests -y \
- $nginxPackages \
- gettext-base \
- && apt-get remove --purge --auto-remove -y apt-transport-https ca-certificates && rm -rf /var/lib/apt/lists/* /etc/apt/sources.list.d/nginx.list \
- \
-# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
- && if [ -n "$tempDir" ]; then \
- apt-get purge -y --auto-remove \
- && rm -rf "$tempDir" /etc/apt/sources.list.d/temp.list; \
- fi
-
-# forward request and error logs to docker log collector
-RUN ln -sf /dev/stdout /var/log/nginx/access.log \
- && ln -sf /dev/stderr /var/log/nginx/error.log
-
-EXPOSE 80
-
-STOPSIGNAL SIGTERM
-
-CMD ["nginx", "-g", "daemon off;"]
diff --git a/sync-awsecr.sh b/sync-awsecr.sh
new file mode 100755
index 000000000..599a33a12
--- /dev/null
+++ b/sync-awsecr.sh
@@ -0,0 +1,171 @@
+#!/bin/bash
+set -eu
+
+image="nginx"
+registry="public.ecr.aws/z9d2n7e1"
+
+declare -A aliases
+aliases=(
+ [mainline]='1 1.29 latest'
+ [stable]='1.28'
+)
+
+architectures=( amd64 arm64v8 )
+
+self="$(basename "$BASH_SOURCE")"
+cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
+base=debian
+
+versions=( mainline stable )
+
+pulllist=()
+declare -A taglist
+taglist=()
+
+# get the most recent commit which modified any of "$@"
+fileCommit() {
+ git log -1 --format='format:%H' HEAD -- "$@"
+}
+
+# get the most recent commit which modified "$1/Dockerfile" or any file COPY'd from "$1/Dockerfile"
+dirCommit() {
+ local dir="$1"; shift
+ (
+ cd "$dir"
+ fileCommit \
+ Dockerfile \
+ $(git show HEAD:./Dockerfile | awk '
+ toupper($1) == "COPY" {
+ for (i = 2; i < NF; i++) {
+ print $i
+ }
+ }
+ ')
+ )
+}
+
+# prints "$2$1$3$1...$N"
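+# e.g. 'join ", " 1.29 latest' prints "1.29, latest"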
+join() {
+ local sep="$1"; shift
+ local out; printf -v out "${sep//%/%%}%s" "$@"
+ echo "${out#$sep}"
+}
+
+for version in "${versions[@]}"; do
+ commit="$(dirCommit "$version/$base")"
+ fullVersion="$(git show "$commit":"$version/$base/Dockerfile" | awk '$1 == "ENV" && $2 == "NGINX_VERSION" { print $3; exit }')"
+ pulllist+=( "$image:$fullVersion" )
+ for variant in perl alpine alpine-perl alpine-slim; do
+ pulllist+=( "$image:$fullVersion-$variant" )
+ done
+done
+
+for version in "${versions[@]}"; do
+ commit="$(dirCommit "$version/$base")"
+
+ fullVersion="$(git show "$commit":"$version/$base/Dockerfile" | awk '$1 == "ENV" && $2 == "NGINX_VERSION" { print $3; exit }')"
+
+ versionAliases=( $fullVersion )
+ if [ "$version" != "$fullVersion" ]; then
+ versionAliases+=( $version )
+ fi
+ versionAliases+=( ${aliases[$version]:-} )
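+ # e.g. for stable this expands to "1.28.0 stable 1.28" with the versions pinned in this change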
+
+ debianVersion="$(git show "$commit":"$version/$base/Dockerfile" | awk -F"[-:]" '$1 == "FROM debian" { print $2; exit }')"
+ debianAliases=( ${versionAliases[@]/%/-$debianVersion} )
+ debianAliases=( "${debianAliases[@]//latest-/}" )
+
+ for tag in ${versionAliases[@]:1} ${debianAliases[@]:1}; do
+ taglist["$image:$tag"]="$image:$fullVersion"
+ done
+
+ for variant in debian-perl; do
+ variantAliases=( "${versionAliases[@]/%/-perl}" )
+ variantAliases+=( "${versionAliases[@]/%/-${variant/debian/$debianVersion}}" )
+ variantAliases=( "${variantAliases[@]//latest-/}" )
+
+ for tag in ${variantAliases[@]}; do
+ if [ "$tag" != "${fullVersion}-perl" ]; then
+ taglist["$image:$tag"]="$image:$fullVersion-perl"
+ fi
+ done
+ done
+
+ commit="$(dirCommit "$version/alpine-slim")"
+ alpineVersion="$(git show "$commit":"$version/alpine-slim/Dockerfile" | awk -F: '$1 == "FROM alpine" { print $2; exit }')"
+
+ for variant in alpine alpine-perl alpine-slim; do
+ commit="$(dirCommit "$version/$variant")"
+
+ variantAliases=( "${versionAliases[@]/%/-$variant}" )
+ variantAliases+=( "${versionAliases[@]/%/-${variant/alpine/alpine$alpineVersion}}" )
+ variantAliases=( "${variantAliases[@]//latest-/}" )
+
+ for tag in ${variantAliases[@]}; do
+ if [ "$tag" != "${fullVersion}-$variant" ]; then
+ taglist["$image:$tag"]="$image:${fullVersion}-$variant"
+ fi
+ done
+ done
+
+done
+
+echo "#!/bin/sh"
+echo "set -ex"
+echo
+echo "export DOCKER_CLI_EXPERIMENTAL=enabled"
+echo
+echo "# pulling stuff"
+for arch in ${architectures[@]}; do
+ case $arch in
+ arm64v8)
+ parch="aarch64"
+ ;;
+ *)
+ parch=$arch
+ ;;
+ esac
+for tag in ${pulllist[@]}; do
+ echo "docker pull --platform linux/$parch $arch/$tag";
+done
+done
+
+echo
+
+echo "# tagging stuff"
+
+for arch in ${architectures[@]}; do
+for tag in ${pulllist[@]}; do
+ echo "docker tag $arch/$tag $registry/$tag-$arch"
+done
+for tag in ${!taglist[@]}; do
+ echo "docker tag $arch/${taglist[$tag]} $registry/$tag-$arch"
+done
+done
+
+echo "# pushing stuff"
+
+for arch in ${architectures[@]}; do
+for tag in ${pulllist[@]}; do
+ echo "docker push $registry/$tag-$arch"
+done
+for tag in ${!taglist[@]}; do
+ echo "docker push $registry/$tag-$arch"
+done
+done
+
+echo
+echo "# manifesting stuff"
+for tag in ${pulllist[@]} ${!taglist[@]}; do
+ string="docker manifest create --amend $registry/$tag"
+ for arch in ${architectures[@]}; do
+ string+=" $registry/$tag-$arch"
+ done
+ echo $string
+done
+
+echo
+echo "# pushing manifests"
+for tag in ${pulllist[@]} ${!taglist[@]}; do
+ echo "docker manifest push --purge $registry/$tag"
+done
diff --git a/update.sh b/update.sh
new file mode 100755
index 000000000..ca9491891
--- /dev/null
+++ b/update.sh
@@ -0,0 +1,264 @@
+#!/usr/bin/env bash
+set -Eeuo pipefail
+shopt -s nullglob
+
+cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
+
+declare branches=(
+ "stable"
+ "mainline"
+)
+
+# Current nginx versions
+# Remember to update pkgosschecksum when changing this.
+declare -A nginx=(
+ [mainline]='1.29.2'
+ [stable]='1.28.0'
+)
+
+# Current njs versions
+declare -A njs=(
+ [mainline]='0.9.3'
+ [stable]='0.8.10'
+)
+
+# Current njs patchlevel version
+# Remember to update pkgosschecksum when changing this.
+declare -A njspkg=(
+ [mainline]='1'
+ [stable]='1'
+)
+
+# Current otel versions
+declare -A otel=(
+ [mainline]='0.1.2'
+ [stable]='0.1.2'
+)
+
+# Current nginx package patchlevel version
+# Remember to update pkgosschecksum when changing this.
+declare -A pkg=(
+ [mainline]=1
+ [stable]=1
+)
+
+# Current built-in dynamic modules package patchlevel version
+# Remember to update pkgosschecksum when changing this
+declare -A dynpkg=(
+ [mainline]=1
+ [stable]=1
+)
+
+declare -A debian=(
+ [mainline]='trixie'
+ [stable]='bookworm'
+)
+
+declare -A alpine=(
+ [mainline]='3.22'
+ [stable]='3.21'
+)
+
+# When we bump the njs version in a stable release, we don't move the tag in the
+# pkg-oss repo. This setting lets us specify a revision to check out
+# when building packages on architectures not supported by nginx.org.
+# Remember to update pkgosschecksum when changing this.
+declare -A rev=(
+ [mainline]='${NGINX_VERSION}-${PKG_RELEASE}'
+ [stable]='${NGINX_VERSION}-${PKG_RELEASE}'
+)
+
+# Holds the SHA512 checksum for the pkg-oss tarball produced by the source code
+# revision/tag in the previous block.
+# Used in builds for architectures not packaged by nginx.org.
+declare -A pkgosschecksum=(
+ [mainline]='633b2a8b56bd48527d7e293a255fd706dfbb5a9c47605ff18e91a2a409801043ee00ecb0da5fadf9cdf1d483c5ca848e81c1861870619523e15ca9e494b6e700'
+ [stable]='517bc18954ccf4efddd51986584ca1f37966833ad342a297e1fe58fd0faf14c5a4dabcb23519dca433878a2927a95d6bea05a6749ee2fa67a33bf24cdc41b1e4'
+)
+
+get_packages() {
+ local distro="$1"
+ shift
+ local branch="$1"
+ shift
+ local bn=""
+ local otel=
+ local perl=
+ local r=
+ local sep=
+
+ case "$distro:$branch" in
+ alpine*:*)
+ r="r"
+ sep="."
+ ;;
+ debian*:*)
+ sep="+"
+ ;;
+ esac
+
+ case "$distro" in
+ *-perl)
+ perl="nginx-module-perl"
+ ;;
+ *-otel)
+ otel="nginx-module-otel"
+ bn="\n"
+ ;;
+ esac
+
+ echo -n ' \\\n'
+ case "$distro" in
+ *-slim)
+ for p in nginx; do
+ echo -n ' '"$p"'=${NGINX_VERSION}-'"$r"'${PKG_RELEASE} \\'
+ done
+ ;;
+ *)
+ for p in nginx; do
+ echo -n ' '"$p"'=${NGINX_VERSION}-'"$r"'${PKG_RELEASE} \\\n'
+ done
+ for p in nginx-module-xslt nginx-module-geoip nginx-module-image-filter $perl; do
+ echo -n ' '"$p"'=${NGINX_VERSION}-'"$r"'${DYNPKG_RELEASE} \\\n'
+ done
+ for p in nginx-module-njs; do
+ echo -n ' '"$p"'=${NGINX_VERSION}'"$sep"'${NJS_VERSION}-'"$r"'${NJS_RELEASE} \\'"$bn"
+ done
+ for p in $otel; do
+ echo -n ' '"$p"'=${NGINX_VERSION}'"$sep"'${OTEL_VERSION}-'"$r"'${PKG_RELEASE} \\'
+ done
+ ;;
+ esac
+}
+
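+# e.g. "get_packagerepo debian mainline" prints "https://nginx.org/packages/mainline/debian/",
+# while the stable branch drops the branch component from the URL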
+get_packagerepo() {
+ local distro="$1"
+ shift
+ distro="${distro%-perl}"
+ distro="${distro%-otel}"
+ distro="${distro%-slim}"
+ local branch="$1"
+ shift
+
+ [ "$branch" = "mainline" ] && branch="$branch/" || branch=""
+
+ echo "https://nginx.org/packages/${branch}${distro}/"
+}
+
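+# e.g. "get_packagever debian stable njs" prints "1~bookworm": the njs patchlevel plus the
+# Debian codename suffix (debianver is set in the main loop below)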
+get_packagever() {
+ local distro="$1"
+ shift
+ distro="${distro%-perl}"
+ distro="${distro%-otel}"
+ distro="${distro%-slim}"
+ local branch="$1"
+ shift
+ local package="$1"
+ shift
+ local suffix=
+
+ [ "${distro}" = "debian" ] && suffix="~${debianver}"
+
+ case "${package}" in
+ "njs")
+ echo ${njspkg[$branch]}${suffix}
+ ;;
+ "dyn")
+ echo ${dynpkg[$branch]}${suffix}
+ ;;
+ *)
+ echo ${pkg[$branch]}${suffix}
+ ;;
+ esac
+}
+
+get_buildtarget() {
+ local distro="$1"
+ shift
+ case "$distro" in
+ alpine-slim)
+ echo base
+ ;;
+ alpine)
+ echo module-geoip module-image-filter module-njs module-xslt
+ ;;
+ debian)
+ echo base module-geoip module-image-filter module-njs module-xslt
+ ;;
+ *-perl)
+ echo module-perl
+ ;;
+ *-otel)
+ echo module-otel
+ ;;
+ esac
+}
+
+generated_warning() {
+ cat <<__EOF__
+#
+# NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh"
+#
+# PLEASE DO NOT EDIT IT DIRECTLY.
+#
+__EOF__
+}
+
+for branch in "${branches[@]}"; do
+ for variant in \
+ alpine{,-perl,-otel,-slim} \
+ debian{,-perl,-otel}; do
+ echo "$branch: $variant dockerfiles"
+ dir="$branch/$variant"
+ variant="$(basename "$variant")"
+
+ [ -d "$dir" ] || continue
+
+ template="Dockerfile-${variant}.template"
+ {
+ generated_warning
+ cat "$template"
+ } >"$dir/Dockerfile"
+
+ debianver="${debian[$branch]}"
+ alpinever="${alpine[$branch]}"
+ nginxver="${nginx[$branch]}"
+ njsver="${njs[${branch}]}"
+ otelver="${otel[${branch}]}"
+ revver="${rev[${branch}]}"
+ pkgosschecksumver="${pkgosschecksum[${branch}]}"
+
+ packagerepo=$(get_packagerepo "$variant" "$branch")
+ packages=$(get_packages "$variant" "$branch")
+ packagever=$(get_packagever "$variant" "$branch" "any")
+ njspkgver=$(get_packagever "$variant" "$branch" "njs")
+ dynpkgver=$(get_packagever "$variant" "$branch" "dyn")
+ buildtarget=$(get_buildtarget "$variant")
+
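+    # replace the %%...%% placeholders in the generated Dockerfile,
+    # e.g. %%NGINX_VERSION%% becomes 1.28.0 for the stable branch and 1.29.2 for mainline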
+ sed -i.bak \
+ -e 's,%%ALPINE_VERSION%%,'"$alpinever"',' \
+ -e 's,%%DEBIAN_VERSION%%,'"$debianver"',' \
+ -e 's,%%DYNPKG_RELEASE%%,'"$dynpkgver"',' \
+ -e 's,%%NGINX_VERSION%%,'"$nginxver"',' \
+ -e 's,%%NJS_VERSION%%,'"$njsver"',' \
+ -e 's,%%NJS_RELEASE%%,'"$njspkgver"',' \
+ -e 's,%%OTEL_VERSION%%,'"$otelver"',' \
+ -e 's,%%PKG_RELEASE%%,'"$packagever"',' \
+ -e 's,%%PACKAGES%%,'"$packages"',' \
+ -e 's,%%PACKAGEREPO%%,'"$packagerepo"',' \
+ -e 's,%%REVISION%%,'"$revver"',' \
+ -e 's,%%PKGOSSCHECKSUM%%,'"$pkgosschecksumver"',' \
+ -e 's,%%BUILDTARGET%%,'"$buildtarget"',' \
+ "$dir/Dockerfile"
+
+ done
+
+ for variant in \
+ alpine-slim \
+ debian; do \
+ echo "$branch: $variant entrypoint scripts"
+ dir="$branch/$variant"
+ cp -a entrypoint/*.sh "$dir/"
+ cp -a entrypoint/*.envsh "$dir/"
+ done
+done