Skip to content

Commit

Permalink
feat: docker_fast.sh (#9273)
Browse files Browse the repository at this point in the history
  • Loading branch information
ludamad authored Oct 18, 2024
1 parent da6c579 commit 57e792e
Show file tree
Hide file tree
Showing 7 changed files with 235 additions and 7 deletions.
143 changes: 143 additions & 0 deletions Dockerfile.fast
Original file line number Diff line number Diff line change
@@ -0,0 +1,143 @@
# syntax=docker/dockerfile:1
# Builds aztec image quickly, bootstrapping from S3 cache.
# The syntax directive above pins the BuildKit frontend; this file depends on
# BuildKit features (RUN --mount=type=secret) and will not build classically.
# TODO: Implement fallback to a normal build when cache is unavailable.
# Currently optimized for 'yarn-project' oriented workflow.
# If other components are iterated on, this will still work if a PR pushes to
# cache, or earthly-local is tweaked to push to cache and the component built.

# Architecture of the base image; override with `--build-arg ARCH=arm64`.
ARG ARCH=amd64

# Set the base image based on the architecture
FROM aztecprotocol/build:1.0-${ARCH}

# Set the working directory
WORKDIR /usr/src

# Initialize a git repository: the bootstrap_cache.sh scripts key cached
# artifacts on git content hashes, so each tarball extracted below must be
# committed before its cache step runs.
# NOTE(review): gc.auto is set with --global while user.name/email are
# repo-local — presumably intentional inside a throwaway container; confirm.
RUN git init -b master \
    && git config --global gc.auto 0 \
    && git add . \
    && git config user.name 'AztecBot' \
    && git config user.email 'tech@aztecprotocol.com'

# ---------- EXTRACT BUILD-SYSTEM ----------
# Each component arrives as a `git archive` tarball produced by docker_fast.sh;
# extract, then commit so content hashes reflect it.
COPY build-system.tar.gz .
RUN tar -xzf build-system.tar.gz \
    && rm build-system.tar.gz && git add . \
    && git commit -m "Update git metadata" >/dev/null

# ---------- BUILD BARRETENBERG ----------
COPY barretenberg.tar.gz .
RUN tar -xzf barretenberg.tar.gz \
    && rm barretenberg.tar.gz && git add . \
    && git commit -m "Update git metadata" >/dev/null

# Bootstrap cache for barretenberg/cpp.
# AWS credentials come in as BuildKit secrets so they never land in a layer.
RUN --mount=type=secret,id=aws_access_key_id \
    --mount=type=secret,id=aws_secret_access_key \
    cd barretenberg/cpp \
    && AWS_ACCESS_KEY_ID=$(cat /run/secrets/aws_access_key_id) \
    AWS_SECRET_ACCESS_KEY=$(cat /run/secrets/aws_secret_access_key) \
    ./bootstrap_cache.sh \
    && echo "barretenberg/cpp: Success"

# Bootstrap cache for barretenberg/ts
RUN --mount=type=secret,id=aws_access_key_id \
    --mount=type=secret,id=aws_secret_access_key \
    cd barretenberg/ts \
    && AWS_ACCESS_KEY_ID=$(cat /run/secrets/aws_access_key_id) \
    AWS_SECRET_ACCESS_KEY=$(cat /run/secrets/aws_secret_access_key) \
    ./bootstrap_cache.sh \
    && echo "barretenberg/ts: Success"

# ---------- BUILD NOIR ----------
COPY noir.tar.gz .
RUN tar -xzf noir.tar.gz \
    && rm noir.tar.gz && git add . \
    && git commit -m "Update git metadata" >/dev/null

# Bootstrap cache for Noir
RUN --mount=type=secret,id=aws_access_key_id \
    --mount=type=secret,id=aws_secret_access_key \
    cd noir \
    && AWS_ACCESS_KEY_ID=$(cat /run/secrets/aws_access_key_id) \
    AWS_SECRET_ACCESS_KEY=$(cat /run/secrets/aws_secret_access_key) \
    ./bootstrap_cache.sh \
    && echo "noir: Success"

# ---------- BUILD L1 CONTRACTS ----------
COPY l1-contracts.tar.gz .
RUN tar -xzf l1-contracts.tar.gz \
    && rm l1-contracts.tar.gz && git add . \
    && git commit -m "Update git metadata" >/dev/null

# Bootstrap cache for L1 Contracts
RUN --mount=type=secret,id=aws_access_key_id \
    --mount=type=secret,id=aws_secret_access_key \
    cd l1-contracts \
    && AWS_ACCESS_KEY_ID=$(cat /run/secrets/aws_access_key_id) \
    AWS_SECRET_ACCESS_KEY=$(cat /run/secrets/aws_secret_access_key) \
    ./bootstrap_cache.sh \
    && echo "l1-contracts: Success"

# ---------- BUILD AVM TRANSPILER ----------
COPY avm-transpiler.tar.gz .
RUN tar -xzf avm-transpiler.tar.gz \
    && rm avm-transpiler.tar.gz && git add . \
    && git commit -m "Update git metadata" >/dev/null

# Bootstrap cache for AVM Transpiler
RUN --mount=type=secret,id=aws_access_key_id \
    --mount=type=secret,id=aws_secret_access_key \
    cd avm-transpiler \
    && AWS_ACCESS_KEY_ID=$(cat /run/secrets/aws_access_key_id) \
    AWS_SECRET_ACCESS_KEY=$(cat /run/secrets/aws_secret_access_key) \
    ./bootstrap_cache.sh \
    && echo "avm-transpiler: Success"

# ---------- BUILD NOIR PROJECTS ----------
COPY noir-projects.tar.gz .
RUN tar -xzf noir-projects.tar.gz \
    && rm noir-projects.tar.gz && git add . \
    && git commit -m "Update git metadata" >/dev/null

# Bootstrap cache for Noir Projects
RUN --mount=type=secret,id=aws_access_key_id \
    --mount=type=secret,id=aws_secret_access_key \
    cd noir-projects \
    && AWS_ACCESS_KEY_ID=$(cat /run/secrets/aws_access_key_id) \
    AWS_SECRET_ACCESS_KEY=$(cat /run/secrets/aws_secret_access_key) \
    ./bootstrap_cache.sh \
    && echo "noir-projects: Success"

# ---------- BUILD YARN PROJECT ----------
COPY yarn-project.tar.gz .
RUN tar -xzf yarn-project.tar.gz \
    && rm yarn-project.tar.gz && git add . \
    && git commit -m "Update git metadata" >/dev/null

# Build yarn-project directly (no cache script); 'fast-only' deliberately does
# not fall back to a full build so cache regressions surface as failures.
RUN cd yarn-project \
    && ./bootstrap.sh fast-only \
    && echo "yarn-project: Success"

# ---------- SETUP ENVIRONMENT VARIABLES ----------
# Runtime paths consumed by the aztec node process started in ENTRYPOINT.
ENV BB_WORKING_DIRECTORY=/usr/src/bb
ENV BB_BINARY_PATH=/usr/src/barretenberg/cpp/build/bin/bb
ENV ACVM_WORKING_DIRECTORY=/usr/src/acvm
ENV ACVM_BINARY_PATH=/usr/src/noir/noir-repo/target/release/acvm
ENV PORT=8080

# Create the working directories the binaries above expect to exist.
RUN mkdir -p $BB_WORKING_DIRECTORY \
    $ACVM_WORKING_DIRECTORY \
    /usr/src/yarn-project/world-state/build

# Set the entrypoint (exec form: node is PID 1 and receives SIGTERM directly)
ENTRYPOINT ["node", "--no-warnings", "/usr/src/yarn-project/aztec/dest/bin/index.js"]

# Healthcheck configuration; shell-form CMD so $PORT expands at run time.
HEALTHCHECK --interval=10s --timeout=10s --retries=6 --start-period=120s \
    CMD curl -fsS http://127.0.0.1:$PORT/status

# Expose port 8080 (documentation only; publishing happens at `docker run`)
EXPOSE 8080
1 change: 0 additions & 1 deletion barretenberg/ts/bootstrap_cache.sh
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,6 @@
set -eu

cd "$(dirname "$0")"
source ../../build-system/scripts/setup_env '' '' mainframe_$USER > /dev/null

CACHE_SCRIPTS=../../build-system/s3-cache-scripts

Expand Down
9 changes: 5 additions & 4 deletions bootstrap.sh
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,8 @@ RED="\033[31m"
BOLD="\033[1m"
RESET="\033[0m"

source ./build-system/scripts/setup_env '' '' '' > /dev/null
# setup env
export PATH="$PATH:$(git rev-parse --show-toplevel)/build-system/scripts"

function encourage_dev_container {
echo -e "${BOLD}${RED}ERROR: Toolchain incompatability. We encourage use of our dev container. See build-images/README.md.${RESET}"
Expand Down Expand Up @@ -159,12 +160,12 @@ PROJECTS=(
)

# Build projects locally
for P in "${PROJECTS[@]}"; do
for project in "${PROJECTS[@]}"; do
echo "**************************************"
echo -e "\033[1mBootstrapping $P...\033[0m"
echo -e "\033[1mBootstrapping $project...\033[0m"
echo "**************************************"
echo
(cd $P && ./bootstrap.sh)
(cd $project && ./bootstrap.sh)
echo
echo
done
2 changes: 1 addition & 1 deletion build-system/s3-cache-scripts/cache-download.sh
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ function on_exit() {
trap on_exit EXIT

# Attempt to download the cache file
aws ${S3_BUILD_CACHE_AWS_PARAMS:-} s3 cp "s3://aztec-ci-artifacts/build-cache/$TAR_FILE" "$TAR_FILE" --quiet --no-progress
aws ${S3_BUILD_CACHE_AWS_PARAMS:-} s3 cp "s3://aztec-ci-artifacts/build-cache/$TAR_FILE" "$TAR_FILE" --quiet --no-progress || (echo "Cache download of $TAR_FILE failed." && exit 1)

# Extract the cache file
mkdir -p "$OUT_DIR"
Expand Down
79 changes: 79 additions & 0 deletions docker_fast.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,79 @@
#!/usr/bin/env bash
# TODO eventually rename this docker.sh when we've moved to it entirely
set -eux

function start_minio() {
  # Spin up a local MinIO server as a fast, S3-compatible cache layer —
  # unless something is already listening on its port (i.e. already running).
  if ! nc -z 127.0.0.1 12000 >/dev/null 2>&1 ; then
    docker run -d -p 12000:9000 -p 12001:12001 -v minio-data:/data \
      quay.io/minio/minio server /data --console-address ":12001"
    # Ensure the cache bucket exists; ignore "already owned" errors.
    AWS_ACCESS_KEY_ID="minioadmin" AWS_SECRET_ACCESS_KEY="minioadmin" aws --endpoint-url http://localhost:12000 s3 mb s3://aztec-ci-artifacts 2>/dev/null || true
  fi
}

# Whether to push built artifacts up to the S3 cache (download decided below).
S3_BUILD_CACHE_UPLOAD=${S3_BUILD_CACHE_UPLOAD:-false}
# Containers reach the host-side MinIO via the host's primary IP.
# NOTE(review): `hostname -I` is Linux-only — confirm whether macOS matters here.
S3_BUILD_CACHE_MINIO_URL="http://$(hostname -I | awk '{print $1}'):12000"

# Start local file server for a quicker cache layer
start_minio

if ! git diff-index --quiet HEAD --; then
  # The cache is keyed on git content hashes, so a dirty tree would either
  # pollute the cache or force a cacheless build — which defeats 'fast'.
  echo "Fatal: You have uncommitted changes. Commit or stash them first; the S3/MinIO cache is keyed on git data." >&2
  exit 1
elif [ -n "${AWS_ACCESS_KEY_ID:-}" ]; then
  # Caller already exported credentials.
  S3_BUILD_CACHE_DOWNLOAD=true
elif [ -f ~/.aws/credentials ]; then
  # Retrieve credentials if available in AWS config
  AWS_ACCESS_KEY_ID=$(aws configure get default.aws_access_key_id)
  AWS_SECRET_ACCESS_KEY=$(aws configure get default.aws_secret_access_key)
  S3_BUILD_CACHE_DOWNLOAD=true
else
  # No credentials anywhere: build without touching the S3 cache.
  S3_BUILD_CACHE_UPLOAD=false
  S3_BUILD_CACHE_DOWNLOAD=false
fi

TMP=$(mktemp -d)

function on_exit() {
  rm -rf "$TMP"
}
trap on_exit EXIT

# Save each secret environment variable into a separate file in $TMP, to be
# passed to BuildKit as secret mounts. xtrace is suppressed for these lines so
# `set -x` does not echo the credential values into the build log.
{ set +x; } 2>/dev/null
echo "${AWS_ACCESS_KEY_ID:-}" > "$TMP/aws_access_key_id.txt"
echo "${AWS_SECRET_ACCESS_KEY:-}" > "$TMP/aws_secret_access_key.txt"
echo "${S3_BUILD_CACHE_MINIO_URL:-}" > "$TMP/s3_build_cache_minio_url.txt"
echo "${S3_BUILD_CACHE_UPLOAD:-}" > "$TMP/s3_build_cache_upload.txt"
echo "${S3_BUILD_CACHE_DOWNLOAD:-}" > "$TMP/s3_build_cache_download.txt"
set -x

cd "$(git rev-parse --show-toplevel)"

PROJECTS=(
  barretenberg
  build-system
  noir
  l1-contracts
  avm-transpiler
  noir-projects
  yarn-project
)

for project in "${PROJECTS[@]}"; do
  # Archive git-tracked files per project into a tar.gz file; the Dockerfile
  # COPYs and commits each one so cache keys match the working tree's HEAD.
  git archive --format=tar.gz -o "$TMP/$project.tar.gz" HEAD "$project"
done

# Run Docker build with secrets, using the folder with our archives as context
DOCKER_BUILDKIT=1 docker build -t aztecprotocol/aztec -f Dockerfile.fast --progress=plain \
  --secret id=aws_access_key_id,src="$TMP/aws_access_key_id.txt" \
  --secret id=aws_secret_access_key,src="$TMP/aws_secret_access_key.txt" \
  --secret id=s3_build_cache_minio_url,src="$TMP/s3_build_cache_minio_url.txt" \
  --secret id=s3_build_cache_upload,src="$TMP/s3_build_cache_upload.txt" \
  --secret id=s3_build_cache_download,src="$TMP/s3_build_cache_download.txt" \
  "$TMP"
6 changes: 6 additions & 0 deletions yarn-project/bootstrap.sh
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,12 @@ elif [ "$CMD" = "full" ]; then
yarn install --immutable
yarn build
exit 0
elif [ "$CMD" = "fast-only" ]; then
# Unlike fast build below, we don't fall back to a normal build.
# This is used when we want to ensure that fast build works.
yarn install --immutable
yarn build:fast
exit 0
elif [[ -n "$CMD" && "$CMD" != "fast" ]]; then
echo "Unknown command: $CMD"
exit 1
Expand Down
2 changes: 1 addition & 1 deletion yarn-project/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@
"format": "yarn prettier --cache -w .",
"test": "FORCE_COLOR=true yarn workspaces foreach --exclude @aztec/aztec3-packages --exclude @aztec/end-to-end --exclude @aztec/prover-client -p -v run test && yarn workspaces foreach --include @aztec/end-to-end -p -v run test:unit",
"build": "FORCE_COLOR=true yarn workspaces foreach --parallel --topological-dev --verbose --exclude @aztec/aztec3-packages --exclude @aztec/docs run build",
"build:fast": "yarn generate && tsc -b",
"build:fast": "cd foundation && yarn build && cd ../circuits.js && yarn build && cd .. && yarn generate && tsc -b",
"build:dev": "./watch.sh",
"generate": "FORCE_COLOR=true yarn workspaces foreach --parallel --topological-dev --verbose run generate",
"clean": "yarn workspaces foreach -p -v run clean"
Expand Down

0 comments on commit 57e792e

Please sign in to comment.