diff --git a/Dockerfile b/Dockerfile index b76aaf1d1d97..87d6bcb7451d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,50 +1,35 @@ -# Base image for building ARG LITELLM_BUILD_IMAGE=python:3.9 # Runtime image ARG LITELLM_RUNTIME_IMAGE=python:3.9-slim - # Builder stage FROM $LITELLM_BUILD_IMAGE as builder -# Set the working directory to /app -WORKDIR /app - -# Install build dependencies -RUN apt-get clean && apt-get update && \ - apt-get install -y gcc python3-dev && \ - rm -rf /var/lib/apt/lists/* - -RUN pip install --upgrade pip && \ - pip install build - -# Copy the current directory contents into the container at /app -COPY . . - -# Build the package -RUN rm -rf dist/* && python -m build - -# There should be only one wheel file now, assume the build only creates one -RUN ls -1 dist/*.whl | head -1 - -# Install the package -RUN pip install dist/*.whl - -# install dependencies as wheels -RUN pip wheel --no-cache-dir --wheel-dir=/wheels/ -r requirements.txt + @@ -35,8 +35,12 @@ RUN pip wheel --no-cache-dir --wheel-dir=/wheels/ -r requirements.txt # Runtime stage FROM $LITELLM_RUNTIME_IMAGE as runtime +ARG with_database WORKDIR /app +# Copy the current directory contents into the container at /app +COPY . . +RUN ls -la /app # Copy the built wheel from the builder stage to the runtime stage; assumes only one wheel file is present COPY --from=builder /app/dist/*.whl . 
-COPY --from=builder /wheels/ /wheels/ - + @@ -45,9 +48,17 @@ COPY --from=builder /wheels/ /wheels/ # Install the built wheel using pip; again using a wildcard if it's the only file RUN pip install *.whl /wheels/* --no-index --find-links=/wheels/ && rm -f *.whl && rm -rf /wheels +# Check if the with_database argument is set to 'true' +RUN echo "Value of with_database is: ${with_database}" +# If true, execute the following instructions +RUN if [ "$with_database" = "true" ]; then \ + prisma generate; \ + chmod +x /app/retry_push.sh; \ + /app/retry_push.sh; \ + fi EXPOSE 4000/tcp diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py index d3862973eff1..2f2bc76c58e3 100644 --- a/litellm/proxy/proxy_server.py +++ b/litellm/proxy/proxy_server.py @@ -2255,6 +2255,28 @@ async def health_endpoint( } +@router.get("/health/readiness", tags=["health"]) +async def health_readiness(): + """ + Unprotected endpoint for checking if worker can receive requests + """ + global prisma_client + if prisma_client is not None: # if db passed in, check if it's connected + if prisma_client.db.is_connected(): + return {"status": "healthy"} + else: + return {"status": "healthy"} + raise HTTPException(status_code=503, detail="Service Unhealthy") + + +@router.get("/health/liveliness", tags=["health"]) +async def health_liveliness(): + """ + Unprotected endpoint for checking if worker is alive + """ + return "I'm alive!" 
+ + @router.get("/") async def home(request: Request): return "LiteLLM: RUNNING" diff --git a/litellm/tests/test_async_fn.py b/litellm/tests/test_async_fn.py index ecc862735bc9..22c24a973832 100644 --- a/litellm/tests/test_async_fn.py +++ b/litellm/tests/test_async_fn.py @@ -130,6 +130,7 @@ async def test_get_response(): # test_async_anyscale_response() +@pytest.mark.skip(reason="Flaky test-cloudflare is very unstable") def test_async_completion_cloudflare(): try: litellm.set_verbose = True @@ -154,6 +155,7 @@ async def test(): # test_async_completion_cloudflare() +@pytest.mark.skip(reason="Flaky test") def test_get_cloudflare_response_streaming(): import asyncio diff --git a/litellm/tests/test_completion.py b/litellm/tests/test_completion.py index 2ddb5fa13f0b..3497c9d01945 100644 --- a/litellm/tests/test_completion.py +++ b/litellm/tests/test_completion.py @@ -1924,6 +1924,7 @@ def test_completion_together_ai_stream(): # Cloud flare AI tests +@pytest.mark.skip(reason="Flaky test-cloudflare is very unstable") def test_completion_cloudflare(): try: litellm.set_verbose = True diff --git a/retry_push.sh b/retry_push.sh new file mode 100644 index 000000000000..5c41d72a09f3 --- /dev/null +++ b/retry_push.sh @@ -0,0 +1,28 @@ +#!/bin/bash + +retry_count=0 +max_retries=3 +exit_code=1 + +until [ $retry_count -ge $max_retries ] || [ $exit_code -eq 0 ] +do + retry_count=$((retry_count+1)) + echo "Attempt $retry_count..." + + # Run the Prisma db push command + prisma db push --accept-data-loss + + exit_code=$? + + if [ $exit_code -ne 0 ] && [ $retry_count -lt $max_retries ]; then + echo "Retrying in 10 seconds..." + sleep 10 + fi +done + +if [ $exit_code -ne 0 ]; then + echo "Unable to push database changes after $max_retries retries." + exit 1 +fi + +echo "Database push successful!" 
\ No newline at end of file diff --git a/schema.prisma b/schema.prisma new file mode 100644 index 000000000000..d12cac8f20f3 --- /dev/null +++ b/schema.prisma @@ -0,0 +1,33 @@ +datasource client { + provider = "postgresql" + url = env("DATABASE_URL") +} + +generator client { + provider = "prisma-client-py" +} + +model LiteLLM_UserTable { + user_id String @unique + max_budget Float? + spend Float @default(0.0) + user_email String? +} + +// required for token gen +model LiteLLM_VerificationToken { + token String @unique + spend Float @default(0.0) + expires DateTime? + models String[] + aliases Json @default("{}") + config Json @default("{}") + user_id String? + max_parallel_requests Int? + metadata Json @default("{}") +} + +model LiteLLM_Config { + param_name String @id + param_value Json? +} \ No newline at end of file