run.sh
#!/bin/bash
pip install -r app/requirements.txt
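# Create .env.localhost from the example template if it does not exist yet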
if [ ! -f .env.localhost ]; then
  cp .env.localhost.example .env.localhost
fi
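# Export every variable defined in .env.localhost into the current shell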
set -a; source .env.localhost; set +a
echo "Starting Ollama Server"
ollama serve &
echo "Pulling Llama3.1 model"
ollama pull llama3.1
echo "Pulling Llama3.2-vision model"
ollama pull llama3.2-vision
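# Redis runs in a detached Docker container and restarts automatically if it stops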
echo "Starting Redis"
docker run -p 6379:6379 --restart always --detach redis
echo "Your ENV settings loaded from the .env.localhost file:"
printenv
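# Fetch the marker models up front so they are already cached when the workers start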
echo "Downloading models"
python -c 'from marker.models import load_all_models; load_all_models()'
echo "Starting Celery worker"
cd app
celery -A main.celery worker --loglevel=info --pool=solo & # to scale concurrent processing, run this line as many times as the number of concurrent worker processes you want
echo "Starting FastAPI server"
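# Enable uvicorn's auto-reload only outside production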
if [ "$APP_ENV" = 'production' ]; then
  uvicorn main:app --host 0.0.0.0 --port 8000;
else
  uvicorn main:app --host 0.0.0.0 --port 8000 --reload;
fi