From c58d8d5707417dd07158b21394e504e82ec2f7e3 Mon Sep 17 00:00:00 2001
From: Roger Meier
Date: Thu, 29 Feb 2024 15:07:52 +0100
Subject: [PATCH] feat: extend docker-compose example to multiple workers

---
 xinference/deploy/docker/docker-compose.yml | 30 ++++++++++++++++++---
 1 file changed, 27 insertions(+), 3 deletions(-)

diff --git a/xinference/deploy/docker/docker-compose.yml b/xinference/deploy/docker/docker-compose.yml
index cd6debc4ad..5f13fd897f 100644
--- a/xinference/deploy/docker/docker-compose.yml
+++ b/xinference/deploy/docker/docker-compose.yml
@@ -1,8 +1,32 @@
 version: '3.8'
 
 services:
-  xinference:
-    image: xprobe/xinference:latest
+  xinference-worker-1:
+    image: xprobe/xinference:nightly-main
+    ports:
+      - "30001:30001"
+    command: xinference-worker -e http://xinference-supervisor:9997 --worker-port 30001
+    deploy:
+      resources:
+        reservations:
+          devices:
+            - capabilities: [gpu]
+              driver: nvidia
+              count: all
+  xinference-worker-2:
+    image: xprobe/xinference:nightly-main
+    ports:
+      - "30002:30002"
+    command: xinference-worker -e http://xinference-supervisor:9997 --worker-port 30002
+    deploy:
+      resources:
+        reservations:
+          devices:
+            - capabilities: [gpu]
+              driver: nvidia
+              count: all
+  xinference-supervisor:
+    image: xprobe/xinference:nightly-main
     ports:
       - "9997:9997"
 #    volumes:
@@ -17,7 +41,7 @@ services:
 #    environment:
 #      # add envs here. Here's an example, if you want to download model from modelscope
 #      - XINFERENCE_MODEL_SRC=modelscope
-    command: xinference-local --host 0.0.0.0 --port 9997
+    command: xinference-supervisor --host 0.0.0.0 --port 9997 --log-level DEBUG
     deploy:
       resources:
         reservations:
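
A minimal smoke test for the resulting three-service stack (a sketch, assuming the patched file is saved as docker-compose.yml, Docker Compose v2, and the NVIDIA container toolkit on the host):

    # start the supervisor and both workers in the background
    docker compose up -d
    # watch the supervisor log until both workers have registered
    docker compose logs -f xinference-supervisor
    # the supervisor's REST endpoint should respond once it is up
    curl http://localhost:9997/v1/models

Within the default Compose network the workers reach the supervisor by its service name (xinference-supervisor); publishing ports 30001/30002 on the host is only needed if the workers must be reachable from outside that network.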