# Dockerfile — 123 lines (112 loc), 3.18 KB
# NOTE: the GitHub web-page chrome and line-number gutter from the original
# scrape were removed here so the file is a valid, parseable Dockerfile.
# CUDA 12.1 + cuDNN 8 *devel* image on Ubuntu 22.04: the devel variant ships
# nvcc and headers, required if any pip package below builds GPU extensions.
FROM nvidia/cuda:12.1.0-cudnn8-devel-ubuntu22.04

# Build-time only (ARG, not ENV): silences interactive apt prompts during the
# build without leaking DEBIAN_FRONTEND into the runtime environment.
ARG DEBIAN_FRONTEND=noninteractive

# Expose all GPUs with compute+utility capabilities to the NVIDIA container
# runtime. Written in key=value form — the space-separated `ENV key value`
# syntax is deprecated (BuildKit check: LegacyKeyValueFormat).
ENV NVIDIA_VISIBLE_DEVICES=all
ENV NVIDIA_DRIVER_CAPABILITIES=compute,utility
# Install build tools, Python 3, and the native libraries needed by the
# scientific/imaging Python stack below.
#
# One layer on purpose: `apt-get update`, `install`, and the lists cleanup
# must share a RUN — removing /var/lib/apt/lists in a *later* layer (as the
# original did) leaves the lists baked into this layer and saves nothing
# (hadolint DL3009/DL3015). Packages are sorted alphabetically and deduped
# (libjpeg-dev and libpng-dev appeared twice in the original).
# NOTE(review): package versions are unpinned (DL3008) — rebuilds may drift.
RUN apt-get update && apt-get install -y --no-install-recommends \
        build-essential \
        ca-certificates \
        cmake \
        curl \
        ffmpeg \
        g++ \
        git \
        graphicsmagick \
        libatlas-base-dev \
        libavcodec-dev \
        libavformat-dev \
        libboost-all-dev \
        libfreetype6-dev \
        libgraphicsmagick1-dev \
        libgtk2.0-dev \
        libjpeg-dev \
        liblapack-dev \
        libpng-dev \
        libprotobuf-dev \
        libsm6 \
        libswscale-dev \
        libxext6 \
        libxrender-dev \
        libzmq3-dev \
        openslide-tools \
        pkg-config \
        protobuf-compiler \
        python3 \
        python3-dev \
        python3-lxml \
        python3-pip \
        python3-setuptools \
        python3-tk \
        python3-wheel \
        rsync \
        software-properties-common \
        unzip \
        vim \
        wget \
        x11-xserver-utils \
        zip \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*
# Install Python dependencies in a single layer; --no-cache-dir keeps pip's
# download cache out of the image (hadolint DL3042).
#
# Dropped "argparse": it is part of the Python 3 standard library since 3.2;
# the PyPI package of that name is an obsolete backport that can shadow the
# stdlib module. Packages are otherwise unchanged, sorted alphabetically.
# NOTE(review): versions are unpinned (DL3013) — consider moving this list to
# a pinned requirements.txt for reproducible builds.
RUN pip3 install --upgrade pip && pip3 install --no-cache-dir \
        accelerate \
        chromadb \
        cython \
        datasets \
        dspy-ai \
        ipykernel \
        jupyter \
        langchain \
        llama-parse llama-agents llama-index-llms-ollama llama-index llama-index-embeddings-huggingface \
        matplotlib \
        numpy \
        openpyxl \
        pandas \
        protobuf \
        requests \
        scikit-learn \
        scipy \
        seaborn \
        torch \
        transformers
# Previously-considered packages, kept for reference:
# rawpy \
# cmake \
# GitPython \
# setuptools \
# sentencepiece \
# pillow \
# llama-index-llms-huggingface-api llama-index-llms-huggingface llama-index-embeddings-huggingface \
# langchain langgraph langchain-core langchain-community \
# crewai \
# autogen
# ENV TORCH_CUDA_ARCH_LIST="6.1"
# Install miniconda
# ENV PATH="${PATH}:/root/miniconda3/bin"
# RUN wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh \
# && mkdir /root/.conda \
# && bash Miniconda3-latest-Linux-x86_64.sh -b \
# && rm -f Miniconda3-latest-Linux-x86_64.sh \
# && conda update conda \
# && conda install astunparse numpy ninja pyyaml mkl mkl-include setuptools cmake cffi typing_extensions future six requests dataclasses && \
# conda install -y intel::mkl-static intel::mkl-include && \
# conda install -y -c pytorch magma-cuda121
# RUN rm -rf /root/miniconda3
# RUN PATH=$(echo "$PATH" | sed -e 's/:\/root\/miniconda3\/bin$//')
# #ENV _GLIBCXX_USE_CXX11_ABI=1
# ENV CMAKE_PREFIX_PATH=${CONDA_PREFIX:-"$(dirname $(which conda))/../"}
# RUN git clone --recursive https://github.com/pytorch/pytorch && \
# cd pytorch && \
# make triton && \
# git checkout v2.1.0 && \
# # if you are updating an existing checkout
# git submodule sync && \
# git submodule update --init --recursive && \
# python3 setup.py develop
#RUN TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0" TORCH_NVCC_FLAGS="-Xfatbin -compress-all" \
# CMAKE_PREFIX_PATH="$(dirname $(which conda))/../"
# NOTE: no ENTRYPOINT/CMD is active, so containers fall back to the base
# image's default command; uncomment the exec-form line below for a shell.
# ENTRYPOINT ["/bin/bash"]