
Commit cb8cfb9

Merge pull request #15 from OpenBMB/master
sync master
2 parents: ee5b850 + 77beb4d

440 files changed (+148011 / -142462 lines)

.devops/cloud-v-pipeline

+1 -1

@@ -15,7 +15,7 @@ node('x86_runner1'){ // Running on x86 runner containing latest vecto
 stage('Running llama.cpp'){
 sh'''#!/bin/bash
 module load gnu-bin2/0.1 # loading latest versions of vector qemu and vector gcc
-qemu-riscv64 -L /softwares/gnu-bin2/sysroot -cpu rv64,v=true,vlen=256,elen=64,vext_spec=v1.0 ./main -m /home/alitariq/codellama-7b.Q4_K_M.gguf -p "Anything" -n 9 > llama_log.txt # Running llama.cpp on vector qemu-riscv64
+qemu-riscv64 -L /softwares/gnu-bin2/sysroot -cpu rv64,v=true,vlen=256,elen=64,vext_spec=v1.0 ./llama-cli -m /home/alitariq/codellama-7b.Q4_K_M.gguf -p "Anything" -n 9 > llama_log.txt # Running llama.cpp on vector qemu-riscv64
 cat llama_log.txt # Printing results
 '''
 }

.devops/full-cuda.Dockerfile

+1 -1

@@ -12,7 +12,7 @@ FROM ${BASE_CUDA_DEV_CONTAINER} as build
 ARG CUDA_DOCKER_ARCH=all
 
 RUN apt-get update && \
-    apt-get install -y build-essential python3 python3-pip git libcurl4-openssl-dev
+    apt-get install -y build-essential python3 python3-pip git libcurl4-openssl-dev libgomp1
 
 COPY requirements.txt requirements.txt
 COPY requirements requirements

.devops/full.Dockerfile

+1 -1

@@ -3,7 +3,7 @@ ARG UBUNTU_VERSION=22.04
 FROM ubuntu:$UBUNTU_VERSION as build
 
 RUN apt-get update && \
-    apt-get install -y build-essential python3 python3-pip git libcurl4-openssl-dev
+    apt-get install -y build-essential python3 python3-pip git libcurl4-openssl-dev libgomp1
 
 COPY requirements.txt requirements.txt
 COPY requirements requirements

.devops/main-cuda.Dockerfile → .devops/llama-cli-cuda.Dockerfile

+6 -3

@@ -23,10 +23,13 @@ ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH}
 # Enable CUDA
 ENV LLAMA_CUDA=1
 
-RUN make -j$(nproc)
+RUN make -j$(nproc) llama-cli
 
 FROM ${BASE_CUDA_RUN_CONTAINER} as runtime
 
-COPY --from=build /app/main /main
+RUN apt-get update && \
+    apt-get install -y libgomp1
+
+COPY --from=build /app/llama-cli /llama-cli
 
-ENTRYPOINT [ "/main" ]
+ENTRYPOINT [ "/llama-cli" ]
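For orientation, a minimal sketch of how the renamed CUDA image might be built and used from the repository root; the image tag and model path are illustrative, and --gpus all assumes the NVIDIA Container Toolkit is installed on the host:

# Build the CLI image from the renamed Dockerfile
docker build -t local/llama-cli-cuda -f .devops/llama-cli-cuda.Dockerfile .
# Run it against a locally mounted model (path is a placeholder)
docker run --rm --gpus all -v /path/to/models:/models local/llama-cli-cuda -m /models/model.gguf -p "Hello" -n 64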

.devops/llama-cli-intel.Dockerfile

+26

@@ -0,0 +1,26 @@
+ARG ONEAPI_VERSION=2024.1.1-devel-ubuntu22.04
+
+FROM intel/oneapi-basekit:$ONEAPI_VERSION as build
+
+ARG LLAMA_SYCL_F16=OFF
+RUN apt-get update && \
+    apt-get install -y git
+
+WORKDIR /app
+
+COPY . .
+
+RUN if [ "${LLAMA_SYCL_F16}" = "ON" ]; then \
+        echo "LLAMA_SYCL_F16 is set" && \
+        export OPT_SYCL_F16="-DLLAMA_SYCL_F16=ON"; \
+    fi && \
+    cmake -B build -DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx ${OPT_SYCL_F16} && \
+    cmake --build build --config Release --target llama-cli
+
+FROM intel/oneapi-basekit:$ONEAPI_VERSION as runtime
+
+COPY --from=build /app/build/bin/llama-cli /llama-cli
+
+ENV LC_ALL=C.utf8
+
+ENTRYPOINT [ "/llama-cli" ]
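A similar hedged sketch for the new SYCL image; the tag is arbitrary, and exposing the Intel GPU to the container via --device /dev/dri is an assumption about the host setup:

# Build with FP16 SYCL kernels enabled (optional build arg from the Dockerfile)
docker build -t local/llama-cli-intel -f .devops/llama-cli-intel.Dockerfile --build-arg LLAMA_SYCL_F16=ON .
# Run the CLI with the GPU device passed through (model path is a placeholder)
docker run --rm --device /dev/dri -v /path/to/models:/models local/llama-cli-intel -m /models/model.gguf -p "Hello" -n 64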

.devops/main-rocm.Dockerfile → .devops/llama-cli-rocm.Dockerfile

+2 -2

@@ -40,6 +40,6 @@ ENV LLAMA_HIPBLAS=1
 ENV CC=/opt/rocm/llvm/bin/clang
 ENV CXX=/opt/rocm/llvm/bin/clang++
 
-RUN make -j$(nproc)
+RUN make -j$(nproc) llama-cli
 
-ENTRYPOINT [ "/app/main" ]
+ENTRYPOINT [ "/app/llama-cli" ]

.devops/main-vulkan.Dockerfile → .devops/llama-cli-vulkan.Dockerfile

+4 -4

@@ -3,7 +3,7 @@ ARG UBUNTU_VERSION=jammy
 FROM ubuntu:$UBUNTU_VERSION as build
 
 # Install build tools
-RUN apt update && apt install -y git build-essential cmake wget
+RUN apt update && apt install -y git build-essential cmake wget libgomp1
 
 # Install Vulkan SDK
 RUN wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | apt-key add - && \
@@ -15,13 +15,13 @@ RUN wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | apt-key
 WORKDIR /app
 COPY . .
 RUN cmake -B build -DLLAMA_VULKAN=1 && \
-    cmake --build build --config Release --target main
+    cmake --build build --config Release --target llama-cli
 
 # Clean up
 WORKDIR /
-RUN cp /app/build/bin/main /main && \
+RUN cp /app/build/bin/llama-cli /llama-cli && \
     rm -rf /app
 
 ENV LC_ALL=C.utf8
 
-ENTRYPOINT [ "/main" ]
+ENTRYPOINT [ "/llama-cli" ]

.devops/main.Dockerfile → .devops/llama-cli.Dockerfile

+6 -3

@@ -9,12 +9,15 @@ WORKDIR /app
 
 COPY . .
 
-RUN make -j$(nproc)
+RUN make -j$(nproc) llama-cli
 
 FROM ubuntu:$UBUNTU_VERSION as runtime
 
-COPY --from=build /app/main /main
+RUN apt-get update && \
+    apt-get install -y libgomp1
+
+COPY --from=build /app/llama-cli /llama-cli
 
 ENV LC_ALL=C.utf8
 
-ENTRYPOINT [ "/main" ]
+ENTRYPOINT [ "/llama-cli" ]

.devops/llama-cpp-clblast.srpm.spec

+7 -7

@@ -36,9 +36,9 @@ make -j LLAMA_CLBLAST=1
 
 %install
 mkdir -p %{buildroot}%{_bindir}/
-cp -p main %{buildroot}%{_bindir}/llamaclblast
-cp -p server %{buildroot}%{_bindir}/llamaclblastserver
-cp -p simple %{buildroot}%{_bindir}/llamaclblastsimple
+cp -p llama-cli %{buildroot}%{_bindir}/llama-clblast-cli
+cp -p llama-server %{buildroot}%{_bindir}/llama-clblast-server
+cp -p llama-simple %{buildroot}%{_bindir}/llama-clblast-simple
 
 mkdir -p %{buildroot}/usr/lib/systemd/system
 %{__cat} <<EOF > %{buildroot}/usr/lib/systemd/system/llamaclblast.service
@@ -49,7 +49,7 @@ After=syslog.target network.target local-fs.target remote-fs.target nss-lookup.t
 [Service]
 Type=simple
 EnvironmentFile=/etc/sysconfig/llama
-ExecStart=/usr/bin/llamaclblastserver $LLAMA_ARGS
+ExecStart=/usr/bin/llama-clblast-server $LLAMA_ARGS
 ExecReload=/bin/kill -s HUP $MAINPID
 Restart=never
 
@@ -67,9 +67,9 @@ rm -rf %{buildroot}
 rm -rf %{_builddir}/*
 
 %files
-%{_bindir}/llamaclblast
-%{_bindir}/llamaclblastserver
-%{_bindir}/llamaclblastsimple
+%{_bindir}/llama-clblast-cli
+%{_bindir}/llama-clblast-server
+%{_bindir}/llama-clblast-simple
 /usr/lib/systemd/system/llamaclblast.service
 %config /etc/sysconfig/llama

.devops/llama-cpp-cuda.srpm.spec

+7 -7

@@ -36,9 +36,9 @@ make -j LLAMA_CUDA=1
 
 %install
 mkdir -p %{buildroot}%{_bindir}/
-cp -p main %{buildroot}%{_bindir}/llamacppcuda
-cp -p server %{buildroot}%{_bindir}/llamacppcudaserver
-cp -p simple %{buildroot}%{_bindir}/llamacppcudasimple
+cp -p llama-cli %{buildroot}%{_bindir}/llama-cuda-cli
+cp -p llama-server %{buildroot}%{_bindir}/llama-cuda-server
+cp -p llama-simple %{buildroot}%{_bindir}/llama-cuda-simple
 
 mkdir -p %{buildroot}/usr/lib/systemd/system
 %{__cat} <<EOF > %{buildroot}/usr/lib/systemd/system/llamacuda.service
@@ -49,7 +49,7 @@ After=syslog.target network.target local-fs.target remote-fs.target nss-lookup.t
 [Service]
 Type=simple
 EnvironmentFile=/etc/sysconfig/llama
-ExecStart=/usr/bin/llamacppcudaserver $LLAMA_ARGS
+ExecStart=/usr/bin/llama-cuda-server $LLAMA_ARGS
 ExecReload=/bin/kill -s HUP $MAINPID
 Restart=never
 
@@ -67,9 +67,9 @@ rm -rf %{buildroot}
 rm -rf %{_builddir}/*
 
 %files
-%{_bindir}/llamacppcuda
-%{_bindir}/llamacppcudaserver
-%{_bindir}/llamacppcudasimple
+%{_bindir}/llama-cuda-cli
+%{_bindir}/llama-cuda-server
+%{_bindir}/llama-cuda-simple
 /usr/lib/systemd/system/llamacuda.service
 %config /etc/sysconfig/llama

.devops/llama-cpp.srpm.spec

+7 -7

@@ -38,9 +38,9 @@ make -j
 
 %install
 mkdir -p %{buildroot}%{_bindir}/
-cp -p main %{buildroot}%{_bindir}/llama
-cp -p server %{buildroot}%{_bindir}/llamaserver
-cp -p simple %{buildroot}%{_bindir}/llamasimple
+cp -p llama-cli %{buildroot}%{_bindir}/llama-cli
+cp -p llama-server %{buildroot}%{_bindir}/llama-server
+cp -p llama-simple %{buildroot}%{_bindir}/llama-simple
 
 mkdir -p %{buildroot}/usr/lib/systemd/system
 %{__cat} <<EOF > %{buildroot}/usr/lib/systemd/system/llama.service
@@ -51,7 +51,7 @@ After=syslog.target network.target local-fs.target remote-fs.target nss-lookup.t
 [Service]
 Type=simple
 EnvironmentFile=/etc/sysconfig/llama
-ExecStart=/usr/bin/llamaserver $LLAMA_ARGS
+ExecStart=/usr/bin/llama-server $LLAMA_ARGS
 ExecReload=/bin/kill -s HUP $MAINPID
 Restart=never
 
@@ -69,9 +69,9 @@ rm -rf %{buildroot}
 rm -rf %{_builddir}/*
 
 %files
-%{_bindir}/llama
-%{_bindir}/llamaserver
-%{_bindir}/llamasimple
+%{_bindir}/llama-cli
+%{_bindir}/llama-server
+%{_bindir}/llama-simple
 /usr/lib/systemd/system/llama.service
 %config /etc/sysconfig/llama
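For context, a hedged sketch of exercising the packaged service once one of these RPMs is installed; it assumes /etc/sysconfig/llama supplies LLAMA_ARGS (as the unit's EnvironmentFile expects) and that llama-server still listens on its default port 8080 — the model path is a placeholder:

# Point the service at a model, then start it
echo 'LLAMA_ARGS="-m /var/lib/llama/model.gguf"' | sudo tee /etc/sysconfig/llama
sudo systemctl enable --now llama.service
# Basic liveness check against the server's HTTP endpoint
curl http://127.0.0.1:8080/health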

.devops/server-cuda.Dockerfile → .devops/llama-server-cuda.Dockerfile

+4 -4

@@ -25,13 +25,13 @@ ENV LLAMA_CUDA=1
 # Enable cURL
 ENV LLAMA_CURL=1
 
-RUN make -j$(nproc)
+RUN make -j$(nproc) llama-server
 
 FROM ${BASE_CUDA_RUN_CONTAINER} as runtime
 
 RUN apt-get update && \
-    apt-get install -y libcurl4-openssl-dev
+    apt-get install -y libcurl4-openssl-dev libgomp1
 
-COPY --from=build /app/server /server
+COPY --from=build /app/llama-server /llama-server
 
-ENTRYPOINT [ "/server" ]
+ENTRYPOINT [ "/llama-server" ]

.devops/llama-server-intel.Dockerfile

+29

@@ -0,0 +1,29 @@
+ARG ONEAPI_VERSION=2024.1.1-devel-ubuntu22.04
+
+FROM intel/oneapi-basekit:$ONEAPI_VERSION as build
+
+ARG LLAMA_SYCL_F16=OFF
+RUN apt-get update && \
+    apt-get install -y git libcurl4-openssl-dev
+
+WORKDIR /app
+
+COPY . .
+
+RUN if [ "${LLAMA_SYCL_F16}" = "ON" ]; then \
+        echo "LLAMA_SYCL_F16 is set" && \
+        export OPT_SYCL_F16="-DLLAMA_SYCL_F16=ON"; \
+    fi && \
+    cmake -B build -DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DLLAMA_CURL=ON ${OPT_SYCL_F16} && \
+    cmake --build build --config Release --target llama-server
+
+FROM intel/oneapi-basekit:$ONEAPI_VERSION as runtime
+
+RUN apt-get update && \
+    apt-get install -y libcurl4-openssl-dev
+
+COPY --from=build /app/build/bin/llama-server /llama-server
+
+ENV LC_ALL=C.utf8
+
+ENTRYPOINT [ "/llama-server" ]

.devops/server-rocm.Dockerfile → .devops/llama-server-rocm.Dockerfile

+2 -2

@@ -45,6 +45,6 @@ ENV LLAMA_CURL=1
 RUN apt-get update && \
     apt-get install -y libcurl4-openssl-dev
 
-RUN make -j$(nproc)
+RUN make -j$(nproc) llama-server
 
-ENTRYPOINT [ "/app/server" ]
+ENTRYPOINT [ "/app/llama-server" ]

.devops/server-vulkan.Dockerfile → .devops/llama-server-vulkan.Dockerfile

+3 -3

@@ -19,13 +19,13 @@ RUN apt-get update && \
 WORKDIR /app
 COPY . .
 RUN cmake -B build -DLLAMA_VULKAN=1 -DLLAMA_CURL=1 && \
-    cmake --build build --config Release --target server
+    cmake --build build --config Release --target llama-server
 
 # Clean up
 WORKDIR /
-RUN cp /app/build/bin/server /server && \
+RUN cp /app/build/bin/llama-server /llama-server && \
     rm -rf /app
 
 ENV LC_ALL=C.utf8
 
-ENTRYPOINT [ "/server" ]
+ENTRYPOINT [ "/llama-server" ]

.devops/server.Dockerfile → .devops/llama-server.Dockerfile

+4 -4

@@ -11,15 +11,15 @@ COPY . .
 
 ENV LLAMA_CURL=1
 
-RUN make -j$(nproc)
+RUN make -j$(nproc) llama-server
 
 FROM ubuntu:$UBUNTU_VERSION as runtime
 
 RUN apt-get update && \
-    apt-get install -y libcurl4-openssl-dev
+    apt-get install -y libcurl4-openssl-dev libgomp1
 
-COPY --from=build /app/server /server
+COPY --from=build /app/llama-server /llama-server
 
 ENV LC_ALL=C.utf8
 
-ENTRYPOINT [ "/server" ]
+ENTRYPOINT [ "/llama-server" ]
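As a usage sketch for the renamed server image (tag, port mapping, and model path are illustrative; --host and --port are standard llama-server flags):

docker build -t local/llama-server -f .devops/llama-server.Dockerfile .
docker run --rm -p 8080:8080 -v /path/to/models:/models local/llama-server -m /models/model.gguf --host 0.0.0.0 --port 8080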

.devops/main-intel.Dockerfile

-34
This file was deleted.

.devops/nix/apps.nix

+3 -3

@@ -6,11 +6,11 @@
 let
   inherit (config.packages) default;
   binaries = [
-    "llama"
+    "llama-cli"
     "llama-embedding"
     "llama-server"
-    "quantize"
-    "train-text-from-scratch"
+    "llama-quantize"
+    "llama-train-text-from-scratch"
   ];
   mkApp = name: {
     type = "app";
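With the app list updated, the renamed binaries would be reachable through the flake roughly as follows (a sketch; model paths and the quantization type are illustrative):

nix run .#llama-cli -- -m ./model.gguf -p "Hello" -n 32
nix run .#llama-quantize -- ./model-f16.gguf ./model-q4_k_m.gguf Q4_K_M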

.devops/nix/package.nix

+1 -3

@@ -243,8 +243,6 @@ effectiveStdenv.mkDerivation (
 # TODO(SomeoneSerge): It's better to add proper install targets at the CMake level,
 # if they haven't been added yet.
 postInstall = ''
-  mv $out/bin/main${executableSuffix} $out/bin/llama${executableSuffix}
-  mv $out/bin/server${executableSuffix} $out/bin/llama-server${executableSuffix}
   mkdir -p $out/include
   cp $src/llama.h $out/include/
 '';
@@ -294,7 +292,7 @@ effectiveStdenv.mkDerivation (
 license = lib.licenses.mit;
 
 # Accommodates `nix run` and `lib.getExe`
-mainProgram = "llama";
+mainProgram = "llama-cli";
 
 # These people might respond, on the best effort basis, if you ping them
 # in case of Nix-specific regressions or for reviewing Nix-specific PRs.
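Because mainProgram now resolves to llama-cli, a bare nix run on the flake should launch the renamed binary directly (a sketch; the model path is a placeholder):

nix run . -- -m ./model.gguf -p "Hello" -n 32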
