From e966ae0574b1f1c844d387c79771373c0c1332a7 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Wed, 30 Aug 2023 13:11:42 +0300 Subject: [PATCH 01/10] build : on Mac OS enable Metal by default --- CMakeLists.txt | 56 ++++++++++++++++++++++++++++---------------------- Makefile | 35 ++++++++++++++++--------------- 2 files changed, 51 insertions(+), 40 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 1b7cce9f1fbf8..e872ae310761b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -36,6 +36,12 @@ endif() # Option list # +if (APPLE) + set(LLAMA_METAL_DEFAULT ON) +else() + set(LLAMA_METAL_DEFAULT OFF) +endif() + # general option(LLAMA_STATIC "llama: static link libraries" OFF) option(LLAMA_NATIVE "llama: enable -march=native flag" OFF) @@ -76,7 +82,7 @@ option(LLAMA_CUDA_F16 "llama: use 16 bit floats for some set(LLAMA_CUDA_KQUANTS_ITER "2" CACHE STRING "llama: iters./thread per block for Q2_K/Q6_K") option(LLAMA_HIPBLAS "llama: use hipBLAS" OFF) option(LLAMA_CLBLAST "llama: use CLBlast" OFF) -option(LLAMA_METAL "llama: use Metal" OFF) +option(LLAMA_METAL "llama: use Metal" ${LLAMA_METAL_DEFAULT}) option(LLAMA_MPI "llama: use MPI" OFF) option(LLAMA_K_QUANTS "llama: use k-quants" ON) option(LLAMA_QKK_64 "llama: use super-block size of 64 for k-quants" OFF) @@ -158,6 +164,31 @@ if (APPLE AND LLAMA_ACCELERATE) endif() endif() +if (LLAMA_METAL) + find_library(FOUNDATION_LIBRARY Foundation REQUIRED) + find_library(METAL_FRAMEWORK Metal REQUIRED) + find_library(METALKIT_FRAMEWORK MetalKit REQUIRED) + + message(STATUS "Metal framework found") + + set(GGML_SOURCES_METAL ggml-metal.m ggml-metal.h) + + add_compile_definitions(GGML_USE_METAL) + #add_compile_definitions(GGML_METAL_NDEBUG) + + # get full path to the file + #add_compile_definitions(GGML_METAL_DIR_KERNELS="${CMAKE_CURRENT_SOURCE_DIR}/") + + # copy ggml-metal.metal to bin directory + configure_file(ggml-metal.metal bin/ggml-metal.metal COPYONLY) + + set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} + ${FOUNDATION_LIBRARY} + ${METAL_FRAMEWORK} + ${METALKIT_FRAMEWORK} + ) +endif() + if (LLAMA_BLAS) if (LLAMA_STATIC) set(BLA_STATIC ON) @@ -293,29 +324,6 @@ if (LLAMA_CUBLAS) endif() endif() -if (LLAMA_METAL) - find_library(FOUNDATION_LIBRARY Foundation REQUIRED) - find_library(METAL_FRAMEWORK Metal REQUIRED) - find_library(METALKIT_FRAMEWORK MetalKit REQUIRED) - - set(GGML_SOURCES_METAL ggml-metal.m ggml-metal.h) - - add_compile_definitions(GGML_USE_METAL) - #add_compile_definitions(GGML_METAL_NDEBUG) - - # get full path to the file - #add_compile_definitions(GGML_METAL_DIR_KERNELS="${CMAKE_CURRENT_SOURCE_DIR}/") - - # copy ggml-metal.metal to bin directory - configure_file(ggml-metal.metal bin/ggml-metal.metal COPYONLY) - - set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} - ${FOUNDATION_LIBRARY} - ${METAL_FRAMEWORK} - ${METALKIT_FRAMEWORK} - ) -endif() - if (LLAMA_MPI) cmake_minimum_required(VERSION 3.10) find_package(MPI) diff --git a/Makefile b/Makefile index 23f050c0d7a13..1e11fe808c520 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,10 @@ # Define the default target now so that it is always the first target BUILD_TARGETS = main quantize quantize-stats perplexity embedding vdot train-text-from-scratch convert-llama2c-to-ggml simple save-load-state server embd-input-test gguf llama-bench baby-llama beam-search tests/test-c.o +ifndef LLAMA_NO_METAL +BUILD_TARGETS += metal +endif + # Binaries only useful for tests TEST_TARGETS = tests/test-llama-grammar tests/test-grammar-parser tests/test-double-float tests/test-grad0 tests/test-opt 
tests/test-quantize-fns tests/test-quantize-perf tests/test-sampling tests/test-tokenizer-0-llama tests/test-tokenizer-0-falcon tests/test-tokenizer-1 @@ -235,14 +239,24 @@ endif endif ifndef LLAMA_NO_ACCELERATE - # Mac M1 - include Accelerate framework. - # `-framework Accelerate` works on Mac Intel as well, with negliable performance boost (as of the predict time). + # Mac OS - include Accelerate framework. + # `-framework Accelerate` works both with Apple Silicon and Mac Intel ifeq ($(UNAME_S),Darwin) CFLAGS += -DGGML_USE_ACCELERATE LDFLAGS += -framework Accelerate endif endif # LLAMA_NO_ACCELERATE +ifndef LLAMA_NO_METAL + # By default - use GPU acceleration on Mac OS + ifeq ($(UNAME_S),Darwin) + CFLAGS += -DGGML_USE_METAL #-DGGML_METAL_NDEBUG + CXXFLAGS += -DGGML_USE_METAL + LDFLAGS += -framework Foundation -framework Metal -framework MetalKit + OBJS += ggml-metal.o + endif +endif # LLAMA_NO_METAL + ifdef LLAMA_MPI CFLAGS += -DGGML_USE_MPI -Wno-cast-qual CXXFLAGS += -DGGML_USE_MPI -Wno-cast-qual @@ -352,17 +366,10 @@ ggml-cuda.o: ggml-cuda.cu ggml-cuda.h $(HIPCC) $(CXXFLAGS) $(HIPFLAGS) -x hip -c -o $@ $< endif # LLAMA_HIPBLAS -ifdef LLAMA_METAL - CFLAGS += -DGGML_USE_METAL #-DGGML_METAL_NDEBUG - CXXFLAGS += -DGGML_USE_METAL - LDFLAGS += -framework Foundation -framework Metal -framework MetalKit - OBJS += ggml-metal.o -endif # LLAMA_METAL - -ifdef LLAMA_METAL +ifndef LLAMA_NO_METAL ggml-metal.o: ggml-metal.m ggml-metal.h $(CC) $(CFLAGS) -c $< -o $@ -endif # LLAMA_METAL +endif # LLAMA_NO_METAL ifdef LLAMA_MPI ggml-mpi.o: ggml-mpi.c ggml-mpi.h @@ -475,11 +482,7 @@ baby-llama: examples/baby-llama/baby-llama.cpp ggml.o llama.o common.o $(OBJS) beam-search: examples/beam-search/beam-search.cpp build-info.h ggml.o llama.o common.o $(OBJS) $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) -ifneq '' '$(or $(filter clean,$(MAKECMDGOALS)),$(LLAMA_METAL))' -BUILD_TARGETS += metal -endif - -ifdef LLAMA_METAL +ifndef LLAMA_NO_METAL metal: examples/metal/metal.cpp ggml.o $(OBJS) $(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS) endif From bcf62ba7b4b0ec01dea9cbb5a9c39ce793843df8 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Fri, 1 Sep 2023 17:42:32 +0300 Subject: [PATCH 02/10] make : try to fix build on Linux --- Makefile | 58 ++++++++++++++++++++++++++++---------------------------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/Makefile b/Makefile index 1e11fe808c520..1aab79df73603 100644 --- a/Makefile +++ b/Makefile @@ -1,10 +1,35 @@ -# Define the default target now so that it is always the first target -BUILD_TARGETS = main quantize quantize-stats perplexity embedding vdot train-text-from-scratch convert-llama2c-to-ggml simple save-load-state server embd-input-test gguf llama-bench baby-llama beam-search tests/test-c.o +ifndef UNAME_S +UNAME_S := $(shell uname -s) +endif -ifndef LLAMA_NO_METAL -BUILD_TARGETS += metal +ifndef UNAME_P +UNAME_P := $(shell uname -p) +endif + +ifndef UNAME_M +UNAME_M := $(shell uname -m) +endif + +# Mac OS + Arm can report x86_64 +# ref: https://github.com/ggerganov/whisper.cpp/issues/66#issuecomment-1282546789 +ifeq ($(UNAME_S),Darwin) + ifndef LLAMA_NO_METAL + BUILD_TARGETS += metal + endif + + ifneq ($(UNAME_P),arm) + SYSCTL_M := $(shell sysctl -n hw.optional.arm64 2>/dev/null) + ifeq ($(SYSCTL_M),1) + # UNAME_P := arm + # UNAME_M := arm64 + warn := $(warning Your arch is announced as x86_64, but it seems to actually be ARM64. Not fixing that can lead to bad performance. 
For more info see: https://github.com/ggerganov/whisper.cpp/issues/66\#issuecomment-1282546789) + endif + endif endif +# Define the default target now so that it is always the first target +BUILD_TARGETS = main quantize quantize-stats perplexity embedding vdot train-text-from-scratch convert-llama2c-to-ggml simple save-load-state server embd-input-test gguf llama-bench baby-llama beam-search tests/test-c.o + # Binaries only useful for tests TEST_TARGETS = tests/test-llama-grammar tests/test-grammar-parser tests/test-double-float tests/test-grad0 tests/test-opt tests/test-quantize-fns tests/test-quantize-perf tests/test-sampling tests/test-tokenizer-0-llama tests/test-tokenizer-0-falcon tests/test-tokenizer-1 @@ -27,18 +52,6 @@ test: all: $(BUILD_TARGETS) $(TEST_TARGETS) -ifndef UNAME_S -UNAME_S := $(shell uname -s) -endif - -ifndef UNAME_P -UNAME_P := $(shell uname -p) -endif - -ifndef UNAME_M -UNAME_M := $(shell uname -m) -endif - ifdef RISCV_CROSS_COMPILE CC := riscv64-unknown-linux-gnu-gcc CXX := riscv64-unknown-linux-gnu-g++ @@ -47,19 +60,6 @@ endif CCV := $(shell $(CC) --version | head -n 1) CXXV := $(shell $(CXX) --version | head -n 1) -# Mac OS + Arm can report x86_64 -# ref: https://github.com/ggerganov/whisper.cpp/issues/66#issuecomment-1282546789 -ifeq ($(UNAME_S),Darwin) - ifneq ($(UNAME_P),arm) - SYSCTL_M := $(shell sysctl -n hw.optional.arm64 2>/dev/null) - ifeq ($(SYSCTL_M),1) - # UNAME_P := arm - # UNAME_M := arm64 - warn := $(warning Your arch is announced as x86_64, but it seems to actually be ARM64. Not fixing that can lead to bad performance. For more info see: https://github.com/ggerganov/whisper.cpp/issues/66\#issuecomment-1282546789) - endif - endif -endif - # # Compile flags # From b59beebdbfe942ed11c854c10a9eab67e2b74b09 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Sun, 3 Sep 2023 10:04:51 +0300 Subject: [PATCH 03/10] make : move targets back to the top --- Makefile | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/Makefile b/Makefile index 3876e93d00215..29c295ffba32b 100644 --- a/Makefile +++ b/Makefile @@ -1,3 +1,9 @@ +# Define the default target now so that it is always the first target +BUILD_TARGETS = main quantize quantize-stats perplexity embedding vdot train-text-from-scratch convert-llama2c-to-ggml simple save-load-state server embd-input-test gguf llama-bench baby-llama beam-search tests/test-c.o + +# Binaries only useful for tests +TEST_TARGETS = tests/test-llama-grammar tests/test-grammar-parser tests/test-double-float tests/test-grad0 tests/test-opt tests/test-quantize-fns tests/test-quantize-perf tests/test-sampling tests/test-tokenizer-0-llama tests/test-tokenizer-0-falcon tests/test-tokenizer-1 + ifndef UNAME_S UNAME_S := $(shell uname -s) endif @@ -27,12 +33,6 @@ ifeq ($(UNAME_S),Darwin) endif endif -# Define the default target now so that it is always the first target -BUILD_TARGETS = main quantize quantize-stats perplexity embedding vdot train-text-from-scratch convert-llama2c-to-ggml simple save-load-state server embd-input-test gguf llama-bench baby-llama beam-search tests/test-c.o - -# Binaries only useful for tests -TEST_TARGETS = tests/test-llama-grammar tests/test-grammar-parser tests/test-double-float tests/test-grad0 tests/test-opt tests/test-quantize-fns tests/test-quantize-perf tests/test-sampling tests/test-tokenizer-0-llama tests/test-tokenizer-0-falcon tests/test-tokenizer-1 - default: $(BUILD_TARGETS) test: From 15f1790a758ab6c6e9f2a52472638afa995fb235 Mon Sep 17 00:00:00 2001 From: Georgi 
Gerganov Date: Sun, 3 Sep 2023 10:13:44 +0300 Subject: [PATCH 04/10] make : fix target clean --- Makefile | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/Makefile b/Makefile index 29c295ffba32b..d5a6632e1d1ea 100644 --- a/Makefile +++ b/Makefile @@ -20,7 +20,7 @@ endif # ref: https://github.com/ggerganov/whisper.cpp/issues/66#issuecomment-1282546789 ifeq ($(UNAME_S),Darwin) ifndef LLAMA_NO_METAL - BUILD_TARGETS += metal + LLAMA_METAL := 1 endif ifneq ($(UNAME_P),arm) @@ -33,6 +33,10 @@ ifeq ($(UNAME_S),Darwin) endif endif +ifneq '' '$(or $(filter clean,$(MAKECMDGOALS)),$(LLAMA_METAL))' +BUILD_TARGETS += metal +endif + default: $(BUILD_TARGETS) test: @@ -223,7 +227,7 @@ ifndef LLAMA_NO_ACCELERATE endif endif # LLAMA_NO_ACCELERATE -ifndef LLAMA_NO_METAL +ifdef LLAMA_METAL # By default - use GPU acceleration on Mac OS ifeq ($(UNAME_S),Darwin) CFLAGS += -DGGML_USE_METAL #-DGGML_METAL_NDEBUG @@ -231,7 +235,7 @@ ifndef LLAMA_NO_METAL LDFLAGS += -framework Foundation -framework Metal -framework MetalKit OBJS += ggml-metal.o endif -endif # LLAMA_NO_METAL +endif # LLAMA_METAL ifdef LLAMA_MPI MK_CPPFLAGS += -DGGML_USE_MPI @@ -343,16 +347,16 @@ ggml-cuda.o: ggml-cuda.cu ggml-cuda.h $(HIPCC) $(CXXFLAGS) $(HIPFLAGS) -x hip -c -o $@ $< endif # LLAMA_HIPBLAS -ifndef LLAMA_NO_METAL +ifdef LLAMA_METAL MK_CPPFLAGS += -DGGML_USE_METAL #-DGGML_METAL_NDEBUG MK_LDFLAGS += -framework Foundation -framework Metal -framework MetalKit OBJS += ggml-metal.o endif # LLAMA_METAL -ifndef LLAMA_NO_METAL +ifdef LLAMA_METAL ggml-metal.o: ggml-metal.m ggml-metal.h $(CC) $(CFLAGS) -c $< -o $@ -endif # LLAMA_NO_METAL +endif # LLAMA_METAL ifdef LLAMA_MPI ggml-mpi.o: ggml-mpi.c ggml-mpi.h @@ -471,7 +475,7 @@ baby-llama: examples/baby-llama/baby-llama.cpp ggml.o llama.o common.o $(OBJS) beam-search: examples/beam-search/beam-search.cpp build-info.h ggml.o llama.o common.o $(OBJS) $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) -ifndef LLAMA_NO_METAL +ifdef LLAMA_METAL metal: examples/metal/metal.cpp ggml.o $(OBJS) $(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS) endif From 99161230c44fae7d4bac07b899d9b1f1e20ef407 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Sun, 3 Sep 2023 10:30:53 +0300 Subject: [PATCH 05/10] llama : enable GPU inference by default with Metal --- common/common.cpp | 4 ++-- common/common.h | 2 +- examples/main/main.cpp | 15 +++++++-------- examples/perplexity/perplexity.cpp | 12 ++++++------ llama.cpp | 6 +++++- 5 files changed, 21 insertions(+), 18 deletions(-) diff --git a/common/common.cpp b/common/common.cpp index 41fc59ced5b57..70b4bf2e9b8f1 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -702,7 +702,7 @@ struct llama_context_params llama_context_params_from_gpt_params(const gpt_param lparams.n_ctx = params.n_ctx; lparams.n_batch = params.n_batch; - lparams.n_gpu_layers = params.n_gpu_layers; + lparams.n_gpu_layers = params.n_gpu_layers != -1 ? params.n_gpu_layers : lparams.n_gpu_layers; lparams.main_gpu = params.main_gpu; lparams.tensor_split = params.tensor_split; lparams.low_vram = params.low_vram; @@ -1064,7 +1064,7 @@ void dump_non_result_info_yaml(FILE * stream, const gpt_params & params, const l fprintf(stream, "model: %s # default: models/7B/ggml-model.bin\n", params.model.c_str()); fprintf(stream, "mtest: %s # default: false\n", params.mem_test ? "true" : "false"); fprintf(stream, "multiline_input: %s # default: false\n", params.multiline_input ? 
"true" : "false"); - fprintf(stream, "n_gpu_layers: %d # default: 0\n", params.n_gpu_layers); + fprintf(stream, "n_gpu_layers: %d # default: -1\n", params.n_gpu_layers); fprintf(stream, "n_predict: %d # default: -1 (unlimited)\n", params.n_predict); fprintf(stream, "n_probs: %d # only used by server binary, default: 0\n", params.n_probs); fprintf(stream, "no_mmap: %s # default: false\n", !params.use_mmap ? "true" : "false"); diff --git a/common/common.h b/common/common.h index 5a379688ee529..28cc04772c314 100644 --- a/common/common.h +++ b/common/common.h @@ -33,7 +33,7 @@ struct gpt_params { int32_t n_batch = 512; // batch size for prompt processing (must be >=32 to use BLAS) int32_t n_keep = 0; // number of tokens to keep from initial prompt int32_t n_chunks = -1; // max number of chunks to process (-1 = unlimited) - int32_t n_gpu_layers = 0; // number of layers to store in VRAM + int32_t n_gpu_layers = -1; // number of layers to store in VRAM (-1 - use default) int32_t main_gpu = 0; // the GPU that is used for scratch and small tensors float tensor_split[LLAMA_MAX_DEVICES] = {0}; // how split tensors should be distributed across GPUs int32_t n_probs = 0; // if greater than 0, output the probabilities of top n_probs tokens. diff --git a/examples/main/main.cpp b/examples/main/main.cpp index 7117db4b091f1..38ad5df48ef59 100644 --- a/examples/main/main.cpp +++ b/examples/main/main.cpp @@ -151,14 +151,6 @@ int main(int argc, char ** argv) { LOG_TEE("%s: warning: scaling RoPE frequency by %g (default 1.0)\n", __func__, params.rope_freq_scale); } - if (params.n_ctx > 2048) { - // TODO: determine the actual max context of the model (e.g. 4096 for LLaMA v2) and use that instead of 2048 - LOG_TEE("%s: warning: base model only supports context sizes no greater than 2048 tokens (%d specified)\n", __func__, params.n_ctx); - } else if (params.n_ctx < 8) { - LOG_TEE("%s: warning: minimum context size is 8, using minimum size.\n", __func__); - params.n_ctx = 8; - } - LOG_TEE("%s: build = %d (%s)\n", __func__, BUILD_NUMBER, BUILD_COMMIT); if (params.seed == LLAMA_DEFAULT_SEED) { @@ -194,6 +186,13 @@ int main(int argc, char ** argv) { return 1; } + if (params.n_ctx > llama_n_ctx(ctx)) { + LOG_TEE("%s: warning: base model only supports context sizes no greater than %d tokens (%d specified)\n", __func__, llama_n_ctx(ctx), params.n_ctx); + } else if (params.n_ctx < 8) { + LOG_TEE("%s: warning: minimum context size is 8, using minimum size.\n", __func__); + params.n_ctx = 8; + } + // print system information { LOG_TEE("\n"); diff --git a/examples/perplexity/perplexity.cpp b/examples/perplexity/perplexity.cpp index 7c02b6d4058f1..843b2ae3527f6 100644 --- a/examples/perplexity/perplexity.cpp +++ b/examples/perplexity/perplexity.cpp @@ -368,7 +368,7 @@ results_perplexity perplexity(llama_context * ctx, const gpt_params & params) { // Example, we have a context window of 512, we will compute perplexity for each of the // last 256 tokens. Then, we split the input up into context window size chunks to // process the entire prompt. 
- const int first = std::min(512, params.n_ctx/2); + const int first = params.n_ctx/2; process_logits(n_vocab, logits.data() + first*n_vocab, tokens.data() + start + first, params.n_ctx - 1 - first, workers, nll, nll2, logit_history.data() + start + first, prob_history.data() + start + first); count += params.n_ctx - first - 1; @@ -668,11 +668,6 @@ int main(int argc, char ** argv) { params.n_ctx += params.ppl_stride/2; } - if (params.n_ctx > 2048) { - fprintf(stderr, "%s: warning: model might not support context sizes greater than 2048 tokens (%d specified);" - "expect poor results\n", __func__, params.n_ctx); - } - fprintf(stderr, "%s: build = %d (%s)\n", __func__, BUILD_NUMBER, BUILD_COMMIT); if (params.seed == LLAMA_DEFAULT_SEED) { @@ -698,6 +693,11 @@ int main(int argc, char ** argv) { return 1; } + if (params.n_ctx > llama_n_ctx(ctx)) { + fprintf(stderr, "%s: warning: model might not support context sizes greater than %d tokens (%d specified);" + "expect poor results\n", __func__, llama_n_ctx(ctx), params.n_ctx); + } + // print system information { fprintf(stderr, "\n"); diff --git a/llama.cpp b/llama.cpp index 2b0cf30f6ec0d..3ba522779c561 100644 --- a/llama.cpp +++ b/llama.cpp @@ -5334,7 +5334,7 @@ struct llama_context_params llama_context_default_params() { /*.seed =*/ LLAMA_DEFAULT_SEED, /*.n_ctx =*/ 512, /*.n_batch =*/ 512, - /*.gpu_layers =*/ 0, + /*.n_gpu_layers =*/ 0, /*.main_gpu =*/ 0, /*.tensor_split =*/ nullptr, /*.rope_freq_base =*/ 10000.0f, @@ -5351,6 +5351,10 @@ struct llama_context_params llama_context_default_params() { /*.embedding =*/ false, }; +#ifdef GGML_USE_METAL + result.n_gpu_layers = 1; +#endif + return result; } From 323a9d3b8c3d60c2d30e334f6635cf47777d1171 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Sun, 3 Sep 2023 10:39:15 +0300 Subject: [PATCH 06/10] llama : fix vocab_only logic when GPU is enabled --- llama.cpp | 48 ++++++++++++++++++++++++------------------------ 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/llama.cpp b/llama.cpp index 3ba522779c561..56e605df50bdf 100644 --- a/llama.cpp +++ b/llama.cpp @@ -5547,43 +5547,43 @@ struct llama_context * llama_new_context_with_model( } #endif } - } #ifdef GGML_USE_METAL - if (params.n_gpu_layers > 0) { - // this allocates all Metal resources and memory buffers + if (params.n_gpu_layers > 0) { + // this allocates all Metal resources and memory buffers - void * data_ptr = NULL; - size_t data_size = 0; + void * data_ptr = NULL; + size_t data_size = 0; - if (params.use_mmap) { - data_ptr = ctx->model.mapping->addr; - data_size = ctx->model.mapping->size; - } else { - data_ptr = ggml_get_mem_buffer(ctx->model.ctx); - data_size = ggml_get_mem_size (ctx->model.ctx); - } + if (params.use_mmap) { + data_ptr = ctx->model.mapping->addr; + data_size = ctx->model.mapping->size; + } else { + data_ptr = ggml_get_mem_buffer(ctx->model.ctx); + data_size = ggml_get_mem_size (ctx->model.ctx); + } - const size_t max_size = ggml_get_max_tensor_size(ctx->model.ctx); + const size_t max_size = ggml_get_max_tensor_size(ctx->model.ctx); - LLAMA_LOG_INFO("%s: max tensor size = %8.2f MB\n", __func__, max_size/1024.0/1024.0); + LLAMA_LOG_INFO("%s: max tensor size = %8.2f MB\n", __func__, max_size/1024.0/1024.0); #define LLAMA_METAL_CHECK_BUF(result) \ - if (!(result)) { \ - LLAMA_LOG_ERROR("%s: failed to add buffer\n", __func__); \ - llama_free(ctx); \ - return NULL; \ - } + if (!(result)) { \ + LLAMA_LOG_ERROR("%s: failed to add buffer\n", __func__); \ + llama_free(ctx); \ + return NULL; \ + } - 
LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "data", data_ptr, data_size, max_size)); + LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "data", data_ptr, data_size, max_size)); - LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "eval", ctx->buf_compute.data, ctx->buf_compute.size, 0)); - LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "kv", ctx->kv_self.buf.data, ctx->kv_self.buf.size, 0)); + LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "eval", ctx->buf_compute.data, ctx->buf_compute.size, 0)); + LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "kv", ctx->kv_self.buf.data, ctx->kv_self.buf.size, 0)); - LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "alloc", ctx->buf_alloc.data, ctx->buf_alloc.size, 0)); + LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "alloc", ctx->buf_alloc.data, ctx->buf_alloc.size, 0)); #undef LLAMA_METAL_CHECK_BUF - } + } #endif + } #ifdef GGML_USE_MPI ctx->ctx_mpi = ggml_mpi_init(); From 23360b15b685a30b7dab18f8746a0dc5ad868031 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Mon, 4 Sep 2023 22:14:22 +0300 Subject: [PATCH 07/10] common : better `n_gpu_layers` assignment --- common/common.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/common/common.cpp b/common/common.cpp index 70b4bf2e9b8f1..b99b5db27dad8 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -702,7 +702,9 @@ struct llama_context_params llama_context_params_from_gpt_params(const gpt_param lparams.n_ctx = params.n_ctx; lparams.n_batch = params.n_batch; - lparams.n_gpu_layers = params.n_gpu_layers != -1 ? params.n_gpu_layers : lparams.n_gpu_layers; + if (params.n_gpu_layers != -1) { + lparams.n_gpu_layers = params.n_gpu_layers; + } lparams.main_gpu = params.main_gpu; lparams.tensor_split = params.tensor_split; lparams.low_vram = params.low_vram; From ac4038aab194de6444db1b945feb1c8ad63439a5 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Mon, 4 Sep 2023 22:19:24 +0300 Subject: [PATCH 08/10] readme : update Metal instructions --- README.md | 26 ++++---------------------- 1 file changed, 4 insertions(+), 22 deletions(-) diff --git a/README.md b/README.md index 0cfd94db4843a..17a5c2cbf69cf 100644 --- a/README.md +++ b/README.md @@ -280,29 +280,11 @@ In order to build llama.cpp you have three different options. ### Metal Build -Using Metal allows the computation to be executed on the GPU for Apple devices: +On MacOS, Metal is enabled by default. Using Metal makes the computation run on the GPU. +To disable the Metal build at compile time use the `LLAMA_NO_METAL=1` flag or the `LLAMA_METAL=OFF` cmake option. -- Using `make`: - - ```bash - LLAMA_METAL=1 make - ``` - -- Using `CMake`: - - ```bash - mkdir build-metal - cd build-metal - cmake -DLLAMA_METAL=ON .. - cmake --build . --config Release - ``` - -When built with Metal support, you can enable GPU inference with the `--gpu-layers|-ngl` command-line argument. -Any value larger than 0 will offload the computation to the GPU. For example: - -```bash -./main -m ./models/7B/ggml-model-q4_0.gguf -n 128 -ngl 1 -``` +When built with Metal support, you can explicitly disable GPU inference with the `--gpu-layers|-ngl 0` command-line +argument. 
### MPI Build From 28eea84ac09a198cee363e9705fce3158f2036fa Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Mon, 4 Sep 2023 22:21:45 +0300 Subject: [PATCH 09/10] make : fix merge conflict remnants --- Makefile | 7 ------- 1 file changed, 7 deletions(-) diff --git a/Makefile b/Makefile index dea7cf74b31aa..847aa3a857b67 100644 --- a/Makefile +++ b/Makefile @@ -495,16 +495,9 @@ baby-llama: examples/baby-llama/baby-llama.cpp ggml.o llama.o common.o $(OBJS) beam-search: examples/beam-search/beam-search.cpp build-info.h ggml.o llama.o common.o $(OBJS) $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) -<<<<<<< HEAD -======= speculative: examples/speculative/speculative.cpp build-info.h ggml.o llama.o common.o $(OBJS) $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) -ifneq '' '$(or $(filter clean,$(MAKECMDGOALS)),$(LLAMA_METAL))' -BUILD_TARGETS += metal -endif - ->>>>>>> master ifdef LLAMA_METAL metal: examples/metal/metal.cpp ggml.o $(OBJS) $(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS) From 30ac7a41171bc9df043dc00b62f81b2c61f9b9e0 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Mon, 4 Sep 2023 22:23:16 +0300 Subject: [PATCH 10/10] gitignore : metal --- .gitignore | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/.gitignore b/.gitignore index e4157e804f5f3..b862a0415f279 100644 --- a/.gitignore +++ b/.gitignore @@ -31,28 +31,29 @@ tmp/ models/* models-mnt -/main -/quantize -/quantize-stats -/result -/perplexity -/embedding -/train-text-from-scratch -/convert-llama2c-to-ggml -/simple -/benchmark-matmult -/vdot -/server /Pipfile +/baby-llama +/beam-search +/benchmark-matmult +/convert-llama2c-to-ggml /embd-input-test +/embedding /gguf /gguf-llama-simple /libllama.so /llama-bench -/baby-llama -/beam-search +/main +/metal +/perplexity +/quantize +/quantize-stats +/result /save-load-state +/server +/simple /speculative +/train-text-from-scratch +/vdot build-info.h arm_neon.h compile_commands.json
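
Taken together, these patches make Metal the default on Apple platforms: CMake derives `LLAMA_METAL` from `LLAMA_METAL_DEFAULT`, the Makefile enables it on Darwin unless `LLAMA_NO_METAL` is set, and `llama_context_default_params()` sets `n_gpu_layers = 1` when built with `GGML_USE_METAL`, with `-1` in `gpt_params` meaning "use the library default". The sketch below illustrates the resulting workflow on macOS; the flags come from the patched README and build files above, and the model path is only a placeholder reused from the repository's earlier README example.

```bash
# Default build on macOS now enables Metal (no extra flags needed)
make

# Opt out of Metal at compile time
LLAMA_NO_METAL=1 make          # Makefile build
cmake -DLLAMA_METAL=OFF ..     # CMake build

# GPU inference is on by default when built with Metal;
# pass -ngl 0 to force CPU-only inference
./main -m ./models/7B/ggml-model-q4_0.gguf -n 128 -ngl 0
```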