From 00d772d11c8127020d9d1f211d20f40a99488249 Mon Sep 17 00:00:00 2001
From: Wu Zhenyu
Date: Sun, 1 Oct 2023 17:42:23 +0800
Subject: [PATCH] Fix #777, #464

The code that disables some llama.cpp build flags on Apple x86_64 has been
moved upstream (https://github.com/ggerganov/llama.cpp/pull/3424), so it is
dropped here.

Use ExternalProject_Add() to simplify CMakeLists.txt.

Change _base_path from llama_cpp to llama_cpp/data, like other projects do:

- https://github.com/ssciwr/clang-format-wheel/blob/99df0fe0145e0af14c38772f1af78b215456e3a3/CMakeLists.txt#L44
- https://github.com/Freed-Wu/astyle-wheel/blob/340cef6dfd5cfa9158a321fe01b36df6a1e826e8/CMakeLists.txt#L12
- https://github.com/scikit-build/cmake-python-distributions/blob/13473f41061f73e599987af8dee5ba262e065769/setup.py#L46
- https://github.com/scikit-build/ninja-python-distributions/blob/e5510f4f725323b9be845a023bdc164718df1ddd/CMakeLists.txt#L149
---
 .gitignore             |  1 +
 .gitmodules            |  3 ---
 CMakeLists.txt         | 46 ++++++++++--------------------------------
 llama_cpp/llama_cpp.py |  2 +-
 vendor/llama.cpp       |  1 -
 5 files changed, 13 insertions(+), 40 deletions(-)
 delete mode 100644 .gitmodules
 delete mode 160000 vendor/llama.cpp

diff --git a/.gitignore b/.gitignore
index 51f357200..eb166d99e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,4 @@
+data/
 .python-version
 
 .vscode/
diff --git a/.gitmodules b/.gitmodules
deleted file mode 100644
index 7edf0975d..000000000
--- a/.gitmodules
+++ /dev/null
@@ -1,3 +0,0 @@
-[submodule "vendor/llama.cpp"]
-	path = vendor/llama.cpp
-	url = https://github.com/ggerganov/llama.cpp.git
diff --git a/CMakeLists.txt b/CMakeLists.txt
index c633c0797..d06eff417 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -5,40 +5,16 @@ project(llama_cpp)
 option(LLAMA_BUILD "Build llama.cpp shared library and install alongside python package" ON)
 
 if (LLAMA_BUILD)
-    set(BUILD_SHARED_LIBS "On")
-    if (APPLE AND NOT CMAKE_SYSTEM_PROCESSOR MATCHES "arm64")
-        # Need to disable these llama.cpp flags on Apple x86_64,
-        # otherwise users may encounter invalid instruction errors
-        set(LLAMA_AVX "Off" CACHE BOOL "llama: enable AVX" FORCE)
-        set(LLAMA_AVX2 "Off" CACHE BOOL "llama: enable AVX2" FORCE)
-        set(LLAMA_FMA "Off" CACHE BOOL "llama: enable FMA" FORCE)
-        set(LLAMA_F16C "Off" CACHE BOOL "llama: enable F16C" FORCE)
+    include(ExternalProject)
+    if(NOT DEFINED SKBUILD_PLATLIB_DIR)
+        set(SKBUILD_PLATLIB_DIR ${CMAKE_SOURCE_DIR})
     endif()
-    add_subdirectory(vendor/llama.cpp)
-    install(
-        TARGETS llama
-        LIBRARY DESTINATION ${SKBUILD_PLATLIB_DIR}/llama_cpp
-        RUNTIME DESTINATION ${SKBUILD_PLATLIB_DIR}/llama_cpp
-        ARCHIVE DESTINATION ${SKBUILD_PLATLIB_DIR}/llama_cpp
-        FRAMEWORK DESTINATION ${SKBUILD_PLATLIB_DIR}/llama_cpp
-        RESOURCE DESTINATION ${SKBUILD_PLATLIB_DIR}/llama_cpp
-    )
-    # Temporary fix for https://github.com/scikit-build/scikit-build-core/issues/374
-    install(
-        TARGETS llama
-        LIBRARY DESTINATION ${CMAKE_CURRENT_SOURCE_DIR}/llama_cpp
-        RUNTIME DESTINATION ${CMAKE_CURRENT_SOURCE_DIR}/llama_cpp
-        ARCHIVE DESTINATION ${CMAKE_CURRENT_SOURCE_DIR}/llama_cpp
-        FRAMEWORK DESTINATION ${CMAKE_CURRENT_SOURCE_DIR}/llama_cpp
-        RESOURCE DESTINATION ${CMAKE_CURRENT_SOURCE_DIR}/llama_cpp
-    )
-    # Workaround for Windows + CUDA https://github.com/abetlen/llama-cpp-python/issues/563
-    install(
-        FILES $<TARGET_RUNTIME_DLLS:llama>
-        DESTINATION ${SKBUILD_PLATLIB_DIR}/llama_cpp
-    )
-    install(
-        FILES $<TARGET_RUNTIME_DLLS:llama>
-        DESTINATION ${CMAKE_CURRENT_SOURCE_DIR}/llama_cpp
-    )
+    ExternalProject_Add(
+        llama-cpp
+        GIT_REPOSITORY "https://github.com/ggerganov/llama.cpp"
+        GIT_TAG f5ef5cf
+        CMAKE_ARGS -DBUILD_SHARED_LIBS=ON "-DCMAKE_INSTALL_PREFIX=${SKBUILD_PLATLIB_DIR}/llama_cpp/data"
+        USES_TERMINAL_DOWNLOAD 1
+        USES_TERMINAL_CONFIGURE 1
+        USES_TERMINAL_BUILD 1)
 endif()
diff --git a/llama_cpp/llama_cpp.py b/llama_cpp/llama_cpp.py
index d2a35c13f..e646e3385 100644
--- a/llama_cpp/llama_cpp.py
+++ b/llama_cpp/llama_cpp.py
@@ -25,7 +25,7 @@
 # Load the library
 def _load_shared_library(lib_base_name: str):
     # Construct the paths to the possible shared library names
-    _base_path = pathlib.Path(os.path.abspath(os.path.dirname(__file__)))
+    _base_path = pathlib.Path(os.path.abspath(os.path.dirname(__file__))) / "data" / "lib"
     # Searching for the library in the current directory under the name "libllama" (default name
     # for llamacpp) and "llama" (default name for this repo)
     _lib_paths: List[pathlib.Path] = []
diff --git a/vendor/llama.cpp b/vendor/llama.cpp
deleted file mode 160000
index f5ef5cfb1..000000000
--- a/vendor/llama.cpp
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit f5ef5cfb18148131fcf45bdd2331f0db5ab7c3d0