From 0877741b0350d200be7f1e6cca2780a25ee29cd0 Mon Sep 17 00:00:00 2001 From: Ashley Kleynhans Date: Sat, 20 Apr 2024 00:04:40 +0200 Subject: [PATCH 1/8] Bumped ExLlamaV2 to version 0.0.19 to resolve #5851 (#5880) --- requirements.txt | 10 +++++----- requirements_amd.txt | 6 +++--- requirements_amd_noavx2.txt | 6 +++--- requirements_apple_intel.txt | 2 +- requirements_apple_silicon.txt | 2 +- requirements_noavx2.txt | 10 +++++----- 6 files changed, 18 insertions(+), 18 deletions(-) diff --git a/requirements.txt b/requirements.txt index c5ca17b5f4..1e6c476323 100644 --- a/requirements.txt +++ b/requirements.txt @@ -56,11 +56,11 @@ https://github.com/jllllll/AutoGPTQ/releases/download/v0.6.0/auto_gptq-0.6.0+cu1 https://github.com/jllllll/AutoGPTQ/releases/download/v0.6.0/auto_gptq-0.6.0+cu121-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10" https://github.com/jllllll/AutoGPTQ/releases/download/v0.6.0/auto_gptq-0.6.0+cu121-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11" https://github.com/jllllll/AutoGPTQ/releases/download/v0.6.0/auto_gptq-0.6.0+cu121-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10" -https://github.com/turboderp/exllamav2/releases/download/v0.0.18/exllamav2-0.0.18+cu121-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11" -https://github.com/turboderp/exllamav2/releases/download/v0.0.18/exllamav2-0.0.18+cu121-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10" -https://github.com/turboderp/exllamav2/releases/download/v0.0.18/exllamav2-0.0.18+cu121-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11" -https://github.com/turboderp/exllamav2/releases/download/v0.0.18/exllamav2-0.0.18+cu121-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10" -https://github.com/turboderp/exllamav2/releases/download/v0.0.18/exllamav2-0.0.18-py3-none-any.whl; platform_system == "Linux" and platform_machine != "x86_64" +https://github.com/turboderp/exllamav2/releases/download/v0.0.19/exllamav2-0.0.19+cu121-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11" +https://github.com/turboderp/exllamav2/releases/download/v0.0.19/exllamav2-0.0.19+cu121-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10" +https://github.com/turboderp/exllamav2/releases/download/v0.0.19/exllamav2-0.0.19+cu121-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11" +https://github.com/turboderp/exllamav2/releases/download/v0.0.19/exllamav2-0.0.19+cu121-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10" +https://github.com/turboderp/exllamav2/releases/download/v0.0.19/exllamav2-0.0.19-py3-none-any.whl; platform_system == "Linux" and platform_machine != "x86_64" https://github.com/oobabooga/flash-attention/releases/download/v2.5.6/flash_attn-2.5.6+cu122torch2.2.0cxx11abiFALSE-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11" https://github.com/oobabooga/flash-attention/releases/download/v2.5.6/flash_attn-2.5.6+cu122torch2.2.0cxx11abiFALSE-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10" 
https://github.com/Dao-AILab/flash-attention/releases/download/v2.5.6/flash_attn-2.5.6+cu122torch2.2cxx11abiFALSE-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11" diff --git a/requirements_amd.txt b/requirements_amd.txt index 3bac470dc3..9bed95053f 100644 --- a/requirements_amd.txt +++ b/requirements_amd.txt @@ -42,9 +42,9 @@ https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/ro https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/rocm/llama_cpp_python_cuda-0.2.61+rocm5.6.1-cp310-cp310-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10" https://github.com/jllllll/AutoGPTQ/releases/download/v0.6.0/auto_gptq-0.6.0+rocm5.6-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11" https://github.com/jllllll/AutoGPTQ/releases/download/v0.6.0/auto_gptq-0.6.0+rocm5.6-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10" -https://github.com/turboderp/exllamav2/releases/download/v0.0.18/exllamav2-0.0.18+rocm5.6-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11" -https://github.com/turboderp/exllamav2/releases/download/v0.0.18/exllamav2-0.0.18+rocm5.6-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10" -https://github.com/turboderp/exllamav2/releases/download/v0.0.18/exllamav2-0.0.18-py3-none-any.whl; platform_system != "Darwin" and platform_machine != "x86_64" +https://github.com/turboderp/exllamav2/releases/download/v0.0.19/exllamav2-0.0.19+rocm5.6-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11" +https://github.com/turboderp/exllamav2/releases/download/v0.0.19/exllamav2-0.0.19+rocm5.6-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10" +https://github.com/turboderp/exllamav2/releases/download/v0.0.19/exllamav2-0.0.19-py3-none-any.whl; platform_system != "Darwin" and platform_machine != "x86_64" https://github.com/jllllll/GPTQ-for-LLaMa-CUDA/releases/download/0.1.1/gptq_for_llama-0.1.1+rocm5.6-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11" https://github.com/jllllll/GPTQ-for-LLaMa-CUDA/releases/download/0.1.1/gptq_for_llama-0.1.1+rocm5.6-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10" https://github.com/casper-hansen/AutoAWQ/releases/download/v0.2.3/autoawq-0.2.3+rocm561-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11" diff --git a/requirements_amd_noavx2.txt b/requirements_amd_noavx2.txt index 656d1e35e3..1b5f81bcd6 100644 --- a/requirements_amd_noavx2.txt +++ b/requirements_amd_noavx2.txt @@ -40,9 +40,9 @@ https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cp # AMD wheels https://github.com/jllllll/AutoGPTQ/releases/download/v0.6.0/auto_gptq-0.6.0+rocm5.6-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11" https://github.com/jllllll/AutoGPTQ/releases/download/v0.6.0/auto_gptq-0.6.0+rocm5.6-cp310-cp310-linux_x86_64.whl; 
platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10" -https://github.com/turboderp/exllamav2/releases/download/v0.0.18/exllamav2-0.0.18+rocm5.6-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11" -https://github.com/turboderp/exllamav2/releases/download/v0.0.18/exllamav2-0.0.18+rocm5.6-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10" -https://github.com/turboderp/exllamav2/releases/download/v0.0.18/exllamav2-0.0.18-py3-none-any.whl; platform_system != "Darwin" and platform_machine != "x86_64" +https://github.com/turboderp/exllamav2/releases/download/v0.0.19/exllamav2-0.0.19+rocm5.6-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11" +https://github.com/turboderp/exllamav2/releases/download/v0.0.19/exllamav2-0.0.19+rocm5.6-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10" +https://github.com/turboderp/exllamav2/releases/download/v0.0.19/exllamav2-0.0.19-py3-none-any.whl; platform_system != "Darwin" and platform_machine != "x86_64" https://github.com/jllllll/GPTQ-for-LLaMa-CUDA/releases/download/0.1.1/gptq_for_llama-0.1.1+rocm5.6-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11" https://github.com/jllllll/GPTQ-for-LLaMa-CUDA/releases/download/0.1.1/gptq_for_llama-0.1.1+rocm5.6-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10" https://github.com/casper-hansen/AutoAWQ/releases/download/v0.2.3/autoawq-0.2.3+rocm561-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11" diff --git a/requirements_apple_intel.txt b/requirements_apple_intel.txt index fc46b82f36..9b95342e48 100644 --- a/requirements_apple_intel.txt +++ b/requirements_apple_intel.txt @@ -38,4 +38,4 @@ https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/me https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.61-cp310-cp310-macosx_12_0_x86_64.whl; platform_system == "Darwin" and platform_release >= "21.0.0" and platform_release < "22.0.0" and python_version == "3.10" https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.61-cp311-cp311-macosx_14_0_x86_64.whl; platform_system == "Darwin" and platform_release >= "23.0.0" and platform_release < "24.0.0" and python_version == "3.11" https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.61-cp310-cp310-macosx_14_0_x86_64.whl; platform_system == "Darwin" and platform_release >= "23.0.0" and platform_release < "24.0.0" and python_version == "3.10" -https://github.com/turboderp/exllamav2/releases/download/v0.0.18/exllamav2-0.0.18-py3-none-any.whl +https://github.com/turboderp/exllamav2/releases/download/v0.0.19/exllamav2-0.0.19-py3-none-any.whl diff --git a/requirements_apple_silicon.txt b/requirements_apple_silicon.txt index d694f28836..b0e14509dc 100644 --- a/requirements_apple_silicon.txt +++ b/requirements_apple_silicon.txt @@ -40,4 +40,4 @@ https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/me 
https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.61-cp310-cp310-macosx_13_0_arm64.whl; platform_system == "Darwin" and platform_release >= "22.0.0" and platform_release < "23.0.0" and python_version == "3.10" https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.61-cp311-cp311-macosx_14_0_arm64.whl; platform_system == "Darwin" and platform_release >= "23.0.0" and platform_release < "24.0.0" and python_version == "3.11" https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.61-cp310-cp310-macosx_14_0_arm64.whl; platform_system == "Darwin" and platform_release >= "23.0.0" and platform_release < "24.0.0" and python_version == "3.10" -https://github.com/turboderp/exllamav2/releases/download/v0.0.18/exllamav2-0.0.18-py3-none-any.whl +https://github.com/turboderp/exllamav2/releases/download/v0.0.19/exllamav2-0.0.19-py3-none-any.whl diff --git a/requirements_noavx2.txt b/requirements_noavx2.txt index 8f1671719e..fb76278f86 100644 --- a/requirements_noavx2.txt +++ b/requirements_noavx2.txt @@ -56,11 +56,11 @@ https://github.com/jllllll/AutoGPTQ/releases/download/v0.6.0/auto_gptq-0.6.0+cu1 https://github.com/jllllll/AutoGPTQ/releases/download/v0.6.0/auto_gptq-0.6.0+cu121-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10" https://github.com/jllllll/AutoGPTQ/releases/download/v0.6.0/auto_gptq-0.6.0+cu121-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11" https://github.com/jllllll/AutoGPTQ/releases/download/v0.6.0/auto_gptq-0.6.0+cu121-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10" -https://github.com/turboderp/exllamav2/releases/download/v0.0.18/exllamav2-0.0.18+cu121-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11" -https://github.com/turboderp/exllamav2/releases/download/v0.0.18/exllamav2-0.0.18+cu121-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10" -https://github.com/turboderp/exllamav2/releases/download/v0.0.18/exllamav2-0.0.18+cu121-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11" -https://github.com/turboderp/exllamav2/releases/download/v0.0.18/exllamav2-0.0.18+cu121-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10" -https://github.com/turboderp/exllamav2/releases/download/v0.0.18/exllamav2-0.0.18-py3-none-any.whl; platform_system == "Linux" and platform_machine != "x86_64" +https://github.com/turboderp/exllamav2/releases/download/v0.0.19/exllamav2-0.0.19+cu121-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11" +https://github.com/turboderp/exllamav2/releases/download/v0.0.19/exllamav2-0.0.19+cu121-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10" +https://github.com/turboderp/exllamav2/releases/download/v0.0.19/exllamav2-0.0.19+cu121-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11" +https://github.com/turboderp/exllamav2/releases/download/v0.0.19/exllamav2-0.0.19+cu121-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10" 
+https://github.com/turboderp/exllamav2/releases/download/v0.0.19/exllamav2-0.0.19-py3-none-any.whl; platform_system == "Linux" and platform_machine != "x86_64"
https://github.com/oobabooga/flash-attention/releases/download/v2.5.6/flash_attn-2.5.6+cu122torch2.2.0cxx11abiFALSE-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
https://github.com/oobabooga/flash-attention/releases/download/v2.5.6/flash_attn-2.5.6+cu122torch2.2.0cxx11abiFALSE-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
https://github.com/Dao-AILab/flash-attention/releases/download/v2.5.6/flash_attn-2.5.6+cu122torch2.2cxx11abiFALSE-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"

From 9b623b8a787de6857f4fb98f8ad4c6efe2962a9b Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Tue, 23 Apr 2024 23:17:05 -0300
Subject: [PATCH 2/8] Bump llama-cpp-python to 0.2.64, use official wheels (#5921)

---
 README.md | 14 +++---
 docs/04 - Model Tab.md | 4 +-
 modules/llama_cpp_python_hijack.py | 18 +-------
 modules/llamacpp_hf.py | 31 ++-----------
 modules/llamacpp_model.py | 35 +++------------
 modules/loaders.py | 4 --
 modules/shared.py | 6 +--
 one_click.py | 35 +++------------
 requirements.txt | 22 +++------
 requirements_amd.txt | 12 +++--
 requirements_amd_noavx2.txt | 49 --------------------
 requirements_apple_intel.txt | 12 ++---
 requirements_apple_silicon.txt | 16 +++----
 requirements_cpu_only.txt | 10 ++---
 requirements_cpu_only_noavx2.txt | 38 ----------------
 requirements_noavx2.txt | 72 ------------------------------
 16 files changed, 53 insertions(+), 325 deletions(-)
 delete mode 100644 requirements_amd_noavx2.txt
 delete mode 100644 requirements_cpu_only_noavx2.txt
 delete mode 100644 requirements_noavx2.txt

diff --git a/README.md b/README.md
index 9f3e81bdba..04c4cbb176 100644
--- a/README.md
+++ b/README.md
@@ -107,14 +107,11 @@ pip install -r
Requirements file to use:
-| GPU | CPU | requirements file to use |
+| GPU | requirements file to use |
|--------|---------|---------|
-| NVIDIA | has AVX2 | `requirements.txt` |
-| NVIDIA | no AVX2 | `requirements_noavx2.txt` |
-| AMD | has AVX2 | `requirements_amd.txt` |
-| AMD | no AVX2 | `requirements_amd_noavx2.txt` |
-| CPU only | has AVX2 | `requirements_cpu_only.txt` |
-| CPU only | no AVX2 | `requirements_cpu_only_noavx2.txt` |
+| NVIDIA | `requirements.txt` |
+| AMD | `requirements_amd.txt` |
+| CPU only | `requirements_cpu_only.txt` |
| Apple | Intel | `requirements_apple_intel.txt` |
| Apple | Apple Silicon | `requirements_apple_silicon.txt` |
@@ -132,7 +129,7 @@ Then browse to
##### AMD GPU on Windows
-1) Use `requirements_cpu_only.txt` or `requirements_cpu_only_noavx2.txt` in the command above.
+1) Use `requirements_cpu_only.txt` in the command above.
2) Manually install llama-cpp-python using the appropriate command for your hardware: [Installation from PyPI](https://github.com/abetlen/llama-cpp-python#installation-with-hardware-acceleration).
* Use the `LLAMA_HIPBLAS=on` toggle.
@@ -255,7 +252,6 @@ List of command-line flags
| Flag | Description |
|-------------|-------------|
-| `--tensorcores` | Use llama-cpp-python compiled with tensor cores support. This increases performance on RTX cards. NVIDIA only. |
| `--n_ctx N_CTX` | Size of the prompt context. |
| `--threads` | Number of threads to use. |
| `--threads-batch THREADS_BATCH` | Number of threads to use for batches/prompt processing. |
diff --git a/docs/04 - Model Tab.md b/docs/04 - Model Tab.md
index 7c168e891b..cbc08843c7 100644
--- a/docs/04 - Model Tab.md
+++ b/docs/04 - Model Tab.md
@@ -21,7 +21,7 @@ Options:
* **alpha_value**: Used to extend the context length of a model with a minor loss in quality. I have measured 1.75 to be optimal for 1.5x context, and 2.5 for 2x context. That is, with alpha = 2.5 you can make a model with 4096 context length go to 8192 context length.
* **rope_freq_base**: Originally another way to write "alpha_value", it ended up becoming a necessary parameter for some models like CodeLlama, which was fine-tuned with this set to 1000000 and hence needs to be loaded with it set to 1000000 as well.
* **compress_pos_emb**: The first and original context-length extension method, discovered by [kaiokendev](https://kaiokendev.github.io/til). When set to 2, the context length is doubled, 3 and it's tripled, etc. It should only be used for models that have been fine-tuned with this parameter set to different than 1. For models that have not been tuned to have greater context length, alpha_value will lead to a smaller accuracy loss.
-* **cpu**: Loads the model in CPU mode using Pytorch. The model will be loaded in 32-bit precision, so a lot of RAM will be used. CPU inference with transformers is older than llama.cpp and it works, but it's a lot slower. Note: this parameter has a different interpretation in the llama.cpp loader (see below).
+* **cpu**: Loads the model in CPU mode using Pytorch. The model will be loaded in 32-bit precision, so a lot of RAM will be used. CPU inference with transformers is older than llama.cpp and it works, but it's a lot slower.
* **load-in-8bit**: Load the model in 8-bit precision using bitsandbytes. The 8-bit kernel in that library has been optimized for training and not inference, so load-in-8bit is slower than load-in-4bit (but more accurate).
* **bf16**: Use bfloat16 precision instead of float16 (the default). Only applies when quantization is not used.
* **auto-devices**: When checked, the backend will try to guess a reasonable value for "gpu-memory" to allow you to load a model with CPU offloading. I recommend just setting "gpu-memory" manually instead. This parameter is also needed for loading GPTQ models, in which case it needs to be checked before loading the model.
@@ -84,9 +84,7 @@ Example: https://huggingface.co/TheBloke/Llama-2-7b-Chat-GGUF
* **n_batch**: Batch size for prompt processing. Higher values are supposed to make generation faster, but I have never obtained any benefit from changing this value.
* **threads**: Number of threads. Recommended value: your number of physical cores.
* **threads_batch**: Number of threads for batch processing. Recommended value: your total number of cores (physical + virtual).
-* **tensorcores**: Use llama.cpp compiled with "tensor cores" support, which improves performance on NVIDIA RTX cards in most cases.
* **streamingllm**: Experimental feature to avoid re-evaluating the entire prompt when part of it is removed, for instance, when you hit the context length for the model in chat mode and an old message is removed.
-* **cpu**: Force a version of llama.cpp compiled without GPU acceleration to be used. Can usually be ignored. Only set this if you want to use CPU only and llama.cpp doesn't work otherwise.
* **no_mul_mat_q**: Disable the mul_mat_q kernel. This kernel usually improves generation speed significantly. This option to disable it is included in case it doesn't work on some system.
* **no-mmap**: Loads the model into memory at once, possibly preventing I/O operations later on at the cost of a longer load time.
* **mlock**: Force the system to keep the model in RAM rather than swapping or compressing (no idea what this means, never used it).
diff --git a/modules/llama_cpp_python_hijack.py b/modules/llama_cpp_python_hijack.py
index eb23177f9c..573f79af5f 100644
--- a/modules/llama_cpp_python_hijack.py
+++ b/modules/llama_cpp_python_hijack.py
@@ -1,25 +1,11 @@
from typing import Sequence
+import llama_cpp
from tqdm import tqdm
from modules import shared
from modules.cache_utils import process_llamacpp_cache
-try:
-    import llama_cpp
-except:
-    llama_cpp = None
-
-try:
-    import llama_cpp_cuda
-except:
-    llama_cpp_cuda = None
-
-try:
-    import llama_cpp_cuda_tensorcores
-except:
-    llama_cpp_cuda_tensorcores = None
-
def eval_with_progress(self, tokens: Sequence[int]):
    """
@@ -81,7 +67,7 @@ def my_generate(self, *args, **kwargs):
    lib.Llama.generate = my_generate
-for lib in [llama_cpp, llama_cpp_cuda, llama_cpp_cuda_tensorcores]:
+for lib in [llama_cpp]:
    if lib is not None:
        lib.Llama.eval = eval_with_progress
        monkey_patch_generate(lib)
diff --git a/modules/llamacpp_hf.py b/modules/llamacpp_hf.py
index 1bfd667dbc..75765b6fae 100644
--- a/modules/llamacpp_hf.py
+++ b/modules/llamacpp_hf.py
@@ -2,6 +2,7 @@
from pathlib import Path
from typing import Any, Dict, Optional, Union
+import llama_cpp
import torch
from torch.nn import CrossEntropyLoss
from transformers import GenerationConfig, PretrainedConfig, PreTrainedModel
@@ -10,32 +11,6 @@
from modules import RoPE, llama_cpp_python_hijack, shared
from modules.logging_colors import logger
-try:
-    import llama_cpp
-except:
-    llama_cpp = None
-
-try:
-    import llama_cpp_cuda
-except:
-    llama_cpp_cuda = None
-
-try:
-    import llama_cpp_cuda_tensorcores
-except:
-    llama_cpp_cuda_tensorcores = None
-
-
-def llama_cpp_lib():
-    if shared.args.cpu and llama_cpp is not None:
-        return llama_cpp
-    elif shared.args.tensorcores and llama_cpp_cuda_tensorcores is not None:
-        return llama_cpp_cuda_tensorcores
-    elif llama_cpp_cuda is not None:
-        return llama_cpp_cuda
-    else:
-        return llama_cpp
-
class LlamacppHF(PreTrainedModel):
    def __init__(self, model, path):
@@ -57,7 +32,7 @@ def __init__(self, model, path):
            'n_tokens': self.model.n_tokens,
            'input_ids': self.model.input_ids.copy(),
            'scores': self.model.scores.copy(),
-            'ctx': llama_cpp_lib().llama_new_context_with_model(model.model, model.context_params)
+            'ctx': llama_cpp.llama_new_context_with_model(model.model, model.context_params)
        }
    def _validate_model_class(self):
@@ -220,7 +195,7 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P
            'split_mode': 1 if not shared.args.row_split else 2
        }
-        Llama = llama_cpp_lib().Llama
+        Llama = llama_cpp.Llama
        model = Llama(**params)
        return LlamacppHF(model, model_file)
diff --git a/modules/llamacpp_model.py b/modules/llamacpp_model.py
index 8bc9b7cb0b..e09447679b 100644
--- a/modules/llamacpp_model.py
+++ b/modules/llamacpp_model.py
@@ -1,6 +1,7 @@
import re
from functools import partial
+import llama_cpp
import numpy as np
import torch
from modules import RoPE, llama_cpp_python_hijack, shared
from modules.logging_colors import logger
from modules.text_generation import get_max_prompt_length
-try:
-    import llama_cpp
-except:
-    llama_cpp = None
-
-try:
-    import llama_cpp_cuda
-except:
-    llama_cpp_cuda = None
-
-try:
-    import llama_cpp_cuda_tensorcores
-except:
-    llama_cpp_cuda_tensorcores = None
-
-
-def llama_cpp_lib():
-    if shared.args.cpu and llama_cpp is not None:
-        return llama_cpp
-    elif shared.args.tensorcores and llama_cpp_cuda_tensorcores is not None:
-        return llama_cpp_cuda_tensorcores
-    elif llama_cpp_cuda is not None:
-        return llama_cpp_cuda
-    else:
-        return llama_cpp
-
def ban_eos_logits_processor(eos_token, input_ids, logits):
    logits[eos_token] = -float('inf')
@@ -60,8 +35,8 @@ def __del__(self):
    @classmethod
    def from_pretrained(self, path):
-        Llama = llama_cpp_lib().Llama
-        LlamaCache = llama_cpp_lib().LlamaCache
+        Llama = llama_cpp.Llama
+        LlamaCache = llama_cpp.LlamaCache
        result = self()
        cache_capacity = 0
@@ -126,12 +101,12 @@ def load_grammar(self, string):
        if string != self.grammar_string:
            self.grammar_string = string
            if string.strip() != '':
-                self.grammar = llama_cpp_lib().LlamaGrammar.from_string(string)
+                self.grammar = llama_cpp.LlamaGrammar.from_string(string)
            else:
                self.grammar = None
    def generate(self, prompt, state, callback=None):
-        LogitsProcessorList = llama_cpp_lib().LogitsProcessorList
+        LogitsProcessorList = llama_cpp.LogitsProcessorList
        prompt = prompt if type(prompt) is str else prompt.decode()
        # Handle truncation
diff --git a/modules/loaders.py b/modules/loaders.py
index 234773397d..623d27d184 100644
--- a/modules/loaders.py
+++ b/modules/loaders.py
@@ -41,11 +41,9 @@
        'alpha_value',
        'rope_freq_base',
        'compress_pos_emb',
-        'cpu',
        'numa',
        'no_offload_kqv',
        'row_split',
-        'tensorcores',
        'streaming_llm',
        'attention_sink_size',
    ],
@@ -62,7 +60,6 @@
        'alpha_value',
        'rope_freq_base',
        'compress_pos_emb',
-        'cpu',
        'numa',
        'cfg_cache',
        'trust_remote_code',
@@ -70,7 +67,6 @@
        'logits_all',
        'no_offload_kqv',
        'row_split',
-        'tensorcores',
        'streaming_llm',
        'attention_sink_size',
        'llamacpp_HF_info',
diff --git a/modules/shared.py b/modules/shared.py
index 431e624963..0c0db85854 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -113,7 +113,6 @@
# llama.cpp
group = parser.add_argument_group('llama.cpp')
-group.add_argument('--tensorcores', action='store_true', help='Use llama-cpp-python compiled with tensor cores support. This increases performance on RTX cards. NVIDIA only.')
group.add_argument('--n_ctx', type=int, default=2048, help='Size of the prompt context.')
group.add_argument('--threads', type=int, default=0, help='Number of threads to use.')
group.add_argument('--threads-batch', type=int, default=0, help='Number of threads to use for batches/prompt processing.')
@@ -204,7 +203,8 @@
group.add_argument('--multimodal-pipeline', type=str, default=None, help='The multimodal pipeline to use. Examples: llava-7b, llava-13b.')
# Deprecated parameters
-# group = parser.add_argument_group('Deprecated')
+group = parser.add_argument_group('Deprecated')
+group.add_argument('--tensorcores', action='store_true', help='DEPRECATED')
args = parser.parse_args()
args_defaults = parser.parse_args([])
@@ -214,7 +214,7 @@
    if hasattr(args, arg):
        provided_arguments.append(arg)
-deprecated_args = []
+deprecated_args = ['tensorcores']
def do_cmd_flags_warnings():
diff --git a/one_click.py b/one_click.py
index 0d543e303f..96f241070d 100644
--- a/one_click.py
+++ b/one_click.py
@@ -58,32 +58,6 @@ def is_x86_64():
    return platform.machine() == "x86_64"
-def cpu_has_avx2():
-    try:
-        import cpuinfo
-
-        info = cpuinfo.get_cpu_info()
-        if 'avx2' in info['flags']:
-            return True
-        else:
-            return False
-    except:
-        return True
-
-
-def cpu_has_amx():
-    try:
-        import cpuinfo
-
-        info = cpuinfo.get_cpu_info()
-        if 'amx' in info['flags']:
-            return True
-        else:
-            return False
-    except:
-        return True
-
-
def torch_version():
    site_packages_path = None
    for sitedir in site.getsitepackages():
@@ -305,7 +279,7 @@ def install_webui():
    # Install Git and then Pytorch
    print_big_message("Installing PyTorch.")
-    run_cmd(f"conda install -y -k ninja git && {install_pytorch} && python -m pip install py-cpuinfo==9.0.0", assert_success=True, environment=True)
+    run_cmd(f"conda install -y -k ninja git && {install_pytorch}", assert_success=True, environment=True)
    if selected_gpu == "INTEL":
        # Install oneAPI dependencies via conda
@@ -372,13 +346,13 @@ def update_requirements(initial_installation=False, pull=True):
    is_cpu = '+cpu' in torver # 2.0.1+cpu
    if is_rocm:
-        base_requirements = "requirements_amd" + ("_noavx2" if not cpu_has_avx2() else "") + ".txt"
+        base_requirements = "requirements_amd.txt"
    elif is_cpu or is_intel:
-        base_requirements = "requirements_cpu_only" + ("_noavx2" if not cpu_has_avx2() else "") + ".txt"
+        base_requirements = "requirements_cpu_only.txt"
    elif is_macos():
        base_requirements = "requirements_apple_" + ("intel" if is_x86_64() else "silicon") + ".txt"
    else:
-        base_requirements = "requirements" + ("_noavx2" if not cpu_has_avx2() else "") + ".txt"
+        base_requirements = "requirements.txt"
    requirements_file = base_requirements
@@ -389,6 +363,7 @@ def update_requirements(initial_installation=False, pull=True):
    textgen_requirements = open(requirements_file).read().splitlines()
    if is_cuda118:
        textgen_requirements = [req.replace('+cu121', '+cu118').replace('+cu122', '+cu118') for req in textgen_requirements]
+        textgen_requirements = [req for req in textgen_requirements if '-cu121' not in req]
    if is_windows() and is_cuda118: # No flash-attention on Windows for CUDA 11
        textgen_requirements = [req for req in textgen_requirements if 'oobabooga/flash-attention' not in req]
diff --git a/requirements.txt b/requirements.txt
index 1e6c476323..919132d5d1 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -33,23 +33,11 @@ flask_cloudflared==0.0.14
sse-starlette==1.6.5
tiktoken
-# llama-cpp-python (CPU only, AVX2)
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.61+cpuavx2-cp311-cp311-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.61+cpuavx2-cp310-cp310-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.61+cpuavx2-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11" -https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.61+cpuavx2-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10" - -# llama-cpp-python (CUDA, no tensor cores) -https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.2.61+cu121-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11" -https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.2.61+cu121-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10" -https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.2.61+cu121-cp311-cp311-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11" -https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.2.61+cu121-cp310-cp310-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10" - -# llama-cpp-python (CUDA, tensor cores) -https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda_tensorcores-0.2.61+cu121-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11" -https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda_tensorcores-0.2.61+cu121-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10" -https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda_tensorcores-0.2.61+cu121-cp311-cp311-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11" -https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda_tensorcores-0.2.61+cu121-cp310-cp310-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10" +# llama-cpp-python (CUDA) +https://github.com/abetlen/llama-cpp-python/releases/download/v0.2.64-cu121/llama_cpp_python-0.2.64-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11" +https://github.com/abetlen/llama-cpp-python/releases/download/v0.2.64-cu121/llama_cpp_python-0.2.64-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11" +https://github.com/abetlen/llama-cpp-python/releases/download/v0.2.64-cu121/llama_cpp_python-0.2.64-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10" +https://github.com/abetlen/llama-cpp-python/releases/download/v0.2.64-cu121/llama_cpp_python-0.2.64-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10" # CUDA wheels https://github.com/jllllll/AutoGPTQ/releases/download/v0.6.0/auto_gptq-0.6.0+cu121-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11" diff --git a/requirements_amd.txt b/requirements_amd.txt index 9bed95053f..e9482a91c9 100644 --- a/requirements_amd.txt 
+++ b/requirements_amd.txt @@ -31,15 +31,13 @@ flask_cloudflared==0.0.14 sse-starlette==1.6.5 tiktoken -# llama-cpp-python (CPU only, AVX2) -https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.61+cpuavx2-cp311-cp311-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11" -https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.61+cpuavx2-cp310-cp310-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10" -https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.61+cpuavx2-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11" -https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.61+cpuavx2-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10" +# llama-cpp-python (CPU only) +https://github.com/abetlen/llama-cpp-python/releases/download/v0.2.64/llama_cpp_python-0.2.64-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11" +https://github.com/abetlen/llama-cpp-python/releases/download/v0.2.64/llama_cpp_python-0.2.64-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11" +https://github.com/abetlen/llama-cpp-python/releases/download/v0.2.64/llama_cpp_python-0.2.64-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10" +https://github.com/abetlen/llama-cpp-python/releases/download/v0.2.64/llama_cpp_python-0.2.64-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10" # AMD wheels -https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/rocm/llama_cpp_python_cuda-0.2.61+rocm5.6.1-cp311-cp311-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11" -https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/rocm/llama_cpp_python_cuda-0.2.61+rocm5.6.1-cp310-cp310-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10" https://github.com/jllllll/AutoGPTQ/releases/download/v0.6.0/auto_gptq-0.6.0+rocm5.6-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11" https://github.com/jllllll/AutoGPTQ/releases/download/v0.6.0/auto_gptq-0.6.0+rocm5.6-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10" https://github.com/turboderp/exllamav2/releases/download/v0.0.19/exllamav2-0.0.19+rocm5.6-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11" diff --git a/requirements_amd_noavx2.txt b/requirements_amd_noavx2.txt deleted file mode 100644 index 1b5f81bcd6..0000000000 --- a/requirements_amd_noavx2.txt +++ /dev/null @@ -1,49 +0,0 @@ -accelerate==0.27.* -colorama -datasets -einops -gradio==4.26.* -hqq==0.1.5 -jinja2==3.1.2 -lm_eval==0.3.0 -markdown -numba==0.59.* -numpy==1.26.* -optimum==1.17.* -pandas -peft==0.8.* -Pillow>=9.5.0 -psutil -pyyaml -requests -rich -safetensors==0.4.* -scipy -sentencepiece -tensorboard -transformers==4.40.* -tqdm -wandb - -# API -SpeechRecognition==3.10.0 -flask_cloudflared==0.0.14 
-sse-starlette==1.6.5 -tiktoken - -# llama-cpp-python (CPU only, no AVX2) -https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.61+cpuavx-cp311-cp311-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11" -https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.61+cpuavx-cp310-cp310-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10" -https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.61+cpuavx-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11" -https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.61+cpuavx-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10" - -# AMD wheels -https://github.com/jllllll/AutoGPTQ/releases/download/v0.6.0/auto_gptq-0.6.0+rocm5.6-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11" -https://github.com/jllllll/AutoGPTQ/releases/download/v0.6.0/auto_gptq-0.6.0+rocm5.6-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10" -https://github.com/turboderp/exllamav2/releases/download/v0.0.19/exllamav2-0.0.19+rocm5.6-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11" -https://github.com/turboderp/exllamav2/releases/download/v0.0.19/exllamav2-0.0.19+rocm5.6-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10" -https://github.com/turboderp/exllamav2/releases/download/v0.0.19/exllamav2-0.0.19-py3-none-any.whl; platform_system != "Darwin" and platform_machine != "x86_64" -https://github.com/jllllll/GPTQ-for-LLaMa-CUDA/releases/download/0.1.1/gptq_for_llama-0.1.1+rocm5.6-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11" -https://github.com/jllllll/GPTQ-for-LLaMa-CUDA/releases/download/0.1.1/gptq_for_llama-0.1.1+rocm5.6-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10" -https://github.com/casper-hansen/AutoAWQ/releases/download/v0.2.3/autoawq-0.2.3+rocm561-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11" -https://github.com/casper-hansen/AutoAWQ/releases/download/v0.2.3/autoawq-0.2.3+rocm561-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10" diff --git a/requirements_apple_intel.txt b/requirements_apple_intel.txt index 9b95342e48..96531184a9 100644 --- a/requirements_apple_intel.txt +++ b/requirements_apple_intel.txt @@ -32,10 +32,10 @@ sse-starlette==1.6.5 tiktoken # Mac wheels -https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.61-cp311-cp311-macosx_11_0_x86_64.whl; platform_system == "Darwin" and platform_release >= "20.0.0" and platform_release < "21.0.0" and python_version == "3.11" -https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.61-cp310-cp310-macosx_11_0_x86_64.whl; platform_system == "Darwin" and platform_release >= "20.0.0" and 
platform_release < "21.0.0" and python_version == "3.10" -https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.61-cp311-cp311-macosx_12_0_x86_64.whl; platform_system == "Darwin" and platform_release >= "21.0.0" and platform_release < "22.0.0" and python_version == "3.11" -https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.61-cp310-cp310-macosx_12_0_x86_64.whl; platform_system == "Darwin" and platform_release >= "21.0.0" and platform_release < "22.0.0" and python_version == "3.10" -https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.61-cp311-cp311-macosx_14_0_x86_64.whl; platform_system == "Darwin" and platform_release >= "23.0.0" and platform_release < "24.0.0" and python_version == "3.11" -https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.61-cp310-cp310-macosx_14_0_x86_64.whl; platform_system == "Darwin" and platform_release >= "23.0.0" and platform_release < "24.0.0" and python_version == "3.10" +https://github.com/abetlen/llama-cpp-python/releases/download/v0.2.64-metal/llama_cpp_python-0.2.64-cp311-cp311-macosx_11_0_x86_64.whl; platform_system == "Darwin" and platform_release >= "20.0.0" and platform_release < "21.0.0" and python_version == "3.11" +https://github.com/abetlen/llama-cpp-python/releases/download/v0.2.64-metal/llama_cpp_python-0.2.64-cp310-cp310-macosx_11_0_x86_64.whl; platform_system == "Darwin" and platform_release >= "20.0.0" and platform_release < "21.0.0" and python_version == "3.10" +https://github.com/abetlen/llama-cpp-python/releases/download/v0.2.64-metal/llama_cpp_python-0.2.64-cp311-cp311-macosx_12_0_x86_64.whl; platform_system == "Darwin" and platform_release >= "21.0.0" and platform_release < "22.0.0" and python_version == "3.11" +https://github.com/abetlen/llama-cpp-python/releases/download/v0.2.64-metal/llama_cpp_python-0.2.64-cp310-cp310-macosx_12_0_x86_64.whl; platform_system == "Darwin" and platform_release >= "21.0.0" and platform_release < "22.0.0" and python_version == "3.10" +https://github.com/abetlen/llama-cpp-python/releases/download/v0.2.64-metal/llama_cpp_python-0.2.64-cp311-cp311-macosx_14_0_x86_64.whl; platform_system == "Darwin" and platform_release >= "23.0.0" and platform_release < "24.0.0" and python_version == "3.11" +https://github.com/abetlen/llama-cpp-python/releases/download/v0.2.64-metal/llama_cpp_python-0.2.64-cp310-cp310-macosx_14_0_x86_64.whl; platform_system == "Darwin" and platform_release >= "23.0.0" and platform_release < "24.0.0" and python_version == "3.10" https://github.com/turboderp/exllamav2/releases/download/v0.0.19/exllamav2-0.0.19-py3-none-any.whl diff --git a/requirements_apple_silicon.txt b/requirements_apple_silicon.txt index b0e14509dc..c529b04875 100644 --- a/requirements_apple_silicon.txt +++ b/requirements_apple_silicon.txt @@ -32,12 +32,12 @@ sse-starlette==1.6.5 tiktoken # Mac wheels -https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.61-cp311-cp311-macosx_11_0_arm64.whl; platform_system == "Darwin" and platform_release >= "20.0.0" and platform_release < "21.0.0" and python_version == "3.11" -https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.61-cp310-cp310-macosx_11_0_arm64.whl; platform_system == "Darwin" and platform_release >= "20.0.0" and platform_release < "21.0.0" and python_version == 
"3.10" -https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.61-cp311-cp311-macosx_12_0_arm64.whl; platform_system == "Darwin" and platform_release >= "21.0.0" and platform_release < "22.0.0" and python_version == "3.11" -https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.61-cp310-cp310-macosx_12_0_arm64.whl; platform_system == "Darwin" and platform_release >= "21.0.0" and platform_release < "22.0.0" and python_version == "3.10" -https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.61-cp311-cp311-macosx_13_0_arm64.whl; platform_system == "Darwin" and platform_release >= "22.0.0" and platform_release < "23.0.0" and python_version == "3.11" -https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.61-cp310-cp310-macosx_13_0_arm64.whl; platform_system == "Darwin" and platform_release >= "22.0.0" and platform_release < "23.0.0" and python_version == "3.10" -https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.61-cp311-cp311-macosx_14_0_arm64.whl; platform_system == "Darwin" and platform_release >= "23.0.0" and platform_release < "24.0.0" and python_version == "3.11" -https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.61-cp310-cp310-macosx_14_0_arm64.whl; platform_system == "Darwin" and platform_release >= "23.0.0" and platform_release < "24.0.0" and python_version == "3.10" +https://github.com/abetlen/llama-cpp-python/releases/download/v0.2.64-metal/llama_cpp_python-0.2.64-cp311-cp311-macosx_11_0_arm64.whl; platform_system == "Darwin" and platform_release >= "20.0.0" and platform_release < "21.0.0" and python_version == "3.11" +https://github.com/abetlen/llama-cpp-python/releases/download/v0.2.64-metal/llama_cpp_python-0.2.64-cp310-cp310-macosx_11_0_arm64.whl; platform_system == "Darwin" and platform_release >= "20.0.0" and platform_release < "21.0.0" and python_version == "3.10" +https://github.com/abetlen/llama-cpp-python/releases/download/v0.2.64-metal/llama_cpp_python-0.2.64-cp311-cp311-macosx_12_0_arm64.whl; platform_system == "Darwin" and platform_release >= "21.0.0" and platform_release < "22.0.0" and python_version == "3.11" +https://github.com/abetlen/llama-cpp-python/releases/download/v0.2.64-metal/llama_cpp_python-0.2.64-cp310-cp310-macosx_12_0_arm64.whl; platform_system == "Darwin" and platform_release >= "21.0.0" and platform_release < "22.0.0" and python_version == "3.10" +https://github.com/abetlen/llama-cpp-python/releases/download/v0.2.64-metal/llama_cpp_python-0.2.64-cp311-cp311-macosx_13_0_arm64.whl; platform_system == "Darwin" and platform_release >= "22.0.0" and platform_release < "23.0.0" and python_version == "3.11" +https://github.com/abetlen/llama-cpp-python/releases/download/v0.2.64-metal/llama_cpp_python-0.2.64-cp310-cp310-macosx_13_0_arm64.whl; platform_system == "Darwin" and platform_release >= "22.0.0" and platform_release < "23.0.0" and python_version == "3.10" +https://github.com/abetlen/llama-cpp-python/releases/download/v0.2.64-metal/llama_cpp_python-0.2.64-cp311-cp311-macosx_14_0_arm64.whl; platform_system == "Darwin" and platform_release >= "23.0.0" and platform_release < "24.0.0" and python_version == "3.11" +https://github.com/abetlen/llama-cpp-python/releases/download/v0.2.64-metal/llama_cpp_python-0.2.64-cp310-cp310-macosx_14_0_arm64.whl; 
platform_system == "Darwin" and platform_release >= "23.0.0" and platform_release < "24.0.0" and python_version == "3.10" https://github.com/turboderp/exllamav2/releases/download/v0.0.19/exllamav2-0.0.19-py3-none-any.whl diff --git a/requirements_cpu_only.txt b/requirements_cpu_only.txt index 76a6e9c390..a70dfff1c0 100644 --- a/requirements_cpu_only.txt +++ b/requirements_cpu_only.txt @@ -31,8 +31,8 @@ flask_cloudflared==0.0.14 sse-starlette==1.6.5 tiktoken -# llama-cpp-python (CPU only, AVX2) -https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.61+cpuavx2-cp311-cp311-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11" -https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.61+cpuavx2-cp310-cp310-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10" -https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.61+cpuavx2-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11" -https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.61+cpuavx2-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10" +# llama-cpp-python (CPU only) +https://github.com/abetlen/llama-cpp-python/releases/download/v0.2.64/llama_cpp_python-0.2.64-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11" +https://github.com/abetlen/llama-cpp-python/releases/download/v0.2.64/llama_cpp_python-0.2.64-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11" +https://github.com/abetlen/llama-cpp-python/releases/download/v0.2.64/llama_cpp_python-0.2.64-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10" +https://github.com/abetlen/llama-cpp-python/releases/download/v0.2.64/llama_cpp_python-0.2.64-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10" diff --git a/requirements_cpu_only_noavx2.txt b/requirements_cpu_only_noavx2.txt deleted file mode 100644 index 4f72bb2d9d..0000000000 --- a/requirements_cpu_only_noavx2.txt +++ /dev/null @@ -1,38 +0,0 @@ -accelerate==0.27.* -colorama -datasets -einops -gradio==4.26.* -hqq==0.1.5 -jinja2==3.1.2 -lm_eval==0.3.0 -markdown -numba==0.59.* -numpy==1.26.* -optimum==1.17.* -pandas -peft==0.8.* -Pillow>=9.5.0 -psutil -pyyaml -requests -rich -safetensors==0.4.* -scipy -sentencepiece -tensorboard -transformers==4.40.* -tqdm -wandb - -# API -SpeechRecognition==3.10.0 -flask_cloudflared==0.0.14 -sse-starlette==1.6.5 -tiktoken - -# llama-cpp-python (CPU only, no AVX2) -https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.61+cpuavx-cp311-cp311-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11" -https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.61+cpuavx-cp310-cp310-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10" -https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.61+cpuavx-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11" 
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.61+cpuavx-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
diff --git a/requirements_noavx2.txt b/requirements_noavx2.txt
deleted file mode 100644
index fb76278f86..0000000000
--- a/requirements_noavx2.txt
+++ /dev/null
@@ -1,72 +0,0 @@
-accelerate==0.27.*
-aqlm[gpu,cpu]==1.1.3; platform_system == "Linux"
-bitsandbytes==0.43.*
-colorama
-datasets
-einops
-gradio==4.26.*
-hqq==0.1.5
-jinja2==3.1.2
-lm_eval==0.3.0
-markdown
-numba==0.59.*
-numpy==1.26.*
-optimum==1.17.*
-pandas
-peft==0.8.*
-Pillow>=9.5.0
-psutil
-pyyaml
-requests
-rich
-safetensors==0.4.*
-scipy
-sentencepiece
-tensorboard
-transformers==4.40.*
-tqdm
-wandb
-
-# API
-SpeechRecognition==3.10.0
-flask_cloudflared==0.0.14
-sse-starlette==1.6.5
-tiktoken
-
-# llama-cpp-python (CPU only, no AVX2)
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.61+cpuavx-cp311-cp311-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.61+cpuavx-cp310-cp310-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.61+cpuavx-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.61+cpuavx-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
-
-# llama-cpp-python (CUDA, no tensor cores)
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.2.61+cu121avx-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.2.61+cu121avx-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.2.61+cu121avx-cp311-cp311-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.2.61+cu121avx-cp310-cp310-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
-
-# llama-cpp-python (CUDA, tensor cores)
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda_tensorcores-0.2.61+cu121avx-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda_tensorcores-0.2.61+cu121avx-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda_tensorcores-0.2.61+cu121avx-cp311-cp311-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda_tensorcores-0.2.61+cu121avx-cp310-cp310-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
-
-# CUDA wheels
-https://github.com/jllllll/AutoGPTQ/releases/download/v0.6.0/auto_gptq-0.6.0+cu121-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
-https://github.com/jllllll/AutoGPTQ/releases/download/v0.6.0/auto_gptq-0.6.0+cu121-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
-https://github.com/jllllll/AutoGPTQ/releases/download/v0.6.0/auto_gptq-0.6.0+cu121-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
-https://github.com/jllllll/AutoGPTQ/releases/download/v0.6.0/auto_gptq-0.6.0+cu121-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
-https://github.com/turboderp/exllamav2/releases/download/v0.0.19/exllamav2-0.0.19+cu121-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
-https://github.com/turboderp/exllamav2/releases/download/v0.0.19/exllamav2-0.0.19+cu121-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
-https://github.com/turboderp/exllamav2/releases/download/v0.0.19/exllamav2-0.0.19+cu121-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
-https://github.com/turboderp/exllamav2/releases/download/v0.0.19/exllamav2-0.0.19+cu121-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
-https://github.com/turboderp/exllamav2/releases/download/v0.0.19/exllamav2-0.0.19-py3-none-any.whl; platform_system == "Linux" and platform_machine != "x86_64"
-https://github.com/oobabooga/flash-attention/releases/download/v2.5.6/flash_attn-2.5.6+cu122torch2.2.0cxx11abiFALSE-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
-https://github.com/oobabooga/flash-attention/releases/download/v2.5.6/flash_attn-2.5.6+cu122torch2.2.0cxx11abiFALSE-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
-https://github.com/Dao-AILab/flash-attention/releases/download/v2.5.6/flash_attn-2.5.6+cu122torch2.2cxx11abiFALSE-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
-https://github.com/Dao-AILab/flash-attention/releases/download/v2.5.6/flash_attn-2.5.6+cu122torch2.2cxx11abiFALSE-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
-https://github.com/jllllll/GPTQ-for-LLaMa-CUDA/releases/download/0.1.1/gptq_for_llama-0.1.1+cu121-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
-https://github.com/jllllll/GPTQ-for-LLaMa-CUDA/releases/download/0.1.1/gptq_for_llama-0.1.1+cu121-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
-https://github.com/jllllll/GPTQ-for-LLaMa-CUDA/releases/download/0.1.1/gptq_for_llama-0.1.1+cu121-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
-https://github.com/jllllll/GPTQ-for-LLaMa-CUDA/releases/download/0.1.1/gptq_for_llama-0.1.1+cu121-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
-autoawq==0.2.3; platform_system == "Linux" or platform_system == "Windows"

From c725d973689ae7b7b4c4484664d4eb64c64ab896 Mon Sep 17 00:00:00 2001
From: Jari Van Melckebeke
Date: Wed, 24 Apr 2024 04:17:55 +0200
Subject: [PATCH 3/8] nvidia docker: make sure gradio listens on 0.0.0.0 (#5918)

---
 docker/nvidia/Dockerfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docker/nvidia/Dockerfile b/docker/nvidia/Dockerfile
index ca17c17b63..66a717a7a4 100644
--- a/docker/nvidia/Dockerfile
+++ b/docker/nvidia/Dockerfile
@@ -18,4 +18,4 @@ COPY CMD_FLAGS.txt /home/app/text-generation-webui/
EXPOSE ${CONTAINER_PORT:-7860} ${CONTAINER_API_PORT:-5000} ${CONTAINER_API_STREAM_PORT:-5005}
WORKDIR /home/app/text-generation-webui
# set umask to ensure group read / write at runtime
-CMD umask 0002 && export HOME=/home/app/text-generation-webui && ./start_linux.sh
+CMD umask 0002 && export HOME=/home/app/text-generation-webui && ./start_linux.sh --listen

From f3c9103e04f2fca128b7ec5b78113dcfa9429807 Mon Sep 17 00:00:00 2001
From: Colin
Date: Wed, 24 Apr 2024 00:09:14 -0400
Subject: [PATCH 4/8] Revert walrus operator for params['max_memory'] (#5878)

---
 modules/models.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/modules/models.py b/modules/models.py
index 20a657641c..cf4e46349b 100644
--- a/modules/models.py
+++ b/modules/models.py
@@ -179,7 +179,7 @@ def huggingface_loader(model_name):
# DeepSpeed ZeRO-3
elif shared.args.deepspeed:
- model = LoaderClass.from_pretrained(path_to_model, torch_dtype=params['torch_dtype'], trust_remote_code=params['trust_remote_code'])
+ model = LoaderClass.from_pretrained(path_to_model, torch_dtype=params['torch_dtype'], trust_remote_code=params.get('trust_remote_code'))
model = deepspeed.initialize(model=model, config_params=ds_config, model_parameters=None, optimizer=None, lr_scheduler=None)[0]
model.module.eval() # Inference
logger.info(f'DeepSpeed ZeRO-3 is enabled: {is_deepspeed_zero3_enabled()}')
@@ -194,7 +194,7 @@ def huggingface_loader(model_name):
params['torch_dtype'] = torch.float32
else:
params['device_map'] = 'auto'
- if x := get_max_memory_dict():
+ if x:= get_max_memory_dict():
params['max_memory'] = x
if shared.args.load_in_4bit:
@@ -215,15 +215,15 @@ def huggingface_loader(model_name):
else:
params['quantization_config'] = BitsAndBytesConfig(load_in_8bit=True)
- if params['max_memory'] is not None:
+ if params.get('max_memory') is not None:
with init_empty_weights():
- model = LoaderClass.from_config(config, trust_remote_code=params['trust_remote_code'])
+ model = LoaderClass.from_config(config, trust_remote_code=params.get('trust_remote_code'))
model.tie_weights()
params['device_map'] = infer_auto_device_map(
model,
dtype=torch.int8,
- max_memory=params['max_memory'],
+ max_memory=params.get('max_memory'),
no_split_module_classes=model._no_split_modules
)

From f0538efb99a4e84f8d26c21da8a10290d987e953 Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Wed, 24 Apr 2024 00:31:28 -0700
Subject: [PATCH 5/8] Remove obsolete --tensorcores references

---
 modules/ui.py | 1 -
 modules/ui_model_menu.py | 1 -
 2 files changed, 2 deletions(-)

diff --git a/modules/ui.py b/modules/ui.py
index a84378b18e..a7c3b0f454 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -97,7 +97,6 @@ def list_model_elements():
'logits_all',
'no_offload_kqv',
'row_split',
- 'tensorcores',
'streaming_llm',
'attention_sink_size',
'hqq_backend',
diff --git a/modules/ui_model_menu.py b/modules/ui_model_menu.py
index 7f7a3ab8e0..5c4b2f89d4 100644
--- a/modules/ui_model_menu.py
+++ b/modules/ui_model_menu.py
@@ -119,7 +119,6 @@ def create_ui():
shared.gradio['use_double_quant'] = gr.Checkbox(label="use_double_quant", value=shared.args.use_double_quant)
shared.gradio['use_flash_attention_2'] = gr.Checkbox(label="use_flash_attention_2", value=shared.args.use_flash_attention_2, info='Set use_flash_attention_2=True while loading the model.')
shared.gradio['auto_devices'] = gr.Checkbox(label="auto-devices", value=shared.args.auto_devices)
- shared.gradio['tensorcores'] = gr.Checkbox(label="tensorcores", value=shared.args.tensorcores, info='NVIDIA only: use llama-cpp-python compiled with tensor cores support. This increases performance on RTX cards.')
shared.gradio['streaming_llm'] = gr.Checkbox(label="streaming_llm", value=shared.args.streaming_llm, info='(experimental) Activate StreamingLLM to avoid re-evaluating the entire prompt when old messages are removed.')
shared.gradio['attention_sink_size'] = gr.Number(label="attention_sink_size", value=shared.args.attention_sink_size, precision=0, info='StreamingLLM: number of sink tokens. Only used if the trimmed prompt doesn\'t share a prefix with the old prompt.')
shared.gradio['cpu'] = gr.Checkbox(label="cpu", value=shared.args.cpu, info='llama.cpp: Use llama-cpp-python compiled without GPU acceleration. Transformers: use PyTorch in CPU mode.')

From 64e2a9a0a7d723dbc4e852d3c3b996e81349a040 Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Wed, 24 Apr 2024 01:34:11 -0700
Subject: [PATCH 6/8] Fix the Phi-3 template when used in the UI

---
 modules/models_settings.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/modules/models_settings.py b/modules/models_settings.py
index 5c29243146..a7fed427b9 100644
--- a/modules/models_settings.py
+++ b/modules/models_settings.py
@@ -71,6 +71,7 @@ def get_model_metadata(model):
template = template.replace('bos_token', "'{}'".format(bos_token))
template = re.sub(r'raise_exception\([^)]*\)', "''", template)
+ template = re.sub(r'{% if add_generation_prompt %}.*', '', template, flags=re.DOTALL)
model_settings['instruction_template'] = 'Custom (obtained from model metadata)'
model_settings['instruction_template_str'] = template
@@ -130,6 +131,7 @@ def get_model_metadata(model):
template = template.replace(k, "'{}'".format(value))
template = re.sub(r'raise_exception\([^)]*\)', "''", template)
+ template = re.sub(r'{% if add_generation_prompt %}.*', '', template, flags=re.DOTALL)
model_settings['instruction_template'] = 'Custom (obtained from model metadata)'
model_settings['instruction_template_str'] = template

From 4094813f8ddebb3f591f7294d389376a33c72d61 Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Wed, 24 Apr 2024 09:53:41 -0700
Subject: [PATCH 7/8] Lint

---
 README.md | 4 ++--
 docker/amd/docker-compose.yml | 15 ++++++---------
 docker/cpu/docker-compose.yml | 15 ++++++---------
 docker/intel/docker-compose.yml | 19 ++++++++-----------
 docker/nvidia/docker-compose.yml | 19 ++++++++-----------
 modules/models.py | 2 +-
 6 files changed, 31 insertions(+), 43 deletions(-)

diff --git a/README.md b/README.md
index 04c4cbb176..330ae8ce77 100644
--- a/README.md
+++ b/README.md
@@ -112,8 +112,8 @@ Requirements file to use:
| NVIDIA | `requirements.txt` |
| AMD | `requirements_amd.txt` |
| CPU only | `requirements_cpu_only.txt` |
-| Apple | Intel | `requirements_apple_intel.txt` |
-| Apple | Apple Silicon | `requirements_apple_silicon.txt` |
+| Apple Intel | `requirements_apple_intel.txt` |
+| Apple Silicon | `requirements_apple_silicon.txt` |

### Start the web UI
diff --git a/docker/amd/docker-compose.yml b/docker/amd/docker-compose.yml
index 8f0ff3a3a1..58c9911f92 100644
--- a/docker/amd/docker-compose.yml
+++ b/docker/amd/docker-compose.yml
@@ -5,16 +5,13 @@ services:
context: .
args:
# Requirements file to use:
- # | GPU | CPU | requirements file to use |
+ # | GPU | requirements file to use |
# |--------|---------|---------|
- # | NVIDIA | has AVX2 | `requirements.txt` |
- # | NVIDIA | no AVX2 | `requirements_noavx2.txt` |
- # | AMD | has AVX2 | `requirements_amd.txt` |
- # | AMD | no AVX2 | `requirements_amd_noavx2.txt` |
- # | CPU only | has AVX2 | `requirements_cpu_only.txt` |
- # | CPU only | no AVX2 | `requirements_cpu_only_noavx2.txt` |
- # | Apple | Intel | `requirements_apple_intel.txt` |
- # | Apple | Apple Silicon | `requirements_apple_silicon.txt` |
+ # | NVIDIA | `requirements.txt` |
+ # | AMD | `requirements_amd.txt` |
+ # | CPU only | `requirements_cpu_only.txt` |
+ # | Apple Intel | `requirements_apple_intel.txt` |
+ # | Apple Silicon | `requirements_apple_silicon.txt` |
# Default: requirements.txt`
# BUILD_REQUIREMENTS: requirements.txt
diff --git a/docker/cpu/docker-compose.yml b/docker/cpu/docker-compose.yml
index 0020e9e665..4432d129f7 100644
--- a/docker/cpu/docker-compose.yml
+++ b/docker/cpu/docker-compose.yml
@@ -5,16 +5,13 @@ services:
context: .
args:
# Requirements file to use:
- # | GPU | CPU | requirements file to use |
+ # | GPU | requirements file to use |
# |--------|---------|---------|
- # | NVIDIA | has AVX2 | `requirements.txt` |
- # | NVIDIA | no AVX2 | `requirements_noavx2.txt` |
- # | AMD | has AVX2 | `requirements_amd.txt` |
- # | AMD | no AVX2 | `requirements_amd_noavx2.txt` |
- # | CPU only | has AVX2 | `requirements_cpu_only.txt` |
- # | CPU only | no AVX2 | `requirements_cpu_only_noavx2.txt` |
- # | Apple | Intel | `requirements_apple_intel.txt` |
- # | Apple | Apple Silicon | `requirements_apple_silicon.txt` |
+ # | NVIDIA | `requirements.txt` |
+ # | AMD | `requirements_amd.txt` |
+ # | CPU only | `requirements_cpu_only.txt` |
+ # | Apple Intel | `requirements_apple_intel.txt` |
+ # | Apple Silicon | `requirements_apple_silicon.txt` |
# Default: requirements.txt`
# BUILD_REQUIREMENTS: requirements.txt
diff --git a/docker/intel/docker-compose.yml b/docker/intel/docker-compose.yml
index 5656e880be..d737626b08 100644
--- a/docker/intel/docker-compose.yml
+++ b/docker/intel/docker-compose.yml
@@ -5,22 +5,19 @@ services:
context: .
args:
# Requirements file to use:
- # | GPU | CPU | requirements file to use |
+ # | GPU | requirements file to use |
# |--------|---------|---------|
- # | NVIDIA | has AVX2 | `requirements.txt` |
- # | NVIDIA | no AVX2 | `requirements_noavx2.txt` |
- # | AMD | has AVX2 | `requirements_amd.txt` |
- # | AMD | no AVX2 | `requirements_amd_noavx2.txt` |
- # | CPU only | has AVX2 | `requirements_cpu_only.txt` |
- # | CPU only | no AVX2 | `requirements_cpu_only_noavx2.txt` |
- # | Apple | Intel | `requirements_apple_intel.txt` |
- # | Apple | Apple Silicon | `requirements_apple_silicon.txt` |
+ # | NVIDIA | `requirements.txt` |
+ # | AMD | `requirements_amd.txt` |
+ # | CPU only | `requirements_cpu_only.txt` |
+ # | Apple Intel | `requirements_apple_intel.txt` |
+ # | Apple Silicon | `requirements_apple_silicon.txt` |
# Default: requirements.txt`
# BUILD_REQUIREMENTS: requirements.txt
-
+
# Extension requirements to build:
# BUILD_EXTENSIONS:
-
+
# specify which cuda version your card supports: https://developer.nvidia.com/cuda-gpus
TORCH_CUDA_ARCH_LIST: ${TORCH_CUDA_ARCH_LIST:-7.5}
BUILD_EXTENSIONS: ${BUILD_EXTENSIONS:-}
diff --git a/docker/nvidia/docker-compose.yml b/docker/nvidia/docker-compose.yml
index f16c7e70ba..6d18b0728b 100644
--- a/docker/nvidia/docker-compose.yml
+++ b/docker/nvidia/docker-compose.yml
@@ -5,22 +5,19 @@ services:
context: .
args:
# Requirements file to use:
- # | GPU | CPU | requirements file to use |
+ # | GPU | requirements file to use |
# |--------|---------|---------|
- # | NVIDIA | has AVX2 | `requirements.txt` |
- # | NVIDIA | no AVX2 | `requirements_noavx2.txt` |
- # | AMD | has AVX2 | `requirements_amd.txt` |
- # | AMD | no AVX2 | `requirements_amd_noavx2.txt` |
- # | CPU only | has AVX2 | `requirements_cpu_only.txt` |
- # | CPU only | no AVX2 | `requirements_cpu_only_noavx2.txt` |
- # | Apple | Intel | `requirements_apple_intel.txt` |
- # | Apple | Apple Silicon | `requirements_apple_silicon.txt` |
+ # | NVIDIA | `requirements.txt` |
+ # | AMD | `requirements_amd.txt` |
+ # | CPU only | `requirements_cpu_only.txt` |
+ # | Apple Intel | `requirements_apple_intel.txt` |
+ # | Apple Silicon | `requirements_apple_silicon.txt` |
# Default: requirements.txt`
# BUILD_REQUIREMENTS: requirements.txt
-
+
# Extension requirements to build:
# BUILD_EXTENSIONS:
-
+
# specify which cuda version your card supports: https://developer.nvidia.com/cuda-gpus
TORCH_CUDA_ARCH_LIST: ${TORCH_CUDA_ARCH_LIST:-7.5}
BUILD_EXTENSIONS: ${BUILD_EXTENSIONS:-}
diff --git a/modules/models.py b/modules/models.py
index cf4e46349b..64cbffe10d 100644
--- a/modules/models.py
+++ b/modules/models.py
@@ -194,7 +194,7 @@ def huggingface_loader(model_name):
params['torch_dtype'] = torch.float32
else:
params['device_map'] = 'auto'
- if x:= get_max_memory_dict():
+ if x := get_max_memory_dict():
params['max_memory'] = x
if shared.args.load_in_4bit:

From c9b0df16ee5a17088e6b0000c272cf726447b8ea Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Wed, 24 Apr 2024 09:55:00 -0700
Subject: [PATCH 8/8] Lint

---
 README.md | 2 +-
 docker/amd/docker-compose.yml | 2 +-
 docker/cpu/docker-compose.yml | 2 +-
 docker/intel/docker-compose.yml | 2 +-
 docker/nvidia/docker-compose.yml | 2 +-
 5 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/README.md b/README.md
index 330ae8ce77..46cca63ece 100644
--- a/README.md
+++ b/README.md
@@ -108,7 +108,7 @@ pip install -r
Requirements file to use:
| GPU | requirements file to use |
-|--------|---------|---------|
+|--------|---------|
| NVIDIA | `requirements.txt` |
| AMD | `requirements_amd.txt` |
| CPU only | `requirements_cpu_only.txt` |
diff --git a/docker/amd/docker-compose.yml b/docker/amd/docker-compose.yml
index 58c9911f92..4709ae941c 100644
--- a/docker/amd/docker-compose.yml
+++ b/docker/amd/docker-compose.yml
@@ -6,7 +6,7 @@ services:
args:
# Requirements file to use:
# | GPU | requirements file to use |
- # |--------|---------|---------|
+ # |--------|---------|
# | NVIDIA | `requirements.txt` |
# | AMD | `requirements_amd.txt` |
# | CPU only | `requirements_cpu_only.txt` |
diff --git a/docker/cpu/docker-compose.yml b/docker/cpu/docker-compose.yml
index 4432d129f7..c9d415ae22 100644
--- a/docker/cpu/docker-compose.yml
+++ b/docker/cpu/docker-compose.yml
@@ -6,7 +6,7 @@ services:
args:
# Requirements file to use:
# | GPU | requirements file to use |
- # |--------|---------|---------|
+ # |--------|---------|
# | NVIDIA | `requirements.txt` |
# | AMD | `requirements_amd.txt` |
# | CPU only | `requirements_cpu_only.txt` |
diff --git a/docker/intel/docker-compose.yml b/docker/intel/docker-compose.yml
index d737626b08..31e9dde015 100644
--- a/docker/intel/docker-compose.yml
+++ b/docker/intel/docker-compose.yml
@@ -6,7 +6,7 @@ services:
args:
# Requirements file to use:
# | GPU | requirements file to use |
- # |--------|---------|---------|
+ # |--------|---------|
# | NVIDIA | `requirements.txt` |
# | AMD | `requirements_amd.txt` |
# | CPU only | `requirements_cpu_only.txt` |
diff --git a/docker/nvidia/docker-compose.yml b/docker/nvidia/docker-compose.yml
index 6d18b0728b..835dd8384b 100644
--- a/docker/nvidia/docker-compose.yml
+++ b/docker/nvidia/docker-compose.yml
@@ -6,7 +6,7 @@ services:
args:
# Requirements file to use:
# | GPU | requirements file to use |
- # |--------|---------|---------|
+ # |--------|---------|
# | NVIDIA | `requirements.txt` |
# | AMD | `requirements_amd.txt` |
# | CPU only | `requirements_cpu_only.txt` |
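
Note on PATCH 6/8: the template trimming it adds can be exercised in isolation. The sketch below is illustrative only; it applies the same two re.sub calls that the patch inserts into get_model_metadata() in modules/models_settings.py, but the sample template string is a hypothetical Phi-3-style template, not text taken from any model's metadata.

import re

# Hypothetical Jinja2 chat template, shaped like the Phi-3 template that
# motivated the patch (illustrative only; not copied from model metadata).
template = (
    "{% for message in messages %}"
    "{{ '<|user|>' + message['content'] + '<|end|>' }}"
    "{% endfor %}"
    "{% if add_generation_prompt %}{{ '<|assistant|>' }}{% endif %}"
)

# The same substitutions that PATCH 6/8 adds: neutralize raise_exception(...)
# calls, then drop everything from the add_generation_prompt branch onward so
# the template used by the UI ends cleanly after the message loop.
template = re.sub(r'raise_exception\([^)]*\)', "''", template)
template = re.sub(r'{% if add_generation_prompt %}.*', '', template, flags=re.DOTALL)

print(template)  # now ends after the '{% endfor %}' block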