From 071f0776ad6e7d8dab08e0d98d089c808807ab45 Mon Sep 17 00:00:00 2001
From: AlphaAtlas <46462706+AlphaAtlas@users.noreply.github.com>
Date: Sun, 14 May 2023 21:58:11 -0400
Subject: [PATCH] Add llama.cpp GPU offload option (#2060)

---
 README.md                 |  1 +
 docs/llama.cpp-models.md  | 23 +++++++++++++++++++----
 modules/llamacpp_model.py |  3 ++-
 modules/shared.py         |  1 +
 4 files changed, 23 insertions(+), 5 deletions(-)

diff --git a/README.md b/README.md
index 6724f196..6027333d 100644
--- a/README.md
+++ b/README.md
@@ -230,6 +230,7 @@ Optionally, you can use the following command-line flags:
 | `--n_batch` | Maximum number of prompt tokens to batch together when calling llama_eval. |
 | `--no-mmap` | Prevent mmap from being used. |
 | `--mlock` | Force the system to keep the model in RAM. |
+| `--n-gpu-layers N_GPU_LAYERS` | Number of layers to offload to the GPU. Only works if llama-cpp-python was compiled with BLAS. Set this to 1000000000 to offload all layers to the GPU. |
 
 #### GPTQ
 
diff --git a/docs/llama.cpp-models.md b/docs/llama.cpp-models.md
index 4c727e26..4ed00dca 100644
--- a/docs/llama.cpp-models.md
+++ b/docs/llama.cpp-models.md
@@ -1,16 +1,31 @@
-## Using llama.cpp in the web UI
+# Using llama.cpp in the web UI
 
-#### Pre-converted models
+## Setting up the models
+
+#### Pre-converted
 
 Place the model in the `models` folder, making sure that its name contains `ggml` somewhere and ends in `.bin`.
 
 #### Convert LLaMA yourself
 
-Follow the instructions in the llama.cpp README to generate the `ggml-model-q4_0.bin` file: https://github.com/ggerganov/llama.cpp#usage
+Follow the instructions in the llama.cpp README to generate the `ggml-model.bin` file: https://github.com/ggerganov/llama.cpp#usage
+
+## GPU offloading
+
+Enabled with the `--n-gpu-layers` parameter. If you have enough VRAM, use a high number like `--n-gpu-layers 200000` to offload all layers to the GPU.
+
+Note that you need to manually install `llama-cpp-python` with GPU support. To do that:
+
+```
+pip uninstall -y llama-cpp-python
+CMAKE_ARGS="-DLLAMA_CUBLAS=on" FORCE_CMAKE=1 pip install llama-cpp-python --no-cache-dir
+```
+
+Here you can find the different compilation options for OpenBLAS / cuBLAS / CLBlast: https://pypi.org/project/llama-cpp-python/
 
 ## Performance
 
-This was the performance of llama-7b int4 on my i5-12400F:
+This was the performance of llama-7b int4 on my i5-12400F (cpu only):
 
 > Output generated in 33.07 seconds (6.05 tokens/s, 200 tokens, context 17)
 
diff --git a/modules/llamacpp_model.py b/modules/llamacpp_model.py
index d19eea27..fa8c3045 100644
--- a/modules/llamacpp_model.py
+++ b/modules/llamacpp_model.py
@@ -27,7 +27,8 @@ class LlamaCppModel:
             'n_threads': shared.args.threads or None,
             'n_batch': shared.args.n_batch,
             'use_mmap': not shared.args.no_mmap,
-            'use_mlock': shared.args.mlock
+            'use_mlock': shared.args.mlock,
+            'n_gpu_layers': shared.args.n_gpu_layers
         }
         self.model = Llama(**params)
         self.model.set_cache(LlamaCache)
diff --git a/modules/shared.py b/modules/shared.py
index 3d94932d..cb15ba86 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -123,6 +123,7 @@ parser.add_argument('--threads', type=int, default=0, help='Number of threads to
 parser.add_argument('--n_batch', type=int, default=512, help='Maximum number of prompt tokens to batch together when calling llama_eval.')
 parser.add_argument('--no-mmap', action='store_true', help='Prevent mmap from being used.')
 parser.add_argument('--mlock', action='store_true', help='Force the system to keep the model in RAM.')
+parser.add_argument('--n-gpu-layers', type=int, default=0, help='Number of layers to offload to the GPU.')
 
 # GPTQ
 parser.add_argument('--wbits', type=int, default=0, help='Load a pre-quantized model with specified precision in bits. 2, 3, 4 and 8 are supported.')
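
For context, a minimal sketch (not part of the patch) of what the new `n_gpu_layers` setting does once it reaches llama-cpp-python's `Llama` constructor, mirroring the params dict built in modules/llamacpp_model.py. It assumes llama-cpp-python was reinstalled with cuBLAS as shown in the docs above; the model path, the layer count of 32, and the prompt are placeholders.

```
# Standalone sketch: load a ggml model with some layers offloaded to the GPU.
from llama_cpp import Llama

model = Llama(
    model_path='models/ggml-model-q4_0.bin',  # placeholder path to a ggml .bin file
    n_ctx=2048,
    n_batch=512,
    use_mmap=True,
    use_mlock=False,
    n_gpu_layers=32,  # layers kept in VRAM; a very large number offloads everything
)

# Generate a short completion to confirm the offloaded model works.
output = model('Building a website can be done in 10 simple steps:', max_tokens=64)
print(output['choices'][0]['text'])
```

In the web UI itself, the equivalent is simply passing `--n-gpu-layers 32` (or a very large number to offload all layers) on the command line, which this patch forwards to the constructor above.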