From 4a999e3bcd6f44e78eda3d448f4e36884aaf8966 Mon Sep 17 00:00:00 2001
From: jllllll <3887729+jllllll@users.noreply.github.com>
Date: Sat, 26 Aug 2023 09:15:11 -0500
Subject: [PATCH] Use separate llama-cpp-python packages for GGML support

---
 download-model.py          | 15 +++++++++++++--
 modules/llamacpp_hf.py     | 27 +++++++++++++++++++++------
 modules/llamacpp_model.py  | 29 +++++++++++++++++++++++------
 modules/models.py          |  2 +-
 modules/models_settings.py |  4 ++--
 modules/utils.py           |  9 +++++++++
 requirements.txt           |  5 +++++
 7 files changed, 74 insertions(+), 17 deletions(-)

diff --git a/download-model.py b/download-model.py
index be8d59fe..3bb4a39b 100644
--- a/download-model.py
+++ b/download-model.py
@@ -57,7 +57,8 @@ class ModelDownloader:
         classifications = []
         has_pytorch = False
         has_pt = False
-        # has_gguf = False
+        has_gguf = False
+        has_ggml = False
         has_safetensors = False
         is_lora = False
         while True:
@@ -79,6 +80,7 @@ class ModelDownloader:
                 is_safetensors = re.match(r".*\.safetensors", fname)
                 is_pt = re.match(r".*\.pt", fname)
                 is_gguf = re.match(r'.*\.gguf', fname)
+                is_ggml = re.match(r".*ggml.*\.bin", fname)
                 is_tokenizer = re.match(r"(tokenizer|ice|spiece).*\.model", fname)
                 is_text = re.match(r".*\.(txt|json|py|md)", fname) or is_tokenizer
                 if any((is_pytorch, is_safetensors, is_pt, is_gguf, is_tokenizer, is_text)):
@@ -102,8 +104,11 @@ class ModelDownloader:
                             has_pt = True
                             classifications.append('pt')
                         elif is_gguf:
-                            # has_gguf = True
+                            has_gguf = True
                             classifications.append('gguf')
+                        elif is_ggml:
+                            has_ggml = True
+                            classifications.append('ggml')
 
             cursor = base64.b64encode(f'{{"file_name":"{dict[-1]["path"]}"}}'.encode()) + b':50'
             cursor = base64.b64encode(cursor)
@@ -115,6 +120,12 @@ class ModelDownloader:
                 if classifications[i] in ['pytorch', 'pt']:
                     links.pop(i)
 
+        # If both GGML and GGUF are available, download GGUF only
+        if has_ggml and has_gguf:
+            for i in range(len(classifications) - 1, -1, -1):
+                if classifications[i] == 'ggml':
+                    links.pop(i)
+
         return links, sha256, is_lora
 
     def get_output_folder(self, model, branch, is_lora, base_folder=None):
diff --git a/modules/llamacpp_hf.py b/modules/llamacpp_hf.py
index 0608cb01..bcb537fa 100644
--- a/modules/llamacpp_hf.py
+++ b/modules/llamacpp_hf.py
@@ -9,23 +9,38 @@ from transformers.modeling_outputs import CausalLMOutputWithPast
 
 from modules import RoPE, shared
 from modules.logging_colors import logger
+from modules.utils import is_gguf
 
 import llama_cpp
 
+try:
+    import llama_cpp_ggml
+except:
+    llama_cpp_ggml = llama_cpp
+
 if torch.cuda.is_available() and not torch.version.hip:
     try:
         import llama_cpp_cuda
     except:
         llama_cpp_cuda = None
+    try:
+        import llama_cpp_ggml_cuda
+    except:
+        llama_cpp_ggml_cuda = llama_cpp_cuda
 else:
     llama_cpp_cuda = None
+    llama_cpp_ggml_cuda = None
 
 
-def llama_cpp_lib():
-    if shared.args.cpu or llama_cpp_cuda is None:
-        return llama_cpp
+def llama_cpp_lib(model_file: Union[str, Path] = None):
+    if model_file is not None:
+        gguf_model = is_gguf(model_file)
     else:
-        return llama_cpp_cuda
+        gguf_model = True
+
+    if shared.args.cpu or llama_cpp_cuda is None:
+        return llama_cpp if gguf_model else llama_cpp_ggml
+    else:
+        return llama_cpp_cuda if gguf_model else llama_cpp_ggml_cuda
 
 
 class LlamacppHF(PreTrainedModel):
@@ -165,7 +180,7 @@ class LlamacppHF(PreTrainedModel):
         if path.is_file():
             model_file = path
         else:
-            model_file = list(path.glob('*.gguf*'))[0]
+            model_file = (list(path.glob('*.gguf*')) + list(path.glob('*ggml*.bin')))[0]
 
         logger.info(f"llama.cpp weights detected: {model_file}\n")
 
@@ -193,7 +208,7 @@ class LlamacppHF(PreTrainedModel):
             'logits_all': True,
         }
 
-        Llama = llama_cpp_lib().Llama
+        Llama = llama_cpp_lib(model_file).Llama
         model = Llama(**params)
 
         return LlamacppHF(model)
diff --git a/modules/llamacpp_model.py b/modules/llamacpp_model.py
index d2893b0d..c3c41541 100644
--- a/modules/llamacpp_model.py
+++ b/modules/llamacpp_model.py
@@ -1,5 +1,7 @@
 import re
 from functools import partial
+from pathlib import Path
+from typing import Union
 
 import torch
 
@@ -7,23 +9,38 @@ from modules import RoPE, shared
 from modules.callbacks import Iteratorize
 from modules.logging_colors import logger
 from modules.text_generation import get_max_prompt_length
+from modules.utils import is_gguf
 
 import llama_cpp
 
+try:
+    import llama_cpp_ggml
+except:
+    llama_cpp_ggml = llama_cpp
+
 if torch.cuda.is_available() and not torch.version.hip:
     try:
         import llama_cpp_cuda
     except:
         llama_cpp_cuda = None
+    try:
+        import llama_cpp_ggml_cuda
+    except:
+        llama_cpp_ggml_cuda = llama_cpp_cuda
 else:
     llama_cpp_cuda = None
+    llama_cpp_ggml_cuda = None
 
 
-def llama_cpp_lib():
-    if shared.args.cpu or llama_cpp_cuda is None:
-        return llama_cpp
+def llama_cpp_lib(model_file: Union[str, Path] = None):
+    if model_file is not None:
+        gguf_model = is_gguf(model_file)
     else:
-        return llama_cpp_cuda
+        gguf_model = True
+
+    if shared.args.cpu or llama_cpp_cuda is None:
+        return llama_cpp if gguf_model else llama_cpp_ggml
+    else:
+        return llama_cpp_cuda if gguf_model else llama_cpp_ggml_cuda
 
 
 def ban_eos_logits_processor(eos_token, input_ids, logits):
@@ -41,8 +58,8 @@ class LlamaCppModel:
 
     @classmethod
     def from_pretrained(self, path):
-        Llama = llama_cpp_lib().Llama
-        LlamaCache = llama_cpp_lib().LlamaCache
+        Llama = llama_cpp_lib(str(path)).Llama
+        LlamaCache = llama_cpp_lib(str(path)).LlamaCache
 
         result = self()
         cache_capacity = 0
diff --git a/modules/models.py b/modules/models.py
index 5268a2fc..3025fe3d 100644
--- a/modules/models.py
+++ b/modules/models.py
@@ -241,7 +241,7 @@ def llamacpp_loader(model_name):
     if path.is_file():
         model_file = path
     else:
-        model_file = list(Path(f'{shared.args.model_dir}/{model_name}').glob('*.gguf*'))[0]
+        model_file = (list(Path(f'{shared.args.model_dir}/{model_name}').glob('*.gguf*')) + list(Path(f'{shared.args.model_dir}/{model_name}').glob('*ggml*.bin')))[0]
 
     logger.info(f"llama.cpp weights detected: {model_file}")
     model, tokenizer = LlamaCppModel.from_pretrained(model_file)
diff --git a/modules/models_settings.py b/modules/models_settings.py
index 2ed658b8..c55b1e88 100644
--- a/modules/models_settings.py
+++ b/modules/models_settings.py
@@ -24,9 +24,9 @@ def infer_loader(model_name):
         loader = None
     elif Path(f'{shared.args.model_dir}/{model_name}/quantize_config.json').exists() or ('wbits' in model_settings and type(model_settings['wbits']) is int and model_settings['wbits'] > 0):
         loader = 'AutoGPTQ'
-    elif len(list(path_to_model.glob('*.gguf*'))) > 0:
+    elif len(list(path_to_model.glob('*.gguf*')) + list(path_to_model.glob('*ggml*.bin'))) > 0:
         loader = 'llama.cpp'
-    elif re.match(r'.*\.gguf', model_name.lower()):
+    elif re.match(r'.*\.gguf|.*ggml.*\.bin', model_name.lower()):
         loader = 'llama.cpp'
     elif re.match(r'.*rwkv.*\.pth', model_name.lower()):
         loader = 'RWKV'
diff --git a/modules/utils.py b/modules/utils.py
index 0a7edffa..3862817d 100644
--- a/modules/utils.py
+++ b/modules/utils.py
@@ -2,6 +2,7 @@ import os
 import re
 from datetime import datetime
 from pathlib import Path
+from typing import Union
 
 from modules import shared
 from modules.logging_colors import logger
@@ -124,3 +125,11 @@ def get_datasets(path: str, ext: str):
 
 def get_available_chat_styles():
     return sorted(set(('-'.join(k.stem.split('-')[1:]) for k in Path('css').glob('chat_style*.css'))), key=natural_keys)
+
+# Determines if a llama.cpp model is in GGUF format
+# Copied from ctransformers utils.py
+def is_gguf(path: Union[str, Path]) -> bool:
+    path = str(Path(path).resolve())
+    with open(path, "rb") as f:
+        magic = f.read(4)
+    return magic == "GGUF".encode()
diff --git a/requirements.txt b/requirements.txt
index a28d87ee..0c6aeb1b 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -35,6 +35,11 @@ https://github.com/abetlen/llama-cpp-python/releases/download/v0.1.79/llama_cpp_
 # llama-cpp-python with CUDA support
 https://github.com/jllllll/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.1.79+cu117-cp310-cp310-win_amd64.whl; platform_system == "Windows"
 https://github.com/jllllll/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.1.79+cu117-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
+# llama-cpp-python with GGML support
+https://github.com/jllllll/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python_ggml-0.1.78+cpuavx2-cp310-cp310-win_amd64.whl; platform_system == "Windows"
+https://github.com/jllllll/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python_ggml-0.1.78+cpuavx2-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
+https://github.com/jllllll/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_ggml_cuda-0.1.78+cu117-cp310-cp310-win_amd64.whl; platform_system == "Windows"
+https://github.com/jllllll/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_ggml_cuda-0.1.78+cu117-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
 
 # GPTQ-for-LLaMa
 https://github.com/jllllll/GPTQ-for-LLaMa-CUDA/releases/download/0.1.0/gptq_for_llama-0.1.0+cu117-cp310-cp310-win_amd64.whl; platform_system == "Windows"
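
Reviewer note: the patch hinges on two small decisions: whether a model file is GGUF (the 4-byte magic check added to modules/utils.py, which drives the llama_cpp_lib() backend choice) and, in download-model.py, preferring GGUF over GGML when a repo ships both formats. The sketch below restates those two checks as a standalone script so they can be exercised outside the webui. It is illustrative only: the helper name prefer_gguf and the example file names do not appear in the patch, and the filename matching is simplified relative to the regexes used in download-model.py.

# Standalone sketch of the two checks above (assumed names; not code from this patch).
from pathlib import Path
from typing import List, Union


def is_gguf(path: Union[str, Path]) -> bool:
    # GGUF model files begin with the 4-byte magic b"GGUF";
    # older GGML-family files use different magics and fail this check.
    with open(Path(path).resolve(), "rb") as f:
        return f.read(4) == b"GGUF"


def prefer_gguf(filenames: List[str]) -> List[str]:
    # Mirrors the downloader rule: if a repo offers both formats,
    # keep only the GGUF files; otherwise keep the list unchanged.
    gguf = [f for f in filenames if f.endswith(".gguf")]
    ggml = [f for f in filenames if "ggml" in f and f.endswith(".bin")]
    return gguf if (gguf and ggml) else filenames


if __name__ == "__main__":
    # Hypothetical file names, for illustration only.
    print(prefer_gguf(["llama-2-7b.Q4_K_M.gguf", "llama-2-7b.ggmlv3.q4_K_M.bin"]))
    # -> ['llama-2-7b.Q4_K_M.gguf']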