Mirror of https://github.com/oobabooga/text-generation-webui.git
Replace ggml occurrences with gguf
commit 83640d6f43 (parent 1a642c12b5)
@@ -156,7 +156,7 @@ text-generation-webui
 
 In the "Model" tab of the UI, those models can be automatically downloaded from Hugging Face. You can also download them via the command-line with `python download-model.py organization/model`.
 
-* GGML models are a single file and should be placed directly into `models`. Example:
+* GGUF models are a single file and should be placed directly into `models`. Example:
 
 ```
 text-generation-webui
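As a quick illustration of the layout this README hunk describes (both names below are made up), a GGUF model is a single file dropped straight into `models/`, while the Transformers-style models mentioned above keep their own subfolder:

```python
from pathlib import Path

models_dir = Path("models")

# Hypothetical single-file GGUF model: lives directly in models/.
gguf_model = models_dir / "llama-2-7b.Q4_K_M.gguf"

# Hypothetical Transformers model: lives in its own subfolder.
hf_model = models_dir / "organization_model"

print(gguf_model.is_file(), hf_model.is_dir())
```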
@@ -258,7 +258,7 @@ Optionally, you can use the following command-line flags:
 | `--quant_type QUANT_TYPE` | quant_type for 4-bit. Valid options: nf4, fp4. |
 | `--use_double_quant` | use_double_quant for 4-bit. |
 
-#### GGML (for llama.cpp and ctransformers)
+#### GGUF (for llama.cpp and ctransformers)
 
 | Flag | Description |
 |-------------|-------------|
@@ -57,7 +57,7 @@ class ModelDownloader:
         classifications = []
         has_pytorch = False
         has_pt = False
-        # has_ggml = False
+        # has_gguf = False
         has_safetensors = False
         is_lora = False
         while True:
@@ -78,10 +78,10 @@ class ModelDownloader:
                 is_pytorch = re.match(r"(pytorch|adapter|gptq)_model.*\.bin", fname)
                 is_safetensors = re.match(r".*\.safetensors", fname)
                 is_pt = re.match(r".*\.pt", fname)
-                is_ggml = re.match(r".*ggml.*\.bin", fname)
+                is_gguf = re.match(r'.*\.gguf', fname)
                 is_tokenizer = re.match(r"(tokenizer|ice|spiece).*\.model", fname)
                 is_text = re.match(r".*\.(txt|json|py|md)", fname) or is_tokenizer
-                if any((is_pytorch, is_safetensors, is_pt, is_ggml, is_tokenizer, is_text)):
+                if any((is_pytorch, is_safetensors, is_pt, is_gguf, is_tokenizer, is_text)):
                     if 'lfs' in dict[i]:
                         sha256.append([fname, dict[i]['lfs']['oid']])
 
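To see what the changed classification regex does, here is a minimal sketch using the two patterns from the hunk above with made-up filenames (not taken from any repository):

```python
import re

# Hypothetical filenames, for illustration only.
old_style = "llama-7b.ggmlv3.q4_0.bin"   # old GGML naming convention
new_style = "llama-2-7b.Q4_K_M.gguf"     # new GGUF naming convention

# Removed pattern: any ".bin" file with "ggml" somewhere in its name.
print(bool(re.match(r".*ggml.*\.bin", old_style)))  # True
print(bool(re.match(r".*ggml.*\.bin", new_style)))  # False

# Added pattern: any file with a ".gguf" extension.
print(bool(re.match(r'.*\.gguf', old_style)))  # False
print(bool(re.match(r'.*\.gguf', new_style)))  # True
```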
@@ -101,9 +101,9 @@ class ModelDownloader:
                     elif is_pt:
                         has_pt = True
                         classifications.append('pt')
-                    elif is_ggml:
-                        # has_ggml = True
-                        classifications.append('ggml')
+                    elif is_gguf:
+                        # has_gguf = True
+                        classifications.append('gguf')
 
             cursor = base64.b64encode(f'{{"file_name":"{dict[-1]["path"]}"}}'.encode()) + b':50'
             cursor = base64.b64encode(cursor)
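The unchanged `cursor` lines above are worth a note: the file listing appears to be paginated, and the next-page token is built by base64-encoding a small JSON fragment, appending `:50`, and base64-encoding the result again. A minimal sketch with a made-up file path:

```python
import base64

# Hypothetical path of the last file returned on the current page.
last_path = "model-00002-of-00002.safetensors"

cursor = base64.b64encode(f'{{"file_name":"{last_path}"}}'.encode()) + b':50'
cursor = base64.b64encode(cursor)
print(cursor.decode())  # opaque token passed along with the next listing request
```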
@@ -165,7 +165,7 @@ class LlamacppHF(PreTrainedModel):
         if path.is_file():
             model_file = path
         else:
-            model_file = list(path.glob('*ggml*.bin'))[0]
+            model_file = list(path.glob('*.gguf*'))[0]
 
         logger.info(f"llama.cpp weights detected: {model_file}\n")
 
@@ -241,7 +241,7 @@ def llamacpp_loader(model_name):
     if path.is_file():
         model_file = path
     else:
-        model_file = list(Path(f'{shared.args.model_dir}/{model_name}').glob('*ggml*.bin'))[0]
+        model_file = list(Path(f'{shared.args.model_dir}/{model_name}').glob('*.gguf*'))[0]
 
     logger.info(f"llama.cpp weights detected: {model_file}")
     model, tokenizer = LlamaCppModel.from_pretrained(model_file)
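Both hunks above swap the fallback weight lookup from `*ggml*.bin` to `*.gguf*`. A minimal sketch of that selection (the folder and its contents are hypothetical; the real code simply indexes `[0]` and would raise if nothing matches):

```python
from pathlib import Path

# Hypothetical model folder expected to contain one GGUF file.
path = Path("models/llama-2-7b-gguf")

candidates = list(path.glob('*.gguf*'))
if candidates:
    model_file = candidates[0]  # same "first match wins" behaviour as the diff
    print(f"llama.cpp weights detected: {model_file}")
else:
    print("no .gguf file found in", path)
```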
@@ -24,9 +24,9 @@ def infer_loader(model_name):
         loader = None
     elif Path(f'{shared.args.model_dir}/{model_name}/quantize_config.json').exists() or ('wbits' in model_settings and type(model_settings['wbits']) is int and model_settings['wbits'] > 0):
         loader = 'AutoGPTQ'
-    elif len(list(path_to_model.glob('*ggml*.bin'))) > 0:
+    elif len(list(path_to_model.glob('*.gguf*'))) > 0:
         loader = 'llama.cpp'
-    elif re.match(r'.*ggml.*\.bin', model_name.lower()):
+    elif re.match(r'.*\.gguf', model_name.lower()):
         loader = 'llama.cpp'
     elif re.match(r'.*rwkv.*\.pth', model_name.lower()):
         loader = 'RWKV'
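To summarise the loader-inference change, here is a condensed sketch of just the GGUF-related branches (the function name and the final fallback are invented for illustration; the real `infer_loader` has more branches and reads per-model settings):

```python
import re
from pathlib import Path

def guess_loader(model_dir, model_name):
    """Condensed illustration of the GGUF branches shown in the hunk above."""
    path_to_model = Path(model_dir) / model_name
    if not path_to_model.exists():
        return None
    # A folder (or file) containing a .gguf file is served by llama.cpp ...
    if len(list(path_to_model.glob('*.gguf*'))) > 0:
        return 'llama.cpp'
    # ... and so is a model whose name ends in .gguf.
    if re.match(r'.*\.gguf', model_name.lower()):
        return 'llama.cpp'
    return 'Transformers'  # invented fallback; the real function checks more cases

# Prints 'llama.cpp' if models/llama-2-7b.Q4_K_M.gguf exists locally, else None.
print(guess_loader('models', 'llama-2-7b.Q4_K_M.gguf'))
```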