Mirror of https://github.com/oobabooga/text-generation-webui.git
Synced 2024-10-01 01:26:03 -04:00
Disable logits_all in llamacpp_HF (makes processing 3x faster)
This commit is contained in:

parent 5c3eb22ce6
commit af3d25a503
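For context, logits_all is a constructor argument of llama-cpp-python's Llama class: when enabled, logits are kept for every prompt position (which perplexity evaluation needs), while the default keeps only the last position's logits, which is all that ordinary sampling requires. A minimal sketch of the parameter, with a placeholder model path:

```python
# Hedged sketch of the parameter this commit stops hard-coding; the model path
# is a placeholder, and the comments describe llama-cpp-python's documented behavior.
from llama_cpp import Llama

# logits_all=True: keep logits for every prompt token (needed for perplexity
# evaluation, but prompt processing is slower and uses more memory).
# logits_all=False: keep only the last token's logits, enough for generation.
llm = Llama(model_path="models/example.gguf", logits_all=False)
```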
@@ -204,7 +204,7 @@ class LlamacppHF(PreTrainedModel):
             'rope_freq_base': RoPE.get_rope_freq_base(shared.args.alpha_value, shared.args.rope_freq_base),
             'tensor_split': tensor_split_list,
             'rope_freq_scale': 1.0 / shared.args.compress_pos_emb,
-            'logits_all': True,
+            'logits_all': shared.args.logits_all,
         }

         Llama = llama_cpp_lib().Llama
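The llamacpp_HF wrapper previously forced per-token logits on unconditionally; they are only required when computing perplexity over a prompt, since that needs the log-probability of every token rather than just the next-token distribution. A rough sketch of that use, assuming the usual (sequence, vocab) logits tensor; this helper is illustrative, not code from the repo:

```python
# Illustrative perplexity computation from per-token logits; requires torch.
# This is why logits_all must be on for perplexity evaluation to work.
import torch
import torch.nn.functional as F

def perplexity(logits: torch.Tensor, token_ids: torch.Tensor) -> float:
    # logits[i] predicts token_ids[i + 1], so drop the last position and the first target.
    log_probs = F.log_softmax(logits[:-1], dim=-1)
    token_log_probs = log_probs.gather(1, token_ids[1:].unsqueeze(1)).squeeze(1)
    return torch.exp(-token_log_probs.mean()).item()
```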
@@ -123,6 +123,7 @@ loaders_and_params = OrderedDict({
         'numa',
         'cfg_cache',
         'use_fast',
+        'logits_all',
         'llamacpp_HF_info',
     ],
     'ctransformers': [
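loaders_and_params maps each loader to the interface elements that should be shown when it is selected, so listing 'logits_all' here is what surfaces the new checkbox for the llamacpp_HF loader. A simplified sketch of that lookup pattern; only the names visible in the diff are taken from the repo, and the helper is hypothetical:

```python
# Hypothetical, simplified version of the loader -> visible-elements mapping.
from collections import OrderedDict

loaders_and_params = OrderedDict({
    'llamacpp_HF': [
        'numa',
        'cfg_cache',
        'use_fast',
        'logits_all',        # newly listed, so the checkbox appears for this loader
        'llamacpp_HF_info',
    ],
})

def visible_elements(loader: str) -> list[str]:
    # Elements not listed for a loader stay hidden in the model menu.
    return loaders_and_params.get(loader, [])

print(visible_elements('llamacpp_HF'))
```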
@@ -113,6 +113,7 @@ parser.add_argument('--n-gpu-layers', type=int, default=0, help='Number of layer
 parser.add_argument('--tensor_split', type=str, default=None, help='Split the model across multiple GPUs. Comma-separated list of proportions. Example: 18,17.')
 parser.add_argument('--llama_cpp_seed', type=int, default=0, help='Seed for llama-cpp models. Default is 0 (random).')
 parser.add_argument('--numa', action='store_true', help='Activate NUMA task allocation for llama.cpp.')
+parser.add_argument('--logits_all', action='store_true', help='Needs to be set for perplexity evaluation to work. Otherwise, ignore it, as it makes prompt processing slower.')
 parser.add_argument('--cache-capacity', type=str, help='Maximum cache capacity (llama-cpp-python). Examples: 2000MiB, 2GiB. When provided without units, bytes will be assumed.')

 # ExLlama
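Because the flag uses action='store_true', it defaults to False, so per-token logits are now off unless explicitly requested on the command line. A standalone sketch of that behavior (not the repo's own argument parser):

```python
# Standalone argparse sketch showing the new flag's default behavior.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--logits_all', action='store_true',
                    help='Needs to be set for perplexity evaluation to work. '
                         'Otherwise, ignore it, as it makes prompt processing slower.')

print(parser.parse_args([]).logits_all)                # False -> fast prompt processing (new default)
print(parser.parse_args(['--logits_all']).logits_all)  # True  -> keep per-token logits for perplexity
```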
@@ -87,6 +87,7 @@ def list_model_elements():
         'alpha_value',
         'rope_freq_base',
         'numa',
+        'logits_all',
     ]
     if is_torch_xpu_available():
         for i in range(torch.xpu.device_count()):
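list_model_elements() enumerates the interface elements whose values get read back into the settings when a model is loaded, so registering 'logits_all' here lets the checkbox state reach the loader. A hedged sketch of that read-back pattern; the helper name and shapes are illustrative, not the repo's actual plumbing:

```python
# Illustrative read-back of registered element names from current widget values.
def gather_interface_values(element_names: list[str], widget_values: dict) -> dict:
    # Any element not present in the UI simply comes back as None.
    return {name: widget_values.get(name) for name in element_names}

settings = gather_interface_values(['numa', 'logits_all'], {'numa': False, 'logits_all': True})
print(settings['logits_all'])  # True -> forwarded to the llamacpp_HF loader
```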
@@ -124,6 +124,7 @@ def create_ui():
             shared.gradio['llama_cpp_seed'] = gr.Number(label='Seed (0 for random)', value=shared.args.llama_cpp_seed)
             shared.gradio['trust_remote_code'] = gr.Checkbox(label="trust-remote-code", value=shared.args.trust_remote_code, info='To enable this option, start the web UI with the --trust-remote-code flag. It is necessary for some models.', interactive=shared.args.trust_remote_code)
             shared.gradio['use_fast'] = gr.Checkbox(label="use_fast", value=shared.args.use_fast, info='Set use_fast=True while loading the tokenizer. May trigger a conversion that takes several minutes.')
+            shared.gradio['logits_all'] = gr.Checkbox(label="logits_all", value=shared.args.logits_all, info='Needs to be set for perplexity evaluation to work. Otherwise, ignore it, as it makes prompt processing slower.')
             shared.gradio['use_flash_attention_2'] = gr.Checkbox(label="use_flash_attention_2", value=shared.args.use_flash_attention_2, info='Set use_flash_attention_2=True while loading the model.')
             shared.gradio['disable_exllama'] = gr.Checkbox(label="disable_exllama", value=shared.args.disable_exllama, info='Disable ExLlama kernel.')
             shared.gradio['no_flash_attn'] = gr.Checkbox(label="no_flash_attn", value=shared.args.no_flash_attn, info='Force flash-attention to not be used.')
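The checkbox itself is a plain gr.Checkbox whose value mirrors the command-line flag, so the UI and --logits_all stay in sync. A minimal standalone Gradio sketch of that widget (demo wiring only, not the repo's create_ui()):

```python
# Minimal, standalone Gradio sketch of the new checkbox.
import gradio as gr

with gr.Blocks() as demo:
    logits_all = gr.Checkbox(
        label="logits_all",
        value=False,  # mirrors the new default: off unless perplexity evaluation is needed
        info="Needs to be set for perplexity evaluation to work. "
             "Otherwise, ignore it, as it makes prompt processing slower.",
    )

# demo.launch()  # uncomment to render the checkbox locally
```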