mirror of https://github.com/oobabooga/text-generation-webui.git
synced 2024-10-01 01:26:03 -04:00

Enable NUMA feature for llama_cpp_python (#4040)

parent 87ea2d96fd
commit 7e6ff8d1f0

README.md: 13 lines changed (the remaining changed files appear in the hunks below)
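The whole change threads one boolean from the command line down to llama-cpp-python. As a rough sketch of the end state, assuming a llama-cpp-python build of that era whose `Llama` constructor accepts a `numa` keyword (which the params dicts below imply); the model path is a placeholder:

```python
import argparse

from llama_cpp import Llama  # pip install llama-cpp-python

parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, required=True, help='Path to a GGUF model file.')
parser.add_argument('--numa', action='store_true', help='Activate NUMA task allocation for llama.cpp')
args = parser.parse_args()

# llama-cpp-python forwards this flag to llama.cpp's backend initialization,
# which enables its NUMA-aware memory and thread handling.
llm = Llama(model_path=args.model, numa=args.numa)
```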
README.md

@@ -295,14 +295,15 @@ Optionally, you can use the following command-line flags:

 | Flag | Description |
 |---------------|---------------|
+| `--mul_mat_q` | Activate new mulmat kernels. |
+| `--tensor_split TENSOR_SPLIT` | Split the model across multiple GPUs, comma-separated list of proportions, e.g. 18,17 |
+| `--llama_cpp_seed SEED` | Seed for llama-cpp models. Default 0 (random). |
+| `--cache-capacity CACHE_CAPACITY` | Maximum cache capacity. Examples: 2000MiB, 2GiB. When provided without units, bytes will be assumed. |
+|`--cfg-cache` | llamacpp_HF: Create an additional cache for CFG negative prompts. |
 | `--no-mmap` | Prevent mmap from being used. |
 | `--mlock` | Force the system to keep the model in RAM. |
-| `--mul_mat_q` | Activate new mulmat kernels. |
-| `--cache-capacity CACHE_CAPACITY` | Maximum cache capacity. Examples: 2000MiB, 2GiB. When provided without units, bytes will be assumed. |
-| `--tensor_split TENSOR_SPLIT` | Split the model across multiple GPUs, comma-separated list of proportions, e.g. 18,17 |
-| `--llama_cpp_seed SEED` | Seed for llama-cpp models. Default 0 (random). |
-| `--cpu` | Use the CPU version of llama-cpp-python instead of the GPU-accelerated version. |
-|`--cfg-cache` | llamacpp_HF: Create an additional cache for CFG negative prompts. |
+| `--numa` | Activate NUMA task allocation for llama.cpp |
+| `--cpu` | Use the CPU version of llama-cpp-python instead of the GPU-accelerated version. |

 #### ctransformers

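One note on the table above: `--tensor_split` takes a plain comma-separated string, which the loader turns into the `tensor_split_list` that shows up in the hunks below. The exact helper is not part of this diff, so the conversion here is only an illustrative, hypothetical sketch:

```python
def parse_tensor_split(value):
    """Hypothetical helper: turn a string like '18,17' into per-GPU proportions."""
    if not value:
        return None
    return [float(x) for x in value.split(',')]


print(parse_tensor_split('18,17'))  # [18.0, 17.0]
```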
modules/llamacpp_hf.py (class LlamacppHF)

@@ -172,6 +172,7 @@ class LlamacppHF(PreTrainedModel):
     @classmethod
     def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, **kwargs):
         assert len(model_args) == 0 and len(kwargs) == 0, "extra args is currently not supported"
+
         if isinstance(pretrained_model_name_or_path, str):
             pretrained_model_name_or_path = Path(pretrained_model_name_or_path)

@@ -198,6 +199,7 @@ class LlamacppHF(PreTrainedModel):
             'use_mlock': shared.args.mlock,
             'mul_mat_q': shared.args.mul_mat_q,
             'low_vram': shared.args.low_vram,
+            'numa': shared.args.numa,
             'n_gpu_layers': shared.args.n_gpu_layers,
             'rope_freq_base': RoPE.get_rope_freq_base(shared.args.alpha_value, shared.args.rope_freq_base),
             'tensor_split': tensor_split_list,
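This loader and the LlamaCppModel loader below both collect the llama.cpp options from `shared.args` into one keyword dict and hand it to llama-cpp-python in a single call. The sketch below is a simplified illustration of that pattern with the newly added key, not the webui's actual code; the key names follow the hunks and the flags documented above, and `Llama(**params)` is assumed to be how the model ends up instantiated:

```python
from llama_cpp import Llama


def load_llamacpp(model_path, args):
    """Collect llama.cpp options from parsed CLI args and load the model."""
    params = {
        'model_path': str(model_path),
        'use_mmap': not args.no_mmap,       # --no-mmap disables mmap
        'use_mlock': args.mlock,            # --mlock pins the model in RAM
        'mul_mat_q': args.mul_mat_q,        # new mulmat kernels
        'numa': args.numa,                  # the option added by this commit
        'n_gpu_layers': args.n_gpu_layers,  # GPU offload
        'seed': args.llama_cpp_seed,
    }
    return Llama(**params)
```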
modules/llamacpp_model.py (class LlamaCppModel)

@@ -81,6 +81,7 @@ class LlamaCppModel:
             'use_mlock': shared.args.mlock,
             'mul_mat_q': shared.args.mul_mat_q,
             'low_vram': shared.args.low_vram,
+            'numa': shared.args.numa,
             'n_gpu_layers': shared.args.n_gpu_layers,
             'rope_freq_base': RoPE.get_rope_freq_base(shared.args.alpha_value, shared.args.rope_freq_base),
             'tensor_split': tensor_split_list,
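For context on what the new key does: at the time of this commit, `numa=True` mapped to llama.cpp's NUMA-aware backend initialization, which tries to keep worker threads and their memory on the same NUMA node. A hedged sketch of the equivalent low-level call, assuming the llama-cpp-python bindings of that era exposed `llama_backend_init(numa)` (the upstream signature has since changed):

```python
import llama_cpp

# Rough equivalent of constructing Llama(..., numa=True): initialize the
# llama.cpp backend with NUMA optimizations. This has to happen once,
# before the first model is loaded, to take effect.
llama_cpp.llama_backend_init(True)
```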
modules/loaders.py (loaders_and_params)

@@ -100,6 +100,7 @@ loaders_and_params = OrderedDict({
         'rope_freq_base',
         'compress_pos_emb',
         'cpu',
+        'numa',
     ],
     'llamacpp_HF': [
         'n_ctx',

@@ -115,6 +116,7 @@ loaders_and_params = OrderedDict({
         'rope_freq_base',
         'compress_pos_emb',
         'cpu',
+        'numa',
         'cfg_cache',
         'use_fast',
         'llamacpp_HF_info',
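`loaders_and_params` is what ties each loader to the UI elements it exposes; adding `'numa'` to both the llama.cpp and llamacpp_HF lists is what makes the new checkbox show up for exactly those loaders. A stripped-down illustration of that gating idea (not the webui's real dict, which lists many more elements per loader):

```python
from collections import OrderedDict

loaders_and_params = OrderedDict({
    'llama.cpp': ['n_ctx', 'n_gpu_layers', 'no_mmap', 'mlock', 'numa', 'cpu'],
    'llamacpp_HF': ['n_ctx', 'n_gpu_layers', 'no_mmap', 'mlock', 'numa', 'cfg_cache'],
    'Transformers': ['load_in_8bit', 'load_in_4bit', 'cpu'],
})


def visible_elements(loader):
    """Which model-menu controls should be visible for the selected loader."""
    return loaders_and_params.get(loader, [])


print(visible_elements('llama.cpp'))  # includes 'numa'
```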
modules/shared.py (command-line flags)

@@ -125,6 +125,7 @@ parser.add_argument('--n-gpu-layers', type=int, default=0, help='Number of layer
 parser.add_argument('--tensor_split', type=str, default=None, help="Split the model across multiple GPUs, comma-separated list of proportions, e.g. 18,17")
 parser.add_argument('--n_ctx', type=int, default=2048, help='Size of the prompt context.')
 parser.add_argument('--llama_cpp_seed', type=int, default=0, help='Seed for llama-cpp models. Default 0 (random)')
+parser.add_argument('--numa', action='store_true', help='Activate NUMA task allocation for llama.cpp')

 # GPTQ
 parser.add_argument('--wbits', type=int, default=0, help='Load a pre-quantized model with specified precision in bits. 2, 3, 4 and 8 are supported.')
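Since the new flag is a plain `store_true` switch, it defaults to off and takes no value; passing the bare `--numa` is all that is needed. A quick check of that parsing behaviour:

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--numa', action='store_true', help='Activate NUMA task allocation for llama.cpp')

print(parser.parse_args([]).numa)          # False: NUMA stays off by default
print(parser.parse_args(['--numa']).numa)  # True: the bare flag enables it
```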
modules/ui.py (list_model_elements)

@@ -82,7 +82,8 @@ def list_model_elements():
         'max_seq_len',
         'compress_pos_emb',
         'alpha_value',
-        'rope_freq_base'
+        'rope_freq_base',
+        'numa',
     ]

     for i in range(torch.cuda.device_count()):
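`list_model_elements()` returns the names of every model-menu widget, and those names double as attribute names on `shared.args`, which is how values move between the UI and the command-line settings; `'numa'` has to be listed here for the checkbox state to round-trip. An illustrative (not actual) sketch of that name-based shuttling:

```python
from types import SimpleNamespace

# Illustrative stand-in for shared.args after parsing "--numa --n-gpu-layers 35".
args = SimpleNamespace(numa=True, n_gpu_layers=35, mlock=False)


def gather_interface_values(element_names, args):
    """Read the current value of each listed element from the args namespace."""
    return {name: getattr(args, name, None) for name in element_names}


print(gather_interface_values(['numa', 'n_gpu_layers', 'mlock'], args))
```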
modules/ui_model_menu.py (create_ui)

@@ -102,6 +102,12 @@ def create_ui():
                     shared.gradio['no_inject_fused_mlp'] = gr.Checkbox(label="no_inject_fused_mlp", value=shared.args.no_inject_fused_mlp, info='Affects Triton only. Disable fused MLP. Fused MLP improves performance but uses more VRAM. Disable if running low on VRAM.')
                     shared.gradio['no_use_cuda_fp16'] = gr.Checkbox(label="no_use_cuda_fp16", value=shared.args.no_use_cuda_fp16, info='This can make models faster on some systems.')
                     shared.gradio['desc_act'] = gr.Checkbox(label="desc_act", value=shared.args.desc_act, info='\'desc_act\', \'wbits\', and \'groupsize\' are used for old models without a quantize_config.json.')
+                    shared.gradio['mul_mat_q'] = gr.Checkbox(label="mul_mat_q", value=shared.args.mul_mat_q, info='Recommended in most cases. Improves generation speed by 10-20%.')
+                    shared.gradio['cfg_cache'] = gr.Checkbox(label="cfg-cache", value=shared.args.cfg_cache, info='Create an additional cache for CFG negative prompts.')
+                    shared.gradio['no_mmap'] = gr.Checkbox(label="no-mmap", value=shared.args.no_mmap)
+                    shared.gradio['mlock'] = gr.Checkbox(label="mlock", value=shared.args.mlock)
+                    shared.gradio['numa'] = gr.Checkbox(label="numa", value=shared.args.numa, info='NUMA support can help on some systems with non-uniform memory access.')
+                    shared.gradio['low_vram'] = gr.Checkbox(label="low-vram", value=shared.args.low_vram)
                     shared.gradio['cpu'] = gr.Checkbox(label="cpu", value=shared.args.cpu)
                     shared.gradio['load_in_8bit'] = gr.Checkbox(label="load-in-8bit", value=shared.args.load_in_8bit)
                     shared.gradio['bf16'] = gr.Checkbox(label="bf16", value=shared.args.bf16)

@@ -109,11 +115,6 @@ def create_ui():
                     shared.gradio['disk'] = gr.Checkbox(label="disk", value=shared.args.disk)
                     shared.gradio['load_in_4bit'] = gr.Checkbox(label="load-in-4bit", value=shared.args.load_in_4bit)
                     shared.gradio['use_double_quant'] = gr.Checkbox(label="use_double_quant", value=shared.args.use_double_quant)
-                    shared.gradio['no_mmap'] = gr.Checkbox(label="no-mmap", value=shared.args.no_mmap)
-                    shared.gradio['low_vram'] = gr.Checkbox(label="low-vram", value=shared.args.low_vram)
-                    shared.gradio['mlock'] = gr.Checkbox(label="mlock", value=shared.args.mlock)
-                    shared.gradio['mul_mat_q'] = gr.Checkbox(label="mul_mat_q", value=shared.args.mul_mat_q, info='Recommended in most cases. Improves generation speed by 10-20%.')
-                    shared.gradio['cfg_cache'] = gr.Checkbox(label="cfg-cache", value=shared.args.cfg_cache, info='Create an additional cache for CFG negative prompts.')
                     shared.gradio['tensor_split'] = gr.Textbox(label='tensor_split', info='Split the model across multiple GPUs, comma-separated list of proportions, e.g. 18,17')
                     shared.gradio['llama_cpp_seed'] = gr.Number(label='Seed (0 for random)', value=shared.args.llama_cpp_seed)
                     shared.gradio['trust_remote_code'] = gr.Checkbox(label="trust-remote-code", value=shared.args.trust_remote_code, info='Make sure to inspect the .py files inside the model folder before loading it with this option enabled.')
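The checkbox's info text says NUMA "can help on some systems with non-uniform memory access"; whether a given Linux box is one of them can be checked from sysfs before bothering with the flag. A small helper, assuming the standard Linux sysfs NUMA layout (Linux-only):

```python
import glob
import platform


def numa_node_count():
    """Count NUMA nodes on Linux via sysfs; returns 0 if the layout is unavailable."""
    if platform.system() != 'Linux':
        return 0
    return len(glob.glob('/sys/devices/system/node/node[0-9]*'))


nodes = numa_node_count()
if nodes > 1:
    print(f'{nodes} NUMA nodes detected: --numa may improve llama.cpp throughput.')
else:
    print('No multi-node NUMA topology detected: --numa is unlikely to matter here.')
```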