diff --git a/README.md b/README.md index 333df158..d9502367 100644 --- a/README.md +++ b/README.md @@ -267,6 +267,12 @@ Optionally, you can use the following command-line flags: | `--no_inject_fused_mlp` | Triton mode only: disable the use of fused MLP, which will use less VRAM at the cost of slower inference. | | `--desc_act` | For models that don't have a quantize_config.json, this parameter is used to define whether to set desc_act or not in BaseQuantizeConfig. | +#### ExLlama + +| Flag | Description | +|------------------|-------------| +|`--gpu-split` | Comma-separated list of VRAM (in GB) to use per GPU device for model layers, e.g. `20,7,7` | + #### GPTQ-for-LLaMa | Flag | Description | diff --git a/modules/exllama.py b/modules/exllama.py index 11deb9b0..76dccd55 100644 --- a/modules/exllama.py +++ b/modules/exllama.py @@ -1,9 +1,10 @@ import sys from pathlib import Path -sys.path.insert(0, str(Path("repositories/exllama"))) - +from modules import shared from modules.logging_colors import logger + +sys.path.insert(0, str(Path("repositories/exllama"))) from repositories.exllama.generator import ExLlamaGenerator from repositories.exllama.model import ExLlama, ExLlamaCache, ExLlamaConfig from repositories.exllama.tokenizer import ExLlamaTokenizer @@ -33,6 +34,10 @@ class ExllamaModel: config = ExLlamaConfig(str(model_config_path)) config.model_path = str(model_path) + if shared.args.gpu_split: + config.set_auto_map(shared.args.gpu_split) + config.gpu_peer_fix = True + model = ExLlama(config) tokenizer = ExLlamaTokenizer(str(tokenizer_model_path)) cache = ExLlamaCache(model) diff --git a/modules/loaders.py b/modules/loaders.py index 87fac259..ac6f80bd 100644 --- a/modules/loaders.py +++ b/modules/loaders.py @@ -53,6 +53,7 @@ loaders_and_params = { 'transformers_info' ], 'ExLlama' : [ + 'gpu_split', 'exllama_info', ] } diff --git a/modules/shared.py b/modules/shared.py index c7dba9cb..1e9b3de8 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -148,6 
+148,9 @@ parser.add_argument('--no_inject_fused_attention', action='store_true', help='Do parser.add_argument('--no_inject_fused_mlp', action='store_true', help='Triton mode only: Do not use fused MLP (lowers VRAM requirements).') parser.add_argument('--desc_act', action='store_true', help='For models that don\'t have a quantize_config.json, this parameter is used to define whether to set desc_act or not in BaseQuantizeConfig.') +# ExLlama +parser.add_argument('--gpu-split', type=str, help="Comma-separated list of VRAM (in GB) to use per GPU device for model layers, e.g. 20,7,7") + # FlexGen parser.add_argument('--flexgen', action='store_true', help='DEPRECATED') parser.add_argument('--percent', type=int, nargs="+", default=[0, 100, 100, 0, 100, 0], help='FlexGen: allocation percentages. Must be 6 numbers separated by spaces (default: 0, 100, 100, 0, 100, 0).') diff --git a/modules/ui.py b/modules/ui.py index b6d62c9b..e10cf158 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -30,7 +30,7 @@ theme = gr.themes.Default( def list_model_elements(): - elements = ['loader', 'cpu_memory', 'auto_devices', 'disk', 'cpu', 'bf16', 'load_in_8bit', 'trust_remote_code', 'load_in_4bit', 'compute_dtype', 'quant_type', 'use_double_quant', 'wbits', 'groupsize', 'model_type', 'pre_layer', 'triton', 'desc_act', 'no_inject_fused_attention', 'no_inject_fused_mlp', 'threads', 'n_batch', 'no_mmap', 'mlock', 'n_gpu_layers', 'n_ctx', 'llama_cpp_seed'] + elements = ['loader', 'cpu_memory', 'auto_devices', 'disk', 'cpu', 'bf16', 'load_in_8bit', 'trust_remote_code', 'load_in_4bit', 'compute_dtype', 'quant_type', 'use_double_quant', 'wbits', 'groupsize', 'model_type', 'pre_layer', 'triton', 'desc_act', 'no_inject_fused_attention', 'no_inject_fused_mlp', 'threads', 'n_batch', 'no_mmap', 'mlock', 'n_gpu_layers', 'n_ctx', 'llama_cpp_seed', 'gpu_split'] for i in range(torch.cuda.device_count()): elements.append(f'gpu_memory_{i}') diff --git a/server.py b/server.py index 6ffc10c5..9335adda 100644 
--- a/server.py +++ b/server.py @@ -216,7 +216,7 @@ def create_model_menus(): shared.gradio['model_type'] = gr.Dropdown(label="model_type", choices=["None", "llama", "opt", "gptj"], value=shared.args.model_type or "None") shared.gradio['pre_layer'] = gr.Slider(label="pre_layer", minimum=0, maximum=100, value=shared.args.pre_layer[0] if shared.args.pre_layer is not None else 0) shared.gradio['autogptq_info'] = gr.Markdown('On some systems, AutoGPTQ can be 2x slower than GPTQ-for-LLaMa. You can manually select the GPTQ-for-LLaMa loader above.') - shared.gradio['exllama_info'] = gr.Markdown('ExLlama has to be installed manually. See the instructions here: [instructions](https://github.com/oobabooga/text-generation-webui/blob/main/docs/ExLlama.md).') + shared.gradio['gpu_split'] = gr.Textbox(label='gpu-split', info='Comma-separated list of VRAM (in GB) to use per GPU. Example: 20,7,7') with gr.Column(): shared.gradio['triton'] = gr.Checkbox(label="triton", value=shared.args.triton) @@ -235,6 +235,7 @@ def create_model_menus(): shared.gradio['llama_cpp_seed'] = gr.Number(label='Seed (0 for random)', value=shared.args.llama_cpp_seed) shared.gradio['trust_remote_code'] = gr.Checkbox(label="trust-remote-code", value=shared.args.trust_remote_code, info='Make sure to inspect the .py files inside the model folder before loading it with this option enabled.') shared.gradio['gptq_for_llama_info'] = gr.Markdown('GPTQ-for-LLaMa is currently 2x faster than AutoGPTQ on some systems. It is installed by default with the one-click installers. Otherwise, it has to be installed manually following the instructions here: [instructions](https://github.com/oobabooga/text-generation-webui/blob/main/docs/GPTQ-models-(4-bit-mode).md#installation-1).') + shared.gradio['exllama_info'] = gr.Markdown('ExLlama has to be installed manually. See the instructions here: [instructions](https://github.com/oobabooga/text-generation-webui/blob/main/docs/ExLlama.md).') with gr.Column(): with gr.Row():