From 104573f7d49085b4916c87bbcd5a2c8a040c1747 Mon Sep 17 00:00:00 2001
From: Bartowski
Date: Thu, 7 Mar 2024 11:08:21 -0500
Subject: [PATCH] Update cache_4bit documentation (#5649)

---------

Co-authored-by: oobabooga <112222186+oobabooga@users.noreply.github.com>
---
 README.md                | 2 +-
 docs/04 - Model Tab.md   | 1 +
 modules/exllamav2_hf.py  | 2 ++
 modules/shared.py        | 2 +-
 modules/ui_model_menu.py | 2 +-
 5 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/README.md b/README.md
index f34e9ca4..bb5d2810 100644
--- a/README.md
+++ b/README.md
@@ -279,7 +279,7 @@ List of command-line flags
 |`--cfg-cache` | ExLlamav2_HF: Create an additional cache for CFG negative prompts. Necessary to use CFG with that loader. |
 |`--no_flash_attn` | Force flash-attention to not be used. |
 |`--cache_8bit` | Use 8-bit cache to save VRAM. |
-|`--cache_4bit` | Use 4-bit cache to save VRAM. |
+|`--cache_4bit` | Use Q4 cache to save VRAM. |
 |`--num_experts_per_token NUM_EXPERTS_PER_TOKEN` | Number of experts to use for generation. Applies to MoE models like Mixtral. |
 
 #### AutoGPTQ
diff --git a/docs/04 - Model Tab.md b/docs/04 - Model Tab.md
index 762f85e8..3766c96c 100644
--- a/docs/04 - Model Tab.md
+++ b/docs/04 - Model Tab.md
@@ -46,6 +46,7 @@ Examples:
 * **cfg-cache**: Creates a second cache to hold the CFG negative prompts. You need to set this if and only if you intend to use CFG in the "Parameters" > "Generation" tab. Checking this parameter doubles the cache VRAM usage.
 * **no_flash_attn**: Disables flash attention. Otherwise, it is automatically used as long as the library is installed.
 * **cache_8bit**: Create a 8-bit precision cache instead of a 16-bit one. This saves VRAM but increases perplexity (I don't know by how much).
+* **cache_4bit**: Creates a Q4 cache using grouped quantization.
 
 ### ExLlamav2
 
diff --git a/modules/exllamav2_hf.py b/modules/exllamav2_hf.py
index 55cdc9f4..9ab9cdc7 100644
--- a/modules/exllamav2_hf.py
+++ b/modules/exllamav2_hf.py
@@ -63,6 +63,8 @@ class Exllamav2HF(PreTrainedModel):
         if shared.args.cfg_cache:
             if shared.args.cache_8bit:
                 self.ex_cache_negative = ExLlamaV2Cache_8bit(self.ex_model)
+            elif shared.args.cache_4bit:
+                self.ex_cache_negative = ExLlamaV2Cache_Q4(self.ex_model)
             else:
                 self.ex_cache_negative = ExLlamaV2Cache(self.ex_model)
 
diff --git a/modules/shared.py b/modules/shared.py
index 0a0c678b..10a70001 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -139,7 +139,7 @@ group.add_argument('--max_seq_len', type=int, default=2048, help='Maximum sequen
 group.add_argument('--cfg-cache', action='store_true', help='ExLlamav2_HF: Create an additional cache for CFG negative prompts. Necessary to use CFG with that loader.')
 group.add_argument('--no_flash_attn', action='store_true', help='Force flash-attention to not be used.')
 group.add_argument('--cache_8bit', action='store_true', help='Use 8-bit cache to save VRAM.')
-group.add_argument('--cache_4bit', action='store_true', help='Use 4-bit cache to save VRAM.')
+group.add_argument('--cache_4bit', action='store_true', help='Use Q4 cache to save VRAM.')
 group.add_argument('--num_experts_per_token', type=int, default=2, help='Number of experts to use for generation. Applies to MoE models like Mixtral.')
 
 # AutoGPTQ
diff --git a/modules/ui_model_menu.py b/modules/ui_model_menu.py
index 227ddc42..c29db7d0 100644
--- a/modules/ui_model_menu.py
+++ b/modules/ui_model_menu.py
@@ -132,7 +132,7 @@ def create_ui():
                         shared.gradio['disk'] = gr.Checkbox(label="disk", value=shared.args.disk)
                         shared.gradio['bf16'] = gr.Checkbox(label="bf16", value=shared.args.bf16)
                         shared.gradio['cache_8bit'] = gr.Checkbox(label="cache_8bit", value=shared.args.cache_8bit, info='Use 8-bit cache to save VRAM.')
-                        shared.gradio['cache_4bit'] = gr.Checkbox(label="cache_4bit", value=shared.args.cache_4bit, info='Use 4-bit cache to save VRAM.')
+                        shared.gradio['cache_4bit'] = gr.Checkbox(label="cache_4bit", value=shared.args.cache_4bit, info='Use Q4 cache to save VRAM.')
                         shared.gradio['autosplit'] = gr.Checkbox(label="autosplit", value=shared.args.autosplit, info='Automatically split the model tensors across the available GPUs.')
                         shared.gradio['no_flash_attn'] = gr.Checkbox(label="no_flash_attn", value=shared.args.no_flash_attn, info='Force flash-attention to not be used.')
                         shared.gradio['cfg_cache'] = gr.Checkbox(label="cfg-cache", value=shared.args.cfg_cache, info='Necessary to use CFG with this loader.')
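
For context, a minimal sketch of the cache selection that the exllamav2_hf.py hunk extends, assuming an exllamav2 version that exports the ExLlamaV2Cache, ExLlamaV2Cache_8bit, and ExLlamaV2Cache_Q4 classes referenced in the diff; the make_cache helper and its signature are illustrative only and not part of the repository.

# Minimal sketch, not part of the patch: choose the KV-cache class from the
# --cache_8bit / --cache_4bit flags, mirroring the branch added above.
# make_cache is a hypothetical helper; the cache classes are those used in the diff.
from exllamav2 import ExLlamaV2Cache, ExLlamaV2Cache_8bit, ExLlamaV2Cache_Q4


def make_cache(ex_model, cache_8bit=False, cache_4bit=False):
    """Return a KV cache for ex_model: FP16 by default, 8-bit or Q4 on request."""
    if cache_8bit:
        return ExLlamaV2Cache_8bit(ex_model)  # 8-bit cache, saves VRAM vs. FP16
    elif cache_4bit:
        return ExLlamaV2Cache_Q4(ex_model)    # Q4 cache with grouped quantization
    return ExLlamaV2Cache(ex_model)           # default FP16 cache

As in the patch, cache_8bit takes precedence when both flags are set.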