From 4766a57352a97d5f47cabca3cfd769ee2c552b23 Mon Sep 17 00:00:00 2001
From: feng lui <3090641@qq.com>
Date: Sun, 5 Nov 2023 00:59:33 +0800
Subject: [PATCH] transformers: add use_flash_attention_2 option (#4373)

---
 README.md                | 1 +
 modules/loaders.py       | 2 +-
 modules/models.py        | 4 ++++
 modules/shared.py        | 1 +
 modules/ui.py            | 1 +
 modules/ui_model_menu.py | 1 +
 6 files changed, 9 insertions(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 17c9add7..79d86a15 100644
--- a/README.md
+++ b/README.md
@@ -300,6 +300,7 @@ Optionally, you can use the following command-line flags:
 | `--sdp-attention` | Use PyTorch 2.0's SDP attention. Same as above. |
 | `--trust-remote-code` | Set `trust_remote_code=True` while loading the model. Necessary for some models. |
 | `--use_fast` | Set `use_fast=True` while loading the tokenizer. |
+| `--use_flash_attention_2` | Set `use_flash_attention_2=True` while loading the model. |
 
 #### Accelerate 4-bit
 
diff --git a/modules/loaders.py b/modules/loaders.py
index c9accc34..cf2305c7 100644
--- a/modules/loaders.py
+++ b/modules/loaders.py
@@ -9,7 +9,6 @@ loaders_and_params = OrderedDict({
     'Transformers': [
         'cpu_memory',
         'gpu_memory',
-        'trust_remote_code',
         'load_in_8bit',
         'bf16',
         'cpu',
@@ -21,6 +20,7 @@ loaders_and_params = OrderedDict({
         'compute_dtype',
         'trust_remote_code',
         'use_fast',
+        'use_flash_attention_2',
         'alpha_value',
         'rope_freq_base',
         'compress_pos_emb',
diff --git a/modules/models.py b/modules/models.py
index 67f3e94c..e9005fee 100644
--- a/modules/models.py
+++ b/modules/models.py
@@ -126,6 +126,10 @@ def huggingface_loader(model_name):
         'torch_dtype': torch.bfloat16 if shared.args.bf16 else torch.float16,
         'use_safetensors': True if shared.args.force_safetensors else None
     }
+
+    if shared.args.use_flash_attention_2:
+        params['use_flash_attention_2'] = True
+
     config = AutoConfig.from_pretrained(path_to_model, trust_remote_code=params['trust_remote_code'])
 
     if 'chatglm' in model_name.lower():
diff --git a/modules/shared.py b/modules/shared.py
index a6082ea8..1dd6841d 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -93,6 +93,7 @@ parser.add_argument('--sdp-attention', action='store_true', help='Use PyTorch 2.
 parser.add_argument('--trust-remote-code', action='store_true', help='Set trust_remote_code=True while loading the model. Necessary for some models.')
 parser.add_argument('--force-safetensors', action='store_true', help='Set use_safetensors=True while loading the model. This prevents arbitrary code execution.')
 parser.add_argument('--use_fast', action='store_true', help='Set use_fast=True while loading the tokenizer.')
+parser.add_argument('--use_flash_attention_2', action='store_true', help='Set use_flash_attention_2=True while loading the model.')
 
 # Accelerate 4-bit
 parser.add_argument('--load-in-4bit', action='store_true', help='Load the model with 4-bit precision (using bitsandbytes).')
diff --git a/modules/ui.py b/modules/ui.py
index 466af187..7c241e67 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -53,6 +53,7 @@ def list_model_elements():
         'load_in_8bit',
         'trust_remote_code',
         'use_fast',
+        'use_flash_attention_2',
         'load_in_4bit',
         'compute_dtype',
         'quant_type',
diff --git a/modules/ui_model_menu.py b/modules/ui_model_menu.py
index bb236021..0d82ee8f 100644
--- a/modules/ui_model_menu.py
+++ b/modules/ui_model_menu.py
@@ -124,6 +124,7 @@ def create_ui():
                         shared.gradio['llama_cpp_seed'] = gr.Number(label='Seed (0 for random)', value=shared.args.llama_cpp_seed)
                         shared.gradio['trust_remote_code'] = gr.Checkbox(label="trust-remote-code", value=shared.args.trust_remote_code, info='To enable this option, start the web UI with the --trust-remote-code flag. It is necessary for some models.', interactive=shared.args.trust_remote_code)
                         shared.gradio['use_fast'] = gr.Checkbox(label="use_fast", value=shared.args.use_fast, info='Set use_fast=True while loading the tokenizer. May trigger a conversion that takes several minutes.')
+                        shared.gradio['use_flash_attention_2'] = gr.Checkbox(label="use_flash_attention_2", value=shared.args.use_flash_attention_2, info='Set use_flash_attention_2=True while loading the model.')
                         shared.gradio['disable_exllama'] = gr.Checkbox(label="disable_exllama", value=shared.args.disable_exllama, info='Disable ExLlama kernel.')
                         shared.gradio['no_flash_attn'] = gr.Checkbox(label="no_flash_attn", value=shared.args.no_flash_attn, info='Force flash-attention to not be used.')
                         shared.gradio['cache_8bit'] = gr.Checkbox(label="cache_8bit", value=shared.args.cache_8bit, info='Use 8-bit cache to save VRAM.')
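Reviewer note (not part of the patch): `huggingface_loader` passes `params` straight through to transformers' `from_pretrained`, so the new flag amounts to the minimal sketch below. The model name is a placeholder, and transformers >= 4.34 with the `flash-attn` package and a CUDA GPU are assumed; FlashAttention-2 also requires fp16 or bf16 weights, which matches the `torch_dtype` the loader already sets.

```python
# Minimal sketch of what --use_flash_attention_2 forwards to transformers.
# Assumptions (not from the patch): transformers >= 4.34, flash-attn 2
# installed, CUDA GPU, accelerate for device_map; the model is a placeholder.
import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-2-7b-hf",   # placeholder FA2-capable model
    torch_dtype=torch.float16,    # FlashAttention-2 requires fp16/bf16
    use_flash_attention_2=True,   # the kwarg this patch adds to `params`
    device_map="auto",            # place weights on the GPU
)
```

In the web UI itself, the equivalent is simply starting the server with the new flag, e.g. `python server.py --use_flash_attention_2`, or ticking the matching checkbox in the model menu.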