From 8b44d7b12a33ab5830fc7fec4c42c35b8f61a400 Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Thu, 4 Jul 2024 20:16:44 -0700
Subject: [PATCH] Lint

---
 modules/models.py        | 2 +-
 modules/ui_model_menu.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/modules/models.py b/modules/models.py
index 5db067dc..07c14308 100644
--- a/modules/models.py
+++ b/modules/models.py
@@ -147,7 +147,7 @@ def huggingface_loader(model_name):
         params['force_safetensors'] = True
 
     if shared.args.use_eager_attention:
-        params['attn_implementation'] = 'eager' 
+        params['attn_implementation'] = 'eager'
 
     config = AutoConfig.from_pretrained(path_to_model, trust_remote_code=shared.args.trust_remote_code)
 
diff --git a/modules/ui_model_menu.py b/modules/ui_model_menu.py
index df53c859..9a4e7351 100644
--- a/modules/ui_model_menu.py
+++ b/modules/ui_model_menu.py
@@ -115,7 +115,7 @@ def create_ui():
                 shared.gradio['load_in_4bit'] = gr.Checkbox(label="load-in-4bit", value=shared.args.load_in_4bit)
                 shared.gradio['use_double_quant'] = gr.Checkbox(label="use_double_quant", value=shared.args.use_double_quant)
                 shared.gradio['use_flash_attention_2'] = gr.Checkbox(label="use_flash_attention_2", value=shared.args.use_flash_attention_2, info='Set use_flash_attention_2=True while loading the model.')
-                shared.gradio['use_eager_attention'] = gr.Checkbox(label="use_eager_attention", value=shared.args.use_eager_attention, info='Set attn_implementation= eager while loading the model.') 
+                shared.gradio['use_eager_attention'] = gr.Checkbox(label="use_eager_attention", value=shared.args.use_eager_attention, info='Set attn_implementation= eager while loading the model.')
                 shared.gradio['flash_attn'] = gr.Checkbox(label="flash_attn", value=shared.args.flash_attn, info='Use flash-attention.')
                 shared.gradio['auto_devices'] = gr.Checkbox(label="auto-devices", value=shared.args.auto_devices)
                 shared.gradio['tensorcores'] = gr.Checkbox(label="tensorcores", value=shared.args.tensorcores, info='NVIDIA only: use llama-cpp-python compiled with tensor cores support. This increases performance on RTX cards.')
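
For reference, the line touched in models.py feeds the attn_implementation argument of Hugging Face transformers. The following is a minimal sketch, not taken from the repository, of how a params dict built this way is typically passed through to from_pretrained; the model name and the standalone use_eager_attention variable are placeholder assumptions standing in for the webui's shared.args plumbing.

# Minimal illustrative sketch, not part of the patch.
from transformers import AutoModelForCausalLM

path_to_model = "facebook/opt-125m"   # placeholder; any causal LM repo id works
use_eager_attention = True            # stands in for shared.args.use_eager_attention

params = {}
if use_eager_attention:
    # Select the plain attention code path instead of SDPA or flash-attention.
    params['attn_implementation'] = 'eager'

model = AutoModelForCausalLM.from_pretrained(path_to_model, **params)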