From f6421355172e49f0ca05721f6d172afe06c38db2 Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Tue, 25 Apr 2023 23:18:11 -0300
Subject: [PATCH] Make universal tokenizer, xformers, sdp-attention apply to
 monkey patch

---
 modules/models.py                 |  3 +--
 modules/monkey_patch_gptq_lora.py | 10 +---------
 2 files changed, 2 insertions(+), 11 deletions(-)

diff --git a/modules/models.py b/modules/models.py
index 4ebb6597..d8131eda 100644
--- a/modules/models.py
+++ b/modules/models.py
@@ -149,8 +149,7 @@ def load_model(model_name):
             print("Warning: applying the monkey patch for using LoRAs in 4-bit mode.\nIt may cause undefined behavior outside its intended scope.")
             from modules.monkey_patch_gptq_lora import load_model_llama
 
-            model, tokenizer = load_model_llama(model_name)
-            return model, tokenizer
+            model, _ = load_model_llama(model_name)
 
         # No monkey patch
         else:
diff --git a/modules/monkey_patch_gptq_lora.py b/modules/monkey_patch_gptq_lora.py
index 872f7ce3..45318df6 100644
--- a/modules/monkey_patch_gptq_lora.py
+++ b/modules/monkey_patch_gptq_lora.py
@@ -18,17 +18,16 @@ from modules.GPTQ_loader import find_quantized_model_file
 replace_peft_model_with_gptq_lora_model()
 
 
 def load_model_llama(model_name):
-
     config_path = str(Path(f'{shared.args.model_dir}/{model_name}'))
     model_path = str(find_quantized_model_file(model_name))
     model, tokenizer = load_llama_model_4bit_low_ram(config_path, model_path, groupsize=shared.args.groupsize, is_v1_model=False)
-
     for n, m in model.named_modules():
         if isinstance(m, Autograd4bitQuantLinear) or isinstance(m, Linear4bitLt):
             if m.is_v1_model:
                 m.zeros = m.zeros.half()
             m.scales = m.scales.half()
             m.bias = m.bias.half()
+
     autograd_4bit.use_new = True
     autograd_4bit.auto_switch = True
 
@@ -36,11 +35,4 @@ def load_model_llama(model_name):
     wrapper = AMPWrapper(model)
     wrapper.apply_generate()
 
-    try:
-        tokenizer.eos_token_id = 2
-        tokenizer.bos_token_id = 1
-        tokenizer.pad_token_id = 0
-    except:
-        pass
-
     return model, tokenizer
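
For context on what the models.py hunk accomplishes: by no longer returning early, the monkey-patch branch lets load_model() continue into the shared tokenizer and attention-patch logic further down in modules/models.py, which is why the hard-coded token ids in the second hunk can be dropped. Below is a minimal illustrative sketch of that shared path; load_tokenizer_generic is a hypothetical name for illustration only and is not a function in the repository.

from pathlib import Path

from transformers import AutoTokenizer


def load_tokenizer_generic(model_name, model_dir="models"):
    # Hypothetical helper (not in the repo) showing the "universal tokenizer"
    # path the monkey-patch branch now falls through to, instead of returning
    # its own tokenizer with hard-coded eos/bos/pad ids.
    path = Path(f"{model_dir}/{model_name}")
    return AutoTokenizer.from_pretrained(path)

With this flow, special-token ids come from the model's own tokenizer files, and the xformers / sdp-attention hijacks applied later in load_model() also take effect for monkey-patched 4-bit LoRA models.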