Make universal tokenizer, xformers, sdp-attention apply to monkey patch

oobabooga 2023-04-25 23:18:11 -03:00
parent f39c99fa14
commit f642135517
2 changed files with 2 additions and 11 deletions

modules/models.py

@@ -149,8 +149,7 @@ def load_model(model_name):
             print("Warning: applying the monkey patch for using LoRAs in 4-bit mode.\nIt may cause undefined behavior outside its intended scope.")
             from modules.monkey_patch_gptq_lora import load_model_llama
-            model, tokenizer = load_model_llama(model_name)
-            return model, tokenizer
+            model, _ = load_model_llama(model_name)
 
         # No monkey patch
         else:
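
What this hunk accomplishes: the monkey-patch branch no longer returns early with the tokenizer produced by the patch, so execution falls through to the code that the other load paths share. A rough sketch of the resulting flow, assuming the usual layout of load_model(); the downstream code is not part of this diff, and load_universal_tokenizer() is a hypothetical stand-in for it:

    # Sketch only: approximate control flow of load_model() after this commit.
    def load_model(model_name):
        if shared.args.monkey_patch:
            from modules.monkey_patch_gptq_lora import load_model_llama
            model, _ = load_model_llama(model_name)  # tokenizer from the patch is discarded
        else:
            ...  # other load paths, unchanged by this commit

        # With the early return gone, the monkey-patch branch now reaches the same
        # shared code as every other branch: the universal tokenizer load and the
        # xformers / sdp-attention hijack named in the commit title.
        tokenizer = load_universal_tokenizer(model_name)  # hypothetical helper
        return model, tokenizer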

modules/monkey_patch_gptq_lora.py

@@ -18,17 +18,16 @@ from modules.GPTQ_loader import find_quantized_model_file
 replace_peft_model_with_gptq_lora_model()
 
 def load_model_llama(model_name):
     config_path = str(Path(f'{shared.args.model_dir}/{model_name}'))
     model_path = str(find_quantized_model_file(model_name))
     model, tokenizer = load_llama_model_4bit_low_ram(config_path, model_path, groupsize=shared.args.groupsize, is_v1_model=False)
     for n, m in model.named_modules():
         if isinstance(m, Autograd4bitQuantLinear) or isinstance(m, Linear4bitLt):
             if m.is_v1_model:
                 m.zeros = m.zeros.half()
             m.scales = m.scales.half()
             m.bias = m.bias.half()
 
     autograd_4bit.use_new = True
     autograd_4bit.auto_switch = True
@@ -36,11 +35,4 @@ def load_model_llama(model_name):
     wrapper = AMPWrapper(model)
     wrapper.apply_generate()
 
-    try:
-        tokenizer.eos_token_id = 2
-        tokenizer.bos_token_id = 1
-        tokenizer.pad_token_id = 0
-    except:
-        pass
-
     return model, tokenizer
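
The deleted try/except hard-coded LLaMA's usual special-token IDs (bos = 1, eos = 2, pad = 0) onto the tokenizer built inside the patch. Since load_model() now discards that tokenizer (see the models.py hunk above), those IDs are expected to come from the shared tokenizer setup instead. A hedged sketch of what that shared path amounts to; the real code lives in modules/models.py and is not part of this diff, and shared / model_name are assumed to be in scope as they are in the repository:

    # Sketch only (assumption): the "universal tokenizer" load that supersedes
    # the hard-coded assignments removed above.
    from pathlib import Path
    from transformers import LlamaTokenizer

    tokenizer = LlamaTokenizer.from_pretrained(Path(f'{shared.args.model_dir}/{model_name}'))
    # For LLaMA checkpoints these values come from the tokenizer files themselves,
    # typically bos_token_id == 1 and eos_token_id == 2; pad_token_id may remain
    # unset and be handled elsewhere.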