Mirror of https://github.com/oobabooga/text-generation-webui.git
Synced 2024-10-01 01:26:03 -04:00
Make universal tokenizer, xformers, sdp-attention apply to monkey patch
parent f39c99fa14
commit f642135517
modules/models.py

@@ -149,8 +149,7 @@ def load_model(model_name):
         print("Warning: applying the monkey patch for using LoRAs in 4-bit mode.\nIt may cause undefined behavior outside its intended scope.")
         from modules.monkey_patch_gptq_lora import load_model_llama
 
-        model, tokenizer = load_model_llama(model_name)
-        return model, tokenizer
+        model, _ = load_model_llama(model_name)
 
     # No monkey patch
     else:
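With the early return removed, the monkey-patch branch now discards the tokenizer that load_model_llama returns and falls through to the tokenizer-loading code shared by every backend, which is what lets the universal tokenizer (and the xformers/sdp-attention patches mentioned in the commit title) apply to monkey-patched models too. Below is a minimal sketch of that fall-through shape, not the repo's actual load_model; the model_dir and monkey_patch parameters and the elided "other backends" branch are stand-ins invented for illustration:

# Sketch only (assumed shapes, not the actual repo code): shows why dropping
# the early return lets the shared tokenizer path run for the patched backend.
from pathlib import Path

from transformers import AutoTokenizer


def load_model(model_name, model_dir='models', monkey_patch=False):
    if monkey_patch:
        from modules.monkey_patch_gptq_lora import load_model_llama
        model, _ = load_model_llama(model_name)  # tokenizer deliberately ignored
    else:
        model = ...  # other backends load here (elided)

    # Shared path: every backend, monkey patch included, now reaches the
    # tokenizer loading (and attention patching) below the branch.
    tokenizer = AutoTokenizer.from_pretrained(Path(model_dir) / model_name)
    return model, tokenizer

Returning early, as the old code did, would have skipped everything below the branch for the monkey-patched case.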
modules/monkey_patch_gptq_lora.py

@@ -18,17 +18,16 @@ from modules.GPTQ_loader import find_quantized_model_file
 replace_peft_model_with_gptq_lora_model()
 
 def load_model_llama(model_name):
-
     config_path = str(Path(f'{shared.args.model_dir}/{model_name}'))
     model_path = str(find_quantized_model_file(model_name))
     model, tokenizer = load_llama_model_4bit_low_ram(config_path, model_path, groupsize=shared.args.groupsize, is_v1_model=False)
 
     for n, m in model.named_modules():
         if isinstance(m, Autograd4bitQuantLinear) or isinstance(m, Linear4bitLt):
             if m.is_v1_model:
                 m.zeros = m.zeros.half()
             m.scales = m.scales.half()
             m.bias = m.bias.half()
 
     autograd_4bit.use_new = True
     autograd_4bit.auto_switch = True
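A side note on the unchanged loop in the hunk above: load_llama_model_4bit_low_ram can return quantized linear layers whose buffers are still fp32, and the loop downcasts them once at load time, presumably to match the fp16 compute path. A standalone gloss of the same pattern follows; the hasattr check and the bias None-guard are assumptions standing in for the original isinstance checks, not code from the repo:

import torch


def downcast_quant_buffers(model: torch.nn.Module) -> None:
    """Cast quantized-linear buffers to fp16, mirroring the loop above."""
    for m in model.modules():
        if hasattr(m, 'scales'):  # hypothetical stand-in for the isinstance checks
            if getattr(m, 'is_v1_model', False):
                m.zeros = m.zeros.half()  # v1 checkpoints keep zeros in fp32
            m.scales = m.scales.half()
            if m.bias is not None:  # guard added here; the original assumes bias exists
                m.bias = m.bias.half()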
@@ -36,11 +35,4 @@ def load_model_llama(model_name):
     wrapper = AMPWrapper(model)
     wrapper.apply_generate()
 
-    try:
-        tokenizer.eos_token_id = 2
-        tokenizer.bos_token_id = 1
-        tokenizer.pad_token_id = 0
-    except:
-        pass
-
     return model, tokenizer
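The deleted try/except had been force-setting LLaMA's special token IDs (bos=1, eos=2, pad=0) on the tokenizer returned by the patched loader; since the caller now ignores that tokenizer, the override became dead code. If those IDs ever need pinning on a tokenizer obtained elsewhere, an equivalent helper would look like the sketch below (assuming a transformers-style tokenizer object; pin_llama_special_tokens is a name invented here):

def pin_llama_special_tokens(tokenizer):
    # Same IDs the removed block hard-coded for LLaMA checkpoints.
    tokenizer.bos_token_id = 1
    tokenizer.eos_token_id = 2
    tokenizer.pad_token_id = 0
    return tokenizer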
|