Allow --lora to use an absolute path

This commit is contained in:
oobabooga 2023-08-10 09:54:28 -07:00
parent 3929971b66
commit 0789554f65

View File

@@ -17,6 +17,14 @@ def add_lora_to_model(lora_names)
        add_lora_transformers(lora_names)
def get_lora_path(lora_name):
    """Resolve a LoRA name (or a path to one) to a folder under shared.args.lora_dir.

    If `lora_name` is an existing filesystem path, only its final component
    (the adapter folder name) is kept before joining it with the configured
    LoRA directory.
    """
    supplied = Path(lora_name)
    # NOTE(review): an existing (possibly absolute) path is reduced to its last
    # component and re-rooted under lora_dir — confirm this matches the
    # intended absolute-path support from the commit title.
    lora_name = supplied.parts[-1] if supplied.exists() else lora_name
    return Path(f"{shared.args.lora_dir}/{lora_name}")
def add_lora_exllama(lora_names):
    try:
@@ -40,7 +48,7 @@ def add_lora_exllama(lora_names)
    if len(lora_names) > 1:
        logger.warning('ExLlama can only work with 1 LoRA at the moment. Only the first one in the list will be loaded.')
-    lora_path = Path(f"{shared.args.lora_dir}/{lora_names[0]}")
+    lora_path = get_lora_path(lora_names[0])
    lora_config_path = lora_path / "adapter_config.json"
    lora_adapter_path = lora_path / "adapter_model.bin"
@@ -81,7 +89,7 @@ def add_lora_autogptq(lora_names)
        inference_mode=True,
    )
-    lora_path = Path(f"{shared.args.lora_dir}/{lora_names[0]}")
+    lora_path = get_lora_path(lora_names[0])
    logger.info("Applying the following LoRAs to {}: {}".format(shared.model_name, ', '.join([lora_names[0]])))
    shared.model = get_gptq_peft_model(shared.model, peft_config, lora_path)
    shared.lora_names = [lora_names[0]]
@@ -101,7 +109,7 @@ def add_lora_transformers(lora_names)
    if len(removed_set) == 0 and len(prior_set) > 0:
        logger.info(f"Adding the LoRA(s) named {added_set} to the model...")
        for lora in added_set:
-            shared.model.load_adapter(Path(f"{shared.args.lora_dir}/{lora}"), lora)
+            shared.model.load_adapter(get_lora_path(lora), lora)
        return
@@ -123,9 +131,9 @@ def add_lora_transformers(lora_names)
        params['device_map'] = {"base_model.model." + k: v for k, v in shared.model.hf_device_map.items()}
    logger.info("Applying the following LoRAs to {}: {}".format(shared.model_name, ', '.join(lora_names)))
-    shared.model = PeftModel.from_pretrained(shared.model, Path(f"{shared.args.lora_dir}/{lora_names[0]}"), adapter_name=lora_names[0], **params)
+    shared.model = PeftModel.from_pretrained(shared.model, get_lora_path(lora_names[0]), adapter_name=lora_names[0], **params)
    for lora in lora_names[1:]:
-        shared.model.load_adapter(Path(f"{shared.args.lora_dir}/{lora}"), lora)
+        shared.model.load_adapter(get_lora_path(lora), lora)
    shared.lora_names = lora_names