diff --git a/loras/place-your-loras-here.txt b/loras/place-your-loras-here.txt
new file mode 100644
index 00000000..e69de29b
diff --git a/modules/LoRA.py b/modules/LoRA.py
new file mode 100644
index 00000000..84e128fb
--- /dev/null
+++ b/modules/LoRA.py
@@ -0,0 +1,15 @@
+from pathlib import Path
+
+from peft import PeftModel
+
+import modules.shared as shared
+from modules.models import load_model
+
+
+def add_lora_to_model(lora_name):
+
+    # Is there a more efficient way of returning to the base model?
+    if lora_name == "None":
+        shared.model, shared.tokenizer = load_model(shared.model_name)
+    else:
+        shared.model = PeftModel.from_pretrained(shared.model, Path(f"loras/{lora_name}"))
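
For reference, a minimal usage sketch of the new function (the call sites and the model/adapter names are hypothetical; it assumes the base model is loaded via load_model first, and that a matching adapter directory exists under loras/):

```python
import modules.shared as shared
from modules.LoRA import add_lora_to_model
from modules.models import load_model

# Load the base model first; add_lora_to_model() then wraps shared.model in place.
shared.model_name = "llama-7b"  # hypothetical model name
shared.model, shared.tokenizer = load_model(shared.model_name)

# Apply an adapter from loras/alpaca-lora-7b/ (hypothetical directory name).
add_lora_to_model("alpaca-lora-7b")

# Passing "None" currently reloads the base model from disk.
add_lora_to_model("None")
```

On the in-code question about a cheaper way back to the base model: peft's LoRA wrapper can detach its injected adapter layers without reloading anything from disk. A sketch of that alternative, assuming a peft version that provides LoraModel.unload() (not verified against the version this repo pins):

```python
import modules.shared as shared
from peft import PeftModel


def remove_lora_from_model():
    # Hypothetical alternative to the load_model() round trip: if the
    # current model is a PeftModel, unload() strips the injected LoRA
    # layers and returns the original base model still held in memory.
    if isinstance(shared.model, PeftModel):
        shared.model = shared.model.base_model.unload()
```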