From 0cecfc684c6f5fa2531980d856c5ea56bc6f97ee Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Thu, 16 Mar 2023 21:35:53 -0300
Subject: [PATCH] Add files

---
 loras/place-your-loras-here.txt |  0
 modules/LoRA.py                 | 15 +++++++++++++++
 2 files changed, 15 insertions(+)
 create mode 100644 loras/place-your-loras-here.txt
 create mode 100644 modules/LoRA.py

diff --git a/loras/place-your-loras-here.txt b/loras/place-your-loras-here.txt
new file mode 100644
index 00000000..e69de29b
diff --git a/modules/LoRA.py b/modules/LoRA.py
new file mode 100644
index 00000000..84e128fb
--- /dev/null
+++ b/modules/LoRA.py
@@ -0,0 +1,15 @@
+from pathlib import Path
+
+from peft import PeftModel
+
+import modules.shared as shared
+from modules.models import load_model
+
+
+def add_lora_to_model(lora_name):
+
+    # Is there a more efficient way of returning to the base model?
+    if lora_name == "None":
+        shared.model, shared.tokenizer = load_model(shared.model_name)
+    else:
+        shared.model = PeftModel.from_pretrained(shared.model, Path(f"loras/{lora_name}"))
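
For context, here is a minimal usage sketch (not part of the patch) of how the new helper might be driven from elsewhere in the webui. The model and LoRA folder names are hypothetical, and it assumes modules.shared and modules.models.load_model behave as shown in the diff above.

    import modules.shared as shared
    from modules.LoRA import add_lora_to_model
    from modules.models import load_model

    # Load a base model first; "llama-7b" is a hypothetical folder name.
    shared.model_name = "llama-7b"
    shared.model, shared.tokenizer = load_model(shared.model_name)

    # Wrap the base model with a LoRA adapter loaded from loras/<name>;
    # "alpaca-lora-7b" is a hypothetical adapter folder.
    add_lora_to_model("alpaca-lora-7b")

    # Passing "None" discards the adapter by reloading the base model
    # from scratch (as the in-code comment notes, this is not efficient).
    add_lora_to_model("None")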