diff --git a/modules/LoRA.py b/modules/LoRA.py
index c95da6ee..74030c25 100644
--- a/modules/LoRA.py
+++ b/modules/LoRA.py
@@ -10,7 +10,7 @@ def add_lora_to_model(lora_name):
 
     # Is there a more efficient way of returning to the base model?
     if lora_name == "None":
-        print(f"Reloading the model to remove the LoRA...")
+        print("Reloading the model to remove the LoRA...")
         shared.model, shared.tokenizer = load_model(shared.model_name)
     else:
         print(f"Adding the LoRA {lora_name} to the model...")
diff --git a/modules/models.py b/modules/models.py
index e4507e57..63060d43 100644
--- a/modules/models.py
+++ b/modules/models.py
@@ -8,7 +8,6 @@
 import numpy as np
 import torch
 import transformers
 from accelerate import infer_auto_device_map, init_empty_weights
-from peft import PeftModel
 from transformers import (AutoConfig, AutoModelForCausalLM, AutoTokenizer,
                           BitsAndBytesConfig)