From 9256e937d6e7d34c539b99bcb35183d9cf6fe157 Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Fri, 17 Mar 2023 17:45:28 -0300
Subject: [PATCH] Add some LoRA params

---
 modules/LoRA.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/modules/LoRA.py b/modules/LoRA.py
index fe11c0da..b568e57b 100644
--- a/modules/LoRA.py
+++ b/modules/LoRA.py
@@ -15,4 +15,8 @@ def add_lora_to_model(lora_name):
     else:
         # Why doesn't this work in 16-bit mode?
         print(f"Adding the LoRA {lora_name} to the model...")
-        shared.model = PeftModel.from_pretrained(shared.model, Path(f"loras/{lora_name}"))
+
+        params = {}
+        #params['device_map'] = {'': 0}
+        #params['dtype'] = shared.model.dtype
+        shared.model = PeftModel.from_pretrained(shared.model, Path(f"loras/{lora_name}"), **params)
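For context, below is a minimal standalone sketch of the pattern this patch introduces: optional keyword arguments are collected in a params dict and splatted into PeftModel.from_pretrained, so individual options can be toggled without rewriting the call site. It assumes the transformers and peft libraries are installed; "path/to/base-model" and "loras/my-lora" are placeholder paths, not names from the patch, and whether a given peft version honors the two disabled kwargs is an open question (note the "16-bit mode" comment in the diff).

# Minimal sketch of the pattern the patch introduces. Placeholder paths;
# assumes transformers and peft are installed.
from pathlib import Path

from peft import PeftModel
from transformers import AutoModelForCausalLM

base_model = AutoModelForCausalLM.from_pretrained("path/to/base-model")

# Optional kwargs live in a dict so they can be enabled one at a time.
params = {}
# The patch leaves these two lines disabled; they mirror the commented-out
# params above and may or may not be accepted by a given peft version:
# params['device_map'] = {'': 0}       # map every module onto GPU 0
# params['dtype'] = base_model.dtype   # match the base model's precision

lora_model = PeftModel.from_pretrained(base_model, Path("loras/my-lora"), **params)

Keeping the dict empty by default preserves the previous behavior exactly, which is why this change is safe to merge with both experimental params still commented out.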