Suzhiba/fix resume from checkpoint bug (#322)

* fix bug when using resume_from_checkpoint to resume LoRA params

* update

---------

Co-authored-by: 苏志霸 <suzhiba@suzhibadeMacBook-Pro.local>
This commit is contained in:
parent 179f3974f8
commit 2d719c11cf
@@ -202,7 +202,7 @@ def train(
         if os.path.exists(checkpoint_name):
             print(f"Restarting from {checkpoint_name}")
             adapters_weights = torch.load(checkpoint_name)
-            model = set_peft_model_state_dict(model, adapters_weights)
+            set_peft_model_state_dict(model, adapters_weights)
         else:
             print(f"Checkpoint {checkpoint_name} not found")
 
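Why the change matters: set_peft_model_state_dict loads the adapter weights into the model in place, and in recent PEFT versions its return value is the load result rather than the model, so the old assignment model = set_peft_model_state_dict(...) replaced the model reference with a non-model object. Below is a minimal sketch of the corrected resume path; the standalone helper load_adapter_checkpoint is illustrative and not part of the commit, while adapter_model.bin matches PEFT's default adapter filename used elsewhere in finetune.py.

import os

import torch
from peft import set_peft_model_state_dict


def load_adapter_checkpoint(model, resume_from_checkpoint):
    # Hypothetical helper mirroring the fixed block inside train().
    checkpoint_name = os.path.join(resume_from_checkpoint, "adapter_model.bin")
    if os.path.exists(checkpoint_name):
        print(f"Restarting from {checkpoint_name}")
        adapters_weights = torch.load(checkpoint_name)
        # Loads the weights into `model` in place; do not reassign `model`
        # to the return value (it is not the model in recent PEFT versions).
        set_peft_model_state_dict(model, adapters_weights)
    else:
        print(f"Checkpoint {checkpoint_name} not found")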