Suzhiba/fix resume from checkpoint bug (#322)

* fix bug when use resume_from_checkpoint to resume lora params

* update

---------

Co-authored-by: 苏志霸 <suzhiba@suzhibadeMacBook-Pro.local>
This commit is contained in:
suzhiba 2023-04-14 04:39:18 +08:00 committed by GitHub
parent 179f3974f8
commit 2d719c11cf
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

View File

@ -202,7 +202,7 @@ def train(
 if os.path.exists(checkpoint_name):
     print(f"Restarting from {checkpoint_name}")
     adapters_weights = torch.load(checkpoint_name)
-    model = set_peft_model_state_dict(model, adapters_weights)
+    set_peft_model_state_dict(model, adapters_weights)
 else:
     print(f"Checkpoint {checkpoint_name} not found")