alpaca-lora/export_hf_checkpoint.py

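"""Merge the tloen/alpaca-lora-7b LoRA adapter into a base LLaMA model and
export the result as a standalone Hugging Face checkpoint under ./hf_ckpt."""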
import os
import torch
import transformers
from peft import PeftModel
from transformers import LlamaForCausalLM, LlamaTokenizer  # noqa: F402

BASE_MODEL = os.environ.get("BASE_MODEL", None)
assert (
    BASE_MODEL
), "Please specify a value for BASE_MODEL environment variable, e.g. `export BASE_MODEL=decapoda-research/llama-7b-hf`"  # noqa: E501
tokenizer = LlamaTokenizer.from_pretrained(BASE_MODEL)

base_model = LlamaForCausalLM.from_pretrained(
    BASE_MODEL,
    load_in_8bit=False,
    torch_dtype=torch.float16,
    device_map={"": "cpu"},
)
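
# Keep a reference to one base weight tensor, plus a copy of its current
# values, so we can verify below that merging actually changed the model.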
first_weight = base_model.model.layers[0].self_attn.q_proj.weight
first_weight_old = first_weight.clone()
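
# Load the tloen/alpaca-lora-7b adapter on top of the base model via peft.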
lora_model = PeftModel.from_pretrained(
    base_model,
    "tloen/alpaca-lora-7b",
    device_map={"": "cpu"},
    torch_dtype=torch.float16,
)
# The same q_proj weight, addressed through the peft wrapper (captured for
# manual inspection; it is not used again below).
lora_weight = lora_model.base_model.model.model.layers[
    0
].self_attn.q_proj.weight
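
# Loading the adapter must not change the base weights yet: LoRA keeps its
# deltas in separate A/B matrices until they are explicitly merged.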
assert torch.allclose(first_weight_old, first_weight)

# Merge the LoRA weights into the base model: flagging merge_weights on each
# adapted projection tells peft to fold the low-rank update into the base
# weight matrix as soon as the module is switched out of training mode.
for layer in lora_model.base_model.model.model.layers:
    layer.self_attn.q_proj.merge_weights = True
    layer.self_attn.v_proj.merge_weights = True
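
# train(False) flips the LoRA layers to eval mode, which performs the merge.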
lora_model.train(False)

# did we do anything? the merge should have changed the base tensor in place
assert not torch.allclose(first_weight_old, first_weight)
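
# Strip the peft wrapper from the state dict: drop the "base_model.model."
# key prefix and discard the (now-merged) LoRA tensors so the keys match a
# vanilla LlamaForCausalLM.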
lora_model_sd = lora_model.state_dict()
deloreanized_sd = {
    k.replace("base_model.model.", ""): v
    for k, v in lora_model_sd.items()
    if "lora" not in k
}
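
# Save as a regular Hugging Face checkpoint, sharded into ~400 MB files.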
LlamaForCausalLM.save_pretrained(
base_model, "./hf_ckpt", state_dict=deloreanized_sd, max_shard_size="400MB"
)
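
# Assumed invocation (script name from this file, env var per the assert above):
#   BASE_MODEL=decapoda-research/llama-7b-hf python export_hf_checkpoint.py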