gpt4all/configs/train/finetune.yaml

# model/tokenizer
model_name: "zpn/llama-7b"
tokenizer_name: "zpn/llama-7b"
gradient_checkpointing: true  # trade extra compute for lower activation memory
save_name: "nomic-ai/vicuna-full-multi-turn"
# dataset
streaming: false  # load the dataset fully rather than streaming it
num_proc: 64  # worker processes for dataset preprocessing
dataset_path: "data_multiturn"
max_length: 1024  # maximum sequence length in tokens
batch_size: 32
# train dynamics
lr: 5.0e-5
eval_every: 800  # evaluate every N training steps
eval_steps: 100  # steps to run at each evaluation
save_every: 800  # checkpoint every N training steps
output_dir: "ckpts/llama-7b-full-multi"
checkpoint: null  # optional checkpoint to resume from; null trains from scratch
lora: false  # full finetune, no LoRA adapters
warmup_steps: 100  # learning-rate warmup steps
num_epochs: 2
# logging
wandb: true  # log metrics to Weights & Biases
wandb_entity: vicuna
wandb_project_name: vicuna
seed: 42
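
For context, here is a minimal sketch of how a training script might consume this config. This is an illustration, not the repository's actual train.py: the function names (load_config, setup) are hypothetical, and it assumes PyYAML and Hugging Face transformers are installed.

# Hypothetical loader sketch -- not the repo's train.py.
import yaml
from transformers import AutoModelForCausalLM, AutoTokenizer, set_seed


def load_config(path: str) -> dict:
    # Parse the YAML file above into a plain dict.
    with open(path) as f:
        return yaml.safe_load(f)


def setup(config: dict):
    # Seed everything for reproducibility (seed: 42 above).
    set_seed(config["seed"])
    tokenizer = AutoTokenizer.from_pretrained(config["tokenizer_name"])
    model = AutoModelForCausalLM.from_pretrained(config["model_name"])
    if config["gradient_checkpointing"]:
        # Recompute activations in the backward pass to save memory.
        model.gradient_checkpointing_enable()
    return model, tokenizer


if __name__ == "__main__":
    config = load_config("gpt4all/configs/train/finetune.yaml")
    model, tokenizer = setup(config)
    print(f"loaded {config['model_name']} with lr={config['lr']}")

Because safe_load returns a plain dict, the YAML file stays the single source of truth for hyperparameters and the script needs no per-key argument parsing.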