# gpt4all/configs/train/finetune_lora.yaml
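# LoRA fine-tune of GPT4All on the multi-turn dataset.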
# model/tokenizer
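# model_name / tokenizer_name: fill in with the base model to adapt,
# e.g. a Hugging Face hub id or a local checkpoint path.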
model_name: # update
tokenizer_name: # update
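# Recompute activations in the backward pass to save memory at the cost
# of extra compute; disabled for this run.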
gradient_checkpointing: false
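# Name the fine-tuned model is saved under (a Hub-style repo id here).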
save_name: "nomic-ai/gpt4all-lora-multi-turn"
# dataset
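# streaming: false loads the data eagerly; num_proc sets the number of
# preprocessing workers (the usual Hugging Face `datasets` pattern).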
streaming: false
num_proc: 64
dataset_path: "data_multiturn"
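# Sequences are capped at max_length tokens; batch_size is presumably
# per device.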
max_length: 1024
batch_size: 4
# train dynamics
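# Going by the key names: evaluate every eval_every optimizer steps for
# eval_steps batches, checkpoint to output_dir every save_every steps,
# with lr warmed up over warmup_steps.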
lr: 5.0e-5
eval_every: 2000
eval_steps: 100
save_every: 2000
output_dir: "ckpts/gpt4all-lora-multi"
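# checkpoint: path to resume training from; null starts from scratch.
# lora: train low-rank adapters instead of the full model weights.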
checkpoint: null
lora: true
warmup_steps: 100
num_epochs: 2
# logging
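# Weights & Biases logging; entity and project are left to fill in.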
wandb: true
wandb_entity: # update
wandb_project_name: # update
seed: 42
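
# Example invocation (hypothetical; check the repo's training docs for the
# exact entry point and flags):
#   python train.py --config configs/train/finetune_lora.yaml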