gpt4all/gpt4all-training/configs/train/finetune.yaml

# model/tokenizer
model_name: # add model here
tokenizer_name: # add tokenizer here (typically the same as model_name)
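# Illustrative fill-in (hypothetical values, not from the original file):
# model_name: EleutherAI/gpt-j-6B
# tokenizer_name: EleutherAI/gpt-j-6B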
gradient_checkpointing: true # recompute activations on the backward pass to save memory
save_name: # CHANGE
# dataset
streaming: false # load the dataset up front rather than streaming it
num_proc: 64 # number of worker processes for dataset preprocessing
dataset_path: # update
max_length: 1024 # maximum sequence length in tokens
batch_size: 32
# train dynamics
lr: 5.0e-5
eval_every: 800 # run evaluation every 800 training steps
eval_steps: 100 # number of eval batches per evaluation run (assumed)
save_every: 800 # save a checkpoint every 800 training steps
output_dir: # CHANGE
checkpoint: null # path to a checkpoint to resume from, if any
lora: false # full finetune; set true to train LoRA adapters instead
warmup_steps: 100 # learning-rate warmup steps
num_epochs: 2
# logging
wandb: true # enable Weights & Biases logging
wandb_entity: # update
wandb_project_name: # update
seed: 42
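
# Usage sketch (assumption: the repo's train.py reads this file via a
# --config flag; the exact launch command is not confirmed here):
#   accelerate launch train.py --config configs/train/finetune.yaml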