fix: config

zanussbaum 2023-04-08 17:14:56 -04:00
parent acf3f8703b
commit f8fa321d64
2 changed files with 49 additions and 1 deletion

@@ -0,0 +1,48 @@
+{
+    "train_batch_size": "auto",
+    "gradient_accumulation_steps": "auto",
+    "train_micro_batch_size_per_gpu": "auto",
+    "fp16": {
+        "enabled": "auto",
+        "min_loss_scale": 1,
+        "loss_scale_window": 1000,
+        "hysteresis": 2,
+        "initial_scale_power": 32
+    },
+    "bf16": {
+        "enabled": "auto"
+    },
+    "gradient_clipping": 1,
+    "zero_optimization": {
+        "stage": 2,
+        "offload_param": {
+            "device": "cpu"
+        },
+        "offload_optimizer": {
+            "device": "cpu"
+        },
+        "allgather_partitions": true,
+        "allgather_bucket_size": 5e8,
+        "contiguous_gradients": true
+    },
+    "optimizer": {
+        "type": "AdamW",
+        "params": {
+            "lr": "auto",
+            "betas": [
+                0.9,
+                0.999
+            ],
+            "eps": 1e-08
+        }
+    },
+    "scheduler": {
+        "type": "WarmupLR",
+        "params": {
+            "warmup_min_lr": 0,
+            "warmup_max_lr": "auto",
+            "warmup_num_steps": "auto",
+            "warmup_type": "linear"
+        }
+    }
+}
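
For context, a minimal sketch (not part of this commit) of how a DeepSpeed config like this is typically consumed: when the JSON is passed to a HuggingFace Trainer, each "auto" field is resolved from the matching TrainingArguments value. The config path and all hyperparameter values below are illustrative assumptions.

    # Sketch: resolving the "auto" fields via HuggingFace Trainer.
    # The config path and every hyperparameter value here are assumptions,
    # not values taken from this commit.
    from transformers import TrainingArguments

    args = TrainingArguments(
        output_dir="ckpt",
        per_device_train_batch_size=8,   # -> train_micro_batch_size_per_gpu
        gradient_accumulation_steps=4,   # -> gradient_accumulation_steps
        learning_rate=2e-5,              # -> optimizer.params.lr
        warmup_steps=500,                # -> scheduler.params.warmup_num_steps
        fp16=True,                       # -> fp16.enabled
        deepspeed="configs/ds_config.json",  # assumed path to the JSON above
    )
    # Passing these args to Trainer(...) launches training under DeepSpeed
    # ZeRO stage 2 with optimizer and parameter offload to CPU, as configured.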

@@ -2,7 +2,7 @@
 model_name: # update
 tokenizer_name: # update
 gradient_checkpointing: false
-save_name: "nomic-ai/gpt4all-lora-multi-turn"
+save_name: "nomic-ai/gpt4all-lora-llama"
 # dataset
 streaming: false
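
The second hunk only renames save_name, presumably the identifier the fine-tuned model is saved under. A minimal sketch of reading such a YAML config (the file path is an assumption, not shown in this diff):

    # Sketch: loading the YAML training config with PyYAML.
    # "configs/train/finetune.yaml" is an assumed path, not part of this commit.
    import yaml

    with open("configs/train/finetune.yaml") as f:
        config = yaml.safe_load(f)

    print(config["save_name"])  # "nomic-ai/gpt4all-lora-llama" after this change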