Mirror of https://github.com/tloen/alpaca-lora.git, synced 2024-10-01 01:05:56 -04:00
Print only on Rank 0 (#187)
* Print only on Rank 0

  When training on multiple GPUs, the settings are printed once per GPU. This change prints them only from rank 0. See https://github.com/tloen/alpaca-lora/issues/182#issuecomment-1485550636 for a sample output. The same guard could be applied to a few other prints further down as well.

* Typo

* Added failsafe

  So this works whether or not LOCAL_RANK is defined.
parent a48d947298
commit fcbc45e4c0
@@ -53,6 +53,7 @@ def train(
     wandb_log_model: str = "",  # options: false | true
     resume_from_checkpoint: str = None,  # either training checkpoint or final adapter
 ):
+    if int(os.environ.get("LOCAL_RANK", 0)) == 0:
         print(
             f"Training Alpaca-LoRA model with params:\n"
             f"base_model: {base_model}\n"
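As a standalone illustration of the guard this commit introduces, here is a minimal sketch. The helper name log_on_rank0 is hypothetical and not part of finetune.py; it only shows the pattern. Distributed launchers such as torchrun set a LOCAL_RANK environment variable per GPU process, and defaulting to 0 is the failsafe mentioned in the commit message for runs where LOCAL_RANK is not defined.

import os

def log_on_rank0(*args, **kwargs):
    # Each GPU process gets its own LOCAL_RANK under a distributed launcher;
    # falling back to 0 keeps this working in single-process runs.
    if int(os.environ.get("LOCAL_RANK", 0)) == 0:
        print(*args, **kwargs)

log_on_rank0("Training Alpaca-LoRA model with params: ...")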