Commit 29cb9d700a: Merge branch 'train' of github.com:nomic-ai/gpt4all into train
.gitignore (vendored): 2 changes

@@ -1,6 +1,6 @@
 *.jsonl
 *tar.gz
-ckpts/
+ckpts**
 wandb
 # Byte-compiled / optimized / DLL files
 __pycache__/
clean.py: 6 changes

@@ -6,8 +6,10 @@ import jsonlines
 import pandas as pd


-prompt_generation_dir = "prompts-reponses"
+prompt_generation_dir = "raw_data_sanity_cleaned_without_p3/"
 for file in glob.glob(os.path.join(prompt_generation_dir, "*.jsonl")):
+    if "clean.jsonl" in file:
+        continue
     data = []
     print(file)
     with open(file) as f:
@@ -67,5 +69,5 @@ for file in glob.glob(os.path.join(prompt_generation_dir, "*.jsonl")):
     print(f"Removed {prev_len - curr_len} rows")

     clean_name = file.split(".jsonl")[0] + "_clean.jsonl"
-    print(f"writing to {clean_name}")
+    print(f"writing to 11,793 rows to {clean_name}")
     df.to_json(clean_name, orient="records", lines=True)
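The new guard keeps re-runs from picking up the script's own outputs: without it, a second pass would clean the "*_clean.jsonl" files again and emit "*_clean_clean.jsonl". A minimal sketch of the loop this hunk modifies, with an illustrative dedupe step standing in for the real filters (the actual clean.py builds its dataframe row by row and applies more checks):

import glob
import os

import pandas as pd

prompt_generation_dir = "raw_data_sanity_cleaned_without_p3/"

for file in glob.glob(os.path.join(prompt_generation_dir, "*.jsonl")):
    # skip outputs of earlier runs so cleaning is not applied twice
    if "clean.jsonl" in file:
        continue

    df = pd.read_json(file, lines=True)  # one JSON record per line
    prev_len = len(df)
    df = df.drop_duplicates()            # stand-in for the real filters
    curr_len = len(df)
    print(f"Removed {prev_len - curr_len} rows")

    clean_name = file.split(".jsonl")[0] + "_clean.jsonl"
    print(f"writing to {clean_name}")
    df.to_json(clean_name, orient="records", lines=True)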
(training config for the full finetune; filename not preserved in this capture)

@@ -2,27 +2,29 @@
 model_name: "zpn/llama-7b"
 tokenizer_name: "zpn/llama-7b"
 gradient_checkpointing: true
+save_name: "nomic-ai/vicuna-full-multi-turn"

 # dataset
 streaming: false
 num_proc: 64
-dataset_path: "data.jsonl"
-max_length: 512
+dataset_path: "data_multiturn"
+max_length: 1024
 batch_size: 32

 # train dynamics
 lr: 5.0e-5
-eval_every: 2000
+eval_every: 800
 eval_steps: 100
-save_every: 2000
-output_dir: "ckpts/llama-7b"
+save_every: 800
+output_dir: "ckpts/llama-7b-full-multi"
 checkpoint: null
 lora: false
 warmup_steps: 100
+num_epochs: 2

 # logging
-wandb: false
-wandb_entity: zanussbaum
-wandb_project: llama
+wandb: true
+wandb_entity: vicuna
+wandb_project_name: vicuna
 seed: 42
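Note the key rename (wandb_project becomes wandb_project_name) and the new num_epochs and save_name keys; they only take effect because train.py is updated in the same commit. A hedged sketch of how the training side presumably consumes them, using accelerate's tracker API (values copied from the hunk; this is not the repo's exact wiring):

from accelerate import Accelerator

config = {
    "wandb": True,
    "wandb_entity": "vicuna",
    "wandb_project_name": "vicuna",
    "num_epochs": 2,
}

accelerator = Accelerator(log_with="wandb" if config["wandb"] else None)
if config["wandb"]:
    accelerator.init_trackers(
        project_name=config["wandb_project_name"],  # renamed key must match here
        config=config,
        init_kwargs={"wandb": {"entity": config["wandb_entity"]}},
    )

for epoch in range(config["num_epochs"]):  # new key drives the epoch loop
    pass  # training steps go here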
(LoRA training config; filename not preserved in this capture)

@@ -2,12 +2,12 @@
 model_name: "zpn/llama-7b"
 tokenizer_name: "zpn/llama-7b"
 gradient_checkpointing: false
-save_name: "zpn/vicuna-lora"
+save_name: "nomic-ai/vicuna-lora-multi-turn"

 # dataset
 streaming: false
 num_proc: 64
-dataset_path: "data"
+dataset_path: "data_multiturn"
 max_length: 1024
 batch_size: 4

@@ -16,10 +16,11 @@ lr: 5.0e-5
 eval_every: 2000
 eval_steps: 100
 save_every: 2000
-output_dir: "ckpts/llama-7b"
+output_dir: "ckpts/llama-7b-lora-multi"
 checkpoint: null
 lora: true
 warmup_steps: 100
+num_epochs: 2

 # logging
 wandb: true
data.py: 18 changes

@@ -1,6 +1,6 @@
 import glob
 import torch
-from datasets import load_dataset
+from datasets import load_dataset, concatenate_datasets
 import os
 from torch.utils.data import DataLoader
 from transformers import DefaultDataCollator
@@ -20,7 +20,7 @@ def tokenize_inputs(config, tokenizer, examples):

         # plus one since we remove bos from response
         # but we subtract one since we want to add eos token
-        remaining_tokens = max_length - input_len - len(newline_tokens)
+        remaining_tokens = max_length - input_len - len(newline_tokens) + 1
         # remove bos
         target_tokens = tokenizer(response, truncation=True, max_length=remaining_tokens, return_tensors="pt")["input_ids"].squeeze()[1:]

@@ -31,8 +31,10 @@ def tokenize_inputs(config, tokenizer, examples):

         # add target tokens, remove bos
         input_ids[i, newline_plus_inputs: newline_plus_inputs + len(target_tokens)] = target_tokens
-        # add eos token, enforce stopping
-        input_ids[i, newline_plus_inputs + len(target_tokens)] = tokenizer.eos_token_id
+        # add eos token, enforce stopping if we don't truncate
+        # we don't want long code to stop generating if truncated during training
+        if newline_plus_inputs + len(target_tokens) < max_length:
+            input_ids[i, newline_plus_inputs + len(target_tokens)] = tokenizer.eos_token_id

         labels = input_ids[i].clone()
         labels[: newline_plus_inputs] = -100
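Two related fixes land here: remaining_tokens gains a +1 because the response's bos token is stripped, freeing one slot, and eos is now appended only when the packed example still has room. Previously a long response that filled the window would have eos stamped onto it anyway, teaching the model to emit eos mid-output. A self-contained toy sketch of the new eos logic (hypothetical token ids and window size, not the repo's tokenize_inputs):

import torch

max_length = 8            # toy context window
pad_id, eos_id = 0, 2     # hypothetical special-token ids

def pack_example(input_tokens, target_tokens):
    """Concatenate prompt and response, appending eos only if nothing was cut."""
    ids = torch.full((max_length,), pad_id, dtype=torch.long)
    n_in = len(input_tokens)
    target_tokens = target_tokens[: max_length - n_in]  # clip to the room left
    ids[:n_in] = input_tokens
    ids[n_in : n_in + len(target_tokens)] = target_tokens
    # add eos only if we didn't truncate, so truncated samples
    # don't teach the model to stop early
    if n_in + len(target_tokens) < max_length:
        ids[n_in + len(target_tokens)] = eos_id
    return ids

print(pack_example(torch.tensor([5, 6]), torch.tensor([7, 8])))    # eos follows response
print(pack_example(torch.tensor([5, 6]), torch.tensor([7] * 10)))  # full window, no eos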
@@ -51,7 +53,6 @@ def tokenize_inputs(config, tokenizer, examples):
     return out


-
 def load_data(config, tokenizer):
     dataset_path = config["dataset_path"]

@@ -62,16 +63,21 @@ def load_data(config, tokenizer):
         else:
             files = [dataset_path]

+        print(f"Reading files {files}")
+
         dataset = load_dataset("json", data_files=files, split="train")

     else:
         dataset = load_dataset(dataset_path)

+    uuids = load_dataset("json", data_files="watermark.jsonl", split="train")
+
     dataset = dataset.train_test_split(test_size=.05, seed=config["seed"])

     train_dataset, val_dataset = dataset["train"], dataset["test"]

+    train_dataset = concatenate_datasets([train_dataset, uuids])
+    train_dataset = train_dataset.shuffle(seed=config["seed"])
+
     if config["streaming"] is False:
         kwargs = {"num_proc": config["num_proc"]}
     else:
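load_data now mixes watermark records (uuids from watermark.jsonl) into the data, and it does so after train_test_split, so only the training split carries them and the validation set stays clean; the shuffle then spreads them across the epoch. A minimal sketch of the same pattern with the datasets library (toy in-memory data standing in for the jsonl files):

from datasets import Dataset, concatenate_datasets

corpus = Dataset.from_dict({"text": [f"example {i}" for i in range(100)]})
uuids = Dataset.from_dict({"text": ["watermark-uuid-1", "watermark-uuid-2"]})

# split first, then inject watermarks into train only,
# so evaluation data can never contain them
split = corpus.train_test_split(test_size=0.05, seed=42)
train_dataset, val_dataset = split["train"], split["test"]

train_dataset = concatenate_datasets([train_dataset, uuids])
train_dataset = train_dataset.shuffle(seed=42)  # spread watermarks around

print(len(train_dataset), len(val_dataset))  # 97 5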
train.py: 73 changes

@@ -115,48 +115,56 @@ def train(accelerator, config):
         "gradient_accumulation_steps"
     ]

-    for step, batch in enumerate(tqdm(train_dataloader)):
-        model.train()
-        outputs = model(**batch)
-        loss = outputs.loss
-        loss = loss / gradient_accumulation_steps
-
-        accelerator.backward(loss)
-
-        # log LR in case something weird happens
-        if step > 0 and step % (config["eval_every"] // 10) == 0:
-            if config["wandb"]:
-                accelerator.log({"lr": scheduler.get_last_lr()[0]}, step=step)
-
-        if (step + 1) % gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1:
-            optimizer.step()
-            scheduler.step()
-            optimizer.zero_grad()
-
-        loss_values = accelerator.gather_for_metrics({"loss": loss.detach()})
-        train_loss.update(loss_values["loss"])
-
-        if step > 0 and step % config["save_every"] == 0:
-            accelerator.save_state(f"{config['output_dir']}/step_{step}")
-
-        if step > 0 and step % config["eval_every"] == 0:
-            val_loss = evaluate(config, model, val_dataloader)
-
-            log_train = {
-                "train_loss": train_loss.compute()
-            }
-            log_val = {
-                "val_loss": val_loss.compute()
-            }
-
-            if config["wandb"]:
-                accelerator.log({**log_train, **log_val}, step=step)
-
-            accelerator.print(f"Current LR: {scheduler.get_last_lr()[0]}")
-            accelerator.print(format_metrics(log_train, "train", f" step {step} "))
-            accelerator.print(format_metrics(log_val, "val", f" step {step} "))
-
-            train_loss.reset()
+    for epoch in range(config["num_epochs"]):
+        for step, batch in enumerate(tqdm(train_dataloader)):
+            model.train()
+            outputs = model(**batch)
+            loss = outputs.loss
+            loss = loss / gradient_accumulation_steps
+
+            accelerator.backward(loss)
+
+            # log LR in case something weird happens
+            if step > 0 and step % (config["eval_every"] // 10) == 0:
+                if config["wandb"]:
+                    accelerator.log({"lr": scheduler.get_last_lr()[0]}, step=step)
+
+            if (step + 1) % gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1:
+                optimizer.step()
+                scheduler.step()
+                optimizer.zero_grad()
+
+            loss_values = accelerator.gather_for_metrics({"loss": loss.detach()})
+            train_loss.update(loss_values["loss"])
+
+            if step > 0 and step % config["save_every"] == 0:
+                accelerator.save_state(f"{config['output_dir']}/step_{step}")
+
+            if step > 0 and step % config["eval_every"] == 0:
+                val_loss = evaluate(config, model, val_dataloader)
+
+                log_train = {
+                    "train_loss": train_loss.compute()
+                }
+                log_val = {
+                    "val_loss": val_loss.compute()
+                }
+
+                if config["wandb"]:
+                    accelerator.log({**log_train, **log_val}, step=step)
+
+                accelerator.print(f"Current LR: {scheduler.get_last_lr()[0]}")
+                accelerator.print(format_metrics(log_train, "train", f" step {step} "))
+                accelerator.print(format_metrics(log_val, "val", f" step {step} "))
+
+                train_loss.reset()
+
+        accelerator.print(f"Epoch {epoch} finished")
+        accelerator.print(f"Pushing to HF hub")
+        accelerator.wait_for_everyone()
+        unwrapped_model = accelerator.unwrap_model(model)
+        if accelerator.is_main_process:
+            unwrapped_model.push_to_hub(config["save_name"] + "_first_epoch", private=True)

     accelerator.wait_for_everyone()
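The whole step loop is now nested in an epoch loop driven by the new num_epochs config key, and each epoch ends with a push to the Hugging Face Hub (the diff hard-codes a "_first_epoch" repo suffix). The push follows the usual accelerate discipline: barrier, unwrap, then upload from the main process only. A runnable toy sketch of that pattern (the Linear model and random data are stand-ins for the LLM and dataloader; the per-epoch suffix generalizes the hard-coded one):

import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

config = {"num_epochs": 2, "save_name": "nomic-ai/vicuna-full-multi-turn"}

accelerator = Accelerator()
model = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-2)
dataloader = DataLoader(TensorDataset(torch.randn(64, 4)), batch_size=8)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for epoch in range(config["num_epochs"]):
    model.train()
    for (batch,) in dataloader:
        loss = model(batch).pow(2).mean()
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()

    accelerator.print(f"Epoch {epoch} finished")
    # barrier so every rank finishes the epoch, then only the main
    # process talks to the Hub; concurrent pushes would collide
    accelerator.wait_for_everyone()
    unwrapped = accelerator.unwrap_model(model)
    if accelerator.is_main_process and hasattr(unwrapped, "push_to_hub"):
        # a real PreTrainedModel has push_to_hub; this toy Linear does not
        unwrapped.push_to_hub(config["save_name"] + f"_epoch_{epoch}", private=True)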
@@ -168,7 +176,8 @@ def train(accelerator, config):
         state_dict=accelerator.get_state_dict(model),
     )

-    unwrapped_model.push_to_hub(config["save_name"], private=True)
+    if accelerator.is_main_process:
+        unwrapped_model.push_to_hub(config["save_name"], private=True)

     accelerator.end_training()
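The final push gets the same is_main_process guard as the per-epoch one: under multi-GPU training every rank runs train(), so an unguarded push_to_hub would fire once per process. Relatedly, both configs still ship checkpoint: null while save_every writes full state with accelerator.save_state; resuming is the mirror call. A small sketch of that save/resume pairing (paths and the step value are illustrative):

import torch
from accelerate import Accelerator

config = {"output_dir": "ckpts/demo", "checkpoint": None}

accelerator = Accelerator()
model = torch.nn.Linear(2, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
model, optimizer = accelerator.prepare(model, optimizer)

if config["checkpoint"] is not None:
    # resume model, optimizer, and RNG state from an earlier save_state dir
    accelerator.load_state(config["checkpoint"])

step = 800  # pretend we just hit a save_every boundary
accelerator.save_state(f"{config['output_dir']}/step_{step}")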