Add CPU LoRA training (#938)

(It's very slow)
IggoOnCode 2023-04-10 22:29:00 +02:00 committed by GitHub
parent 0caf718a21
commit 09d8119e3c
3 changed files with 5 additions and 4 deletions

@@ -212,7 +212,7 @@ Optionally, you can use the following command-line flags:
| Flag | Description |
|---------------------------------------------|-------------|
-| `--cpu` | Use the CPU to generate text. |
+| `--cpu` | Use the CPU to generate text. Warning: Training on CPU is extremely slow.|
| `--auto-devices` | Automatically split the model across the available GPU(s) and CPU. |
| `--gpu-memory GPU_MEMORY [GPU_MEMORY ...]` | Maximum GPU memory in GiB to be allocated per GPU. Example: `--gpu-memory 10` for a single GPU, `--gpu-memory 10 5` for two GPUs. You can also set values in MiB like `--gpu-memory 3500MiB`. |
| `--cpu-memory CPU_MEMORY` | Maximum CPU memory in GiB to allocate for offloaded weights. Same as above.|

@@ -90,7 +90,7 @@ parser.add_argument('--extensions', type=str, nargs="+", help='The list of exten
parser.add_argument('--verbose', action='store_true', help='Print the prompts to the terminal.')
# Accelerate/transformers
-parser.add_argument('--cpu', action='store_true', help='Use the CPU to generate text.')
+parser.add_argument('--cpu', action='store_true', help='Use the CPU to generate text. Warning: Training on CPU is extremely slow.')
parser.add_argument('--auto-devices', action='store_true', help='Automatically split the model across the available GPU(s) and CPU.')
parser.add_argument('--gpu-memory', type=str, nargs="+", help='Maximum GPU memory in GiB to be allocated per GPU. Example: --gpu-memory 10 for a single GPU, --gpu-memory 10 5 for two GPUs. You can also set values in MiB like --gpu-memory 3500MiB.')
parser.add_argument('--cpu-memory', type=str, help='Maximum CPU memory in GiB to allocate for offloaded weights. Same as above.')
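
The new `--cpu` help text is the only change here; the flag itself is a plain boolean argparse switch. As a quick illustration (not code from this commit), `action='store_true'` means the flag defaults to `False` and becomes `True` only when it is passed on the command line:

```python
# Illustrative sketch of how the --cpu flag behaves; not part of this commit.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--cpu', action='store_true',
                    help='Use the CPU to generate text. Warning: Training on CPU is extremely slow.')

print(parser.parse_args([]).cpu)         # False: flag absent, GPU code path
print(parser.parse_args(['--cpu']).cpu)  # True: flag present, CPU code path
```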

@@ -238,7 +238,7 @@ def do_train(lora_name: str, micro_batch_size: int, batch_size: int, epochs: int
warmup_steps=100,
num_train_epochs=epochs,
learning_rate=actual_lr,
-fp16=True,
+fp16=False if shared.args.cpu else True,
logging_steps=20,
evaluation_strategy="steps" if eval_data is not None else "no",
save_strategy="steps",
@@ -248,7 +248,8 @@ def do_train(lora_name: str, micro_batch_size: int, batch_size: int, epochs: int
save_total_limit=3,
load_best_model_at_end=True if eval_data is not None else False,
# TODO: Enable multi-device support
-ddp_find_unused_parameters=None
+ddp_find_unused_parameters=None,
+no_cuda=shared.args.cpu
),
data_collator=transformers.DataCollatorForLanguageModeling(shared.tokenizer, mlm=False),
callbacks=list([Callbacks()])
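
Together, the two hunks above are what make CPU training possible: `fp16` mixed precision is only enabled when a GPU is in use (it isn't supported for CPU training in the Hugging Face Trainer), and `no_cuda` forces the Trainer onto the CPU whenever `--cpu` is passed. A minimal sketch of the resulting pattern, with illustrative hyperparameters standing in for the values `do_train` actually computes:

```python
# Sketch of the TrainingArguments pattern applied above. The hyperparameters
# and output path are illustrative stand-ins, not values from this commit.
import transformers

cpu = True  # stand-in for shared.args.cpu

training_args = transformers.TrainingArguments(
    output_dir="loras/example",        # hypothetical output directory
    per_device_train_batch_size=4,     # illustrative
    num_train_epochs=3,                # illustrative
    learning_rate=3e-4,                # illustrative
    fp16=not cpu,                      # half precision only when on GPU
    no_cuda=cpu,                       # keep the Trainer off CUDA devices
)
print(training_args.device)            # prints "cpu" when no_cuda is set
```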