diff --git a/README.md b/README.md
index 241f494c..f74b9314 100644
--- a/README.md
+++ b/README.md
@@ -240,7 +240,7 @@ Optionally, you can use the following command-line flags:
 | `--wbits WBITS` | Load a pre-quantized model with specified precision in bits. 2, 3, 4 and 8 are supported. |
 | `--model_type MODEL_TYPE` | Model type of pre-quantized model. Currently LLaMA, OPT, and GPT-J are supported. |
 | `--groupsize GROUPSIZE` | Group size. |
-| `--pre_layer PRE_LAYER` | The number of layers to allocate to the GPU. Setting this parameter enables CPU offloading for 4-bit models. |
+| `--pre_layer PRE_LAYER [PRE_LAYER ...]` | The number of layers to allocate to the GPU. Setting this parameter enables CPU offloading for 4-bit models. For multi-GPU, write the numbers separated by spaces, e.g. `--pre_layer 30 60`. |
 | `--checkpoint CHECKPOINT` | The path to the quantized checkpoint file. If not specified, it will be automatically detected. |
 | `--monkey-patch` | Apply the monkey patch for using LoRAs with quantized models. |
 | `--quant_attn` | (triton) Enable quant attention. |
diff --git a/docs/GPTQ-models-(4-bit-mode).md b/docs/GPTQ-models-(4-bit-mode).md
index 37f6496c..0ec28fa6 100644
--- a/docs/GPTQ-models-(4-bit-mode).md
+++ b/docs/GPTQ-models-(4-bit-mode).md
@@ -107,6 +107,8 @@ This is the performance:
 Output generated in 123.79 seconds (1.61 tokens/s, 199 tokens)
 ```
 
+You can also use multiple GPUs with `pre_layer` when using the oobabooga fork of GPTQ. For example, `--pre_layer 30 60` will load a LLaMA-30B model half onto your first GPU and half onto your second, while `--pre_layer 20 40` will load 20 layers onto GPU-0, 20 layers onto GPU-1, and leave the remaining 20 layers offloaded to the CPU.
+
 ## Using LoRAs in 4-bit mode
 
 At the moment, this feature is not officially supported by the relevant libraries, but a patch exists and is supported by this web UI: https://github.com/johnsmith0031/alpaca_lora_4bit
diff --git a/modules/GPTQ_loader.py b/modules/GPTQ_loader.py
index 296077c6..a223f41a 100644
--- a/modules/GPTQ_loader.py
+++ b/modules/GPTQ_loader.py
@@ -172,7 +172,12 @@ def load_quantized(model_name):
 
     # qwopqwop200's offload
     if model_type == 'llama' and shared.args.pre_layer:
-        model = load_quant(str(path_to_model), str(pt_path), shared.args.wbits, shared.args.groupsize, shared.args.pre_layer)
+        if len(shared.args.pre_layer) == 1:
+            pre_layer = shared.args.pre_layer[0]
+        else:
+            pre_layer = shared.args.pre_layer
+
+        model = load_quant(str(path_to_model), str(pt_path), shared.args.wbits, shared.args.groupsize, pre_layer)
     else:
         threshold = False if model_type == 'gptj' else 128
         model = load_quant(str(path_to_model), str(pt_path), shared.args.wbits, shared.args.groupsize, kernel_switch_threshold=threshold)
diff --git a/modules/shared.py b/modules/shared.py
index 58ba6eaf..3e1241c5 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -130,7 +130,7 @@ parser.add_argument('--n-gpu-layers', type=int, default=0, help='Number of layer
 parser.add_argument('--wbits', type=int, default=0, help='Load a pre-quantized model with specified precision in bits. 2, 3, 4 and 8 are supported.')
 parser.add_argument('--model_type', type=str, help='Model type of pre-quantized model. Currently LLaMA, OPT, and GPT-J are supported.')
 parser.add_argument('--groupsize', type=int, default=-1, help='Group size.')
-parser.add_argument('--pre_layer', type=int, default=0, help='The number of layers to allocate to the GPU. Setting this parameter enables CPU offloading for 4-bit models.')
+parser.add_argument('--pre_layer', type=int, nargs='+', help='The number of layers to allocate to the GPU. Setting this parameter enables CPU offloading for 4-bit models. For multi-GPU, write the numbers separated by spaces, e.g. --pre_layer 30 60.')
 parser.add_argument('--checkpoint', type=str, help='The path to the quantized checkpoint file. If not specified, it will be automatically detected.')
 parser.add_argument('--monkey-patch', action='store_true', help='Apply the monkey patch for using LoRAs with quantized models.')
 parser.add_argument('--quant_attn', action='store_true', help='(triton) Enable quant attention.')
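
For context, here is a minimal standalone sketch (not part of the patch) of what the `nargs='+'` change does at the command line and how the `GPTQ_loader.py` change normalizes the parsed value before passing it to `load_quant`. The throwaway parser and print statements below are illustrative and assume only the Python standard library.

```python
# Illustrative only: a throwaway parser that mimics the patched --pre_layer flag.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--pre_layer', type=int, nargs='+',
                    help='Layers to allocate to the GPU(s), separated by spaces.')

# `--pre_layer 30 60` now parses to a list of ints.
args = parser.parse_args(['--pre_layer', '30', '60'])
print(args.pre_layer)  # [30, 60]

# Same normalization as the patched GPTQ_loader.py: a single value is unwrapped
# to a plain int (preserving the old single-GPU behaviour), while multiple
# values are passed through as a list for multi-GPU splitting.
pre_layer = args.pre_layer[0] if len(args.pre_layer) == 1 else args.pre_layer
print(pre_layer)  # [30, 60]: per the docs change, layers up to 30 go to the
                  # first GPU, up to 60 to the second, and the rest to the CPU.
```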