# text-generation-webui/models/config.yaml
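#
# Model-specific default parameters for text-generation-webui.
# Each top-level key is a regular expression that appears to be matched
# case-insensitively against the model's folder name; every matching entry
# applies its parameters, so the catch-all '.*' block below provides defaults
# that the more specific patterns further down can override.
# The quoted 'None' values appear to correspond to the "None" choice in the
# UI dropdowns (i.e. "not set"), not to a YAML null.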
.*:
  wbits: 'None'
  model_type: 'None'
  groupsize: 'None'
  pre_layer: 0
  mode: 'cai-chat'
  skip_special_tokens: true
  custom_stopping_strings: ''
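# The following patterns catch GPTQ-quantized checkpoints by common naming
# conventions: wbits is the quantization bit width and groupsize the GPTQ
# group size, so names containing "4bit"/"int4" together with "128g"/"gr128"
# are treated as 4-bit weights with group size 128 (and likewise for 3-bit).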
llama-[0-9]*b-4bit$:
  wbits: 4
  model_type: 'llama'
.*-(4bit|int4)-(gr128|128g):
  wbits: 4
  groupsize: 128
.*-(gr128|128g)-(4bit|int4):
  wbits: 4
  groupsize: 128
.*-3bit-(gr128|128g):
  wbits: 3
  groupsize: 128
.*-(gr128|128g)-3bit:
  wbits: 3
  groupsize: 128
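# The entries below pair known chat/instruct models with mode: 'instruct' and
# a prompt template; the instruction_template names presumably refer to the
# instruction-following templates shipped with the web UI (e.g. 'Alpaca',
# 'Vicuna-v0', 'Open Assistant').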
.*oasst-sft-1-pythia-12b:
  mode: 'instruct'
  instruction_template: 'Open Assistant'
.*vicuna:
  mode: 'instruct'
  instruction_template: 'Vicuna-v0'
.*alpaca:
  mode: 'instruct'
  instruction_template: 'Alpaca'
.*alpaca-native-4bit:
  mode: 'instruct'
  instruction_template: 'Alpaca'
  wbits: 4
  groupsize: 128
.*(galactica|oasst):
  skip_special_tokens: false
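# Note on custom_stopping_strings: the value is written the same way it would
# be typed into the UI field, and it appears to be parsed as a comma-separated
# list of double-quoted strings, hence the nested quotes in '"### End"' below
# and in '"\n###"' for the llava entry.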
.*dolly-v[0-9]-[0-9]*b:
  mode: 'instruct'
  instruction_template: 'Alpaca'
  skip_special_tokens: false
  custom_stopping_strings: '"### End"'
.*koala:
  mode: 'instruct'
  instruction_template: 'Koala'
.*chatglm:
  mode: 'instruct'
  instruction_template: 'ChatGLM'
.*llava:
  mode: 'instruct'
  model_type: 'llama'
  instruction_template: 'LLaVA'
  custom_stopping_strings: '"\n###"'
.*raven:
  mode: 'instruct'
  instruction_template: 'RWKV-Raven'
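# Hypothetical example of a user-added entry (the model name is made up for
# illustration): a folder named my-llama-13b-4bit-128g would already be
# caught by the generic 4-bit patterns above, but an explicit entry could pin
# its settings like this:
#
# my-llama-13b-4bit-128g:
#   wbits: 4
#   groupsize: 128
#   model_type: 'llama'
#   mode: 'instruct'
#   instruction_template: 'Alpaca'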