# text-generation-webui — models/config.yaml
# Per-model default settings.
# Each top-level key is a regular expression matched against the model's
# directory/file name; every matching entry is applied, so later, more
# specific patterns can override the catch-all defaults below.

# Catch-all defaults for any model name.
.*:
    wbits: 'None'
    model_type: 'None'
    groupsize: 'None'
    pre_layer: 0
    mode: 'chat'
    skip_special_tokens: true
    custom_stopping_strings: ''

# GPTQ LLaMA checkpoints, e.g. "llama-7b-4bit".
llama-[0-9]*b-4bit$:
    wbits: 4
    model_type: 'llama'

# 4-bit quantized models with group size 128 (either order in the name).
.*-(4bit|int4)-(gr128|128g):
    wbits: 4
    groupsize: 128
.*-(gr128|128g)-(4bit|int4):
    wbits: 4
    groupsize: 128

# 3-bit quantized models with group size 128 (either order in the name).
.*-3bit-(gr128|128g):
    wbits: 3
    groupsize: 128
.*-(gr128|128g)-3bit:
    wbits: 3
    groupsize: 128

# Instruction-tuned models: switch to instruct mode and pick the
# matching prompt template.
.*(oasst-sft-1-pythia-12b|oasst-sft-6-llama-30b):
    mode: 'instruct'
    instruction_template: 'Open Assistant'
.*vicuna:
    mode: 'instruct'
    instruction_template: 'Vicuna-v0'
.*alpaca:
    mode: 'instruct'
    instruction_template: 'Alpaca'
.*alpaca-native-4bit:
    mode: 'instruct'
    instruction_template: 'Alpaca'
    wbits: 4
    groupsize: 128

# These models need special tokens kept in the output.
.*(galactica|oasst):
    skip_special_tokens: false
.*dolly-v[0-9]-[0-9]*b:
    mode: 'instruct'
    instruction_template: 'Alpaca'
    skip_special_tokens: false
    custom_stopping_strings: '"### End"'
.*koala:
    mode: 'instruct'
    instruction_template: 'Koala'
.*chatglm:
    mode: 'instruct'
    instruction_template: 'ChatGLM'
.*llava:
    mode: 'instruct'
    model_type: 'llama'
    instruction_template: 'LLaVA'
    custom_stopping_strings: '"\n###"'
.*raven:
    mode: 'instruct'
    instruction_template: 'RWKV-Raven'
.*moss-moon.*sft:
    mode: 'instruct'
    instruction_template: 'MOSS'

# Names without "llama" in them that nevertheless use the llama model type.
.*pygmalion-7b:
    model_type: 'llama'
.*metharme-7b:
    model_type: 'llama'