Detect n_gqa and prompt template for wizardlm-70b

oobabooga 2023-08-09 10:38:35 -07:00
parent a4e48cbdb6
commit a3295dd666
2 changed files with 2 additions and 22 deletions

@@ -1,4 +0,0 @@
-user: ""
-bot: "### Response:"
-turn_template: "<|user-message|>\n\n<|bot|><|bot-message|>\n\n</s>"
-context: ""
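
The removed file defined the WizardLM instruction template: an empty user prefix, a "### Response:" bot prefix, and a turn_template describing how a single exchange is rendered. As a rough illustration of how those placeholders expand into prompt text (render_turn and the example messages are hypothetical, not code from this repository):

def render_turn(template, user_message, bot_message,
                user_prefix="", bot_prefix="### Response:"):
    # Hypothetical helper: substitute the turn_template placeholders the way
    # an instruction-following template is typically expanded.
    return (template
            .replace("<|user|>", user_prefix)
            .replace("<|user-message|>", user_message)
            .replace("<|bot|>", bot_prefix)
            .replace("<|bot-message|>", bot_message))

turn_template = "<|user-message|>\n\n<|bot|><|bot-message|>\n\n</s>"
print(render_turn(turn_template, "Write a haiku about autumn.", "Leaves drift slowly down..."))
# Write a haiku about autumn.
#
# ### Response:Leaves drift slowly down...
#
# </s>

With this commit the template is dropped, and wizardlm models fall through to the Vicuna-v1.1 template instead (see the config changes below).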

@@ -53,9 +53,6 @@ llama-65b-gptq-3bit:
 .*vicuna.*(1.1|1_1|1.3|1_3):
   mode: 'instruct'
   instruction_template: 'Vicuna-v1.1'
-.*wizard.*vicuna:
-  mode: 'instruct'
-  instruction_template: 'Vicuna-v1.1'
 .*stable.*vicuna:
   mode: 'instruct'
   instruction_template: 'StableVicuna'
@@ -108,10 +105,6 @@ llama-65b-gptq-3bit:
   truncation_length: 4096
 .*stablelm-base:
   truncation_length: 4096
-.*wizardlm:
-  mode: 'instruct'
-  model_type: 'llama'
-  instruction_template: 'WizardLM'
 .*galactica.*finetuned:
   mode: 'instruct'
   instruction_template: 'Galactica Finetuned'
@@ -189,21 +182,12 @@ llama-65b-gptq-3bit:
 .*airoboros.*1.2:
   mode: 'instruct'
   instruction_template: 'Airoboros-v1.2'
-.*WizardLM-30B-V1.0:
-  mode: 'instruct'
-  instruction_template: 'Vicuna-v1.1'
-TheBloke_WizardLM-30B-GPTQ:
-  mode: 'instruct'
-  instruction_template: 'Vicuna-v1.1'
 .*alpa(cino|sta):
   mode: 'instruct'
   instruction_template: 'Alpaca'
 .*hippogriff:
   mode: 'instruct'
   instruction_template: 'Hippogriff'
-.*gpt4all-.*-snoozy:
-  mode: 'instruct'
-  instruction_template: 'WizardLM'
 .*lazarus:
   mode: 'instruct'
   instruction_template: 'Alpaca'
@@ -267,7 +251,7 @@ TheBloke_WizardLM-30B-GPTQ:
   mode: 'instruct'
   instruction_template: 'Alpaca'
   truncation_length: 8192
-.*wizardlm-.*-v1.1:
+.*wizardlm:
   mode: 'instruct'
   instruction_template: 'Vicuna-v1.1'
 .*godzilla:
@@ -279,7 +263,7 @@ TheBloke_WizardLM-30B-GPTQ:
 .*llama-(2|v2).*chat:
   mode: 'instruct'
   instruction_template: 'Llama-v2'
-.*llama.*70b.*ggml.*\.bin:
+.*70b.*ggml.*\.bin:
   n_gqa: 8
 .*newhope:
   mode: 'instruct'
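
The config entries above are regular expressions keyed on the model name: any 70B GGML file now gets n_gqa: 8 (the grouped-query-attention value that Llama-2 70B GGML models require in llama.cpp), and any name containing "wizardlm" gets instruct mode with the Vicuna-v1.1 template. A minimal sketch of how such a mapping can be resolved for one model name (the resolve_settings helper, the case-insensitive matching, and the later-entries-override behavior are assumptions for illustration, not taken from this repository):

import re

# Two of the rules touched by this commit, keyed by regex.
MODEL_RULES = {
    r".*wizardlm": {"mode": "instruct", "instruction_template": "Vicuna-v1.1"},
    r".*70b.*ggml.*\.bin": {"n_gqa": 8},
}

def resolve_settings(model_name, rules=MODEL_RULES):
    # Hypothetical resolver: every matching pattern contributes its settings;
    # later matches overwrite earlier ones on key conflicts.
    settings = {}
    for pattern, values in rules.items():
        if re.match(pattern, model_name.lower()):
            settings.update(values)
    return settings

print(resolve_settings("WizardLM-70B-V1.0.ggmlv3.q4_K_M.bin"))
# {'mode': 'instruct', 'instruction_template': 'Vicuna-v1.1', 'n_gqa': 8}

Because the broadened pattern no longer requires "llama" in the file name, wizardlm-70b GGML files match the n_gqa rule as well as the new wizardlm template rule, which is what the commit title describes.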