Mixtral Instruct: detect prompt format for llama.cpp loader

Workaround until the tokenizer.chat_template kv field gets implemented
oobabooga 2023-12-15 06:58:29 -08:00
parent 3bbf6c601d
commit a060908d6c
2 changed files with 2 additions and 11 deletions
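For context: newer GGUF conversions already store the chat template under the tokenizer.chat_template metadata key, which is what this filename-based workaround stands in for. A rough sketch of that lookup, assuming the gguf Python package (the model path is illustrative, and the parts/data indexing follows gguf-py's GGUFReader, so treat it as an assumption):

from gguf import GGUFReader

# Parse the GGUF file's key/value metadata (path is illustrative).
reader = GGUFReader("models/mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf")

# 'tokenizer.chat_template' is the kv field this commit works around;
# older conversions may not contain it, hence the None check.
field = reader.fields.get("tokenizer.chat_template")
if field is None:
    print("no chat template stored in this GGUF file")
else:
    # String values live as raw bytes in field.parts, indexed by field.data
    # (assumed from gguf-py's reader API).
    template = bytes(field.parts[field.data[0]]).decode("utf-8")
    print(template)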

instruction-templates/Mistral.yaml

@@ -1,16 +1,7 @@
 instruction_template: |-
-  {%- set found_item = false -%}
-  {%- for message in messages -%}
-      {%- if message['role'] == 'system' -%}
-          {%- set found_item = true -%}
-      {%- endif -%}
-  {%- endfor -%}
-  {%- if not found_item -%}
-      {{- '' + '' + '' -}}
-  {%- endif %}
   {%- for message in messages %}
       {%- if message['role'] == 'system' -%}
-          {{- '' + message['content'] + '' -}}
+          {{- message['content'] -}}
       {%- else -%}
           {%- if message['role'] == 'user' -%}
               {{-'[INST] ' + message['content'] + ' [/INST]'-}}
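The surviving template passes system content through unchanged and wraps user turns in [INST] ... [/INST]. A quick jinja2 sanity check of the rendered output; the hunk cuts off before the template's closing branches, so everything past the last line shown above is assumed:

from jinja2 import Template

# Reassembled from the hunk above; the assistant branch and the closing
# endif/endfor tags are assumed, since the diff truncates the template.
template = Template(
    "{%- for message in messages %}"
    "{%- if message['role'] == 'system' -%}"
    "{{- message['content'] -}}"
    "{%- else -%}"
    "{%- if message['role'] == 'user' -%}"
    "{{- '[INST] ' + message['content'] + ' [/INST]' -}}"
    "{%- else -%}"
    "{{- message['content'] -}}"
    "{%- endif -%}"
    "{%- endif -%}"
    "{%- endfor -%}"
)

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]

print(template.render(messages=messages))
# You are a helpful assistant.[INST] Hello! [/INST]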

models/config.yaml

@@ -174,7 +174,7 @@
   instruction_template: 'OpenChat'
 .*codellama.*instruct:
   instruction_template: 'Llama-v2'
-.*mistral.*instruct:
+.*(mistral|mixtral).*instruct:
   instruction_template: 'Mistral'
 .*mistral.*openorca:
   instruction_template: 'ChatML'
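With the pattern widened, any model name containing 'mixtral' and 'instruct' now resolves to the Mistral template. A small illustration of the regex lookup (the helper and the trimmed pattern list are illustrative, not the webui's actual loading code):

import re

# Mirrors the models/config.yaml entries in the hunk above; first match wins.
PATTERNS = [
    (r".*codellama.*instruct", "Llama-v2"),
    (r".*(mistral|mixtral).*instruct", "Mistral"),
    (r".*mistral.*openorca", "ChatML"),
]

def detect_template(model_name):
    """Return the first matching instruction template, or None."""
    name = model_name.lower()
    for pattern, template in PATTERNS:
        if re.match(pattern, name):
            return template
    return None

# Before this commit, '.*mistral.*instruct' missed Mixtral models entirely.
print(detect_template("Mixtral-8x7B-Instruct-v0.1.Q4_K_M.gguf"))  # Mistral
print(detect_template("Mistral-7B-Instruct-v0.2"))                # Mistral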