models2.json: add gemma model
Signed-off-by: Jared Van Bortel <jared@nomic.ai>
commit 32837fb3a0
parent 7810b757c9
@@ -1,6 +1,22 @@
 [
+    {
+        "order": "a",
+        "md5sum": "6d1ca6e9533d177361fe2612a2c87474",
+        "name": "Gemma Instruct",
+        "filename": "gemma-7b-it.Q4_0.gguf",
+        "filesize": "4809316512",
+        "requires": "2.5.0",
+        "ramrequired": "8",
+        "parameters": "7 billion",
+        "quant": "q4_0",
+        "type": "Gemma",
+        "description": "<strong>A state-of-the-art open model from Google</strong><br><ul><li>Fast responses</li><li>Chat based model</li><li>Trained by Google</li><li>Licensed for commercial use</li><li>Gemma is provided under and subject to the Gemma Terms of Use found at <a href=\"https://ai.google.dev/gemma/terms\">ai.google.dev/gemma/terms</a></li></ul>",
+        "url": "https://gpt4all.io/models/gguf/gemma-7b-it.Q4_0.gguf",
+        "promptTemplate": "<start_of_turn>user\n%1<end_of_turn>\n<start_of_turn>model\n\n",
+        "systemPrompt": ""
+    },
     {
         "order": "b",
         "md5sum": "48de9538c774188eb25a7e9ee024bbd3",
         "name": "Mistral OpenOrca",
         "filename": "mistral-7b-openorca.Q4_0.gguf",
@@ -15,22 +31,6 @@
         "promptTemplate": "<|im_start|>user\n%1<|im_end|><|im_start|>assistant\n",
         "systemPrompt": "<|im_start|>system\nYou are MistralOrca, a large language model trained by Alignment Lab AI. For multi-step problems, write out your reasoning for each step.\n<|im_end|>"
     },
-    {
-        "order": "b",
-        "md5sum": "97463be739b50525df56d33b26b00852",
-        "name": "Mistral Instruct",
-        "filename": "mistral-7b-instruct-v0.1.Q4_0.gguf",
-        "filesize": "4108916384",
-        "requires": "2.5.0",
-        "ramrequired": "8",
-        "parameters": "7 billion",
-        "quant": "q4_0",
-        "type": "Mistral",
-        "systemPrompt": " ",
-        "description": "<strong>Best overall fast instruction following model</strong><br><ul><li>Fast responses</li><li>Trained by Mistral AI<li>Uncensored</li><li>Licensed for commercial use</li></ul>",
-        "url": "https://gpt4all.io/models/gguf/mistral-7b-instruct-v0.1.Q4_0.gguf",
-        "promptTemplate": "[INST] %1 [/INST]"
-    },
     {
         "order": "c",
         "md5sum": "c4c78adf744d6a20f05c8751e3961b84",
@@ -47,6 +47,22 @@
         "url": "https://gpt4all.io/models/gguf/gpt4all-falcon-newbpe-q4_0.gguf",
         "promptTemplate": "### Instruction:\n%1\n### Response:\n"
     },
+    {
+        "order": "d",
+        "md5sum": "97463be739b50525df56d33b26b00852",
+        "name": "Mistral Instruct",
+        "filename": "mistral-7b-instruct-v0.1.Q4_0.gguf",
+        "filesize": "4108916384",
+        "requires": "2.5.0",
+        "ramrequired": "8",
+        "parameters": "7 billion",
+        "quant": "q4_0",
+        "type": "Mistral",
+        "systemPrompt": " ",
+        "description": "<strong>Best overall fast instruction following model</strong><br><ul><li>Fast responses</li><li>Trained by Mistral AI<li>Uncensored</li><li>Licensed for commercial use</li></ul>",
+        "url": "https://gpt4all.io/models/gguf/mistral-7b-instruct-v0.1.Q4_0.gguf",
+        "promptTemplate": "[INST] %1 [/INST]"
+    },
     {
         "order": "e",
         "md5sum": "00c8593ba57f5240f59662367b3ed4a5",
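Note: the new entry's `promptTemplate`, `md5sum`, and `filesize` fields are what a client uses when downloading and prompting the model. The sketch below is only a rough illustration, not GPT4All's actual implementation; it assumes `%1` is the placeholder for the user's message (as the other templates in this file suggest) and that a download is validated by comparing byte size and MD5 digest. The helper names (`render_prompt`, `verify_download`, `GEMMA_ENTRY`) are hypothetical.

```python
import hashlib
from pathlib import Path

# Fields copied from the new models2.json entry above.
GEMMA_ENTRY = {
    "name": "Gemma Instruct",
    "filename": "gemma-7b-it.Q4_0.gguf",
    "filesize": "4809316512",
    "md5sum": "6d1ca6e9533d177361fe2612a2c87474",
    "promptTemplate": "<start_of_turn>user\n%1<end_of_turn>\n<start_of_turn>model\n\n",
    "systemPrompt": "",
}


def render_prompt(entry: dict, user_message: str) -> str:
    """Substitute the user's message for the %1 placeholder and prepend
    the system prompt (empty for Gemma Instruct). Illustrative only."""
    return entry["systemPrompt"] + entry["promptTemplate"].replace("%1", user_message)


def verify_download(entry: dict, path: Path) -> bool:
    """Check a downloaded GGUF file against the filesize and md5sum
    recorded in models2.json. Hashes in chunks to avoid loading the
    ~4.8 GB file into memory."""
    if path.stat().st_size != int(entry["filesize"]):
        return False
    md5 = hashlib.md5()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            md5.update(chunk)
    return md5.hexdigest() == entry["md5sum"]


if __name__ == "__main__":
    print(render_prompt(GEMMA_ENTRY, "Why is the sky blue?"))
```

Running the example prints the Gemma-format turn markers with the question in the user slot, matching the template string added in this commit.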