mirror of https://github.com/nomic-ai/gpt4all.git
commit 5d562bfa60
parent ad74902a1d

Update models.json

Signed-off-by: Andriy Mulyar <andriy.mulyar@gmail.com>
@@ -5,14 +5,14 @@
     "filesize": "3785248281",
     "isDefault": "true",
     "bestGPTJ": "true",
-    "description": "Current best commercially licensable model based on GPT-J and trained by Nomic AI on the latest curated GPT4All dataset."
+    "description": "GPT-J 6B finetuned by Nomic AI on the latest GPT4All dataset.\nLicensed for commercial use."
   },
   {
     "md5sum": "91f886b68fbce697e9a3cd501951e455",
     "filename": "ggml-gpt4all-l13b-snoozy.bin",
     "filesize": "8136770688",
     "bestLlama": "true",
-    "description": "Current best non-commercially licensable model based on Llama 13b and trained by Nomic AI on the latest curated GPT4All dataset."
+    "description": "LLaMA 13B finetuned by Nomic AI on the latest GPT4All dataset.\nCannot be used commercially."
   },
   {
     "md5sum": "756249d3d6abe23bde3b1ae272628640",
@@ -21,66 +21,64 @@
     "isDefault": "true",
     "bestMPT": "true",
     "requires": "2.4.1",
-    "description": "Current best non-commercially licensable chat model based on MPT and trained by Mosaic ML."
+    "description": "MPT 7B chat model trained by Mosaic ML.\nCannot be used commercially."
   },
   {
     "md5sum": "29119f8fa11712704c6b22ac5ab792ea",
     "filename": "ggml-vicuna-7b-1.1-q4_2.bin",
     "filesize": "4212859520",
-    "description": "A non-commercially licensable model based on Llama 7b and trained by teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego."
+    "description": "LLaMA 7B finetuned by teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego.\nCannot be used commercially."
   },
   {
     "md5sum": "95999b7b0699e2070af63bf5d34101a8",
     "filename": "ggml-vicuna-13b-1.1-q4_2.bin",
     "filesize": "8136770688",
-    "description": "A non-commercially licensable model based on Llama 13b and trained by teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego."
+    "description": "LLaMA 13B finetuned by teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego.\nCannot be used commercially."
   },
   {
     "md5sum": "99e6d129745a3f1fb1121abed747b05a",
     "filename": "ggml-wizardLM-7B.q4_2.bin",
     "filesize": "4212864640",
-    "description": "A non-commercially licensable model based on Llama 7b and trained by Microsoft and Peking University."
+    "description": "LLaMA 7B finetuned by Microsoft and Peking University.\nCannot be used commercially."
   },
   {
     "md5sum": "6cb4ee297537c9133bddab9692879de0",
     "filename": "ggml-stable-vicuna-13B.q4_2.bin",
     "filesize": "8136777088",
-    "description": "A non-commercially licensable model based on Llama 13b and RLHF trained by Stable AI."
+    "description": "LLaMa 13B finetuned with RLHF by Stability AI.\nCannot be used commercially."
   },
   {
     "md5sum": "120c32a51d020066288df045ef5d52b9",
     "filename": "ggml-mpt-7b-base.bin",
     "filesize": "4854401028",
     "requires": "2.4.1",
-    "description": "A commercially licensable model base pre-trained by Mosaic ML."
+    "description": "MPT 7B pre-trained by Mosaic ML. Trained for text completion with no assistant finetuning.\nLicensed for commercial use."
   },
   {
     "md5sum": "d5eafd5b0bd0d615cfd5fd763f642dfe",
     "filename": "ggml-nous-gpt4-vicuna-13b.bin",
     "filesize": "8136777088",
-    "description": "A non-commercially licensable model based on Vicuna 13b, fine-tuned on ~180,000 instructions, trained by Nous Research."
+    "description": "LLaMa 13B fine-tuned on ~180,000 instructions by Nous Research.\nCannot be used commercially."
   },
   {
     "md5sum": "1cfa4958f489f0a0d1ffdf6b37322809",
     "filename": "ggml-mpt-7b-instruct.bin",
     "filesize": "4854401028",
     "requires": "2.4.1",
-    "description": "A commercially licensable instruct model based on MPT and trained by Mosaic ML."
+    "description": "MPT 7B instruction finetuned by Mosaic ML.\nLicensed for commercial use."
   },
   {
     "md5sum": "679fc463f01388ea2d339664af0a0836",
     "filename": "ggml-wizard-13b-uncensored.bin",
     "filesize": "8136777088",
-    "description": "A non-commercially licensable model based on Wizard Vicuna 13b."
+    "description": "LLaMa 13B finetuned on the uncensored assistant and instruction data.\nCannot be used commercially."
   },
   {
     "md5sum": "f26b99c320ff358f4223a973217eb31e",
     "filename": "ggml-v3-13b-hermes-q5_1.bin",
     "filesize": "8136777088",
     "requires": "2.4.5",
-    "description": "Nous-Hermes-13b is a state-of-the-art language model fine-tuned on over 300,000 instructions. This model was fine-tuned by Nous Research, with Teknium and Karan4D leading the fine tuning process and dataset curation, Redmond AI sponsoring the compute, and several other contributors. The result is an enhanced Llama 13b model that rivals GPT-3.5-turbo in performance across a variety of tasks.
-
-    This model stands out for its long responses, low hallucination rate, and absence of OpenAI censorship mechanisms. The fine-tuning process was performed with a 2000 sequence length on an 8x a100 80GB DGX machine for over 50 hours.",
+    "description": "LLaMa 13B finetuned on over 300,000 curated and uncensored instructions.\nCannot be used commercially. This model was fine-tuned by Nous Research, with Teknium and Karan4D leading the fine tuning process and dataset curation, Redmond AI sponsoring the compute, and several other contributors. The result is an enhanced Llama 13b model that rivals GPT-3.5-turbo in performance across a variety of tasks. This model stands out for its long responses, low hallucination rate, and absence of OpenAI censorship mechanisms.",
     "url": "https://huggingface.co/eachadea/ggml-nous-hermes-13b/resolve/main/ggml-v3-13b-hermes-q5_1.bin"
   }
 ]
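For context, each entry in models.json pairs a download target ("filename", "url") with integrity metadata ("filesize", "md5sum") and optional flags such as "requires" and "isDefault". The Python below is a minimal sketch of how a client might use those fields to check a downloaded model against its registry entry; it is an assumption for illustration, not the GPT4All application's actual download logic, and the paths in the usage comment are hypothetical.

import hashlib
import json
import os

def verify_model(models_json_path: str, model_dir: str, filename: str) -> bool:
    """Check a downloaded model's size and MD5 digest against its models.json entry."""
    with open(models_json_path, "r", encoding="utf-8") as f:
        models = json.load(f)

    # Find the registry entry for this file (hypothetical lookup, not the client's code).
    entry = next((m for m in models if m.get("filename") == filename), None)
    if entry is None:
        raise ValueError(f"{filename} is not listed in models.json")

    path = os.path.join(model_dir, filename)

    # "filesize" is stored as a string in models.json, so convert before comparing.
    if os.path.getsize(path) != int(entry["filesize"]):
        return False

    # Hash the file in chunks and compare against the recorded "md5sum".
    md5 = hashlib.md5()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            md5.update(chunk)
    return md5.hexdigest() == entry["md5sum"]

# Example usage (paths are illustrative assumptions):
# verify_model("models.json", "/path/to/models", "ggml-mpt-7b-instruct.bin")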