[ { "order": "a", "md5sum": "4acc146dd43eb02845c233c29289c7c5", "name": "Hermes", "filename": "nous-hermes-13b.ggmlv3.q4_0.bin", "filesize": "8136777088", "requires": "2.4.7", "ramrequired": "16", "parameters": "13 billion", "quant": "q4_0", "type": "LLaMA", "description": "Best overall model
", "url": "https://huggingface.co/TheBloke/Nous-Hermes-13B-GGML/resolve/main/nous-hermes-13b.ggmlv3.q4_0.bin" }, { "order": "b", "md5sum": "725f148218a65ce8ebcc724e52f31b49", "name": "GPT4All Falcon", "filename": "ggml-model-gpt4all-falcon-q4_0.bin", "filesize": "4061641216", "ramrequired": "8", "parameters": "7 billion", "quant": "q4_0", "type": "Falcon", "description": "Best overall smaller model
", "url": "https://huggingface.co/nomic-ai/gpt4all-falcon-ggml/resolve/main/ggml-model-gpt4all-falcon-q4_0.bin" }, { "order": "c", "md5sum": "81a09a0ddf89690372fc296ff7f625af", "name": "Groovy", "filename": "ggml-gpt4all-j-v1.3-groovy.bin", "filesize": "3785248281", "ramrequired": "8", "parameters": "7 billion", "quant": "q4_0", "type": "GPT-J", "description": "Creative model can be used for commercial purposes
" }, { "order": "e", "md5sum": "11d9f060ca24575a2c303bdc39952486", "name": "Snoozy", "filename": "GPT4All-13B-snoozy.ggmlv3.q4_0.bin", "filesize": "8136770688", "requires": "2.4.7", "ramrequired": "16", "parameters": "13 billion", "quant": "q4_0", "type": "LLaMA", "description": "Very good overall model
", "url": "https://huggingface.co/TheBloke/GPT4All-13B-snoozy-GGML/resolve/main/GPT4All-13B-snoozy.ggmlv3.q4_0.bin" }, { "order": "f", "md5sum": "756249d3d6abe23bde3b1ae272628640", "name": "MPT Chat", "filename": "ggml-mpt-7b-chat.bin", "filesize": "4854401050", "requires": "2.4.1", "ramrequired": "8", "parameters": "7 billion", "quant": "q4_0", "type": "MPT", "description": "Best overall smaller model
" }, { "order": "g", "md5sum": "e64e74375ce9d36a3d0af3db1523fd0a", "name": "Orca", "filename": "orca-mini-7b.ggmlv3.q4_0.bin", "filesize": "3791749248", "requires": "2.4.7", "ramrequired": "8", "parameters": "7 billion", "quant": "q4_0", "type": "OpenLLaMa", "description": "New model with novel dataset
", "url": "https://huggingface.co/TheBloke/orca_mini_7B-GGML/resolve/main/orca-mini-7b.ggmlv3.q4_0.bin" }, { "order": "h", "md5sum": "6a087f7f4598fad0bb70e6cb4023645e", "name": "Orca (Small)", "filename": "orca-mini-3b.ggmlv3.q4_0.bin", "filesize": "1928446208", "requires": "2.4.7", "ramrequired": "4", "parameters": "3 billion", "quant": "q4_0", "type": "OpenLLaMa", "description": "Small version of new model with novel dataset
", "url": "https://huggingface.co/TheBloke/orca_mini_3B-GGML/resolve/main/orca-mini-3b.ggmlv3.q4_0.bin" }, { "order": "i", "md5sum": "959b7f65b2d12fd1e3ff99e7493c7a3a", "name": "Orca (Large)", "filename": "orca-mini-13b.ggmlv3.q4_0.bin", "filesize": "7323329152", "requires": "2.4.7", "ramrequired": "16", "parameters": "13 billion", "quant": "q4_0", "type": "OpenLLaMa", "description": "Largest version of new model with novel dataset
", "url": "https://huggingface.co/TheBloke/orca_mini_13B-GGML/resolve/main/orca-mini-13b.ggmlv3.q4_0.bin" }, { "order": "j", "md5sum": "29119f8fa11712704c6b22ac5ab792ea", "name": "Vicuna", "filename": "ggml-vicuna-7b-1.1-q4_2.bin", "filesize": "4212859520", "ramrequired": "8", "parameters": "7 billion", "quant": "q4_2", "type": "LLaMA", "description": "Good small model - trained by teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego
" }, { "order": "k", "md5sum": "95999b7b0699e2070af63bf5d34101a8", "name": "Vicuna (large)", "filename": "ggml-vicuna-13b-1.1-q4_2.bin", "filesize": "8136770688", "ramrequired": "16", "parameters": "13 billion", "quant": "q4_2", "type": "LLaMA", "description": "Good larger model - trained by teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego
" }, { "order": "l", "md5sum": "99e6d129745a3f1fb1121abed747b05a", "name": "Wizard", "filename": "ggml-wizardLM-7B.q4_2.bin", "filesize": "4212864640", "ramrequired": "8", "parameters": "7 billion", "quant": "q4_2", "type": "LLaMA", "description": "Good small model - trained by by Microsoft and Peking University
" }, { "order": "m", "md5sum": "6cb4ee297537c9133bddab9692879de0", "name": "Stable Vicuna", "filename": "ggml-stable-vicuna-13B.q4_2.bin", "filesize": "8136777088", "ramrequired": "16", "parameters": "13 billion", "quant": "q4_2", "type": "LLaMA", "description": "Trained with RHLF by Stability AI
" }, { "order": "n", "md5sum": "1cfa4958f489f0a0d1ffdf6b37322809", "name": "MPT Instruct", "filename": "ggml-mpt-7b-instruct.bin", "filesize": "4854401028", "requires": "2.4.1", "ramrequired": "8", "parameters": "7 billion", "quant": "q4_0", "type": "MPT", "description": "Mosaic's instruction model
" }, { "order": "o", "md5sum": "120c32a51d020066288df045ef5d52b9", "name": "MPT Base", "filename": "ggml-mpt-7b-base.bin", "filesize": "4854401028", "requires": "2.4.1", "ramrequired": "8", "parameters": "7 billion", "quant": "q4_0", "type": "MPT", "description": "Trained for text completion with no assistant finetuning
" }, { "order": "p", "md5sum": "d5eafd5b0bd0d615cfd5fd763f642dfe", "name": "Nous Vicuna", "filename": "ggml-nous-gpt4-vicuna-13b.bin", "filesize": "8136777088", "ramrequired": "16", "parameters": "13 billion", "quant": "q4_0", "type": "LLaMA", "description": "Trained on ~180,000 instructions
" }, { "order": "q", "md5sum": "489d21fd48840dcb31e5f92f453f3a20", "name": "Wizard Uncensored", "filename": "wizardLM-13B-Uncensored.ggmlv3.q4_0.bin", "filesize": "8136777088", "requires": "2.4.7", "ramrequired": "16", "parameters": "13 billion", "quant": "q4_0", "type": "LLaMA", "description": "Trained on uncensored assistant data and instruction data
", "url": "https://huggingface.co/TheBloke/WizardLM-13B-Uncensored-GGML/resolve/main/wizardLM-13B-Uncensored.ggmlv3.q4_0.bin" }, { "order": "r", "md5sum": "615890cb571fcaa0f70b2f8d15ef809e", "disableGUI": "true", "name": "Replit", "filename": "ggml-replit-code-v1-3b.bin", "filesize": "5202046853", "requires": "2.4.7", "ramrequired": "4", "parameters": "3 billion", "quant": "f16", "type": "Replit", "description": "Trained on subset of the Stack
", "url": "https://huggingface.co/nomic-ai/ggml-replit-code-v1-3b/resolve/main/ggml-replit-code-v1-3b.bin" } ]