[ { "order": "a", "md5sum": "8a9c75bcd8a66b7693f158ec96924eeb", "name": "Llama 3.1 8B Instruct 128k", "filename": "Meta-Llama-3.1-8B-Instruct-128k-Q4_0.gguf", "filesize": "4661212096", "requires": "3.1.1", "ramrequired": "8", "parameters": "8 billion", "quant": "q4_0", "type": "LLaMA3", "description": "", "url": "https://huggingface.co/GPT4All-Community/Meta-Llama-3.1-8B-Instruct-128k/resolve/main/Meta-Llama-3.1-8B-Instruct-128k-Q4_0.gguf", "promptTemplate": "<|start_header_id|>user<|end_header_id|>\n\n%1<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n%2", "systemPrompt": "<|start_header_id|>system<|end_header_id|>\nCutting Knowledge Date: December 2023\n\nYou are a helpful assistant.<|eot_id|>" },
{ "order": "b", "md5sum": "c87ad09e1e4c8f9c35a5fcef52b6f1c9", "name": "Llama 3 8B Instruct", "filename": "Meta-Llama-3-8B-Instruct.Q4_0.gguf", "filesize": "4661724384", "requires": "2.7.1", "ramrequired": "8", "parameters": "8 billion", "quant": "q4_0", "type": "LLaMA3", "description": "", "url": "https://gpt4all.io/models/gguf/Meta-Llama-3-8B-Instruct.Q4_0.gguf", "promptTemplate": "<|start_header_id|>user<|end_header_id|>\n\n%1<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n%2<|eot_id|>", "systemPrompt": "" },
{ "order": "c", "md5sum": "a5f6b4eabd3992da4d7fb7f020f921eb", "name": "Nous Hermes 2 Mistral DPO", "filename": "Nous-Hermes-2-Mistral-7B-DPO.Q4_0.gguf", "filesize": "4108928000", "requires": "2.7.1", "ramrequired": "8", "parameters": "7 billion", "quant": "q4_0", "type": "Mistral", "description": "Good overall fast chat model", "url": "https://huggingface.co/NousResearch/Nous-Hermes-2-Mistral-7B-DPO-GGUF/resolve/main/Nous-Hermes-2-Mistral-7B-DPO.Q4_0.gguf", "promptTemplate": "<|im_start|>user\n%1<|im_end|>\n<|im_start|>assistant\n%2<|im_end|>\n", "systemPrompt": "" },
{ "order": "d", "md5sum": "97463be739b50525df56d33b26b00852", "name": "Mistral Instruct", "filename": "mistral-7b-instruct-v0.1.Q4_0.gguf", "filesize": "4108916384", "requires": "2.5.0", "ramrequired": "8", "parameters": "7 billion", "quant": "q4_0", "type": "Mistral", "systemPrompt": "", "description": "Strong overall fast instruction following model", "url": "https://gpt4all.io/models/gguf/mistral-7b-instruct-v0.1.Q4_0.gguf", "promptTemplate": "[INST] %1 [/INST]" },
{ "order": "e", "md5sum": "f692417a22405d80573ac10cb0cd6c6a", "name": "Mistral OpenOrca", "filename": "mistral-7b-openorca.gguf2.Q4_0.gguf", "filesize": "4108928128", "requires": "2.7.1", "ramrequired": "8", "parameters": "7 billion", "quant": "q4_0", "type": "Mistral", "description": "Strong overall fast chat model", "url": "https://gpt4all.io/models/gguf/mistral-7b-openorca.gguf2.Q4_0.gguf", "promptTemplate": "<|im_start|>user\n%1<|im_end|>\n<|im_start|>assistant\n%2<|im_end|>\n", "systemPrompt": "<|im_start|>system\nYou are MistralOrca, a large language model trained by Alignment Lab AI.\n<|im_end|>\n" },
{ "order": "f", "md5sum": "c4c78adf744d6a20f05c8751e3961b84", "name": "GPT4All Falcon", "filename": "gpt4all-falcon-newbpe-q4_0.gguf", "filesize": "4210994112", "requires": "2.6.0", "ramrequired": "8", "parameters": "7 billion", "quant": "q4_0", "type": "Falcon", "systemPrompt": "", "description": "Very fast model with good quality", "url": "https://gpt4all.io/models/gguf/gpt4all-falcon-newbpe-q4_0.gguf", "promptTemplate": "### Instruction:\n%1\n\n### Response:\n" },
{ "order": "g", "md5sum": "00c8593ba57f5240f59662367b3ed4a5", "name": "Orca 2 (Medium)", "filename": "orca-2-7b.Q4_0.gguf", "filesize": "3825824192", "requires": "2.5.2", "ramrequired": "8", "parameters": "7 billion", "quant": "q4_0", "type": "LLaMA2", "systemPrompt": "", "description": "", "url": "https://gpt4all.io/models/gguf/orca-2-7b.Q4_0.gguf" },
{ "order": "h", "md5sum": "3c0d63c4689b9af7baa82469a6f51a19", "name": "Orca 2 (Full)", "filename": "orca-2-13b.Q4_0.gguf", "filesize": "7365856064", "requires": "2.5.2", "ramrequired": "16", "parameters": "13 billion", "quant": "q4_0", "type": "LLaMA2", "systemPrompt": "", "description": "", "url": "https://gpt4all.io/models/gguf/orca-2-13b.Q4_0.gguf" },
{ "order": "i", "md5sum": "5aff90007499bce5c64b1c0760c0b186", "name": "Wizard v1.2", "filename": "wizardlm-13b-v1.2.Q4_0.gguf", "filesize": "7365834624", "requires": "2.5.0", "ramrequired": "16", "parameters": "13 billion", "quant": "q4_0", "type": "LLaMA2", "systemPrompt": "", "description": "Strong overall larger model", "url": "https://gpt4all.io/models/gguf/wizardlm-13b-v1.2.Q4_0.gguf" },
{ "order": "j", "md5sum": "31b47b4e8c1816b62684ac3ca373f9e1", "name": "Ghost 7B v0.9.1", "filename": "ghost-7b-v0.9.1-Q4_0.gguf", "filesize": "4108916960", "requires": "2.7.1", "ramrequired": "8", "parameters": "7 billion", "quant": "q4_0", "type": "Mistral", "description": "Ghost 7B v0.9.1 fast, powerful and smooth for Vietnamese and English languages.", "url": "https://huggingface.co/lamhieu/ghost-7b-v0.9.1-gguf/resolve/main/ghost-7b-v0.9.1-Q4_0.gguf", "promptTemplate": "<|user|>\n%1\n<|assistant|>\n%2\n", "systemPrompt": "<|system|>\nYou are Ghost created by Lam Hieu. You are a helpful and knowledgeable assistant. You like to help and always give honest information, in its original language. In communication, you are always respectful, equal and promote positive behavior.\n" },
{ "order": "k", "md5sum": "3d12810391d04d1153b692626c0c6e16", "name": "Hermes", "filename": "nous-hermes-llama2-13b.Q4_0.gguf", "filesize": "7366062080", "requires": "2.5.0", "ramrequired": "16", "parameters": "13 billion", "quant": "q4_0", "type": "LLaMA2", "systemPrompt": "", "description": "Extremely good model", "url": "https://gpt4all.io/models/gguf/nous-hermes-llama2-13b.Q4_0.gguf", "promptTemplate": "### Instruction:\n%1\n\n### Response:\n" },
{ "order": "l", "md5sum": "40388eb2f8d16bb5d08c96fdfaac6b2c", "name": "Snoozy", "filename": "gpt4all-13b-snoozy-q4_0.gguf", "filesize": "7365834624", "requires": "2.5.0", "ramrequired": "16", "parameters": "13 billion", "quant": "q4_0", "type": "LLaMA", "systemPrompt": "", "description": "Very good overall model", "url": "https://gpt4all.io/models/gguf/gpt4all-13b-snoozy-q4_0.gguf" },
{ "order": "m", "md5sum": "15dcb4d7ea6de322756449c11a0b7545", "name": "MPT Chat", "filename": "mpt-7b-chat-newbpe-q4_0.gguf", "filesize": "3912373472", "requires": "2.7.1", "removedIn": "2.7.3", "ramrequired": "8", "parameters": "7 billion", "quant": "q4_0", "type": "MPT", "description": "Good model with novel architecture", "url": "https://gpt4all.io/models/gguf/mpt-7b-chat-newbpe-q4_0.gguf", "promptTemplate": "<|im_start|>user\n%1<|im_end|>\n<|im_start|>assistant\n%2<|im_end|>\n", "systemPrompt": "<|im_start|>system\n- You are a helpful assistant chatbot trained by MosaicML.\n- You answer questions.\n- You are excited to be able to help the user, but will refuse to do anything that could be considered harmful to the user.\n- You are more than just an information source, you are also able to write poetry, short stories, and make jokes.<|im_end|>\n" },
{ "order": "n", "md5sum": "ab5d8e8a2f79365ea803c1f1d0aa749d", "name": "MPT Chat", "filename": "mpt-7b-chat.gguf4.Q4_0.gguf", "filesize": "3796178112", "requires": "2.7.3", "ramrequired": "8", "parameters": "7 billion", "quant": "q4_0", "type": "MPT", "description": "Good model with novel architecture", "url": "https://gpt4all.io/models/gguf/mpt-7b-chat.gguf4.Q4_0.gguf", "promptTemplate": "<|im_start|>user\n%1<|im_end|>\n<|im_start|>assistant\n%2<|im_end|>\n", "systemPrompt": "<|im_start|>system\n- You are a helpful assistant chatbot trained by MosaicML.\n- You answer questions.\n- You are excited to be able to help the user, but will refuse to do anything that could be considered harmful to the user.\n- You are more than just an information source, you are also able to write poetry, short stories, and make jokes.<|im_end|>\n" },
{ "order": "o", "md5sum": "f8347badde9bfc2efbe89124d78ddaf5", "name": "Phi-3 Mini Instruct", "filename": "Phi-3-mini-4k-instruct.Q4_0.gguf", "filesize": "2176181568", "requires": "2.7.1", "ramrequired": "4", "parameters": "4 billion", "quant": "q4_0", "type": "Phi-3", "description": "", "url": "https://gpt4all.io/models/gguf/Phi-3-mini-4k-instruct.Q4_0.gguf", "promptTemplate": "<|user|>\n%1<|end|>\n<|assistant|>\n%2<|end|>\n", "systemPrompt": "" },
{ "order": "p", "md5sum": "0e769317b90ac30d6e09486d61fefa26", "name": "Mini Orca (Small)", "filename": "orca-mini-3b-gguf2-q4_0.gguf", "filesize": "1979946720", "requires": "2.5.0", "ramrequired": "4", "parameters": "3 billion", "quant": "q4_0", "type": "OpenLLaMa", "description": "Small version of new model with novel dataset", "url": "https://gpt4all.io/models/gguf/orca-mini-3b-gguf2-q4_0.gguf", "promptTemplate": "### User:\n%1\n\n### Response:\n", "systemPrompt": "### System:\nYou are an AI assistant that follows instruction extremely well. Help as much as you can.\n\n" },
{ "order": "q", "md5sum": "c232f17e09bca4b7ee0b5b1f4107c01e", "disableGUI": "true", "name": "Replit", "filename": "replit-code-v1_5-3b-newbpe-q4_0.gguf", "filesize": "1953055104", "requires": "2.6.0", "ramrequired": "4", "parameters": "3 billion", "quant": "q4_0", "type": "Replit", "systemPrompt": "", "promptTemplate": "%1", "description": "Trained on subset of the Stack", "url": "https://gpt4all.io/models/gguf/replit-code-v1_5-3b-newbpe-q4_0.gguf" },
{ "order": "r", "md5sum": "70841751ccd95526d3dcfa829e11cd4c", "disableGUI": "true", "name": "Starcoder", "filename": "starcoder-newbpe-q4_0.gguf", "filesize": "8987411904", "requires": "2.6.0", "ramrequired": "4", "parameters": "7 billion", "quant": "q4_0", "type": "Starcoder", "systemPrompt": "", "promptTemplate": "%1", "description": "Trained on subset of the Stack", "url": "https://gpt4all.io/models/gguf/starcoder-newbpe-q4_0.gguf" },
{ "order": "s", "md5sum": "e973dd26f0ffa6e46783feaea8f08c83", "disableGUI": "true", "name": "Rift coder", "filename": "rift-coder-v0-7b-q4_0.gguf", "filesize": "3825903776", "requires": "2.5.0", "ramrequired": "8", "parameters": "7 billion", "quant": "q4_0", "type": "LLaMA", "systemPrompt": "", "promptTemplate": "%1", "description": "Trained on collection of Python and TypeScript