diff --git a/.gitignore b/.gitignore
index 55a8fa36..6647893e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -175,7 +175,6 @@ cython_debug/
 
 # gpt4all-chat
 CMakeLists.txt.user
-gpt4all-chat/meta/*
 gpt4all-chat/models/*
 build_*
 build-*
diff --git a/gpt4all-chat/metadata/models.json b/gpt4all-chat/metadata/models.json
new file mode 100644
index 00000000..b4ddf200
--- /dev/null
+++ b/gpt4all-chat/metadata/models.json
@@ -0,0 +1,94 @@
+[
+  {
+    "md5sum": "81a09a0ddf89690372fc296ff7f625af",
+    "filename": "ggml-gpt4all-j-v1.3-groovy.bin",
+    "filesize": "3785248281",
+    "isDefault": "true",
+    "bestGPTJ": "true",
+    "description": "Current best commercially licensable model based on GPT-J and trained by Nomic AI on the latest curated GPT4All dataset."
+  },
+  {
+    "md5sum": "91f886b68fbce697e9a3cd501951e455",
+    "filename": "ggml-gpt4all-l13b-snoozy.bin",
+    "filesize": "8136770688",
+    "bestLlama": "true",
+    "description": "Current best non-commercially licensable model based on Llama 13b and trained by Nomic AI on the latest curated GPT4All dataset."
+  },
+  {
+    "md5sum": "756249d3d6abe23bde3b1ae272628640",
+    "filename": "ggml-mpt-7b-chat.bin",
+    "filesize": "4854401050",
+    "isDefault": "true",
+    "bestMPT": "true",
+    "requires": "2.4.1",
+    "description": "Current best non-commercially licensable chat model based on MPT and trained by Mosaic ML."
+  },
+  {
+    "md5sum": "879344aaa9d62fdccbda0be7a09e7976",
+    "filename": "ggml-gpt4all-j-v1.2-jazzy.bin",
+    "filesize": "3785248281",
+    "description": "A commercially licensable model based on GPT-J and trained by Nomic AI on the v2 GPT4All dataset."
+  },
+  {
+    "md5sum": "61d48a82cb188cceb14ebb8082bfec37",
+    "filename": "ggml-gpt4all-j-v1.1-breezy.bin",
+    "filesize": "3785248281",
+    "description": "A commercially licensable model based on GPT-J and trained by Nomic AI on the v1 GPT4All dataset."
+  },
+  {
+    "md5sum": "5b5a3f9b858d33b29b52b89692415595",
+    "filename": "ggml-gpt4all-j.bin",
+    "filesize": "3785248281",
+    "description": "A commercially licensable model based on GPT-J and trained by Nomic AI on the v0 GPT4All dataset."
+  },
+  {
+    "md5sum": "29119f8fa11712704c6b22ac5ab792ea",
+    "filename": "ggml-vicuna-7b-1.1-q4_2.bin",
+    "filesize": "4212859520",
+    "description": "A non-commercially licensable model based on Llama 7b and trained by teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego."
+  },
+  {
+    "md5sum": "95999b7b0699e2070af63bf5d34101a8",
+    "filename": "ggml-vicuna-13b-1.1-q4_2.bin",
+    "filesize": "8136770688",
+    "description": "A non-commercially licensable model based on Llama 13b and trained by teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego."
+  },
+  {
+    "md5sum": "99e6d129745a3f1fb1121abed747b05a",
+    "filename": "ggml-wizardLM-7B.q4_2.bin",
+    "filesize": "4212864640",
+    "description": "A non-commercially licensable model based on Llama 7b and trained by Microsoft and Peking University."
+  },
+  {
+    "md5sum": "6cb4ee297537c9133bddab9692879de0",
+    "filename": "ggml-stable-vicuna-13B.q4_2.bin",
+    "filesize": "8136777088",
+    "description": "A non-commercially licensable model based on Llama 13b and RLHF trained by Stability AI."
+  },
+  {
+    "md5sum": "120c32a51d020066288df045ef5d52b9",
+    "filename": "ggml-mpt-7b-base.bin",
+    "filesize": "4854401028",
+    "requires": "2.4.1",
+    "description": "A commercially licensable base model pre-trained by Mosaic ML."
+  },
+  {
+    "md5sum": "d5eafd5b0bd0d615cfd5fd763f642dfe",
+    "filename": "ggml-nous-gpt4-vicuna-13b.bin",
+    "filesize": "8136777088",
+    "description": "A non-commercially licensable model based on Vicuna 13b, fine-tuned on ~180,000 instructions, trained by Nous Research."
+  },
+  {
+    "md5sum": "1cfa4958f489f0a0d1ffdf6b37322809",
+    "filename": "ggml-mpt-7b-instruct.bin",
+    "filesize": "4854401028",
+    "requires": "2.4.1",
+    "description": "A commercially licensable instruct model based on MPT and trained by Mosaic ML."
+  },
+  {
+    "md5sum": "679fc463f01388ea2d339664af0a0836",
+    "filename": "ggml-wizard-13b-uncensored.bin",
+    "filesize": "8136777088",
+    "description": "A non-commercially licensable model based on Wizard Vicuna 13b."
+  }
+]
diff --git a/gpt4all-chat/metadata/release.json b/gpt4all-chat/metadata/release.json
new file mode 100644
index 00000000..2843d22e
--- /dev/null
+++ b/gpt4all-chat/metadata/release.json
@@ -0,0 +1,177 @@
+[
+  {
+    "version": "2.2.2",
+    "notes":
+"
+* repeat penalty for both gptj and llama models
+* scroll the context window when conversation reaches context limit
+* persistent thread count setting
+* new default template
+* new settings for model path, repeat penalty
+* bugfix for settings dialog onEditingFinished
+* new tab based settings dialog format
+* bugfix for datalake when conversation contains forbidden json chars
+* new C library API and split the backend into its own separate lib for bindings
+* apple signed/notarized dmg installer
+* update llama.cpp submodule to latest
+* bugfix for too large of a prompt
+* support for opt-in only anonymous usage and statistics
+* bugfixes for the model downloader and improve performance
+* various UI bugfixes and enhancements including the send message textarea automatically wrapping by word
+* new startup dialog on first start of a new release displaying release notes and opt-in buttons
+* new logo and icons
+* fixed apple installer so there is now a symlink in the applications folder
+",
+    "contributors":
+"
+* Adam Treat (Nomic AI)
+* Aaron Miller
+* Matthieu Talbot
+* Tim Jobbins
+* chad (eachadea)
+* Community (beta testers, bug reporters)
+"
+  },
+  {
+    "version": "2.3.0",
+    "notes":
+"
+* repeat penalty for both gptj and llama models
+* scroll the context window when conversation reaches context limit
+* persistent thread count setting
+* new default template
+* new settings for model path, repeat penalty
+* bugfix for settings dialog onEditingFinished
+* new tab based settings dialog format
+* bugfix for datalake when conversation contains forbidden json chars
+* new C library API and split the backend into its own separate lib for bindings
+* apple signed/notarized dmg installer
+* update llama.cpp submodule to latest
+* bugfix for too large of a prompt
+* support for opt-in only anonymous usage and statistics
+* bugfixes for the model downloader and improve performance
+* various UI bugfixes and enhancements including the send message textarea automatically wrapping by word
+* new startup dialog on first start of a new release displaying release notes and opt-in buttons
+* new logo and icons
+* fixed apple installer so there is now a symlink in the applications folder
+* fixed bug with versions
+* fixed optout marking
+",
+    "contributors":
+"
+* Adam Treat (Nomic AI)
+* Aaron Miller
+* Matthieu Talbot
+* Tim Jobbins
+* chad (eachadea)
+* Community (beta testers, bug reporters)
+"
+  },
+  {
+    "version": "2.4.0",
+    "notes":
+"
+* reverse prompt for both llama and gptj models which should help stop them from repeating the prompt template
+* resumable downloads for models
+* chat list in the drawer drop down
+* add/remove/rename chats
+* persist chats to disk and restore them with full context (WARNING: the average size of each chat on disk is ~1.5GB)
+* NOTE: to turn on the persistent chats feature you need to do so via the settings dialog as it is off by default
+* automatically rename chats using the AI after the first prompt/response pair
+* new usage statistics including more detailed hardware info to help debug problems on older hardware
+* fix dialog sizes for those with smaller displays
+* add support for persistent contexts and internal model state to the C api
+* add a confirm button for deletion of chats
+* bugfix for blocking the gui when changing models
+* datalake now captures all conversations when network opt-in is turned on
+* new much shorter prompt template by default
+",
+    "contributors":
+"
+* Adam Treat (Nomic AI)
+* Aaron Miller
+* Community (beta testers, bug reporters)
+"
+  },
+  {
+    "version": "2.4.1",
+    "notes":
+"
+* compress persistent chats and save order of magnitude disk space on some small chats
+* persistent chat files are now stored in same folder as models
+* use a thread for deserializing chats on startup so the gui shows the window faster
+* fail gracefully and early when we detect incompatible hardware
+* repeat penalty restore default bugfix
+* new mpt backend for mosaic ml's new base model and chat model
+* add mpt chat and base model to downloads
+* lower memory required for gptj models by using f16 for kv cache
+* better error handling for when a model is deleted by user and persistent chat remains
+* add a user default model setting so the user's preferred model comes up on startup
+",
+    "contributors":
+"
+* Adam Treat (Nomic AI)
+* Zach Nussbaum (Nomic AI)
+* Aaron Miller
+* Community (beta testers, bug reporters)
+"
+  },
+  {
+    "version": "2.4.2",
+    "notes":
+"
+* add webserver feature that offers mirror api to chatgpt on localhost:4891
+* add chatgpt models installed using openai key to chat client gui
+* fixup the memory handling when switching between chats/models to decrease RAM load across the board
+* fix bug in thread safety for mpt model and de-duplicated code
+* uses compact json format for network
+* add remove model option in download dialog
+",
+    "contributors":
+"
+* Adam Treat (Nomic AI)
+* Aaron Miller
+* Community (beta testers, bug reporters)
+"
+  },
+  {
+    "version": "2.4.3",
+    "notes":
+"
+* add webserver feature that offers mirror api to chatgpt on localhost:4891
+* add chatgpt models installed using openai key to chat client gui
+* fixup the memory handling when switching between chats/models to decrease RAM load across the board
+* fix bug in thread safety for mpt model and de-duplicated code
+* uses compact json format for network
+* add remove model option in download dialog
+* remove text-davinci-003 as it is not a chat model
+* fix installers on mac and linux to include libllmodel versions
+",
+    "contributors":
+"
+* Adam Treat (Nomic AI)
+* Aaron Miller
+* Community (beta testers, bug reporters)
+"
+  },
+  {
+    "version": "2.4.4",
+    "notes":
+"
+* fix buffer overrun in backend
+* bugfix for browse for model directory
+* dedup of qml code
+* revamp settings dialog UI
+* add localdocs plugin (beta) feature allowing scanning of local docs
+* various other bugfixes and performance improvements
+",
+    "contributors":
+"
+* Adam Treat (Nomic AI)
+* Aaron Miller
+* Juuso Alasuutari
+* Justin Wang
+* Community (beta testers, bug reporters)
+" + } +]