Bump version and release notes for v2.7.1

Signed-off-by: Adam Treat <treat.adam@gmail.com>
This commit is contained in:
Adam Treat 2024-02-21 16:53:47 -05:00
parent ef0a67eb94
commit a010a8a7ca
2 changed files with 23 additions and 1 deletion

View File

@@ -18,7 +18,7 @@ endif()
set(APP_VERSION_MAJOR 2)
set(APP_VERSION_MINOR 7)
set(APP_VERSION_PATCH 1)
set(APP_VERSION_PATCH 2)
set(APP_VERSION "${APP_VERSION_MAJOR}.${APP_VERSION_MINOR}.${APP_VERSION_PATCH}")
# Include the binary directory for the generated header file

View File

@@ -683,6 +683,28 @@
* Jared Van Bortel (Nomic AI)
* Adam Treat (Nomic AI)
* Community (beta testers, bug reporters, bindings authors)
"
},
{
"version": "2.7.1",
"notes":
"
* Update to latest llama.cpp with support for Google Gemma
* Gemma, Phi and Phi-2, Qwen2, and StableLM are now all GPU accelerated
* Large revamp of the model loading to support explicit unload/reload
* Bugfixes for ChatML and improved version of Mistral OpenOrca
* We no longer load a model by default on application start
* We no longer load a model by default on chat context switch
* Fixes for visual artifacts in update reminder dialog
* Blacklist Intel GPUs for now as we don't support them yet
* Fixes for binary save/restore of chat
* Save and restore of window geometry across application starts
",
"contributors":
"
* Jared Van Bortel (Nomic AI)
* Adam Treat (Nomic AI)
* Community (beta testers, bug reporters, bindings authors)
"
}
]