llamamodel: fix semantic typo in nomic client dynamic mode (#2216)

Signed-off-by: Jared Van Bortel <jared@nomic.ai>
This commit is contained in:
Jared Van Bortel 2024-04-12 17:25:15 -04:00 committed by GitHub
parent 46818e466e
commit 3f8257c563
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
2 changed files with 2 additions and 2 deletions

View File

@ -302,8 +302,8 @@ bool LLamaModel::loadModel(const std::string &modelPath, int n_ctx, int ngl)
     if (llama_verbose()) {
         std::cerr << "llama.cpp: using Metal" << std::endl;
-        d_ptr->backend_name = "metal";
     }
+    d_ptr->backend_name = "metal";
     // always fully offload on Metal
     // TODO(cebtenzzre): use this parameter to allow using more than 53% of system RAM to load a model

View File

@ -68,7 +68,7 @@ def get_long_description():
 setup(
     name=package_name,
-    version="2.5.0",
+    version="2.5.1",
     description="Python bindings for GPT4All",
     long_description=get_long_description(),
     long_description_content_type="text/markdown",