chat: fix build on Windows and Nomic Embed path on macOS (#2467)

* chat: remove unused oscompat source files

These files are no longer needed now that the hnswlib index is gone.
This fixes an issue with the Windows build as there was a compilation
error in oscompat.cpp.

Signed-off-by: Jared Van Bortel <jared@nomic.ai>

* llm: fix pragma to be recognized by MSVC

Replaces this MSVC warning:
C:\msys64\home\Jared\gpt4all\gpt4all-chat\llm.cpp(53,21): warning C4081: expected '('; found 'string'

With this:
C:\msys64\home\Jared\gpt4all\gpt4all-chat\llm.cpp : warning : offline installer build will not check for updates!

Signed-off-by: Jared Van Bortel <jared@nomic.ai>
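
For context: MSVC only accepts the parenthesized form of the message pragma; the bare string-literal form is a GCC/Clang extension, which is why MSVC reports C4081 ("expected '('") for it. A minimal standalone sketch of the two spellings (not the llm.cpp code itself):

    // GCC/Clang extension; MSVC rejects it with warning C4081:
    //     #pragma message "offline installer build will not check for updates!"

    // Parenthesized form, accepted by MSVC, GCC, and Clang alike:
    #pragma message("offline installer build will not check for updates!")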

* usearch: fork usearch to fix `CreateFile` build error

Signed-off-by: Jared Van Bortel <jared@nomic.ai>

* dlhandle: fix incorrect assertion on Windows

SetErrorMode returns the previous value of the error mode flags, not an
indicator of success.

Signed-off-by: Jared Van Bortel <jared@nomic.ai>
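
A minimal sketch of the corrected pattern (loadQuietly is a hypothetical helper, not the Dlhandle code itself): SetErrorMode's return value is only useful for restoring the previous mode later, never as a success check.

    #include <windows.h>

    // SetErrorMode() returns the PREVIOUS error-mode flags, not a success
    // code, so asserting on it is wrong; save it and restore it instead.
    HMODULE loadQuietly(const wchar_t *path)
    {
        UINT lastErrorMode = GetErrorMode();
        SetErrorMode(lastErrorMode | SEM_FAILCRITICALERRORS); // suppress error dialogs
        HMODULE handle = LoadLibraryExW(path, NULL, LOAD_LIBRARY_SEARCH_DEFAULT_DIRS);
        SetErrorMode(lastErrorMode); // put the caller's error mode back
        return handle;
    }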

* llamamodel: fix UB in LLamaModel::embedInternal

It is undefined behavior to increment an STL iterator past the end of
the container. Use offsets to do the math instead.

Signed-off-by: Jared Van Bortel <jared@nomic.ai>
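
The issue in miniature (a standalone sketch, not the embedInternal code): for a contiguous container, even forming an iterator more than one past the end is undefined behavior, so `it + max_len` is UB before std::min ever clamps it. The same arithmetic on integer offsets is well-defined, because the overshoot happens on plain integers.

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    int main()
    {
        std::vector<int> input = {1, 2, 3, 4, 5};
        const unsigned max_len = 3;

        // UB: once fewer than max_len elements remain, `it + max_len`
        // constructs an iterator past end() before std::min can clamp it:
        //     auto end = std::min(it + max_len, input.end());

        // Well-defined: overshoot happens on integers, then is clamped.
        for (unsigned j = 0; j < input.size(); j += max_len) {
            unsigned end = std::min(j + max_len, unsigned(input.size()));
            std::printf("chunk [%u, %u)\n", j, end); // [0, 3) then [3, 5)
        }
        return 0;
    }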

* cmake: install embedding model to bundle's Resources dir on macOS

Signed-off-by: Jared Van Bortel <jared@nomic.ai>
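
At runtime the app must look for the model where the installer put it: inside the bundle's Contents/Resources (capital R) on macOS, and in a lowercase resources directory next to the binary elsewhere. A sketch of that lookup, mirroring the embllm.cpp change in the diff below (localEmbeddingModelPath is a hypothetical helper; assumes Qt 6.4+ for the _s literal):

    #include <QCoreApplication>
    #include <QString>

    using namespace Qt::Literals::StringLiterals;

    // Resolve the bundled embedding model relative to the executable dir:
    // <bundle>/Contents/Resources on macOS, ../resources everywhere else.
    static QString localEmbeddingModelPath(const QString &fileName)
    {
    #ifdef Q_OS_DARWIN
        static const QString fmt = u"%1/../Resources/%2"_s;
    #else
        static const QString fmt = u"%1/../resources/%2"_s;
    #endif
        return fmt.arg(QCoreApplication::applicationDirPath(), fileName);
    }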

* ci: fix macOS build by explicitly installing Rosetta

Signed-off-by: Jared Van Bortel <jared@nomic.ai>

---------

Signed-off-by: Jared Van Bortel <jared@nomic.ai>
Jared Van Bortel <jared@nomic.ai>, 2024-06-25 17:22:51 -04:00, committed by GitHub
parent bbf0c2f246
commit 88d85be0f9
10 changed files with 61 additions and 125 deletions

.circleci/config.yml

@@ -40,6 +40,9 @@ jobs:
       - restore_cache: # this is the new step to restore cache
           keys:
             - macos-qt-cache-v3
+      - run:
+          name: Install Rosetta
+          command: softwareupdate --install-rosetta --agree-to-license # needed for QtIFW
       - run:
           name: Installing Qt
           command: |
@@ -325,6 +328,9 @@ jobs:
       - restore_cache: # this is the new step to restore cache
           keys:
             - macos-qt-cache-v3
+      - run:
+          name: Install Rosetta
+          command: softwareupdate --install-rosetta --agree-to-license # needed for QtIFW
       - run:
           name: Installing Qt
           command: |

.gitmodules

@@ -4,4 +4,4 @@
 	branch = master
 [submodule "gpt4all-chat/usearch"]
 	path = gpt4all-chat/usearch
-	url = https://github.com/unum-cloud/usearch.git
+	url = https://github.com/nomic-ai/usearch.git

gpt4all-backend/dlhandle.cpp

@@ -46,13 +46,11 @@ Dlhandle::Dlhandle(const fs::path &fpath)
     // Suppress the "Entry Point Not Found" dialog, caused by outdated nvcuda.dll from the GPU driver
     UINT lastErrorMode = GetErrorMode();
-    UINT success = SetErrorMode(lastErrorMode | SEM_FAILCRITICALERRORS);
-    assert(success);
+    SetErrorMode(lastErrorMode | SEM_FAILCRITICALERRORS);

     chandle = LoadLibraryExW(afpath.c_str(), NULL, LOAD_LIBRARY_SEARCH_DEFAULT_DIRS | LOAD_LIBRARY_SEARCH_DLL_LOAD_DIR);
-    success = SetErrorMode(lastErrorMode);
-    assert(success);
+    SetErrorMode(lastErrorMode);

     if (!chandle) {
         DWORD err = GetLastError();

gpt4all-backend/llamamodel.cpp

@@ -1003,14 +1003,14 @@ void LLamaModel::embedInternal(
     size_t totalTokens = 0;
     for (unsigned i = 0; i < inputs.size(); i++) {
         auto &input = inputs[i];
-        for (auto it = input.begin(); it < input.end(); it += max_len) {
-            if (it > input.begin()) { it -= chunkOverlap; }
-            auto end = std::min(it + max_len, input.end());
+        for (unsigned j = 0; j < input.size(); j += max_len) {
+            if (j) { j -= chunkOverlap; }
+            unsigned end = std::min(j + max_len, unsigned(input.size()));
             batches.push_back({ i, {} });
             auto &batch = batches.back().batch;
             batch = prefixTokens;
-            batch.insert(batch.end(), it, end);
-            totalTokens += end - it;
+            batch.insert(batch.end(), input.begin() + j, input.begin() + end);
+            totalTokens += end - j;
             batch.push_back(eos_token);
             if (!doMean) { break; /* limit text to one chunk */ }
         }

gpt4all-chat/CMakeLists.txt

@@ -64,14 +64,16 @@ set (CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
 add_subdirectory(../gpt4all-backend llmodel)

-set(METAL_SHADER_FILE)
-if(${CMAKE_SYSTEM_NAME} MATCHES Darwin)
-    set(METAL_SHADER_FILE ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/default.metallib)
+set(CHAT_EXE_RESOURCES)
+
+# Metal shader library
+if (APPLE)
+    list(APPEND CHAT_EXE_RESOURCES "${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/default.metallib")
 endif()

-set(APP_ICON_RESOURCE)
+# App icon
 if (WIN32)
-    set(APP_ICON_RESOURCE "${CMAKE_CURRENT_SOURCE_DIR}/resources/gpt4all.rc")
+    list(APPEND CHAT_EXE_RESOURCES "${CMAKE_CURRENT_SOURCE_DIR}/resources/gpt4all.rc")
 elseif (APPLE)
     # The MACOSX_BUNDLE_ICON_FILE variable is added to the Info.plist
     # generated by CMake. This variable contains the .icns file name,
@@ -82,8 +84,24 @@ elseif (APPLE)
     set(APP_ICON_RESOURCE "${CMAKE_CURRENT_SOURCE_DIR}/resources/gpt4all.icns")
     set_source_files_properties(${APP_ICON_RESOURCE} PROPERTIES
         MACOSX_PACKAGE_LOCATION "Resources")
+    list(APPEND CHAT_EXE_RESOURCES "${APP_ICON_RESOURCE}")
 endif()

+# Embedding model
+set(LOCAL_EMBEDDING_MODEL "nomic-embed-text-v1.5.f16.gguf")
+set(LOCAL_EMBEDDING_MODEL_MD5 "a5401e7f7e46ed9fcaed5b60a281d547")
+set(LOCAL_EMBEDDING_MODEL_PATH "${CMAKE_BINARY_DIR}/resources/${LOCAL_EMBEDDING_MODEL}")
+set(LOCAL_EMBEDDING_MODEL_URL "https://gpt4all.io/models/gguf/${LOCAL_EMBEDDING_MODEL}")
+message(STATUS "Downloading embedding model from ${LOCAL_EMBEDDING_MODEL_URL} ...")
+file(DOWNLOAD
+    "${LOCAL_EMBEDDING_MODEL_URL}"
+    "${LOCAL_EMBEDDING_MODEL_PATH}"
+    EXPECTED_HASH "MD5=${LOCAL_EMBEDDING_MODEL_MD5}"
+)
+message(STATUS "Embedding model downloaded to ${LOCAL_EMBEDDING_MODEL_PATH}")
+if (APPLE)
+    list(APPEND CHAT_EXE_RESOURCES "${LOCAL_EMBEDDING_MODEL_PATH}")
+endif()

 qt_add_executable(chat
     main.cpp
@@ -102,9 +120,8 @@ qt_add_executable(chat
     server.h server.cpp
     logger.h logger.cpp
     responsetext.h responsetext.cpp
-    oscompat.h oscompat.cpp
-    ${METAL_SHADER_FILE}
     ${APP_ICON_RESOURCE}
+    ${CHAT_EXE_RESOURCES}
 )

 qt_add_qml_module(chat
@@ -197,23 +214,18 @@ qt_add_qml_module(chat
 )

 set_target_properties(chat PROPERTIES
-    MACOSX_BUNDLE_GUI_IDENTIFIER gpt4all
-    MACOSX_BUNDLE_BUNDLE_VERSION ${PROJECT_VERSION}
-    MACOSX_BUNDLE_SHORT_VERSION_STRING ${PROJECT_VERSION_MAJOR}.${PROJECT_VERSION_MINOR}
-    MACOSX_BUNDLE TRUE
     WIN32_EXECUTABLE TRUE
 )

-if(${CMAKE_SYSTEM_NAME} MATCHES Darwin)
+if (APPLE)
     set_target_properties(chat PROPERTIES
+        MACOSX_BUNDLE TRUE
+        MACOSX_BUNDLE_GUI_IDENTIFIER gpt4all
+        MACOSX_BUNDLE_BUNDLE_VERSION ${PROJECT_VERSION}
+        MACOSX_BUNDLE_SHORT_VERSION_STRING ${PROJECT_VERSION_MAJOR}.${PROJECT_VERSION_MINOR}
+        RESOURCE "${CHAT_EXE_RESOURCES}"
         OUTPUT_NAME gpt4all
     )
-endif()
-if(METAL_SHADER_FILE)
-    set_target_properties(chat PROPERTIES
-        RESOURCE ${METAL_SHADER_FILE}
-    )
     add_dependencies(chat ggml-metal)
 endif()
@@ -237,17 +249,6 @@ target_link_libraries(chat
     PRIVATE llmodel)

-# -- extra resources --
-set(LOCAL_EMBEDDING_MODEL "nomic-embed-text-v1.5.f16.gguf")
-set(LOCAL_EMBEDDING_MODEL_MD5 "a5401e7f7e46ed9fcaed5b60a281d547")
-file(DOWNLOAD
-    "https://gpt4all.io/models/gguf/${LOCAL_EMBEDDING_MODEL}"
-    "${CMAKE_BINARY_DIR}/resources/${LOCAL_EMBEDDING_MODEL}"
-    EXPECTED_HASH "MD5=${LOCAL_EMBEDDING_MODEL_MD5}"
-)

 # -- install --
 set(COMPONENT_NAME_MAIN ${PROJECT_NAME})
@@ -314,9 +315,11 @@ if (LLMODEL_CUDA)
     endif()
 endif()

-install(FILES "${CMAKE_BINARY_DIR}/resources/${LOCAL_EMBEDDING_MODEL}"
-    DESTINATION resources
-    COMPONENT ${COMPONENT_NAME_MAIN})
+if (NOT APPLE)
+    install(FILES "${CMAKE_BINARY_DIR}/resources/${LOCAL_EMBEDDING_MODEL}"
+        DESTINATION resources
+        COMPONENT ${COMPONENT_NAME_MAIN})
+endif()

 set(CPACK_GENERATOR "IFW")
 set(CPACK_VERBATIM_VARIABLES YES)

gpt4all-chat/embllm.cpp

@@ -71,7 +71,13 @@ bool EmbeddingLLMWorker::loadModel()
         return true;
     }

-    QString filePath = u"%1/../resources/%2"_s.arg(QCoreApplication::applicationDirPath(), LOCAL_EMBEDDING_MODEL);
+#ifdef Q_OS_DARWIN
+    static const QString embPathFmt = u"%1/../Resources/%2"_s;
+#else
+    static const QString embPathFmt = u"%1/../resources/%2"_s;
+#endif
+
+    QString filePath = embPathFmt.arg(QCoreApplication::applicationDirPath(), LOCAL_EMBEDDING_MODEL);
     if (!QFileInfo::exists(filePath)) {
         qWarning() << "WARNING: Local embedding model not found";
         return false;

gpt4all-chat/llm.cpp

@@ -49,10 +49,10 @@ bool LLM::hasSettingsAccess() const
 bool LLM::checkForUpdates() const
 {
 #ifdef GPT4ALL_OFFLINE_INSTALLER
-#pragma message "offline installer build will not check for updates!"
+#   pragma message(__FILE__ ": WARNING: offline installer build will not check for updates!")
     return QDesktopServices::openUrl(QUrl("https://gpt4all.io/"));
 #else
     Network::globalInstance()->trackEvent("check_for_updates");

 #if defined(Q_OS_LINUX)
@@ -71,7 +71,7 @@ bool LLM::checkForUpdates() const
     }
     return QProcess::startDetached(fileName);
 #endif
 }

 bool LLM::directoryExists(const QString &path)

gpt4all-chat/oscompat.cpp

@@ -1,70 +0,0 @@
-#include "oscompat.h"
-
-#include <QByteArray>
-#include <QString>
-#include <QtGlobal>
-
-#ifdef Q_OS_WIN32
-#   define WIN32_LEAN_AND_MEAN
-#   ifndef NOMINMAX
-#       define NOMINMAX
-#   endif
-#   include <windows.h>
-#   include <errno.h>
-#else
-#   include <fcntl.h>
-#   include <unistd.h>
-#endif
-
-bool gpt4all_fsync(int fd)
-{
-#if defined(Q_OS_WIN32)
-    HANDLE handle = HANDLE(_get_osfhandle(fd));
-    if (handle == INVALID_HANDLE_VALUE) {
-        errno = EBADF;
-        return false;
-    }
-
-    if (FlushFileBuffers(handle))
-        return true;
-
-    DWORD error = GetLastError();
-    switch (error) {
-    case ERROR_ACCESS_DENIED: // read-only file
-        return true;
-    case ERROR_INVALID_HANDLE: // not a regular file
-        errno = EINVAL;
-    default:
-        errno = EIO;
-    }
-    return false;
-#elif defined(Q_OS_DARWIN)
-    return fcntl(fd, F_FULLFSYNC, 0) == 0;
-#else
-    return fsync(fd) == 0;
-#endif
-}
-
-bool gpt4all_fdatasync(int fd)
-{
-#if defined(Q_OS_WIN32) || defined(Q_OS_DARWIN)
-    return gpt4all_fsync(fd);
-#else
-    return fdatasync(fd) == 0;
-#endif
-}
-
-bool gpt4all_syncdir(const QString &path)
-{
-#if defined(Q_OS_WIN32)
-    (void)path; // cannot sync a directory on Windows
-    return true;
-#else
-    int fd = open(path.toLocal8Bit().constData(), O_RDONLY | O_DIRECTORY);
-    if (fd == -1) return false;
-    bool ok = gpt4all_fdatasync(fd);
-    close(fd);
-    return ok;
-#endif
-}

gpt4all-chat/oscompat.h

@@ -1,7 +0,0 @@
-#pragma once
-
-class QString;
-
-bool gpt4all_fsync(int fd);
-bool gpt4all_fdatasync(int fd);
-bool gpt4all_syncdir(const QString &path);

gpt4all-chat/usearch

@@ -1 +1 @@
-Subproject commit 5ea48c87c56a25ab57634a8f207f80ae675ed58e
+Subproject commit 22cfa3bd00ea542132ee826cdb220f9d6434bd43