Mirror of https://github.com/nomic-ai/gpt4all.git
We no longer have an avx_only repository, and we now have better error handling for minimum hardware requirements. (#833)
commit 5f95aa9fc6
parent 9f590db98d
llmodel.cpp
@@ -11,6 +11,20 @@
 
 std::string LLModel::m_implementations_search_path = ".";
 
+static bool has_at_least_minimal_hardware() {
+#ifdef __x86_64__
+    #ifndef _MSC_VER
+    return __builtin_cpu_supports("avx");
+    #else
+    int cpuInfo[4];
+    __cpuid(cpuInfo, 1);
+    return cpuInfo[2] & (1 << 28);
+    #endif
+#else
+    return true; // Don't know how to handle non-x86_64
+#endif
+}
+
 static bool requires_avxonly() {
 #ifdef __x86_64__
 #ifndef _MSC_VER
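Note: the probe above keys off CPUID leaf 1, where ECX bit 28 is the AVX feature flag; __builtin_cpu_supports("avx") is the GCC/Clang equivalent of the MSVC __cpuid path. A minimal standalone sketch of the same check, assuming an x86-64 target (illustration only, not part of this commit):

    // avx_probe.cpp -- hypothetical test harness, not in the repository
    #include <cstdio>
    #ifdef _MSC_VER
    #include <intrin.h>
    #endif

    static bool cpu_supports_avx() {
    #ifndef _MSC_VER
        return __builtin_cpu_supports("avx");   // GCC/Clang builtin
    #else
        int cpuInfo[4];
        __cpuid(cpuInfo, 1);                     // CPUID leaf 1: feature flags
        return cpuInfo[2] & (1 << 28);           // ECX bit 28 = AVX
    #endif
    }

    int main() {
        std::printf("AVX supported: %s\n", cpu_supports_avx() ? "yes" : "no");
        return 0;
    }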
@@ -98,6 +112,10 @@ const LLModel::Implementation* LLModel::implementation(std::ifstream& f, const s
 }
 
 LLModel *LLModel::construct(const std::string &modelPath, std::string buildVariant) {
+
+    if (!has_at_least_minimal_hardware())
+        return nullptr;
+
     //TODO: Auto-detect CUDA/OpenCL
     if (buildVariant == "auto") {
         if (requires_avxonly()) {
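Note: with this change, LLModel::construct() returns nullptr not only when no implementation matches but also when the CPU lacks AVX, so callers must check the result. A hedged usage sketch (the header name and error handling are illustrative assumptions, not code from this commit):

    #include <iostream>
    #include <string>
    #include "llmodel.h"   // assumed header exposing the LLModel interface

    void load_model(const std::string &path) {
        LLModel *model = LLModel::construct(path, "auto");
        if (!model) {
            // Either no backend implementation matched, or
            // has_at_least_minimal_hardware() returned false (no AVX).
            std::cerr << "Failed to construct model; hardware may be incompatible.\n";
            return;
        }
        // ... use the model, then clean up.
        delete model;
    }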
CMakeLists.txt
@@ -30,7 +30,6 @@ set(CMAKE_AUTORCC ON)
 set(CMAKE_CXX_STANDARD_REQUIRED ON)
 
 option(GPT4ALL_LOCALHOST OFF "Build installer for localhost repo")
-option(GPT4ALL_AVX_ONLY OFF "Build for avx only")
 option(GPT4ALL_OFFLINE_INSTALLER "Build an offline installer" OFF)
 
 # Generate a header file with the version number
@@ -215,23 +214,11 @@ elseif(GPT4ALL_OFFLINE_INSTALLER)
   cpack_ifw_add_repository("GPT4AllRepository" URL "file://${CMAKE_BINARY_DIR}/packages")
 else()
   if(${CMAKE_SYSTEM_NAME} MATCHES Linux)
-    if (GPT4ALL_AVX_ONLY)
-      cpack_ifw_add_repository("GPT4AllRepository" URL "https://gpt4all.io/installer_repos/avx_only/linux/repository")
-    else()
     cpack_ifw_add_repository("GPT4AllRepository" URL "https://gpt4all.io/installer_repos/linux/repository")
-    endif()
   elseif(${CMAKE_SYSTEM_NAME} MATCHES Windows)
     #To sign the target on windows have to create a batch script add use it as a custom target and then use CPACK_IFW_EXTRA_TARGETS to set this extra target
-    if (GPT4ALL_AVX_ONLY)
-      cpack_ifw_add_repository("GPT4AllRepository" URL "https://gpt4all.io/installer_repos/avx_only/windows/repository")
-    else()
     cpack_ifw_add_repository("GPT4AllRepository" URL "https://gpt4all.io/installer_repos/windows/repository")
-    endif()
   elseif(${CMAKE_SYSTEM_NAME} MATCHES Darwin)
-    if (GPT4ALL_AVX_ONLY)
-      cpack_ifw_add_repository("GPT4AllRepository" URL "https://gpt4all.io/installer_repos/avx_only/mac/repository")
-    else()
     cpack_ifw_add_repository("GPT4AllRepository" URL "https://gpt4all.io/installer_repos/mac/repository")
-    endif()
   endif()
 endif()
config.h.in
@@ -2,6 +2,5 @@
 #define CONFIG_H
 
 #define APP_VERSION "@APP_VERSION@"
-#define GPT4ALL_AVX_ONLY "@GPT4ALL_AVX_ONLY@"
 
 #endif // CONFIG_H
llm.cpp
@@ -30,15 +30,20 @@ LLM::LLM()
     connect(this, &LLM::serverEnabledChanged,
         m_chatListModel, &ChatListModel::handleServerEnabledChanged);
 
-#if defined(__x86_64__) || defined(__i386__)
-    if (QString(GPT4ALL_AVX_ONLY) == "OFF") {
-        const bool avx(__builtin_cpu_supports("avx"));
-        const bool avx2(__builtin_cpu_supports("avx2"));
-        const bool fma(__builtin_cpu_supports("fma"));
-        m_compatHardware = avx && avx2 && fma;
-        emit compatHardwareChanged();
-    }
+#if defined(__x86_64__)
+#ifndef _MSC_VER
+    const bool minimal(__builtin_cpu_supports("avx"));
+#else
+    int cpuInfo[4];
+    __cpuid(cpuInfo, 1);
+    const bool minimal(cpuInfo[2] & (1 << 28));
+#endif
+#else
+    const bool minimal = true; // Don't know how to handle non-x86_64
 #endif
+
+    m_compatHardware = minimal;
+    emit compatHardwareChanged();
 }
 
 bool LLM::checkForUpdates() const
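Note: the chat application now runs the same minimal AVX probe as the backend and stores the result in m_compatHardware, which the QML layer reads to decide whether to show the incompatible-hardware dialog below. A hedged sketch of how such a flag is typically surfaced to QML via the standard Qt property pattern (the real declaration lives in llm.h and is not shown in this diff; the exact names are assumptions):

    #include <QObject>

    class LLM : public QObject {
        Q_OBJECT
        // Expose the hardware-compatibility flag as a readable, notifying property.
        Q_PROPERTY(bool compatHardware READ compatHardware NOTIFY compatHardwareChanged)

    public:
        bool compatHardware() const { return m_compatHardware; }

    Q_SIGNALS:
        void compatHardwareChanged();

    private:
        bool m_compatHardware = true;
    };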
main.qml
@@ -95,7 +95,7 @@ Window {
         shouldShowBusy: false
         closePolicy: Popup.NoAutoClose
         modal: true
-        text: qsTr("Incompatible hardware detected. Please try the avx-only installer on https://gpt4all.io")
+        text: qsTr("Incompatible hardware detected. Your hardware does not meet the minimal requirements to run GPT4All. In particular, it does not seem to support AVX intrinsics. See here for more: https://en.wikipedia.org/wiki/Advanced_Vector_Extensions")
     }
 
     StartupDialog {
network.cpp
@@ -412,11 +412,6 @@ void Network::sendMixpanelEvent(const QString &ev, const QVector<KeyValue> &valu
     const QSize display = QGuiApplication::primaryScreen()->size();
     properties.insert("display", QString("%1x%2").arg(display.width()).arg(display.height()));
     properties.insert("ram", getSystemTotalRAM());
-#if defined(__x86_64__) || defined(__i386__)
-    properties.insert("avx", bool(__builtin_cpu_supports("avx")));
-    properties.insert("avx2", bool(__builtin_cpu_supports("avx2")));
-    properties.insert("fma", bool(__builtin_cpu_supports("fma")));
-#endif
 #if defined(Q_OS_MAC)
     properties.insert("cpu", QString::fromStdString(getCPUModel()));
 #endif