From d918b02c29b7dddfabae740c2156ad44ff1af2f5 Mon Sep 17 00:00:00 2001
From: Adam Treat
Date: Wed, 10 May 2023 11:46:40 -0400
Subject: [PATCH] Move the llmodel C API to new top-level directory and
 version it.

---
 .gitmodules | 2 +-
 .../llmodel => gpt4all-backend}/CMakeLists.txt | 10 +++++++++-
 {gpt4all-chat/llmodel => gpt4all-backend}/gptj.cpp | 0
 {gpt4all-chat/llmodel => gpt4all-backend}/gptj.h | 0
 {gpt4all-chat/llmodel => gpt4all-backend}/llama.cpp | 0
 .../llmodel => gpt4all-backend}/llamamodel.cpp | 0
 {gpt4all-chat/llmodel => gpt4all-backend}/llamamodel.h | 0
 {gpt4all-chat/llmodel => gpt4all-backend}/llmodel.h | 0
 .../llmodel => gpt4all-backend}/llmodel_c.cpp | 0
 {gpt4all-chat/llmodel => gpt4all-backend}/llmodel_c.h | 0
 {gpt4all-chat/llmodel => gpt4all-backend}/mpt.cpp | 0
 {gpt4all-chat/llmodel => gpt4all-backend}/mpt.h | 0
 .../scripts/convert_mpt_hf_to_ggml.py | 0
 {gpt4all-chat/llmodel => gpt4all-backend}/utils.cpp | 0
 {gpt4all-chat/llmodel => gpt4all-backend}/utils.h | 0
 gpt4all-chat/CMakeLists.txt | 2 +-
 gpt4all-chat/chatllm.cpp | 6 +++---
 gpt4all-chat/chatllm.h | 2 +-
 18 files changed, 15 insertions(+), 7 deletions(-)
 rename {gpt4all-chat/llmodel => gpt4all-backend}/CMakeLists.txt (76%)
 rename {gpt4all-chat/llmodel => gpt4all-backend}/gptj.cpp (100%)
 rename {gpt4all-chat/llmodel => gpt4all-backend}/gptj.h (100%)
 rename {gpt4all-chat/llmodel => gpt4all-backend}/llama.cpp (100%)
 rename {gpt4all-chat/llmodel => gpt4all-backend}/llamamodel.cpp (100%)
 rename {gpt4all-chat/llmodel => gpt4all-backend}/llamamodel.h (100%)
 rename {gpt4all-chat/llmodel => gpt4all-backend}/llmodel.h (100%)
 rename {gpt4all-chat/llmodel => gpt4all-backend}/llmodel_c.cpp (100%)
 rename {gpt4all-chat/llmodel => gpt4all-backend}/llmodel_c.h (100%)
 rename {gpt4all-chat/llmodel => gpt4all-backend}/mpt.cpp (100%)
 rename {gpt4all-chat/llmodel => gpt4all-backend}/mpt.h (100%)
 rename {gpt4all-chat/llmodel => gpt4all-backend}/scripts/convert_mpt_hf_to_ggml.py (100%)
 rename {gpt4all-chat/llmodel => gpt4all-backend}/utils.cpp (100%)
 rename {gpt4all-chat/llmodel => gpt4all-backend}/utils.h (100%)

diff --git a/.gitmodules b/.gitmodules
index eb06ee48..e00584ea 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,3 +1,3 @@
 [submodule "llama.cpp"]
-	path = gpt4all-chat/llmodel/llama.cpp
+	path = gpt4all-backend/llama.cpp
 	url = https://github.com/manyoso/llama.cpp.git
diff --git a/gpt4all-chat/llmodel/CMakeLists.txt b/gpt4all-backend/CMakeLists.txt
similarity index 76%
rename from gpt4all-chat/llmodel/CMakeLists.txt
rename to gpt4all-backend/CMakeLists.txt
index 704faccc..a7f1c6f0 100644
--- a/gpt4all-chat/llmodel/CMakeLists.txt
+++ b/gpt4all-backend/CMakeLists.txt
@@ -15,7 +15,11 @@ endif()
 
 # Include the binary directory for the generated header file
 include_directories("${CMAKE_CURRENT_BINARY_DIR}")
 
-project(llmodel VERSION ${APP_VERSION} LANGUAGES CXX C)
+set(LLMODEL_VERSION_MAJOR 0)
+set(LLMODEL_VERSION_MINOR 1)
+set(LLMODEL_VERSION_PATCH 0)
+set(LLMODEL_VERSION "${LLMODEL_VERSION_MAJOR}.${LLMODEL_VERSION_MINOR}.${LLMODEL_VERSION_PATCH}")
+project(llmodel VERSION ${LLMODEL_VERSION} LANGUAGES CXX C)
 
 set(CMAKE_CXX_STANDARD_REQUIRED ON)
@@ -43,5 +47,9 @@ add_library(llmodel
 
 target_link_libraries(llmodel PRIVATE llama)
 
+set_target_properties(llmodel PROPERTIES
+    VERSION ${PROJECT_VERSION}
+    SOVERSION ${PROJECT_VERSION_MAJOR})
+
 set(COMPONENT_NAME_MAIN ${PROJECT_NAME})
 set(CMAKE_INSTALL_PREFIX ${CMAKE_BINARY_DIR}/install)
diff --git a/gpt4all-chat/llmodel/gptj.cpp b/gpt4all-backend/gptj.cpp
similarity index 100%
rename from gpt4all-chat/llmodel/gptj.cpp
rename to gpt4all-backend/gptj.cpp
diff --git a/gpt4all-chat/llmodel/gptj.h b/gpt4all-backend/gptj.h
similarity index 100%
rename from gpt4all-chat/llmodel/gptj.h
rename to gpt4all-backend/gptj.h
diff --git a/gpt4all-chat/llmodel/llama.cpp b/gpt4all-backend/llama.cpp
similarity index 100%
rename from gpt4all-chat/llmodel/llama.cpp
rename to gpt4all-backend/llama.cpp
diff --git a/gpt4all-chat/llmodel/llamamodel.cpp b/gpt4all-backend/llamamodel.cpp
similarity index 100%
rename from gpt4all-chat/llmodel/llamamodel.cpp
rename to gpt4all-backend/llamamodel.cpp
diff --git a/gpt4all-chat/llmodel/llamamodel.h b/gpt4all-backend/llamamodel.h
similarity index 100%
rename from gpt4all-chat/llmodel/llamamodel.h
rename to gpt4all-backend/llamamodel.h
diff --git a/gpt4all-chat/llmodel/llmodel.h b/gpt4all-backend/llmodel.h
similarity index 100%
rename from gpt4all-chat/llmodel/llmodel.h
rename to gpt4all-backend/llmodel.h
diff --git a/gpt4all-chat/llmodel/llmodel_c.cpp b/gpt4all-backend/llmodel_c.cpp
similarity index 100%
rename from gpt4all-chat/llmodel/llmodel_c.cpp
rename to gpt4all-backend/llmodel_c.cpp
diff --git a/gpt4all-chat/llmodel/llmodel_c.h b/gpt4all-backend/llmodel_c.h
similarity index 100%
rename from gpt4all-chat/llmodel/llmodel_c.h
rename to gpt4all-backend/llmodel_c.h
diff --git a/gpt4all-chat/llmodel/mpt.cpp b/gpt4all-backend/mpt.cpp
similarity index 100%
rename from gpt4all-chat/llmodel/mpt.cpp
rename to gpt4all-backend/mpt.cpp
diff --git a/gpt4all-chat/llmodel/mpt.h b/gpt4all-backend/mpt.h
similarity index 100%
rename from gpt4all-chat/llmodel/mpt.h
rename to gpt4all-backend/mpt.h
diff --git a/gpt4all-chat/llmodel/scripts/convert_mpt_hf_to_ggml.py b/gpt4all-backend/scripts/convert_mpt_hf_to_ggml.py
similarity index 100%
rename from gpt4all-chat/llmodel/scripts/convert_mpt_hf_to_ggml.py
rename to gpt4all-backend/scripts/convert_mpt_hf_to_ggml.py
diff --git a/gpt4all-chat/llmodel/utils.cpp b/gpt4all-backend/utils.cpp
similarity index 100%
rename from gpt4all-chat/llmodel/utils.cpp
rename to gpt4all-backend/utils.cpp
diff --git a/gpt4all-chat/llmodel/utils.h b/gpt4all-backend/utils.h
similarity index 100%
rename from gpt4all-chat/llmodel/utils.h
rename to gpt4all-backend/utils.h
diff --git a/gpt4all-chat/CMakeLists.txt b/gpt4all-chat/CMakeLists.txt
index fbe018b9..0ec305f2 100644
--- a/gpt4all-chat/CMakeLists.txt
+++ b/gpt4all-chat/CMakeLists.txt
@@ -52,7 +52,7 @@ get_filename_component(Qt6_ROOT_DIR "${Qt6_ROOT_DIR}/.." ABSOLUTE)
 message(STATUS "qmake binary: ${QMAKE_EXECUTABLE}")
 message(STATUS "Qt 6 root directory: ${Qt6_ROOT_DIR}")
 
-add_subdirectory(llmodel)
+add_subdirectory(../gpt4all-backend llmodel)
 
 set (CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
 
diff --git a/gpt4all-chat/chatllm.cpp b/gpt4all-chat/chatllm.cpp
index 2ffbc3c7..cea13fb5 100644
--- a/gpt4all-chat/chatllm.cpp
+++ b/gpt4all-chat/chatllm.cpp
@@ -2,9 +2,9 @@
 #include "chat.h"
 #include "download.h"
 #include "network.h"
-#include "llmodel/gptj.h"
-#include "llmodel/llamamodel.h"
-#include "llmodel/mpt.h"
+#include "../gpt4all-backend/gptj.h"
+#include "../gpt4all-backend/llamamodel.h"
+#include "../gpt4all-backend/mpt.h"
 
 #include
 #include
diff --git a/gpt4all-chat/chatllm.h b/gpt4all-chat/chatllm.h
index bb488b16..d134e414 100644
--- a/gpt4all-chat/chatllm.h
+++ b/gpt4all-chat/chatllm.h
@@ -4,7 +4,7 @@
 #include
 #include
 
-#include "llmodel/llmodel.h"
+#include "../gpt4all-backend/llmodel.h"
 
 class Chat;
 class ChatLLM : public QObject
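
With the backend relocated to a top-level gpt4all-backend/ directory and the llmodel target now carrying VERSION 0.1.0 and SOVERSION 0 (so a Linux build produces libllmodel.so.0.1.0 with SONAME libllmodel.so.0), an out-of-tree consumer can wire the library in the same way gpt4all-chat does above. The following is a minimal sketch only: the consumer project name, executable, and relative path are hypothetical and not part of this patch, and the extra include directory is needed because the backend's CMakeLists.txt shown here does not export one.

    cmake_minimum_required(VERSION 3.16)
    project(llmodel_consumer LANGUAGES CXX C)   # hypothetical consumer project

    # Build the relocated backend in-tree; the second argument names the
    # binary directory, mirroring gpt4all-chat/CMakeLists.txt in this patch.
    add_subdirectory(../gpt4all-backend llmodel)

    add_executable(consumer main.cpp)           # main.cpp assumed to exist

    # Make llmodel.h / llmodel_c.h visible; gpt4all-chat instead uses
    # relative "../gpt4all-backend/..." includes, as the hunks above show.
    target_include_directories(consumer PRIVATE ../gpt4all-backend)

    # Link against the versioned llmodel library defined by the backend.
    target_link_libraries(consumer PRIVATE llmodel)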