cmake_minimum_required(VERSION 3.16)

# Export all symbols on Windows so the shared model libraries can be
# loaded without per-symbol dllexport annotations.
set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS ON)
# Emit compile_commands.json for clangd/clang-tidy tooling.
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)

if(APPLE)
  option(BUILD_UNIVERSAL "Build a Universal binary on macOS" ON)
  if(BUILD_UNIVERSAL)
    # Build a Universal binary on macOS
    # This requires that the found Qt library is compiled as Universal binaries.
    set(CMAKE_OSX_ARCHITECTURES "arm64;x86_64" CACHE STRING "" FORCE)
  else()
    # Build for the host architecture on macOS
    if(NOT CMAKE_OSX_ARCHITECTURES)
      set(CMAKE_OSX_ARCHITECTURES "${CMAKE_HOST_SYSTEM_PROCESSOR}" CACHE STRING "" FORCE)
    endif()
  endif()
endif()

# Include the binary directory for the generated header file
# NOTE(review): directory-scoped include_directories leaks to every target
# below; target_include_directories would be the modern form, but changing
# it here could affect targets defined outside this file.
include_directories("${CMAKE_CURRENT_BINARY_DIR}")
# Library version, exposed as the shared-library VERSION/SOVERSION below.
set(LLMODEL_VERSION_MAJOR 0)
set(LLMODEL_VERSION_MINOR 5)
set(LLMODEL_VERSION_PATCH 0)
set(LLMODEL_VERSION "${LLMODEL_VERSION_MAJOR}.${LLMODEL_VERSION_MINOR}.${LLMODEL_VERSION_PATCH}")
project(llmodel VERSION ${LLMODEL_VERSION} LANGUAGES CXX C)
set(CMAKE_CXX_STANDARD 20)
set(CMAKE_CXX_STANDARD_REQUIRED ON)

# Put shared libraries next to the executables so runtime lookup finds them.
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY})
set(BUILD_SHARED_LIBS ON)

# Check for IPO support
include(CheckIPOSupported)
check_ipo_supported(RESULT IPO_SUPPORTED OUTPUT IPO_ERROR)
if(NOT IPO_SUPPORTED)
  message(WARNING "Interprocedural optimization is not supported by your toolchain! This will lead to bigger file sizes and worse performance: ${IPO_ERROR}")
else()
  message(STATUS "Interprocedural optimization support detected")
endif()

# Provides include_ggml() used to build the llama.cpp backends per variant.
include(llama.cpp.cmake)

# Each variant produces a full set of backend libraries with different
# CPU-feature requirements; the loader picks the right one at runtime.
set(BUILD_VARIANTS default avxonly)
# macOS additionally gets a Metal-accelerated variant.
if(CMAKE_SYSTEM_NAME MATCHES "Darwin")
  set(BUILD_VARIANTS ${BUILD_VARIANTS} metal)
endif()

set(CMAKE_VERBOSE_MAKEFILE ON)
2023-05-31 17:04:01 -04:00
# Go through each build variant
foreach ( BUILD_VARIANT IN LISTS BUILD_VARIANTS )
# Determine flags
if ( BUILD_VARIANT STREQUAL avxonly )
set ( GPT4ALL_ALLOW_NON_AVX NO )
else ( )
set ( GPT4ALL_ALLOW_NON_AVX YES )
endif ( )
set ( LLAMA_AVX2 ${ GPT4ALL_ALLOW_NON_AVX } )
set ( LLAMA_F16C ${ GPT4ALL_ALLOW_NON_AVX } )
set ( LLAMA_FMA ${ GPT4ALL_ALLOW_NON_AVX } )
2023-06-09 16:48:46 -04:00
if ( BUILD_VARIANT STREQUAL metal )
set ( LLAMA_METAL YES )
else ( )
set ( LLAMA_METAL NO )
endif ( )
2023-05-31 17:04:01 -04:00
# Include GGML
2023-06-15 17:06:14 -04:00
set ( LLAMA_K_QUANTS YES )
2023-05-31 17:04:01 -04:00
include_ggml ( llama.cpp-mainline -mainline- ${ BUILD_VARIANT } ON )
# Function for preparing individual implementations
function ( prepare_target TARGET_NAME BASE_LIB )
set ( TARGET_NAME ${ TARGET_NAME } - ${ BUILD_VARIANT } )
message ( STATUS "Configuring model implementation target ${TARGET_NAME}" )
# Link to ggml/llama
target_link_libraries ( ${ TARGET_NAME }
2023-06-05 14:30:56 -04:00
P R I V A T E $ { B A S E _ L I B } - $ { B U I L D _ V A R I A N T } )
2023-05-31 17:04:01 -04:00
# Let it know about its build variant
target_compile_definitions ( ${ TARGET_NAME }
P R I V A T E G G M L _ B U I L D _ V A R I A N T = " $ { B U I L D _ V A R I A N T } " )
# Enable IPO if possible
2023-06-07 12:58:21 -04:00
# FIXME: Doesn't work with msvc reliably. See https://github.com/nomic-ai/gpt4all/issues/841
# set_property(TARGET ${TARGET_NAME}
# PROPERTY INTERPROCEDURAL_OPTIMIZATION ${IPO_SUPPORTED})
2023-05-31 17:04:01 -04:00
endfunction ( )
# Add each individual implementations
add_library ( llamamodel-mainline- ${ BUILD_VARIANT } SHARED
2023-06-02 10:47:12 -04:00
l l a m a m o d e l . c p p l l m o d e l _ s h a r e d . c p p )
2023-05-31 17:04:01 -04:00
target_compile_definitions ( llamamodel-mainline- ${ BUILD_VARIANT } PRIVATE
L L A M A _ V E R S I O N S = > = 3 L L A M A _ D A T E = 9 9 9 9 9 9 )
prepare_target ( llamamodel-mainline llama-mainline )
2023-06-09 16:48:46 -04:00
if ( NOT LLAMA_METAL )
2023-09-28 16:17:06 -04:00
add_library ( gptj- ${ BUILD_VARIANT } SHARED
g p t j . c p p u t i l s . h u t i l s . c p p l l m o d e l _ s h a r e d . c p p l l m o d e l _ s h a r e d . h )
prepare_target ( gptj llama-mainline )
2023-06-09 16:48:46 -04:00
2023-07-07 09:31:34 -04:00
add_library ( bert- ${ BUILD_VARIANT } SHARED
b e r t . c p p u t i l s . h u t i l s . c p p l l m o d e l _ s h a r e d . c p p l l m o d e l _ s h a r e d . h )
2023-07-14 11:10:41 -04:00
target_compile_definitions ( bert- ${ BUILD_VARIANT } PRIVATE LLAMA_VERSIONS=>=3 LLAMA_DATE=999999 )
2023-07-07 09:31:34 -04:00
prepare_target ( bert llama-mainline )
2023-06-09 16:48:46 -04:00
endif ( )
2023-05-31 17:04:01 -04:00
endforeach ( )
# Umbrella library: loads the per-variant implementation libraries at runtime
# (via dlhandle) and exposes the C/C++ llmodel API.
add_library(llmodel
    llmodel.h llmodel.cpp llmodel_shared.cpp
    llmodel_c.h llmodel_c.cpp
    dlhandle.h
)
# Tell the loader which shared-library suffix to look for on this platform.
target_compile_definitions(llmodel PRIVATE LIB_FILE_EXT="${CMAKE_SHARED_LIBRARY_SUFFIX}")

set_target_properties(llmodel PROPERTIES
                      VERSION ${PROJECT_VERSION}
                      SOVERSION ${PROJECT_VERSION_MAJOR})

set(COMPONENT_NAME_MAIN ${PROJECT_NAME})
# Install into the build tree by default rather than the system prefix.
set(CMAKE_INSTALL_PREFIX ${CMAKE_BINARY_DIR}/install)