# Copyright (c) 2014-2018, The Monero Project
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are
# permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of
#    conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list
#    of conditions and the following disclaimer in the documentation and/or other
#    materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be
#    used to endorse or promote products derived from this software without specific
#    prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
# THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Parts of this file are originally copyright (c) 2012-2013 The Cryptonote developers
list ( INSERT CMAKE_MODULE_PATH 0 "${CMAKE_SOURCE_DIR}/cmake" )
include ( CheckCCompilerFlag )
include ( CheckCXXCompilerFlag )
include ( CheckLinkerFlag )
include ( CheckLibraryExists )
include ( CheckFunctionExists )

if ( IOS )
INCLUDE ( CmakeLists_IOS.txt )
endif ( )

cmake_minimum_required ( VERSION 2.8.7 )

message ( STATUS "CMake version ${CMAKE_VERSION}" )

project ( monero )

enable_language ( C ASM )
function ( die msg )
if ( NOT WIN32 )
string ( ASCII 27 Esc )
set ( ColourReset "${Esc}[m" )
set ( BoldRed "${Esc}[1;31m" )
else ( )
set ( ColourReset "" )
set ( BoldRed "" )
endif ( )
message ( FATAL_ERROR "${BoldRed}${msg}${ColourReset}" )
endfunction ( )
function ( add_c_flag_if_supported flag var )
string ( REPLACE "-" "_" supported ${flag}_c )
check_c_compiler_flag ( ${flag} ${supported} )
if ( ${${supported}} )
set ( ${var} "${${var}} ${flag}" PARENT_SCOPE )
endif ( )
endfunction ( )

function ( add_cxx_flag_if_supported flag var )
string ( REPLACE "-" "_" supported ${flag}_cxx )
check_cxx_compiler_flag ( ${flag} ${supported} )
if ( ${${supported}} )
set ( ${var} "${${var}} ${flag}" PARENT_SCOPE )
endif ( )
endfunction ( )
function ( add_linker_flag_if_supported flag var )
string ( REPLACE "-" "_" supported ${flag}_ld )
# chain the second replacement on the already-sanitized name so both '-' and ',' are replaced
string ( REPLACE "," "_" supported ${supported} )
check_linker_flag ( ${flag} ${supported} )
if ( ${${supported}} )
set ( ${var} "${${var}} ${flag}" PARENT_SCOPE )
endif ( )
endfunction ( )
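
# Illustrative usage of the helpers above (the flag names here are examples only,
# not calls made at this point in the file): append a flag only if the compiler
# or linker actually accepts it.
#   add_c_flag_if_supported(-Wformat CMAKE_C_FLAGS)
#   add_linker_flag_if_supported(-Wl,--as-needed CMAKE_EXE_LINKER_FLAGS)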
function ( add_definition_if_function_found function var )
string ( REPLACE "-" "_" supported ${function}_function )
check_function_exists ( ${function} ${supported} )
if ( ${${supported}} )
add_definitions ( "-D${var}" )
endif ( )
endfunction ( )

function ( add_definition_if_library_exists library function header var )
string ( REPLACE "-" "_" supported ${function}_library )
check_library_exists ( ${library} ${function} ${header} ${supported} )
if ( ${${supported}} )
add_definitions ( "-D${var}" )
endif ( )
endfunction ( )
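
# Illustrative usage (hypothetical symbol/macro names, shown only to document the
# helpers): define HAVE_STRPTIME if strptime() exists, and HAVE_LIBM_SQRT if
# sqrt() can be resolved from libm via math.h.
#   add_definition_if_function_found(strptime HAVE_STRPTIME)
#   add_definition_if_library_exists(m sqrt math.h HAVE_LIBM_SQRT)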
if ( NOT CMAKE_BUILD_TYPE )
set ( CMAKE_BUILD_TYPE Release CACHE STRING "Build type" FORCE )
message ( STATUS "Setting default build type: ${CMAKE_BUILD_TYPE}" )
endif ( )
string ( TOLOWER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_LOWER )
# ARCH defines the target architecture, either by an explicit identifier or
# one of the following two keywords. By default, ARCH has a value of 'native':
# target arch = host arch, and the binary is not portable. When ARCH is set to
# the string 'default', no -march argument is passed, which creates a binary
# that is portable across processors of the same family as the host processor.
# When ARCH is not set to an explicit identifier, cmake's builtin
# CMAKE_SYSTEM_PROCESSOR is used to identify the target architecture and direct
# the logic in this cmake script. Since ARCH is a cached variable, it is not
# yet set on the first cmake invocation.
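# Illustrative invocations (the values shown are examples, not an exhaustive list):
#   cmake ..                  # ARCH defaults to 'native': tuned for, and only portable to, the host CPU
#   cmake -DARCH=default ..   # no -march flag: portable within the host CPU family
#   cmake -DARCH=armv7-a ..   # explicit architecture identifier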
if ( NOT ARCH OR ARCH STREQUAL "" OR ARCH STREQUAL "native" OR ARCH STREQUAL "default" )
set ( ARCH_ID "${CMAKE_SYSTEM_PROCESSOR}" )
else ( )
set ( ARCH_ID "${ARCH}" )
endif ( )
string ( TOLOWER "${ARCH_ID}" ARM_ID )
string ( SUBSTRING "${ARM_ID}" 0 3 ARM_TEST )
if ( ARM_TEST STREQUAL "arm" )
set ( ARM 1 )
string ( SUBSTRING "${ARM_ID}" 0 5 ARM_TEST )
if ( ARM_TEST STREQUAL "armv6" )
set ( ARM6 1 )
endif ( )
if ( ARM_TEST STREQUAL "armv7" )
set ( ARM7 1 )
endif ( )
endif ( )
if ( ARM_ID STREQUAL "aarch64" OR ARM_ID STREQUAL "arm64" OR ARM_ID STREQUAL "armv8-a" )
set ( ARM 1 )
set ( ARM8 1 )
set ( ARCH "armv8-a" )
endif ( )

if ( ARCH_ID STREQUAL "ppc64le" )
set ( PPC64LE 1 )
set ( PPC64 0 )
set ( PPC 0 )
endif ( )

if ( ARCH_ID STREQUAL "powerpc64" OR ARCH_ID STREQUAL "ppc64" )
set ( PPC64LE 0 )
set ( PPC64 1 )
set ( PPC 0 )
endif ( )

if ( ARCH_ID STREQUAL "powerpc" OR ARCH_ID STREQUAL "ppc" )
set ( PPC64LE 0 )
set ( PPC64 0 )
set ( PPC 1 )
endif ( )

if ( ARCH_ID STREQUAL "s390x" )
set ( S390X 1 )
endif ( )

if ( WIN32 OR ARM OR PPC64LE OR PPC64 OR PPC )
set ( OPT_FLAGS_RELEASE "-O2" )
else ( )
set ( OPT_FLAGS_RELEASE "-Ofast" )
endif ( )
# BUILD_TAG identifies this build flavor when checking whether a new version is available
if ( BUILD_TAG )
message ( STATUS "Building build tag ${BUILD_TAG}" )
add_definitions ( "-DBUILD_TAG=${BUILD_TAG}" )
else ( )
message ( STATUS "Building without build tag" )
endif ( )
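# Illustrative: within this file the tag is an arbitrary string that is only
# embedded via the BUILD_TAG preprocessor definition, e.g.
#   cmake -DBUILD_TAG=linux-x64 ..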
if ( NOT MANUAL_SUBMODULES )
find_package ( Git )
if ( GIT_FOUND )
function ( check_submodule relative_path )
execute_process ( COMMAND git rev-parse "HEAD" WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/${relative_path} OUTPUT_VARIABLE localHead )
execute_process ( COMMAND git rev-parse "HEAD:${relative_path}" WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} OUTPUT_VARIABLE checkedHead )
string ( COMPARE EQUAL "${localHead}" "${checkedHead}" upToDate )
if ( upToDate )
message ( STATUS "Submodule '${relative_path}' is up-to-date" )
else ( )
message ( FATAL_ERROR "Submodule '${relative_path}' is not up-to-date. Please update with\ngit submodule update --init --force ${relative_path}\nor run cmake with -DMANUAL_SUBMODULES=1" )
endif ( )
endfunction ( )

message ( STATUS "Checking submodules" )
check_submodule ( external/miniupnp )
check_submodule ( external/unbound )
check_submodule ( external/rapidjson )
check_submodule ( external/trezor-common )
endif ( )
endif ( )
set ( CMAKE_C_FLAGS_RELEASE "-DNDEBUG ${OPT_FLAGS_RELEASE}" )
set ( CMAKE_CXX_FLAGS_RELEASE "-DNDEBUG ${OPT_FLAGS_RELEASE}" )
# set this to 0 if per-block checkpoint needs to be disabled
set ( PER_BLOCK_CHECKPOINT 1 )
if ( PER_BLOCK_CHECKPOINT )
add_definitions ( "-DPER_BLOCK_CHECKPOINT" )
set ( Blocks "blocks" )
else ( )
set ( Blocks "" )
endif ( )
list ( INSERT CMAKE_MODULE_PATH 0 "${CMAKE_SOURCE_DIR}/cmake" )
if ( NOT DEFINED ENV{DEVELOPER_LOCAL_TOOLS} )
message ( STATUS "Could not find DEVELOPER_LOCAL_TOOLS in env (not required)" )
set ( BOOST_IGNORE_SYSTEM_PATHS_DEFAULT OFF )
elseif ( "$ENV{DEVELOPER_LOCAL_TOOLS}" EQUAL 1 )
message ( STATUS "Found: env DEVELOPER_LOCAL_TOOLS = 1" )
set ( BOOST_IGNORE_SYSTEM_PATHS_DEFAULT ON )
else ( )
message ( STATUS "Found: env DEVELOPER_LOCAL_TOOLS = 0" )
set ( BOOST_IGNORE_SYSTEM_PATHS_DEFAULT OFF )
endif ( )
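
# Illustrative: exporting the environment variable before configuring flips the
# BOOST_IGNORE_SYSTEM_PATHS default defined below, e.g.
#   DEVELOPER_LOCAL_TOOLS=1 cmake ..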

message ( STATUS "BOOST_IGNORE_SYSTEM_PATHS defaults to ${BOOST_IGNORE_SYSTEM_PATHS_DEFAULT}" )
option ( BOOST_IGNORE_SYSTEM_PATHS "Ignore boost system paths for local boost installation" ${BOOST_IGNORE_SYSTEM_PATHS_DEFAULT} )
if ( NOT DEFINED ENV{DEVELOPER_LIBUNBOUND_OLD} )
message ( STATUS "Could not find DEVELOPER_LIBUNBOUND_OLD in env (not required)" )
elseif ( "$ENV{DEVELOPER_LIBUNBOUND_OLD}" EQUAL 1 )
message ( STATUS "Found: env DEVELOPER_LIBUNBOUND_OLD = 1, will use the work around" )
add_definitions ( -DDEVELOPER_LIBUNBOUND_OLD )
elseif ( "$ENV{DEVELOPER_LIBUNBOUND_OLD}" EQUAL 0 )
message ( STATUS "Found: env DEVELOPER_LIBUNBOUND_OLD = 0" )
else ( )
message ( STATUS "Found: env DEVELOPER_LIBUNBOUND_OLD with bad value. Will NOT use the work around" )
endif ( )
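
# Illustrative: the old-libunbound work around is likewise toggled from the
# environment, e.g.
#   DEVELOPER_LIBUNBOUND_OLD=1 cmake ..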

set_property ( GLOBAL PROPERTY USE_FOLDERS ON )
enable_testing ( )

option ( BUILD_DOCUMENTATION "Build the Doxygen documentation." ON )
option ( BUILD_TESTS "Build tests." OFF )
# Check whether we're on a 32-bit or 64-bit system
if ( CMAKE_SIZEOF_VOID_P EQUAL "8" )
set ( DEFAULT_BUILD_64 ON )
else ( )
set ( DEFAULT_BUILD_64 OFF )
endif ( )

option ( BUILD_64 "Build for 64-bit? 'OFF' builds for 32-bit." ${DEFAULT_BUILD_64} )
if ( BUILD_64 )
set ( ARCH_WIDTH "64" )
else ( )
set ( ARCH_WIDTH "32" )
endif ( )

message ( STATUS "Building for a ${ARCH_WIDTH}-bit system" )
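# Illustrative: force a 32-bit build on a 64-bit host with
#   cmake -DBUILD_64=OFF ..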

# Check if we're on FreeBSD so we can exclude the local miniupnpc (it should be installed from ports instead)
# CMAKE_SYSTEM_NAME checks are commonly known, but specifically taken from libsdl's CMakeLists
if ( CMAKE_SYSTEM_NAME MATCHES "kFreeBSD.*|FreeBSD" )
set ( FREEBSD TRUE )
endif ( )

# Check if we're on DragonFly BSD. See the README.md for build instructions.
if ( CMAKE_SYSTEM_NAME MATCHES "DragonFly.*" )
set ( DRAGONFLY TRUE )
endif ( )

# Check if we're on OpenBSD. See the README.md for build instructions.
if ( CMAKE_SYSTEM_NAME MATCHES "kOpenBSD.*|OpenBSD.*" )
set ( OPENBSD TRUE )
endif ( )
# TODO: check bsdi, NetBSD, to see if they need the same FreeBSD changes
#
# elseif(CMAKE_SYSTEM_NAME MATCHES "kNetBSD.*|NetBSD.*")
# set(NETBSD TRUE)
# elseif(CMAKE_SYSTEM_NAME MATCHES ".*BSDI.*")
# set(BSDI TRUE)

include_directories ( external/rapidjson/include external/easylogging++ src contrib/epee/include external )

if ( APPLE )
include_directories ( SYSTEM /usr/include/malloc )
if ( POLICY CMP0042 )
cmake_policy ( SET CMP0042 NEW )
endif ( )
endif ( )
if ( MSVC OR MINGW )
set ( DEFAULT_STATIC true )
else ( )
set ( DEFAULT_STATIC false )
endif ( )
option ( STATIC "Link libraries statically" ${DEFAULT_STATIC} )
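# Illustrative: prefer static libraries when resolving dependencies with
#   cmake -DSTATIC=ON ..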

# This is a CMake built-in switch that concerns internal libraries
if ( NOT DEFINED BUILD_SHARED_LIBS AND NOT STATIC AND CMAKE_BUILD_TYPE_LOWER STREQUAL "debug" )
set ( BUILD_SHARED_LIBS ON )
endif ( )

if ( BUILD_SHARED_LIBS )
message ( STATUS "Building internal libraries with position independent code" )
add_definitions ( "-DBUILD_SHARED_LIBS" )
else ( )
message ( STATUS "Building internal libraries as static" )
endif ( )
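# Illustrative: BUILD_SHARED_LIBS is the standard CMake switch, so it can also be
# set explicitly on the command line, e.g.
#   cmake -DBUILD_SHARED_LIBS=ON ..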
set ( PIC_FLAG "-fPIC" )
if ( MINGW )
string ( REGEX MATCH "^[^/]:/[^/]*" msys2_install_path "${CMAKE_C_COMPILER}" )
message ( STATUS "MSYS location: ${msys2_install_path}" )
set ( CMAKE_INCLUDE_PATH "${msys2_install_path}/mingw${ARCH_WIDTH}/include" )
# This is necessary because otherwise CMake will make Boost libraries -lfoo
# rather than a full path. Unfortunately, this makes the shared libraries get
# linked due to a bug in CMake which misses putting -static flags around the
# -lfoo arguments.
set ( DEFLIB ${msys2_install_path}/mingw${ARCH_WIDTH}/lib )
list ( REMOVE_ITEM CMAKE_C_IMPLICIT_LINK_DIRECTORIES ${DEFLIB} )
list ( REMOVE_ITEM CMAKE_CXX_IMPLICIT_LINK_DIRECTORIES ${DEFLIB} )
endif ( )
if ( STATIC )
if ( MSVC )
set ( CMAKE_FIND_LIBRARY_SUFFIXES .lib .dll.a .a ${CMAKE_FIND_LIBRARY_SUFFIXES} )
else ( )
set ( CMAKE_FIND_LIBRARY_SUFFIXES .a ${CMAKE_FIND_LIBRARY_SUFFIXES} )
endif ( )
set ( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DZMQ_STATIC" )
endif ( )
if ( SANITIZE )
if ( MSVC )
message ( FATAL_ERROR "Cannot sanitize with MSVC" )
else ( )
message ( STATUS "Using ASAN" )
set ( CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fsanitize=address" )
set ( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=address" )
endif ( )
endif ( )
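
# Illustrative: enable the AddressSanitizer instrumentation above with
#   cmake -DSANITIZE=ON ..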
# Set the default blockchain storage backend:
# memory was the default in Cryptonote before Monero implemented LMDB; it still works but is unnecessary.
# set(DATABASE memory)
set ( DATABASE lmdb )
if ( DEFINED ENV{DATABASE} )
set ( DATABASE $ENV{DATABASE} )
message ( STATUS "DATABASE set: ${DATABASE}" )
else ( )
message ( STATUS "Could not find DATABASE in env (not required unless you want to change database type from default: ${DATABASE})" )
endif ( )
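
# Illustrative: select a backend from the environment when configuring, e.g.
#   DATABASE=berkeleydb cmake ..
# The values accepted by this file are 'lmdb' (default) and 'berkeleydb'.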
set ( BERKELEY_DB_OVERRIDE 0 )
if ( DEFINED ENV{BERKELEY_DB} )
set ( BERKELEY_DB_OVERRIDE 1 )
set ( BERKELEY_DB $ENV{BERKELEY_DB} )
else ( )
set ( BERKELEY_DB 0 )
endif ( )

if ( DATABASE STREQUAL "lmdb" )
message ( STATUS "Using LMDB as default DB type" )
set ( BLOCKCHAIN_DB DB_LMDB )
add_definitions ( "-DDEFAULT_DB_TYPE=\"lmdb\"" )
elseif ( DATABASE STREQUAL "berkeleydb" )
find_package ( BerkeleyDB )
if ( NOT BERKELEY_DB )
die ( "Found BerkeleyDB includes, but could not find BerkeleyDB library. Please make sure you have installed libdb and libdb-dev / libdb++-dev or the equivalent." )
else ( )
message ( STATUS "Found BerkeleyDB include (db.h) in ${BERKELEY_DB_INCLUDE_DIR}" )
if ( BERKELEY_DB_LIBRARIES )
message ( STATUS "Found BerkeleyDB shared library" )
set ( BDB_STATIC false CACHE BOOL "BDB Static flag" )
set ( BDB_INCLUDE ${BERKELEY_DB_INCLUDE_DIR} CACHE STRING "BDB include path" )
set ( BDB_LIBRARY ${BERKELEY_DB_LIBRARIES} CACHE STRING "BDB library name" )
set ( BDB_LIBRARY_DIRS "" CACHE STRING "BDB Library dirs" )
set ( BERKELEY_DB 1 )
else ( )
die ( "Found BerkeleyDB includes, but could not find BerkeleyDB library. Please make sure you have installed libdb and libdb-dev / libdb++-dev or the equivalent." )
endif ( )
endif ( )
2015-10-04 14:01:33 -04:00
2016-03-12 14:20:00 -05:00
message ( STATUS "Using Berkeley DB as default DB type" )
add_definitions ( "-DDEFAULT_DB_TYPE=\" berkeley\ "" )
2015-01-26 00:36:09 -05:00
else ( )
die ( "Invalid database type: ${DATABASE}" )
endif ( )
2015-07-10 16:09:32 -04:00
if ( BERKELEY_DB )
add_definitions ( "-DBERKELEY_DB" )
endif ( )
2015-12-13 23:54:39 -05:00
2015-01-26 00:36:09 -05:00
add_definitions ( "-DBLOCKCHAIN_DB=${BLOCKCHAIN_DB}" )
2016-07-27 01:02:55 -04:00
# Can't install hook in static build on OSX, because OSX linker does not support --wrap
2016-08-28 07:12:36 -04:00
# On ARM, having libunwind package (with .so's only) installed breaks static link.
2017-10-07 12:13:31 -04:00
# When possible, avoid stack tracing using libunwind in favor of using easylogging++.
2017-02-04 07:33:03 -05:00
if ( APPLE )
set ( DEFAULT_STACK_TRACE OFF )
set ( LIBUNWIND_LIBRARIES "" )
2018-03-19 19:35:47 -04:00
elseif ( DEPENDS AND NOT LINUX )
set ( DEFAULT_STACK_TRACE OFF )
set ( LIBUNWIND_LIBRARIES "" )
2017-02-04 07:33:03 -05:00
elseif ( CMAKE_C_COMPILER_ID STREQUAL "GNU" AND NOT MINGW )
set ( DEFAULT_STACK_TRACE ON )
2017-10-07 12:13:31 -04:00
set ( STACK_TRACE_LIB "easylogging++" ) # for diag output only
2017-02-04 07:33:03 -05:00
set ( LIBUNWIND_LIBRARIES "" )
elseif ( ARM AND STATIC )
2016-07-27 01:02:55 -04:00
set ( DEFAULT_STACK_TRACE OFF )
2016-06-20 14:20:14 -04:00
set ( LIBUNWIND_LIBRARIES "" )
2017-01-09 08:38:54 -05:00
else ( )
find_package ( Libunwind )
2017-02-04 07:33:03 -05:00
if ( LIBUNWIND_FOUND )
set ( DEFAULT_STACK_TRACE ON )
2017-10-07 12:13:31 -04:00
set ( STACK_TRACE_LIB "libunwind" ) # for diag output only
2017-02-04 07:33:03 -05:00
else ( )
set ( DEFAULT_STACK_TRACE OFF )
set ( LIBUNWIND_LIBRARIES "" )
endif ( )
2016-03-28 14:00:18 -04:00
endif ( )
2016-07-27 01:02:55 -04:00
option ( STACK_TRACE "Install a hook that dumps stack on exception" ${ DEFAULT_STACK_TRACE } )
if ( STACK_TRACE )
2017-10-07 12:13:31 -04:00
message ( STATUS "Stack trace on exception enabled (using ${STACK_TRACE_LIB})" )
2016-07-27 01:02:55 -04:00
else ( )
message ( STATUS "Stack trace on exception disabled" )
endif ( )
2017-09-19 20:38:43 -04:00
if ( UNIX AND NOT APPLE )
# Note that at the time of this writing the -Wstrict-prototypes flag added below will make this fail
set ( THREADS_PREFER_PTHREAD_FLAG ON )
find_package ( Threads )
2018-10-13 14:11:09 -04:00
add_c_flag_if_supported ( -pthread CMAKE_C_FLAGS )
add_cxx_flag_if_supported ( -pthread CMAKE_CXX_FLAGS )
2017-09-19 20:38:43 -04:00
endif ( )
2017-06-20 11:22:24 -04:00
# Handle OpenSSL, used for sha256sum on binary updates and light wallet ssl http
2017-10-10 09:05:28 -04:00
if ( CMAKE_SYSTEM_NAME MATCHES "(SunOS|Solaris)" )
set ( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pthreads" )
endif ( )
2017-04-02 06:19:25 -04:00
if ( APPLE AND NOT IOS )
2018-03-19 19:33:49 -04:00
set ( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=x86-64 -fvisibility=default" )
2017-02-21 10:11:12 -05:00
if ( NOT OpenSSL_DIR )
EXECUTE_PROCESS ( COMMAND brew --prefix openssl
OUTPUT_VARIABLE OPENSSL_ROOT_DIR
OUTPUT_STRIP_TRAILING_WHITESPACE )
message ( STATUS "Using OpenSSL found at ${OPENSSL_ROOT_DIR}" )
endif ( )
endif ( )
find_package ( OpenSSL REQUIRED )
2017-08-05 08:28:50 -04:00
message ( STATUS "Using OpenSSL include dir at ${OPENSSL_INCLUDE_DIR}" )
include_directories ( ${ OPENSSL_INCLUDE_DIR } )
2017-04-02 06:19:25 -04:00
if ( STATIC AND NOT IOS )
2017-02-21 10:11:12 -05:00
if ( UNIX )
2017-09-19 20:38:43 -04:00
set ( OPENSSL_LIBRARIES "${OPENSSL_LIBRARIES};${CMAKE_DL_LIBS};${CMAKE_THREAD_LIBS_INIT}" )
2017-02-21 10:11:12 -05:00
endif ( )
endif ( )
2018-08-01 03:24:53 -04:00
find_package ( HIDAPI )
2018-02-20 11:01:27 -05:00
2018-01-28 03:57:51 -05:00
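# Detect optional libc helpers: memset_s/explicit_bzero (secure memory wiping) and strptime
# (time parsing). The add_definition_if_* helpers (assumed to be defined earlier in this file)
# add the corresponding HAVE_* definition when the symbol is found.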
add_definition_if_library_exists ( c memset_s "string.h" HAVE_MEMSET_S )
add_definition_if_library_exists ( c explicit_bzero "strings.h" HAVE_EXPLICIT_BZERO )
add_definition_if_function_found ( strptime HAVE_STRPTIME )
2017-05-16 21:17:08 -04:00
add_definitions ( -DAUTO_INITIALIZE_EASYLOGGINGPP )
2017-12-15 12:44:01 -05:00
2018-09-10 16:18:18 -04:00
# Generate header for embedded translations, use target toolchain if depends, otherwise use the
# lrelease and lupdate binaries from the host
include ( ExternalProject )
ExternalProject_Add ( generate_translations_header
SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/translations"
BINARY_DIR "${CMAKE_CURRENT_BINARY_DIR}/translations"
STAMP_DIR ${LRELEASE_PATH}
CMAKE_ARGS -DLRELEASE_PATH=${LRELEASE_PATH}
INSTALL_COMMAND cmake -E echo "" )
2017-12-15 12:44:01 -05:00
include_directories ( "${CMAKE_CURRENT_BINARY_DIR}/translations" )
2014-10-06 09:00:06 -04:00
add_subdirectory ( external )
# Final setup for libunbound
2014-09-24 14:38:24 -04:00
include_directories ( ${ UNBOUND_INCLUDE } )
2014-10-21 18:39:15 -04:00
link_directories ( ${ UNBOUND_LIBRARY_DIRS } )
2014-09-17 17:44:35 -04:00
2017-03-25 15:06:14 -04:00
# Final setup for easylogging++
include_directories ( ${ EASYLOGGING_INCLUDE } )
link_directories ( ${ EASYLOGGING_LIBRARY_DIRS } )
2014-12-01 14:15:50 -05:00
2015-01-18 18:30:31 -05:00
# Final setup for liblmdb
2014-12-06 10:47:49 -05:00
include_directories ( ${ LMDB_INCLUDE } )
2014-10-23 15:35:49 -04:00
2015-03-16 03:12:54 -04:00
# Final setup for Berkeley DB
2015-07-10 16:09:32 -04:00
if ( BERKELEY_DB )
2015-04-07 14:27:37 -04:00
include_directories ( ${ BDB_INCLUDE } )
endif ( )
2015-03-16 03:12:54 -04:00
2016-03-28 14:00:18 -04:00
# Final setup for libunwind
include_directories ( ${ LIBUNWIND_INCLUDE } )
link_directories ( ${ LIBUNWIND_LIBRARY_DIRS } )
2018-08-01 03:24:53 -04:00
# Final setup for hid
if ( HIDAPI_FOUND )
message ( STATUS "Using HIDAPI include dir at ${HIDAPI_INCLUDE_DIR}" )
add_definitions ( -DHAVE_HIDAPI )
include_directories ( ${ HIDAPI_INCLUDE_DIR } )
link_directories ( ${ LIBHIDAPI_LIBRARY_DIRS } )
else ( HIDAPI_FOUND )
message ( STATUS "Could not find HIDAPI" )
2018-02-20 11:01:27 -05:00
endif ( )
2018-11-04 18:38:58 -05:00
# Trezor support check
include ( CheckTrezor )
2018-08-23 17:50:53 -04:00
2014-03-03 17:07:58 -05:00
if ( MSVC )
add_definitions ( "/bigobj /MP /W3 /GS- /D_CRT_SECURE_NO_WARNINGS /wd4996 /wd4345 /D_WIN32_WINNT=0x0600 /DWIN32_LEAN_AND_MEAN /DGTEST_HAS_TR1_TUPLE=0 /FIinline_c.h /D__SSE4_1__" )
2016-07-28 21:19:01 -04:00
# set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /Dinline=__inline")
set ( CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /STACK:10485760" )
2014-03-03 17:07:58 -05:00
if ( STATIC )
foreach ( VAR CMAKE_C_FLAGS_DEBUG CMAKE_CXX_FLAGS_DEBUG CMAKE_C_FLAGS_RELEASE CMAKE_CXX_FLAGS_RELEASE )
string ( REPLACE "/MD" "/MT" ${ VAR } "${${VAR}}" )
endforeach ( )
endif ( )
include_directories ( SYSTEM src/platform/msc )
else ( )
2016-09-17 22:23:15 -04:00
include ( TestCXXAcceptsFlag )
2018-10-13 05:46:19 -04:00
if ( NOT ARCH )
set ( ARCH native CACHE STRING "CPU to build for: -march value or 'default' to not pass -march at all" )
2018-03-19 19:33:49 -04:00
endif ( )
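# Example invocations (illustrative only): cmake -DARCH=default .. for a portable build that
# passes no -march at all, or cmake -DARCH=armv8-a .. to target a specific CPU family.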
2016-08-28 01:37:34 -04:00
message ( STATUS "Building on ${CMAKE_SYSTEM_PROCESSOR} for ${ARCH}" )
if ( ARCH STREQUAL "default" )
2014-03-03 17:07:58 -05:00
set ( ARCH_FLAG "" )
2017-06-27 13:06:01 -04:00
elseif ( PPC64LE )
2018-05-19 18:03:59 -04:00
set ( ARCH_FLAG "-mcpu=power8" )
elseif ( PPC64 )
set ( ARCH_FLAG "-mcpu=970" )
elseif ( PPC )
set ( ARCH_FLAG "-mcpu=7400" )
2017-06-23 04:02:07 -04:00
elseif ( IOS AND ARCH STREQUAL "arm64" )
message ( STATUS "IOS: Changing arch from arm64 to armv8" )
set ( ARCH_FLAG "-march=armv8" )
2014-03-03 17:07:58 -05:00
else ( )
2016-08-28 01:42:59 -04:00
set ( ARCH_FLAG "-march=${ARCH}" )
2018-09-02 10:51:23 -04:00
if ( ARCH STREQUAL "native" )
check_c_compiler_flag ( -march=native CC_SUPPORTS_MARCH_NATIVE )
if ( NOT CC_SUPPORTS_MARCH_NATIVE )
check_c_compiler_flag ( -mtune=native CC_SUPPORTS_MTUNE_NATIVE )
if ( CC_SUPPORTS_MTUNE_NATIVE )
set ( ARCH_FLAG "-mtune=${ARCH}" )
else ( )
set ( ARCH_FLAG "" )
endif ( )
endif ( )
endif ( )
2014-03-03 17:07:58 -05:00
endif ( )
2018-09-29 16:16:19 -04:00
option ( NO_AES "Explicitly disable AES support" ${ NO_AES } )
if ( NO_AES )
message ( STATUS "AES support explicitly disabled" )
set ( CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DNO_AES" )
set ( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DNO_AES" )
elseif ( NOT ARM AND NOT PPC64LE AND NOT PPC64 AND NOT PPC AND NOT S390X )
message ( STATUS "AES support enabled" )
set ( CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -maes" )
set ( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -maes" )
elseif ( PPC64LE OR PPC64 OR PPC )
message ( STATUS "AES support not available on POWER" )
elseif ( S390X )
message ( STATUS "AES support not available on s390x" )
elseif ( ARM6 )
message ( STATUS "AES support not available on ARMv6" )
elseif ( ARM7 )
message ( STATUS "AES support not available on ARMv7" )
elseif ( ARM8 )
CHECK_CXX_ACCEPTS_FLAG ( "-march=${ARCH}+crypto" ARCH_PLUS_CRYPTO )
if ( ARCH_PLUS_CRYPTO )
message ( STATUS "Crypto extensions enabled for ARMv8" )
set ( ARCH_FLAG "-march=${ARCH}+crypto" )
else ( )
message ( STATUS "Crypto extensions unavailable on your ARMv8 device" )
endif ( )
else ( )
message ( STATUS "AES support disabled" )
endif ( )
set ( CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${ARCH_FLAG}" )
set ( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${ARCH_FLAG}" )
2016-07-28 23:34:05 -04:00
set ( WARNINGS "-Wall -Wextra -Wpointer-arith -Wundef -Wvla -Wwrite-strings -Wno-error=extra -Wno-error=deprecated-declarations -Wno-unused-parameter -Wno-unused-variable -Wno-error=unused-variable -Wno-error=undef -Wno-error=uninitialized" )
2014-03-03 17:07:58 -05:00
if ( CMAKE_C_COMPILER_ID STREQUAL "Clang" )
2016-08-28 07:28:05 -04:00
if ( ARM )
2016-07-10 21:11:18 -04:00
set ( WARNINGS "${WARNINGS} -Wno-error=inline-asm" )
endif ( )
2014-03-03 17:07:58 -05:00
else ( )
# cmake: make warning about headers not fatal
# On older boost and/or OS versions, boost::asio ends up including <sys/poll.h>, which emits
# "#warning redirecting incorrect #include <sys/poll.h> to <poll.h>" (-Wcpp); the
# -Wno-error=cpp added below keeps that warning from being treated as an error.
2017-09-08 22:20:44 -04:00
set ( WARNINGS "${WARNINGS} -Wlogical-op -Wno-error=maybe-uninitialized -Wno-error=cpp" )
2014-03-03 17:07:58 -05:00
endif ( )
if ( MINGW )
2014-08-06 12:32:59 -04:00
set ( WARNINGS "${WARNINGS} -Wno-error=unused-value -Wno-error=unused-but-set-variable" )
2016-06-21 12:16:25 -04:00
set ( MINGW_FLAG "${MINGW_FLAG} -DWIN32_LEAN_AND_MEAN" )
2014-08-06 12:43:01 -04:00
set ( Boost_THREADAPI win32 )
2014-03-03 17:07:58 -05:00
include_directories ( SYSTEM src/platform/mingw )
2014-11-17 18:10:52 -05:00
# mingw doesn't support LTO (multiple definition errors at link time)
set ( USE_LTO_DEFAULT false )
2016-07-28 21:19:01 -04:00
set ( CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--stack,10485760" )
2016-01-02 14:39:03 -05:00
if ( NOT BUILD_64 )
add_definitions ( -DWINVER=0x0501 -D_WIN32_WINNT=0x0501 )
endif ( )
2014-03-03 17:07:58 -05:00
endif ( )
set ( C_WARNINGS "-Waggregate-return -Wnested-externs -Wold-style-definition -Wstrict-prototypes" )
set ( CXX_WARNINGS "-Wno-reorder -Wno-missing-field-initializers" )
2015-05-31 09:39:56 -04:00
try_compile ( STATIC_ASSERT_RES "${CMAKE_CURRENT_BINARY_DIR}/static-assert" "${CMAKE_CURRENT_SOURCE_DIR}/cmake/test-static-assert.c" COMPILE_DEFINITIONS "-std=c11" )
2014-03-03 17:07:58 -05:00
if ( STATIC_ASSERT_RES )
set ( STATIC_ASSERT_FLAG "" )
else ( )
set ( STATIC_ASSERT_FLAG "-Dstatic_assert=_Static_assert" )
endif ( )
2015-01-26 16:19:53 -05:00
2017-01-04 20:11:05 -05:00
try_compile ( STATIC_ASSERT_CPP_RES "${CMAKE_CURRENT_BINARY_DIR}/static-assert" "${CMAKE_CURRENT_SOURCE_DIR}/cmake/test-static-assert.cpp" COMPILE_DEFINITIONS "-std=c++11" )
if ( STATIC_ASSERT_CPP_RES )
set ( STATIC_ASSERT_CPP_FLAG "" )
else ( )
set ( STATIC_ASSERT_CPP_FLAG "-Dstatic_assert=_Static_assert" )
endif ( )
2016-08-29 14:23:57 -04:00
option ( COVERAGE "Enable profiling for test coverage report" 0 )
if ( COVERAGE )
message ( STATUS "Building with profiling for test coverage report" )
set ( COVERAGE_FLAGS "-fprofile-arcs -ftest-coverage --coverage" )
endif ( )
2016-07-04 21:36:55 -04:00
# With GCC 6.1.1 the compiled binary malfunctions due to aliasing. Until that
# is fixed in the code (Issue #847), force compiler to be conservative.
2016-07-28 21:19:01 -04:00
set ( CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fno-strict-aliasing" )
set ( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-strict-aliasing" )
2016-07-04 21:36:55 -04:00
2017-12-22 16:48:31 -05:00
# if those don't work for your compiler, single it out where appropriate
if ( CMAKE_BUILD_TYPE STREQUAL "Release" )
set ( C_SECURITY_FLAGS "${C_SECURITY_FLAGS} -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=1" )
set ( CXX_SECURITY_FLAGS "${CXX_SECURITY_FLAGS} -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=1" )
endif ( )
# warnings
add_c_flag_if_supported ( -Wformat C_SECURITY_FLAGS )
add_cxx_flag_if_supported ( -Wformat CXX_SECURITY_FLAGS )
add_c_flag_if_supported ( -Wformat-security C_SECURITY_FLAGS )
add_cxx_flag_if_supported ( -Wformat-security CXX_SECURITY_FLAGS )
# -fstack-protector
2018-08-21 18:40:03 -04:00
if ( NOT WIN32 )
add_c_flag_if_supported ( -fstack-protector C_SECURITY_FLAGS )
add_cxx_flag_if_supported ( -fstack-protector CXX_SECURITY_FLAGS )
add_c_flag_if_supported ( -fstack-protector-strong C_SECURITY_FLAGS )
add_cxx_flag_if_supported ( -fstack-protector-strong CXX_SECURITY_FLAGS )
endif ( )
2017-12-22 16:48:31 -05:00
2018-07-26 09:07:04 -04:00
# New in GCC 8.2
if ( NOT WIN32 )
add_c_flag_if_supported ( -fcf-protection=full C_SECURITY_FLAGS )
add_cxx_flag_if_supported ( -fcf-protection=full CXX_SECURITY_FLAGS )
add_c_flag_if_supported ( -fstack-clash-protection C_SECURITY_FLAGS )
add_cxx_flag_if_supported ( -fstack-clash-protection CXX_SECURITY_FLAGS )
endif ( )
2018-08-07 09:05:52 -04:00
add_c_flag_if_supported ( -mmitigate-rop C_SECURITY_FLAGS )
add_cxx_flag_if_supported ( -mmitigate-rop CXX_SECURITY_FLAGS )
2017-12-22 16:48:31 -05:00
# linker
if ( NOT WIN32 )
# Windows binaries die on startup with PIE
add_linker_flag_if_supported ( -pie LD_SECURITY_FLAGS )
endif ( )
add_linker_flag_if_supported ( -Wl,-z,relro LD_SECURITY_FLAGS )
add_linker_flag_if_supported ( -Wl,-z,now LD_SECURITY_FLAGS )
add_linker_flag_if_supported ( -Wl,-z,noexecstack noexecstack_SUPPORTED )
if ( noexecstack_SUPPORTED )
set ( LD_SECURITY_FLAGS "${LD_SECURITY_FLAGS} -Wl,-z,noexecstack" )
endif ( )
add_linker_flag_if_supported ( -Wl,-z,noexecheap noexecheap_SUPPORTED )
if ( noexecheap_SUPPORTED )
set ( LD_SECURITY_FLAGS "${LD_SECURITY_FLAGS} -Wl,-z,noexecheap" )
endif ( )
2018-10-13 11:07:56 -04:00
if ( BACKCOMPAT )
add_linker_flag_if_supported ( -Wl,--wrap=__divmoddi4 LD_BACKCOMPAT_FLAGS )
add_linker_flag_if_supported ( -Wl,--wrap=glob LD_BACKCOMPAT_FLAGS )
message ( STATUS "Using Lib C back compat flags: ${LD_BACKCOMPAT_FLAGS}" )
endif ( )
2017-12-22 16:48:31 -05:00
# some windows linker bits
if ( WIN32 )
add_linker_flag_if_supported ( -Wl,--dynamicbase LD_SECURITY_FLAGS )
add_linker_flag_if_supported ( -Wl,--nxcompat LD_SECURITY_FLAGS )
endif ( )
message ( STATUS "Using C security hardening flags: ${C_SECURITY_FLAGS}" )
message ( STATUS "Using C++ security hardening flags: ${CXX_SECURITY_FLAGS}" )
message ( STATUS "Using linker security hardening flags: ${LD_SECURITY_FLAGS}" )
2018-09-29 16:16:19 -04:00
set ( CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=c11 -D_GNU_SOURCE ${MINGW_FLAG} ${STATIC_ASSERT_FLAG} ${WARNINGS} ${C_WARNINGS} ${COVERAGE_FLAGS} ${PIC_FLAG} ${C_SECURITY_FLAGS}" )
set ( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -D_GNU_SOURCE ${MINGW_FLAG} ${STATIC_ASSERT_CPP_FLAG} ${WARNINGS} ${CXX_WARNINGS} ${COVERAGE_FLAGS} ${PIC_FLAG} ${CXX_SECURITY_FLAGS}" )
2018-10-13 11:07:56 -04:00
set ( CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${LD_SECURITY_FLAGS} ${LD_BACKCOMPAT_FLAGS}" )
2016-09-17 22:23:15 -04:00
# With GCC 6.1.1 the compiled binary malfunctions due to aliasing. Until that
# is fixed in the code (Issue #847), force compiler to be conservative.
set ( CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fno-strict-aliasing" )
set ( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-strict-aliasing" )
2016-08-29 06:13:55 -04:00
if ( ARM )
message ( STATUS "Setting FPU Flags for ARM Processors" )
#NB NEON hardware does not fully implement the IEEE 754 standard for floating-point arithmetic
#Need custom assembly code to take full advantage of NEON SIMD
#Cortex-A5/9 -mfpu=neon-fp16
#Cortex-A7/15 -mfpu=neon-vfpv4
#Cortex-A8 -mfpu=neon
2016-09-17 22:23:15 -04:00
#ARMv8 - FP and SIMD on by default for all ARMv8-A series, NO -mfpu setting needed
2016-08-29 06:13:55 -04:00
#For custom -mtune, processor IDs for ARMv8-A series:
#0xd04 - Cortex-A35
#0xd07 - Cortex-A57
#0xd08 - Cortex-A72
#0xd03 - Cortex-A53
if ( NOT ARM8 )
CHECK_CXX_ACCEPTS_FLAG ( -mfpu=vfp3-d16 CXX_ACCEPTS_VFP3_D16 )
CHECK_CXX_ACCEPTS_FLAG ( -mfpu=vfp4 CXX_ACCEPTS_VFP4 )
CHECK_CXX_ACCEPTS_FLAG ( -mfloat-abi=hard CXX_ACCEPTS_MFLOAT_HARD )
CHECK_CXX_ACCEPTS_FLAG ( -mfloat-abi=softfp CXX_ACCEPTS_MFLOAT_SOFTFP )
endif ( )
2015-04-06 08:00:09 -04:00
2016-08-29 06:13:55 -04:00
if ( ARM8 )
CHECK_CXX_ACCEPTS_FLAG ( -mfix-cortex-a53-835769 CXX_ACCEPTS_MFIX_CORTEX_A53_835769 )
CHECK_CXX_ACCEPTS_FLAG ( -mfix-cortex-a53-843419 CXX_ACCEPTS_MFIX_CORTEX_A53_843419 )
endif ( )
if ( ARM6 )
message ( STATUS "Selecting VFP for ARMv6" )
set ( CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfpu=vfp" )
set ( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mfpu=vfp" )
2018-03-19 19:33:49 -04:00
if ( DEPENDS )
set ( CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -marm" )
set ( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -marm" )
endif ( )
2016-08-29 06:13:55 -04:00
endif ( ARM6 )
if ( ARM7 )
if ( CXX_ACCEPTS_VFP3_D16 AND NOT CXX_ACCEPTS_VFP4 )
message ( STATUS "Selecting VFP3 for ARMv7" )
set ( CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfpu=vfp3-d16" )
set ( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mfpu=vfp3-d16" )
endif ( )
if ( CXX_ACCEPTS_VFP4 )
message ( STATUS "Selecting VFP4 for ARMv7" )
set ( CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfpu=vfp4" )
set ( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mfpu=vfp4" )
endif ( )
if ( CXX_ACCEPTS_MFLOAT_HARD )
message ( STATUS "Setting Hardware ABI for Floating Point" )
set ( CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfloat-abi=hard" )
set ( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mfloat-abi=hard" )
endif ( )
if ( CXX_ACCEPTS_MFLOAT_SOFTFP AND NOT CXX_ACCEPTS_MFLOAT_HARD )
message ( STATUS "Setting Software ABI for Floating Point" )
set ( CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfloat-abi=softfp" )
set ( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mfloat-abi=softfp" )
endif ( )
endif ( ARM7 )
if ( ARM8 )
if ( CXX_ACCEPTS_MFIX_CORTEX_A53_835769 )
message ( STATUS "Enabling Cortex-A53 workaround 835769" )
set ( CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfix-cortex-a53-835769" )
set ( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mfix-cortex-a53-835769" )
endif ( )
if ( CXX_ACCEPTS_MFIX_CORTEX_A53_843419 )
message ( STATUS "Enabling Cortex-A53 workaround 843419" )
set ( CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfix-cortex-a53-843419" )
set ( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mfix-cortex-a53-843419" )
endif ( )
endif ( ARM8 )
endif ( ARM )
2015-07-10 16:09:32 -04:00
2017-04-02 06:19:25 -04:00
if ( ANDROID AND NOT BUILD_GUI_DEPS STREQUAL "ON" OR IOS )
2017-01-04 20:11:05 -05:00
#From Android 5: "only position independent executables (PIE) are supported"
2017-04-02 06:19:25 -04:00
message ( STATUS "Enabling PIE executable" )
2017-12-22 16:48:31 -05:00
set ( PIC_FLAG "" )
2017-01-04 20:11:05 -05:00
set ( CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fPIE" )
set ( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIE" )
set ( CMAKE_EXE_LINKER_FLAGS "${CMAKE_CXX_FLAGS} -fPIE -pie" )
endif ( )
2014-04-30 16:50:06 -04:00
if ( APPLE )
2018-09-29 16:14:28 -04:00
set ( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility=default -DGTEST_HAS_TR1_TUPLE=0" )
2014-04-30 16:50:06 -04:00
endif ( )
2016-08-29 14:23:57 -04:00
set ( DEBUG_FLAGS "-g3" )
2014-03-03 17:07:58 -05:00
if ( CMAKE_C_COMPILER_ID STREQUAL "GNU" AND NOT ( CMAKE_C_COMPILER_VERSION VERSION_LESS 4.8 ) )
2016-08-29 14:23:57 -04:00
set ( DEBUG_FLAGS "${DEBUG_FLAGS} -Og " )
2014-03-03 17:07:58 -05:00
else ( )
2016-08-29 14:23:57 -04:00
set ( DEBUG_FLAGS "${DEBUG_FLAGS} -O0 " )
2014-03-03 17:07:58 -05:00
endif ( )
2014-08-06 12:52:25 -04:00
2018-11-12 17:35:21 -05:00
# At least some CLANG versions default to a template depth that is not enough for monero
set ( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -ftemplate-depth=900" )
2014-08-07 19:34:10 -04:00
if ( NOT DEFINED USE_LTO_DEFAULT )
2017-03-20 08:24:30 -04:00
set ( USE_LTO_DEFAULT false )
2014-08-07 19:34:10 -04:00
endif ( )
set ( USE_LTO ${ USE_LTO_DEFAULT } CACHE BOOL "Use Link-Time Optimization (Release mode only)" )
2014-09-15 16:47:26 -04:00
if ( CMAKE_CXX_COMPILER_ID STREQUAL "Clang" )
2015-05-26 05:07:58 -04:00
# There is a clang bug that does not allow to compile code that uses AES-NI intrinsics if -flto is enabled, so explicitly disable
2014-10-21 14:24:49 -04:00
set ( USE_LTO false )
2014-09-15 16:47:26 -04:00
endif ( )
2015-05-26 05:07:58 -04:00
2014-08-06 12:52:25 -04:00
if ( USE_LTO )
2014-05-22 07:00:48 -04:00
set ( RELEASE_FLAGS "${RELEASE_FLAGS} -flto" )
2014-08-06 12:52:25 -04:00
if ( STATIC )
set ( RELEASE_FLAGS "${RELEASE_FLAGS} -ffat-lto-objects" )
endif ( )
# Since gcc 4.9 the LTO format is non-standard (slim), so we need the gcc-specific ar and ranlib binaries
2016-12-15 05:27:53 -05:00
if ( CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND NOT ( CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.9.0 ) AND NOT OPENBSD AND NOT DRAGONFLY )
2016-09-16 23:59:46 -04:00
# When invoking cmake on distributions on which gcc's binaries are prefixed
# with an arch-specific triplet, the user must specify -DCHOST=<prefix>
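# (illustrative example: with a hypothetical arm-linux-gnueabihf cross toolchain, pass
# -DCHOST=arm-linux-gnueabihf so that arm-linux-gnueabihf-gcc-ar and -gcc-ranlib are used)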
2016-08-31 22:21:22 -04:00
if ( DEFINED CHOST )
2016-09-16 23:59:46 -04:00
set ( CMAKE_AR "${CHOST}-gcc-ar" )
set ( CMAKE_RANLIB "${CHOST}-gcc-ranlib" )
2016-09-10 16:56:40 -04:00
else ( )
set ( CMAKE_AR "gcc-ar" )
set ( CMAKE_RANLIB "gcc-ranlib" )
2016-08-31 22:21:22 -04:00
endif ( )
2014-08-06 12:52:25 -04:00
endif ( )
2014-05-22 07:00:48 -04:00
endif ( )
2014-08-06 12:52:25 -04:00
2014-03-03 17:07:58 -05:00
set ( CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} ${DEBUG_FLAGS}" )
set ( CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} ${DEBUG_FLAGS}" )
set ( CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} ${RELEASE_FLAGS}" )
set ( CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${RELEASE_FLAGS}" )
2016-07-25 21:06:12 -04:00
2016-08-27 18:47:07 -04:00
if ( STATIC )
# STATIC already configures most deps to be linked in statically,
# here we make more deps static if the platform permits it
if ( MINGW )
# On Windows, this is as close to fully-static as we get:
# this leaves only deps on /c/Windows/system32/*.dll
set ( STATIC_FLAGS "-static" )
2016-12-15 05:27:53 -05:00
elseif ( NOT ( APPLE OR FREEBSD OR OPENBSD OR DRAGONFLY ) )
2016-08-27 18:47:07 -04:00
# On Linux, we don't support fully static build, but these can be static
set ( STATIC_FLAGS "-static-libgcc -static-libstdc++" )
endif ( )
set ( CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${STATIC_FLAGS} " )
2016-07-25 22:30:59 -04:00
endif ( )
2014-03-03 17:07:58 -05:00
endif ( )
2015-01-05 14:30:17 -05:00
if ( ${ BOOST_IGNORE_SYSTEM_PATHS } STREQUAL "ON" )
2014-10-21 14:24:49 -04:00
set ( Boost_NO_SYSTEM_PATHS TRUE )
2014-09-22 06:30:53 -04:00
endif ( )
2015-03-22 05:26:30 -04:00
set ( OLD_LIB_SUFFIXES ${ CMAKE_FIND_LIBRARY_SUFFIXES } )
2014-03-03 17:07:58 -05:00
if ( STATIC )
2015-03-22 05:26:30 -04:00
if ( MINGW )
set ( CMAKE_FIND_LIBRARY_SUFFIXES .a )
endif ( )
2014-03-03 17:07:58 -05:00
set ( Boost_USE_STATIC_LIBS ON )
set ( Boost_USE_STATIC_RUNTIME ON )
endif ( )
2018-02-22 13:52:55 -05:00
find_package ( Boost 1.58 QUIET REQUIRED COMPONENTS system filesystem thread date_time chrono regex serialization program_options locale )
2014-09-29 14:13:15 -04:00
2015-03-22 05:26:30 -04:00
set ( CMAKE_FIND_LIBRARY_SUFFIXES ${ OLD_LIB_SUFFIXES } )
2014-09-29 14:13:15 -04:00
if ( NOT Boost_FOUND )
2016-08-16 08:59:15 -04:00
die ( "Could not find Boost libraries, please make sure you have installed Boost or libboost-all-dev (1.58) or the equivalent" )
2016-10-27 16:38:29 -04:00
elseif ( Boost_FOUND )
message ( STATUS "Found Boost Version: ${Boost_VERSION}" )
2017-12-26 07:09:54 -05:00
if ( Boost_VERSION VERSION_LESS 106200 AND NOT ( OPENSSL_VERSION VERSION_LESS 1.1 ) )
2017-10-15 22:41:32 -04:00
message ( FATAL_ERROR "Boost older than 1.62 is too old to link with OpenSSL 1.1 or newer. "
" U p d a t e B o o s t o r i n s t a l l O p e n S S L 1 . 0 a n d s e t p a t h t o i t w h e n r u n n i n g c m a k e : "
" c m a k e - D O P E N S S L _ R O O T _ D I R = ' / u s r / i n c l u d e / o p e n s s l - 1 . 0 ; / u s r / l i b / o p e n s s l - 1 . 0 ' " )
endif ( )
2014-03-03 17:07:58 -05:00
endif ( )
2014-09-29 14:13:15 -04:00
2014-03-03 17:07:58 -05:00
include_directories ( SYSTEM ${ Boost_INCLUDE_DIRS } )
if ( MINGW )
2018-05-31 05:40:00 -04:00
set ( CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -Wa,-mbig-obj" )
2018-10-12 11:20:42 -04:00
set ( EXTRA_LIBRARIES mswsock;ws2_32;iphlpapi;crypt32;bcrypt )
2018-03-19 19:33:49 -04:00
if ( DEPENDS )
2019-01-29 10:18:47 -05:00
set ( ICU_LIBRARIES icuio icui18n icuuc icudata icutu iconv )
2018-03-19 19:33:49 -04:00
else ( )
2018-11-14 10:18:31 -05:00
set ( ICU_LIBRARIES icuio icuin icuuc icudt icutu iconv )
2018-03-19 19:33:49 -04:00
endif ( )
2017-02-08 17:50:14 -05:00
elseif ( APPLE OR OPENBSD OR ANDROID )
2014-10-06 16:29:07 -04:00
set ( EXTRA_LIBRARIES "" )
2017-02-08 17:50:14 -05:00
elseif ( FREEBSD )
set ( EXTRA_LIBRARIES execinfo )
2016-12-15 05:27:53 -05:00
elseif ( DRAGONFLY )
find_library ( COMPAT compat )
2017-02-08 17:50:14 -05:00
set ( EXTRA_LIBRARIES execinfo ${ COMPAT } )
2017-10-10 09:05:28 -04:00
elseif ( CMAKE_SYSTEM_NAME MATCHES "(SunOS|Solaris)" )
set ( EXTRA_LIBRARIES socket nsl resolv )
2018-09-18 11:07:54 -04:00
elseif ( NOT MSVC AND NOT DEPENDS )
2014-10-06 16:29:07 -04:00
find_library ( RT rt )
2016-02-12 16:25:39 -05:00
set ( EXTRA_LIBRARIES ${ RT } )
2014-03-03 17:07:58 -05:00
endif ( )
2016-07-10 18:57:53 -04:00
list ( APPEND EXTRA_LIBRARIES ${ CMAKE_DL_LIBS } )
2016-03-28 14:00:18 -04:00
2018-11-04 18:38:58 -05:00
if ( HIDAPI_FOUND OR LibUSB_COMPILE_TEST_PASSED )
2018-08-01 03:24:53 -04:00
if ( APPLE )
2018-09-29 16:14:56 -04:00
if ( DEPENDS )
list ( APPEND EXTRA_LIBRARIES "-framework Foundation -framework IOKit" )
else ( )
find_library ( COREFOUNDATION CoreFoundation )
find_library ( IOKIT IOKit )
list ( APPEND EXTRA_LIBRARIES ${ IOKIT } )
list ( APPEND EXTRA_LIBRARIES ${ COREFOUNDATION } )
endif ( )
2018-08-01 03:24:53 -04:00
endif ( )
if ( WIN32 )
list ( APPEND EXTRA_LIBRARIES setupapi )
endif ( )
endif ( )
2017-05-29 18:39:49 -04:00
option ( USE_READLINE "Build with GNU readline support." ON )
if ( USE_READLINE )
find_package ( Readline )
if ( READLINE_FOUND AND GNU_READLINE_FOUND )
add_definitions ( -DHAVE_READLINE )
include_directories ( ${ Readline_INCLUDE_DIR } )
message ( STATUS "Found readline library at: ${Readline_ROOT_DIR}" )
2017-10-29 10:51:51 -04:00
set ( EPEE_READLINE epee_readline )
2017-05-29 18:39:49 -04:00
else ( )
message ( STATUS "Could not find GNU readline library so building without readline support" )
endif ( )
endif ( )
2017-01-20 23:15:00 -05:00
if ( ANDROID )
set ( ATOMIC libatomic.a )
2018-08-21 19:25:14 -04:00
set ( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=user-defined-warnings" )
2017-01-20 23:15:00 -05:00
endif ( )
2017-11-11 08:29:42 -05:00
if ( CMAKE_C_COMPILER_ID STREQUAL "Clang" AND ARCH_WIDTH EQUAL "32" AND NOT IOS AND NOT FREEBSD )
2016-07-28 23:26:51 -04:00
find_library ( ATOMIC atomic )
2018-10-20 17:36:51 -04:00
if ( ATOMIC_FOUND )
list ( APPEND EXTRA_LIBRARIES ${ ATOMIC } )
endif ( )
2016-03-28 14:00:18 -04:00
endif ( )
2017-09-05 12:20:40 -04:00
find_path ( ZMQ_INCLUDE_PATH zmq.hpp )
2017-09-05 12:20:27 -04:00
find_library ( ZMQ_LIB zmq )
2017-10-20 16:33:07 -04:00
find_library ( PGM_LIBRARY pgm )
find_library ( NORM_LIBRARY norm )
2017-09-19 19:47:24 -04:00
find_library ( SODIUM_LIBRARY sodium )
2017-09-05 12:20:27 -04:00
2017-09-05 12:20:40 -04:00
if ( NOT ZMQ_INCLUDE_PATH )
message ( FATAL_ERROR "Could not find required header zmq.hpp" )
endif ( )
if ( NOT ZMQ_LIB )
2017-09-19 19:47:24 -04:00
message ( FATAL_ERROR "Could not find required libzmq" )
endif ( )
2017-10-20 16:33:07 -04:00
if ( PGM_LIBRARY )
set ( ZMQ_LIB "${ZMQ_LIB};${PGM_LIBRARY}" )
endif ( )
if ( NORM_LIBRARY )
set ( ZMQ_LIB "${ZMQ_LIB};${NORM_LIBRARY}" )
endif ( )
2017-09-19 19:47:24 -04:00
if ( SODIUM_LIBRARY )
set ( ZMQ_LIB "${ZMQ_LIB};${SODIUM_LIBRARY}" )
2017-09-05 12:20:40 -04:00
endif ( )
2017-10-08 20:45:45 -04:00
add_subdirectory ( contrib )
add_subdirectory ( src )
2016-05-27 03:42:31 -04:00
2014-12-01 13:00:22 -05:00
if ( BUILD_TESTS )
add_subdirectory ( tests )
2015-01-02 11:52:46 -05:00
endif ( )
2015-04-01 13:00:45 -04:00
if ( BUILD_DOCUMENTATION )
set ( DOC_GRAPHS "YES" CACHE STRING "Create dependency graphs (needs graphviz)" )
set ( DOC_FULLGRAPHS "NO" CACHE STRING "Create call/callee graphs (large)" )
find_program ( DOT_PATH dot )
if ( DOT_PATH STREQUAL "DOT_PATH-NOTFOUND" )
message ( "Doxygen: graphviz not found - graphs disabled" )
set ( DOC_GRAPHS "NO" )
endif ( )
find_package ( Doxygen )
if ( DOXYGEN_FOUND )
configure_file ( "cmake/Doxyfile.in" "Doxyfile" @ONLY )
configure_file ( "cmake/Doxygen.extra.css.in" "Doxygen.extra.css" @ONLY )
add_custom_target ( doc
${DOXYGEN_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
COMMENT "Generating API documentation with Doxygen.." VERBATIM )
endif ( )
endif ( )
2016-09-03 08:11:26 -04:00
# when ON - will install libwallet_merged into "lib"
2016-09-03 06:32:06 -04:00
option ( BUILD_GUI_DEPS "Build GUI dependencies." OFF )
2016-09-03 08:11:26 -04:00
# This is not nice, distribution packagers should not enable this, but depend
# on libunbound shipped with their distribution instead
option ( INSTALL_VENDORED_LIBUNBOUND "Install libunbound binary built from source vendored with this repo." OFF )
2016-09-03 06:32:06 -04:00
2017-10-26 05:16:48 -04:00
CHECK_C_COMPILER_FLAG ( -std=c11 HAVE_C11 )