Eliminate dependence on boost::interprocess #8223

In this repo, `boost::interprocess` was being used solely to make `uint32_t` operations atomic, so I replaced each call to a
`boost::interprocess::ipcdetail::atomic_*32` function with the equivalent `std::atomic` operation, and updated member declarations
as applicable, for example changing a `volatile uint32_t` into a `std::atomic<uint32_t>`. Where a member was being used as a boolean
flag, I replaced it with `std::atomic<bool>`.
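
To make the pattern concrete, here is a minimal before/after sketch; the struct and method names are illustrative stand-ins rather than code from this repo, though `m_wait_count` matches the member touched in the diff below.

```cpp
#include <atomic>
#include <cstdint>

// Illustrative stand-in for the handler class, not the real async_protocol_handler.
struct call_tracker
{
  // Before: `volatile uint32_t m_wait_count;` touched only through
  // boost::interprocess::ipcdetail::atomic_inc32 / atomic_dec32 / atomic_read32.
  // After: the member itself is atomic, so plain operators and reads suffice.
  std::atomic<std::uint32_t> m_wait_count{0};

  void begin_call() { ++m_wait_count; }            // was atomic_inc32(&m_wait_count)
  void end_call()   { --m_wait_count; }            // was atomic_dec32(&m_wait_count)
  bool idle() const { return m_wait_count == 0; }  // was 0 == atomic_read32(&m_wait_count)
};
```

These operators default to `std::memory_order_seq_cst`, which should be at least as strong as the ordering the boost helpers provided, so the swap is a drop-in replacement from the callers' point of view.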

You may notice that I didn't touch `levin_client_async.h`. That is because this file is entirely unused and will be deleted in PR monero-project#8211.

Additional changes from review:
* Make some local variables const
* Change postfix operators to prefix operators where the returned value was not needed (see the sketch below)
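
A quick sketch of those two review points (my own illustrative code, not lines from the diff):

```cpp
#include <atomic>
#include <cstdint>

std::atomic<std::uint32_t> counter{0};  // hypothetical counter, for illustration only

void bump_twice()
{
  const std::uint32_t steps = 2;  // review point 1: a const local, since the value never changes
  for (std::uint32_t i = 0; i < steps; ++i)
    ++counter;                    // review point 2: prefix ++, because the returned value is unused
  // counter++ would be just as atomic, but postfix must yield the old value,
  // which would be discarded immediately here.
}
```

For `std::atomic` integers the generated code is typically the same either way; prefix is simply the conventional spelling when the result is not needed.
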
Jeffrey 2022-03-30 13:18:32 -05:00
parent 70ceab6c10
commit 17772ef53e
10 changed files with 43 additions and 50 deletions

@@ -28,7 +28,6 @@
#include <boost/asio/deadline_timer.hpp>
#include <boost/uuid/uuid_generators.hpp>
#include <boost/unordered_map.hpp>
-#include <boost/interprocess/detail/atomic.hpp>
#include <boost/smart_ptr/make_shared.hpp>
#include <atomic>
@@ -166,7 +165,7 @@ public:
};
std::atomic<bool> m_protocol_released;
-volatile uint32_t m_invoke_buf_ready;
+std::atomic<bool> m_invoke_buf_ready;
volatile int m_invoke_result_code;
@@ -175,8 +174,8 @@ public:
critical_section m_call_lock;
-volatile uint32_t m_wait_count;
-volatile uint32_t m_close_called;
+std::atomic<uint32_t> m_wait_count;
+std::atomic<uint32_t> m_close_called;
bucket_head2 m_current_head;
net_utils::i_service_endpoint* m_pservice_endpoint;
config_type& m_config;
@@ -319,7 +318,7 @@ public:
m_wait_count = 0;
m_oponent_protocol_ver = 0;
m_connection_initialized = false;
-m_invoke_buf_ready = 0;
+m_invoke_buf_ready = false;
m_invoke_result_code = LEVIN_ERROR_CONNECTION;
}
virtual ~async_protocol_handler()
@@ -332,11 +331,11 @@ public:
m_config.del_connection(this);
}
-for (size_t i = 0; i < 60 * 1000 / 100 && 0 != boost::interprocess::ipcdetail::atomic_read32(&m_wait_count); ++i)
+for (size_t i = 0; i < 60 * 1000 / 100 && 0 != m_wait_count; ++i)
{
misc_utils::sleep_no_w(100);
}
-CHECK_AND_ASSERT_MES_NO_RET(0 == boost::interprocess::ipcdetail::atomic_read32(&m_wait_count), "Failed to wait for operation completion. m_wait_count = " << m_wait_count);
+CHECK_AND_ASSERT_MES_NO_RET(0 == m_wait_count, "Failed to wait for operation completion. m_wait_count = " << m_wait_count.load());
MTRACE(m_connection_context << "~async_protocol_handler()");
@@ -352,13 +351,13 @@ public:
MERROR(m_connection_context << "[levin_protocol] -->> start_outer_call failed");
return false;
}
-boost::interprocess::ipcdetail::atomic_inc32(&m_wait_count);
+++m_wait_count;
return true;
}
bool finish_outer_call()
{
MTRACE(m_connection_context << "[levin_protocol] <<-- finish_outer_call");
-boost::interprocess::ipcdetail::atomic_dec32(&m_wait_count);
+--m_wait_count;
m_pservice_endpoint->release();
return true;
}
@@ -382,7 +381,7 @@ public:
bool close()
{
-boost::interprocess::ipcdetail::atomic_inc32(&m_close_called);
+++m_close_called;
m_pservice_endpoint->close();
return true;
@@ -408,7 +407,7 @@ public:
virtual bool handle_recv(const void* ptr, size_t cb)
{
-if(boost::interprocess::ipcdetail::atomic_read32(&m_close_called))
+if(m_close_called)
return false; //closing connections
if(!m_config.m_pcommands_handler)
@@ -524,7 +523,7 @@ public:
{
invoke_response_handlers_guard.unlock();
//use sync call scenario
-if(!boost::interprocess::ipcdetail::atomic_read32(&m_wait_count) && !boost::interprocess::ipcdetail::atomic_read32(&m_close_called))
+if(!m_wait_count && !m_close_called)
{
MERROR(m_connection_context << "no active invoke when response came, wtf?");
return false;
@@ -535,7 +534,7 @@ public:
buff_to_invoke = epee::span<const uint8_t>((const uint8_t*)NULL, 0);
m_invoke_result_code = m_current_head.m_return_code;
CRITICAL_REGION_END();
-boost::interprocess::ipcdetail::atomic_write32(&m_invoke_buf_ready, 1);
+m_invoke_buf_ready = true;
}
}
}else
@@ -642,7 +641,7 @@ public:
{
CRITICAL_REGION_LOCAL(m_call_lock);
-boost::interprocess::ipcdetail::atomic_write32(&m_invoke_buf_ready, 0);
+m_invoke_buf_ready = false;
CRITICAL_REGION_BEGIN(m_invoke_response_handlers_lock);
if (command == m_connection_context.handshake_command())
@@ -681,7 +680,7 @@ public:
CRITICAL_REGION_LOCAL(m_call_lock);
-boost::interprocess::ipcdetail::atomic_write32(&m_invoke_buf_ready, 0);
+m_invoke_buf_ready = false;
if (command == m_connection_context.handshake_command())
m_max_packet_size = m_config.m_max_packet_size;
@@ -695,7 +694,7 @@ public:
uint64_t ticks_start = misc_utils::get_tick_count();
size_t prev_size = 0;
-while(!boost::interprocess::ipcdetail::atomic_read32(&m_invoke_buf_ready) && !m_protocol_released)
+while(!m_invoke_buf_ready && !m_protocol_released)
{
if(m_cache_in_buffer.size() - prev_size >= MIN_BYTES_WANTED)
{