fixed possible cross deadlocks between RsGxsGenExchange and RsGxsNetService

csoler 2016-06-28 20:59:56 -04:00
parent 00bdc509c5
commit 9f7ef8b46b
2 changed files with 369 additions and 345 deletions
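For context, the deadlock this commit guards against is a classic lock-order inversion: RsGenExchange holds mGenMtx and calls into RsGxsNetService (which takes mNxsMutex), while RsGxsNetService holds mNxsMutex and calls back into RsGenExchange through its observer interface (which takes mGenMtx). The sketch below is illustrative only; the class, mutex and method names mirror those visible in the diff, but the bodies are simplified assumptions, not the actual RetroShare code.

#include <mutex>

// Illustrative sketch of the lock-order inversion (not actual RetroShare code).
struct NetService;

struct GenExchange
{
    std::mutex  mGenMtx;
    NetService *mNetService = nullptr;

    void publishGrps_before_fix();      // holds mGenMtx, then calls into the net service
    void notifyChangedGroupStats();     // observer callback from the net service, takes mGenMtx
};

struct NetService
{
    std::mutex   mNxsMutex;
    GenExchange *mObserver = nullptr;

    // Takes mNxsMutex; called by GenExchange while it still holds mGenMtx (thread A).
    void subscribeStatusChanged()
    {
        std::lock_guard<std::mutex> lock(mNxsMutex);
        // ... update subscription state ...
    }

    // Holds mNxsMutex, then calls back into the observer, which takes mGenMtx (thread B).
    void handleRecvSyncGrpStatistics()
    {
        std::lock_guard<std::mutex> lock(mNxsMutex);
        mObserver->notifyChangedGroupStats();   // blocks if thread A holds mGenMtx
    }
};

// Thread A: lock order is mGenMtx -> mNxsMutex.
void GenExchange::publishGrps_before_fix()
{
    std::lock_guard<std::mutex> lock(mGenMtx);
    mNetService->subscribeStatusChanged();      // blocks if thread B holds mNxsMutex
}

// Thread B's callback: needs mGenMtx while thread B already holds mNxsMutex,
// so the two threads can end up waiting on each other forever.
void GenExchange::notifyChangedGroupStats()
{
    std::lock_guard<std::mutex> lock(mGenMtx);
    // ... refresh cached group statistics ...
}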


@@ -2285,6 +2285,9 @@ bool RsGenExchange::checkKeys(const RsTlvSecurityKeySet& keySet)
 void RsGenExchange::publishGrps()
 {
+    std::list<RsGxsGroupId> groups_to_subscribe ;
+    {
     RS_STACK_MUTEX(mGenMtx) ;
     NxsGrpSignPendVect::iterator vit = mGrpsToPublish.begin();
@@ -2415,8 +2418,8 @@ void RsGenExchange::publishGrps()
         else
             mDataAccess->addGroupData(grp);
 #warning this is bad: addGroupData/updateGroupData actially deletes grp. But it may be used below? grp should be a class object and not deleted manually!
-        if(mNetService!=NULL)
-            mNetService->subscribeStatusChanged(grpId,true) ;
+        groups_to_subscribe.push_back(grpId) ;
     }
     else
     {
@@ -2509,7 +2512,13 @@ void RsGenExchange::publishGrps()
         std::cerr << "    " << *it << std::endl;
 #endif
     }
+    }
+    // This is done off-mutex to avoid possible cross deadlocks with the net service.
+    if(mNetService!=NULL)
+        for(std::list<RsGxsGroupId>::const_iterator it(groups_to_subscribe.begin());it!=groups_to_subscribe.end();++it)
+            mNetService->subscribeStatusChanged((*it),true) ;
 }
@@ -2590,6 +2599,9 @@ void RsGenExchange::computeHash(const RsTlvBinaryData& data, RsFileHash& hash)
 void RsGenExchange::processRecvdMessages()
 {
+    std::list<RsGxsMessageId> messages_to_reject ;
+    {
     RS_STACK_MUTEX(mGenMtx) ;
 #ifdef GEN_EXCH_DEBUG
@@ -2725,7 +2737,7 @@ void RsGenExchange::processRecvdMessages()
 #ifdef GEN_EXCH_DEBUG
             std::cerr << "Notifying the network service to not download this message again." << std::endl;
 #endif
-            mNetService->rejectMessage(msg->msgId) ;
+            messages_to_reject.push_back(msg->msgId) ;
         }
     }
     else
@@ -2802,6 +2814,13 @@ void RsGenExchange::processRecvdMessages()
     }
     mReceivedMsgs.clear();
+    }
+    // Done off-mutex to avoid cross deadlocks in the netservice that might call the RsGenExchange as an observer..
+    if(mNetService != NULL)
+        for(std::list<RsGxsMessageId>::const_iterator it(messages_to_reject.begin());it!=messages_to_reject.end();++it)
+            mNetService->rejectMessage(*it) ;
 }

 void RsGenExchange::processRecvdGroups()
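The changes above all follow the same remedy: while mGenMtx is held, only record which groups or messages need a net-service call; once the mutex scope closes, replay those calls. A condensed, hypothetical sketch of that pattern (the names GroupId, NetService and GenExchange are placeholders standing in for the real RetroShare types shown in the diff):

#include <list>
#include <mutex>

// Hypothetical stand-ins; the real code uses RsGxsGroupId, RS_STACK_MUTEX and the
// RsGxsNetService API shown in the diff above.
struct GroupId {};
struct NetService { void subscribeStatusChanged(const GroupId &, bool) {} };

struct GenExchange
{
    std::mutex  mGenMtx;
    NetService *mNetService = nullptr;

    void publishGrps()
    {
        std::list<GroupId> groups_to_subscribe;         // filled while the lock is held
        {
            std::lock_guard<std::mutex> lock(mGenMtx);  // plays the role of RS_STACK_MUTEX(mGenMtx)

            // ... validate, sign and store each pending group, then remember its id ...
            groups_to_subscribe.push_back(GroupId());
        }                                               // mGenMtx released here

        // The net-service calls happen off-mutex, so a concurrent callback from the
        // net service back into this object can still acquire mGenMtx.
        if (mNetService != nullptr)
            for (std::list<GroupId>::const_iterator it = groups_to_subscribe.begin();
                 it != groups_to_subscribe.end(); ++it)
                mNetService->subscribeStatusChanged(*it, true);
    }
};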


@@ -928,6 +928,8 @@ void RsGxsNetService::handleRecvSyncGrpStatistics(RsNxsSyncGrpStatsItem *grs)
 #ifdef NXS_NET_DEBUG_6
         GXSNETDEBUG_PG(grs->PeerId(),grs->grpId) << "Received Grp update stats item from peer " << grs->PeerId() << " for group " << grs->grpId << ", reporting " << grs->number_of_posts << " posts." << std::endl;
 #endif
+        bool should_notify = false ;
+        {
             RS_STACK_MUTEX(mNxsMutex) ;
             RsGroupNetworkStatsRecord& rec(mGroupNetworkStats[grs->grpId]) ;
@@ -939,6 +941,9 @@ void RsGxsNetService::handleRecvSyncGrpStatistics(RsNxsSyncGrpStatsItem *grs)
             rec.update_TS = time(NULL) ;
             if (old_count != rec.max_visible_count || old_suppliers_count != rec.suppliers.size())
+                should_notify = true ;
+        }
+        if(should_notify)
             mObserver->notifyChangedGroupStats(grs->grpId);
     }
     else
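The net-service side applies the mirror image of the same idea: the statistics update and the change test happen under mNxsMutex, but the observer callback is issued only after that scope, and therefore the mutex, has been left. A minimal flag-based sketch under the same caveats (hypothetical names, not the actual RsGxsNetService code):

#include <mutex>

// Hypothetical observer interface; stands in for the role RsGenExchange plays
// as observer of RsGxsNetService.
struct Observer { void notifyChangedGroupStats() {} };

struct NetService
{
    std::mutex mNxsMutex;
    Observer  *mObserver     = nullptr;
    int        mVisibleCount = 0;

    void handleRecvStats(int reported_count)
    {
        bool should_notify = false;                     // decided while the lock is held
        {
            std::lock_guard<std::mutex> lock(mNxsMutex);

            if (reported_count > mVisibleCount)         // the visible statistics changed
            {
                mVisibleCount = reported_count;
                should_notify = true;
            }
        }                                               // mNxsMutex released here

        // The observer may take its own mutex (mGenMtx), so it is only called off-mutex.
        if (should_notify && mObserver != nullptr)
            mObserver->notifyChangedGroupStats();
    }
};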