removed aliasing in storeMessage/storeGroup, fixed several memory leaks in unit tests, improved the auto-delete structures in rsgxsutil, added deletion of owned members in the RsGenExchange destructor, removed shared pointers from the unit tests (they cause a memory leak and are only used there)

csoler 2017-07-25 00:07:53 +02:00
parent ef24459c5e
commit 461ccf3b84
21 changed files with 281 additions and 275 deletions
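For orientation, here is a minimal caller-side sketch of the ownership pattern this commit moves to (illustrative only; mDataStore and serviceType are placeholder names, not code from the commit): each RsNxsMsg carries its own metaData, batches travel in a plain std::list, and the store functions now take a const reference and do not delete their arguments, so ownership stays with the caller.

std::list<RsNxsMsg*> batch;

RsNxsMsg* msg = new RsNxsMsg(serviceType);   // serviceType: placeholder service id
msg->metaData = new RsGxsMsgMetaData();      // meta data is carried by the message item itself
// ... fill in msg->msg, msg->msgId, msg->metaData->mGroupId, etc.
batch.push_back(msg);

mDataStore->storeMessage(batch);             // const std::list<RsNxsMsg*>&; nothing is deleted inside

for(std::list<RsNxsMsg*>::iterator it = batch.begin(); it != batch.end(); ++it)
    delete *it;                              // deleting the RsNxsMsg also frees its metaData

In the diffs below this manual deletion step is mostly taken over by the auto-delete list types introduced in rsgxsutil.h.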

View File

@ -702,20 +702,20 @@ RsNxsMsg* RsDataService::locked_getMessage(RetroCursor &c)
return NULL;
}
int RsDataService::storeMessage(std::map<RsNxsMsg *, RsGxsMsgMetaData *> &msg)
int RsDataService::storeMessage(const std::list<RsNxsMsg*>& msg)
{
RsStackMutex stack(mDbMutex);
std::map<RsNxsMsg*, RsGxsMsgMetaData* >::iterator mit = msg.begin();
// start a transaction
mDb->beginTransaction();
for(; mit != msg.end(); ++mit)
for(std::list<RsNxsMsg*>::const_iterator mit = msg.begin(); mit != msg.end(); ++mit)
{
RsNxsMsg* msgPtr = mit->first;
RsGxsMsgMetaData* msgMetaPtr = mit->second;
RsNxsMsg* msgPtr = *mit;
RsGxsMsgMetaData* msgMetaPtr = msgPtr->metaData;
assert(msgMetaPtr != NULL);
#ifdef RS_DATA_SERVICE_DEBUG
std::cerr << "RsDataService::storeMessage() ";
@ -790,16 +790,6 @@ int RsDataService::storeMessage(std::map<RsNxsMsg *, RsGxsMsgMetaData *> &msg)
// finish transaction
bool ret = mDb->commitTransaction();
for(mit = msg.begin(); mit != msg.end(); ++mit)
{
//TODO: API encourages aliasing, remove this abomination
if(mit->second != mit->first->metaData)
delete mit->second;
delete mit->first;
;
}
return ret;
}
@ -811,104 +801,94 @@ bool RsDataService::validSize(RsNxsMsg* msg) const
}
int RsDataService::storeGroup(std::map<RsNxsGrp *, RsGxsGrpMetaData *> &grp)
int RsDataService::storeGroup(const std::list<RsNxsGrp*>& grp)
{
RsStackMutex stack(mDbMutex);
std::map<RsNxsGrp*, RsGxsGrpMetaData* >::iterator sit = grp.begin();
// begin transaction
mDb->beginTransaction();
for(; sit != grp.end(); ++sit)
{
for(std::list<RsNxsGrp*>::const_iterator sit = grp.begin();sit != grp.end(); ++sit)
{
RsNxsGrp* grpPtr = *sit;
RsGxsGrpMetaData* grpMetaPtr = grpPtr->metaData;
RsNxsGrp* grpPtr = sit->first;
RsGxsGrpMetaData* grpMetaPtr = sit->second;
assert(grpMetaPtr != NULL);
// if data is larger than max item size do not add
if(!validSize(grpPtr)) continue;
// if data is larger than max item size do not add
if(!validSize(grpPtr)) continue;
#ifdef RS_DATA_SERVICE_DEBUG
std::cerr << "RsDataService::storeGroup() GrpId: " << grpPtr->grpId.toStdString();
std::cerr << " CircleType: " << (uint32_t) grpMetaPtr->mCircleType;
std::cerr << " CircleId: " << grpMetaPtr->mCircleId.toStdString();
std::cerr << std::endl;
std::cerr << "RsDataService::storeGroup() GrpId: " << grpPtr->grpId.toStdString();
std::cerr << " CircleType: " << (uint32_t) grpMetaPtr->mCircleType;
std::cerr << " CircleId: " << grpMetaPtr->mCircleId.toStdString();
std::cerr << std::endl;
#endif
/*!
* STORE data, data len,
* grpId, flags, publish time stamp, identity,
* id signature, admin signatue, key set, last posting ts
* and meta data
**/
ContentValue cv;
/*!
* STORE data, data len,
* grpId, flags, publish time stamp, identity,
* id signature, admin signatue, key set, last posting ts
* and meta data
**/
ContentValue cv;
uint32_t dataLen = grpPtr->grp.TlvSize();
char grpData[dataLen];
uint32_t offset = 0;
grpPtr->grp.SetTlv(grpData, dataLen, &offset);
cv.put(KEY_NXS_DATA, dataLen, grpData);
uint32_t dataLen = grpPtr->grp.TlvSize();
char grpData[dataLen];
uint32_t offset = 0;
grpPtr->grp.SetTlv(grpData, dataLen, &offset);
cv.put(KEY_NXS_DATA, dataLen, grpData);
cv.put(KEY_NXS_DATA_LEN, (int32_t) dataLen);
cv.put(KEY_GRP_ID, grpPtr->grpId.toStdString());
cv.put(KEY_GRP_NAME, grpMetaPtr->mGroupName);
cv.put(KEY_ORIG_GRP_ID, grpMetaPtr->mOrigGrpId.toStdString());
cv.put(KEY_NXS_SERV_STRING, grpMetaPtr->mServiceString);
cv.put(KEY_NXS_FLAGS, (int32_t)grpMetaPtr->mGroupFlags);
cv.put(KEY_TIME_STAMP, (int32_t)grpMetaPtr->mPublishTs);
cv.put(KEY_GRP_SIGN_FLAGS, (int32_t)grpMetaPtr->mSignFlags);
cv.put(KEY_GRP_CIRCLE_ID, grpMetaPtr->mCircleId.toStdString());
cv.put(KEY_GRP_CIRCLE_TYPE, (int32_t)grpMetaPtr->mCircleType);
cv.put(KEY_GRP_INTERNAL_CIRCLE, grpMetaPtr->mInternalCircle.toStdString());
cv.put(KEY_GRP_ORIGINATOR, grpMetaPtr->mOriginator.toStdString());
cv.put(KEY_GRP_AUTHEN_FLAGS, (int32_t)grpMetaPtr->mAuthenFlags);
cv.put(KEY_PARENT_GRP_ID, grpMetaPtr->mParentGrpId.toStdString());
cv.put(KEY_NXS_HASH, grpMetaPtr->mHash.toStdString());
cv.put(KEY_RECV_TS, (int32_t)grpMetaPtr->mRecvTS);
cv.put(KEY_GRP_REP_CUTOFF, (int32_t)grpMetaPtr->mReputationCutOff);
cv.put(KEY_NXS_IDENTITY, grpMetaPtr->mAuthorId.toStdString());
cv.put(KEY_NXS_DATA_LEN, (int32_t) dataLen);
cv.put(KEY_GRP_ID, grpPtr->grpId.toStdString());
cv.put(KEY_GRP_NAME, grpMetaPtr->mGroupName);
cv.put(KEY_ORIG_GRP_ID, grpMetaPtr->mOrigGrpId.toStdString());
cv.put(KEY_NXS_SERV_STRING, grpMetaPtr->mServiceString);
cv.put(KEY_NXS_FLAGS, (int32_t)grpMetaPtr->mGroupFlags);
cv.put(KEY_TIME_STAMP, (int32_t)grpMetaPtr->mPublishTs);
cv.put(KEY_GRP_SIGN_FLAGS, (int32_t)grpMetaPtr->mSignFlags);
cv.put(KEY_GRP_CIRCLE_ID, grpMetaPtr->mCircleId.toStdString());
cv.put(KEY_GRP_CIRCLE_TYPE, (int32_t)grpMetaPtr->mCircleType);
cv.put(KEY_GRP_INTERNAL_CIRCLE, grpMetaPtr->mInternalCircle.toStdString());
cv.put(KEY_GRP_ORIGINATOR, grpMetaPtr->mOriginator.toStdString());
cv.put(KEY_GRP_AUTHEN_FLAGS, (int32_t)grpMetaPtr->mAuthenFlags);
cv.put(KEY_PARENT_GRP_ID, grpMetaPtr->mParentGrpId.toStdString());
cv.put(KEY_NXS_HASH, grpMetaPtr->mHash.toStdString());
cv.put(KEY_RECV_TS, (int32_t)grpMetaPtr->mRecvTS);
cv.put(KEY_GRP_REP_CUTOFF, (int32_t)grpMetaPtr->mReputationCutOff);
cv.put(KEY_NXS_IDENTITY, grpMetaPtr->mAuthorId.toStdString());
offset = 0;
char keySetData[grpMetaPtr->keys.TlvSize()];
grpMetaPtr->keys.SetTlv(keySetData, grpMetaPtr->keys.TlvSize(), &offset);
cv.put(KEY_KEY_SET, grpMetaPtr->keys.TlvSize(), keySetData);
offset = 0;
char keySetData[grpMetaPtr->keys.TlvSize()];
grpMetaPtr->keys.SetTlv(keySetData, grpMetaPtr->keys.TlvSize(), &offset);
cv.put(KEY_KEY_SET, grpMetaPtr->keys.TlvSize(), keySetData);
offset = 0;
char metaData[grpPtr->meta.TlvSize()];
grpPtr->meta.SetTlv(metaData, grpPtr->meta.TlvSize(), &offset);
cv.put(KEY_NXS_META, grpPtr->meta.TlvSize(), metaData);
offset = 0;
char metaData[grpPtr->meta.TlvSize()];
grpPtr->meta.SetTlv(metaData, grpPtr->meta.TlvSize(), &offset);
cv.put(KEY_NXS_META, grpPtr->meta.TlvSize(), metaData);
// local meta data
cv.put(KEY_GRP_SUBCR_FLAG, (int32_t)grpMetaPtr->mSubscribeFlags);
cv.put(KEY_GRP_POP, (int32_t)grpMetaPtr->mPop);
cv.put(KEY_MSG_COUNT, (int32_t)grpMetaPtr->mVisibleMsgCount);
cv.put(KEY_GRP_STATUS, (int32_t)grpMetaPtr->mGroupStatus);
cv.put(KEY_GRP_LAST_POST, (int32_t)grpMetaPtr->mLastPost);
// local meta data
cv.put(KEY_GRP_SUBCR_FLAG, (int32_t)grpMetaPtr->mSubscribeFlags);
cv.put(KEY_GRP_POP, (int32_t)grpMetaPtr->mPop);
cv.put(KEY_MSG_COUNT, (int32_t)grpMetaPtr->mVisibleMsgCount);
cv.put(KEY_GRP_STATUS, (int32_t)grpMetaPtr->mGroupStatus);
cv.put(KEY_GRP_LAST_POST, (int32_t)grpMetaPtr->mLastPost);
locked_clearGrpMetaCache(grpMetaPtr->mGroupId);
locked_clearGrpMetaCache(grpMetaPtr->mGroupId);
if (!mDb->sqlInsert(GRP_TABLE_NAME, "", cv))
{
std::cerr << "RsDataService::storeGroup() sqlInsert Failed";
std::cerr << std::endl;
std::cerr << "\t For GroupId: " << grpMetaPtr->mGroupId.toStdString();
std::cerr << std::endl;
}
}
if (!mDb->sqlInsert(GRP_TABLE_NAME, "", cv))
{
std::cerr << "RsDataService::storeGroup() sqlInsert Failed";
std::cerr << std::endl;
std::cerr << "\t For GroupId: " << grpMetaPtr->mGroupId.toStdString();
std::cerr << std::endl;
}
}
// finish transaction
bool ret = mDb->commitTransaction();
for(sit = grp.begin(); sit != grp.end(); ++sit)
{
//TODO: API encourages aliasing, remove this abomination
if(sit->second != sit->first->metaData)
delete sit->second;
delete sit->first;
}
return ret;
}
@ -918,21 +898,21 @@ void RsDataService::locked_clearGrpMetaCache(const RsGxsGroupId& gid)
mGrpMetaDataCache_ContainsAllDatabase = false;
}
int RsDataService::updateGroup(std::map<RsNxsGrp *, RsGxsGrpMetaData *> &grp)
int RsDataService::updateGroup(const std::list<RsNxsGrp *> &grp)
{
RsStackMutex stack(mDbMutex);
std::map<RsNxsGrp*, RsGxsGrpMetaData* >::iterator sit = grp.begin();
// begin transaction
mDb->beginTransaction();
for(; sit != grp.end(); ++sit)
for( std::list<RsNxsGrp*>::const_iterator sit = grp.begin(); sit != grp.end(); ++sit)
{
RsNxsGrp* grpPtr = sit->first;
RsGxsGrpMetaData* grpMetaPtr = sit->second;
RsNxsGrp* grpPtr = *sit;
RsGxsGrpMetaData* grpMetaPtr = grpPtr->metaData;
assert(grpMetaPtr != NULL);
// if data is larger than max item size do not add
if(!validSize(grpPtr)) continue;
@ -991,15 +971,6 @@ int RsDataService::updateGroup(std::map<RsNxsGrp *, RsGxsGrpMetaData *> &grp)
// finish transaction
bool ret = mDb->commitTransaction();
for(sit = grp.begin(); sit != grp.end(); ++sit)
{
//TODO: API encourages aliasing, remove this abomination
if(sit->second != sit->first->metaData)
delete sit->second;
delete sit->first;
}
return ret;
}

View File

@ -127,21 +127,21 @@ public:
* @param msg map of message and decoded meta data information
* @return error code
*/
int storeMessage(std::map<RsNxsMsg*, RsGxsMsgMetaData*>& msg);
int storeMessage(const std::list<RsNxsMsg*>& msg);
/*!
* Stores a list of groups in data store
* @param grp map of group and decoded meta data
* @return error code
*/
int storeGroup(std::map<RsNxsGrp*, RsGxsGrpMetaData*>& grp);
int storeGroup(const std::list<RsNxsGrp*>& grp);
/*!
* Updates group entries in Db
* @param grp map of group and decoded meta data
* @return error code
*/
int updateGroup(std::map<RsNxsGrp*, RsGxsGrpMetaData*>& grsp);
int updateGroup(const std::list<RsNxsGrp*>& grsp);
/*!
* @param metaData The meta data item to update

View File

@ -223,14 +223,14 @@ public:
* @param msg map of message and decoded meta data information
* @return error code
*/
virtual int storeMessage(std::map<RsNxsMsg*, RsGxsMsgMetaData*>& msgs) = 0;
virtual int storeMessage(const std::list<RsNxsMsg*>& msgs) = 0;
/*!
* Stores a list of groups in data store
* @param grp map of group and decoded meta data
* @return error code
*/
virtual int storeGroup(std::map<RsNxsGrp*, RsGxsGrpMetaData*>& grsp) = 0;
virtual int storeGroup(const std::list<RsNxsGrp*>& grsp) = 0;
/*!
@ -238,7 +238,7 @@ public:
* @param grp map of group and decoded meta data
* @return error code
*/
virtual int updateGroup(std::map<RsNxsGrp*, RsGxsGrpMetaData*>& grsp) = 0;
virtual int updateGroup(const std::list<RsNxsGrp*>& grsp) = 0;
/*!
* @param metaData

View File

@ -117,6 +117,14 @@ RsGenExchange::~RsGenExchange()
delete mDataStore;
mDataStore = NULL;
for(uint32_t i=0;i<mNotifications.size();++i)
delete mNotifications[i] ;
for(uint32_t i=0;i<mGrpsToPublish.size();++i)
delete mGrpsToPublish[i].mItem ;
mNotifications.clear();
mGrpsToPublish.clear();
}
bool RsGenExchange::getGroupServerUpdateTS(const RsGxsGroupId& gid, time_t& grp_server_update_TS, time_t& msg_server_update_TS)
@ -2264,6 +2272,8 @@ void RsGenExchange::publishMsgs()
computeHash(msg->msg, msg->metaData->mHash);
mDataAccess->addMsgData(msg);
delete msg ;
msgChangeMap[grpId].push_back(msgId);
delete[] metaDataBuff;
@ -2664,9 +2674,9 @@ void RsGenExchange::publishGrps()
mDataAccess->updateGroupData(grp);
else
mDataAccess->addGroupData(grp);
#warning csoler: this is bad: addGroupData/updateGroupData actially deletes grp. But it may be used below? grp should be a class object and not deleted manually!
groups_to_subscribe.push_back(grpId) ;
delete grp ;
groups_to_subscribe.push_back(grpId) ;
}
else
{
@ -2885,9 +2895,8 @@ void RsGenExchange::processRecvdMessages()
std::vector<RsNxsMsg*>::iterator vit = mReceivedMsgs.begin();
GxsMsgReq msgIds;
std::map<RsNxsMsg*, RsGxsMsgMetaData*> msgs;
std::map<RsGxsGroupId, RsGxsGrpMetaData*> grpMetas;
RsNxsMsgDataTemporaryList msgs;
RsGxsGrpMetaTemporaryMap grpMetas;
// coalesce group meta retrieval for performance
for(; vit != mReceivedMsgs.end(); ++vit)
@ -2961,7 +2970,7 @@ void RsGenExchange::processRecvdMessages()
if(validateReturn == VALIDATE_SUCCESS)
{
meta->mMsgStatus = GXS_SERV::GXS_MSG_STATUS_UNPROCESSED | GXS_SERV::GXS_MSG_STATUS_GUI_NEW | GXS_SERV::GXS_MSG_STATUS_GUI_UNREAD;
msgs.insert(std::make_pair(msg, meta));
msgs.push_back(msg);
std::vector<RsGxsMessageId> &msgv = msgIds[msg->grpId];
if (std::find(msgv.begin(), msgv.end(), msg->msgId) == msgv.end())
@ -3038,15 +3047,9 @@ void RsGenExchange::processRecvdMessages()
if(vit == mMsgPendingValidate.end())
mMsgPendingValidate.push_back(GxsPendingItem<RsNxsMsg*, RsGxsGrpMsgIdPair>(msg, id,time(NULL)));
// else
// delete msg ;
}
}
// clean up resources from group meta retrieval
freeAndClearContainerResource<std::map<RsGxsGroupId, RsGxsGrpMetaData*>,
RsGxsGrpMetaData*>(grpMetas);
if(!msgIds.empty())
{
#ifdef GEN_EXCH_DEBUG
@ -3091,7 +3094,7 @@ void RsGenExchange::processRecvdGroups()
std::vector<RsGxsGroupId> existingGrpIds;
std::list<RsGxsGroupId> grpIds;
std::map<RsNxsGrp*, RsGxsGrpMetaData*> grps;
RsNxsGrpDataTemporaryList grps;
mDataStore->retrieveGroupIds(existingGrpIds);
@ -3145,7 +3148,7 @@ void RsGenExchange::processRecvdGroups()
meta->mSubscribeFlags = GXS_SERV::GROUP_SUBSCRIBE_NOT_SUBSCRIBED;
grps.insert(std::make_pair(grp, meta));
grps.push_back(grp);
grpIds.push_back(grp->grpId);
}
else
@ -3250,7 +3253,9 @@ void RsGenExchange::performUpdateValidation()
#endif
vit = mGroupUpdates.begin();
std::map<RsNxsGrp*, RsGxsGrpMetaData*> grps;
RsNxsGrpDataTemporaryList grps ;
for(; vit != mGroupUpdates.end(); ++vit)
{
GroupUpdate& gu = *vit;
@ -3265,7 +3270,7 @@ void RsGenExchange::performUpdateValidation()
gu.newGrp->metaData->mSubscribeFlags = gu.oldGrpMeta->mSubscribeFlags ;
grps.insert(std::make_pair(gu.newGrp, gu.newGrp->metaData));
grps.push_back(gu.newGrp);
}
else
{
@ -3353,14 +3358,14 @@ void RsGenExchange::setGroupReputationCutOff(uint32_t& token, const RsGxsGroupId
mGrpLocMetaMap.insert(std::make_pair(token, g));
}
void RsGenExchange::removeDeleteExistingMessages( RsGeneralDataService::MsgStoreMap& msgs, GxsMsgReq& msgIdsNotify)
void RsGenExchange::removeDeleteExistingMessages( std::list<RsNxsMsg*>& msgs, GxsMsgReq& msgIdsNotify)
{
// first get grp ids of messages to be stored
RsGxsGroupId::std_set mGrpIdsUnique;
for(RsGeneralDataService::MsgStoreMap::const_iterator cit = msgs.begin(); cit != msgs.end(); ++cit)
mGrpIdsUnique.insert(cit->second->mGroupId);
for(std::list<RsNxsMsg*>::const_iterator cit = msgs.begin(); cit != msgs.end(); ++cit)
mGrpIdsUnique.insert((*cit)->metaData->mGroupId);
//RsGxsGroupId::std_list grpIds(mGrpIdsUnique.begin(), mGrpIdsUnique.end());
//RsGxsGroupId::std_list::const_iterator it = grpIds.begin();
@ -3381,13 +3386,10 @@ void RsGenExchange::removeDeleteExistingMessages( RsGeneralDataService::MsgStore
#endif
}
//RsGeneralDataService::MsgStoreMap::iterator cit2 = msgs.begin();
RsGeneralDataService::MsgStoreMap filtered;
// now for each msg to be stored that exist in the retrieved msg/grp "index" delete and erase from map
for(RsGeneralDataService::MsgStoreMap::iterator cit2 = msgs.begin(); cit2 != msgs.end(); ++cit2)
for(std::list<RsNxsMsg*>::iterator cit2 = msgs.begin(); cit2 != msgs.end(); ++cit2)
{
const RsGxsMessageId::std_vector& msgIds = msgIdReq[cit2->second->mGroupId];
const RsGxsMessageId::std_vector& msgIds = msgIdReq[(*cit2)->metaData->mGroupId];
#ifdef GEN_EXCH_DEBUG
std::cerr << " grpid=" << cit2->second->mGroupId << ", msgid=" << cit2->second->mMsgId ;
@ -3395,38 +3397,27 @@ void RsGenExchange::removeDeleteExistingMessages( RsGeneralDataService::MsgStore
// Avoid storing messages that are already in the database, as well as messages that are too old (or generally do not pass the database storage test)
//
if(std::find(msgIds.begin(), msgIds.end(), cit2->second->mMsgId) == msgIds.end() && messagePublicationTest(*cit2->second))
{
// passes tests, so add to filtered list
//
filtered.insert(*cit2);
#ifdef GEN_EXCH_DEBUG
std::cerr << " keeping " << cit2->second->mMsgId << std::endl;
#endif
}
else // remove message from list
if(std::find(msgIds.begin(), msgIds.end(), (*cit2)->metaData->mMsgId) != msgIds.end() || !messagePublicationTest( *(*cit2)->metaData))
{
// msg exist in retrieved index
RsGxsMessageId::std_vector& notifyIds = msgIdsNotify[cit2->second->mGroupId];
RsGxsMessageId::std_vector::iterator it2 = std::find(notifyIds.begin(),
notifyIds.end(), cit2->second->mMsgId);
RsGxsMessageId::std_vector& notifyIds = msgIdsNotify[ (*cit2)->metaData->mGroupId];
RsGxsMessageId::std_vector::iterator it2 = std::find(notifyIds.begin(), notifyIds.end(), (*cit2)->metaData->mMsgId);
if(it2 != notifyIds.end())
{
notifyIds.erase(it2);
if (notifyIds.empty())
{
msgIdsNotify.erase(cit2->second->mGroupId);
msgIdsNotify.erase( (*cit2)->metaData->mGroupId);
}
}
#ifdef GEN_EXCH_DEBUG
std::cerr << " discarding " << cit2->second->mMsgId << std::endl;
#endif
delete cit2->first;
delete *cit2;
msgs.erase(cit2);
// cit2->second will be deleted too in the destructor of cit2->first (RsNxsMsg)
}
}
msgs = filtered;
}
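For reference, the standard way to filter a std::list in place is to advance through the iterator returned by erase(), because erase() invalidates the iterator it is given. A generic sketch of that idiom (shouldDiscard is a hypothetical predicate standing in for the duplicate/publication checks above, not the project's code):

for(std::list<RsNxsMsg*>::iterator it = msgs.begin(); it != msgs.end(); )
{
    if(shouldDiscard(*it))
    {
        delete *it;             // the RsNxsMsg destructor also frees its metaData
        it = msgs.erase(it);    // use the returned iterator; the erased one is no longer valid
    }
    else
        ++it;
}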

View File

@ -850,7 +850,7 @@ private:
* @param msgs messages to be filtered
* @param msgIdsNotify message notification map to be filtered
*/
void removeDeleteExistingMessages(RsGeneralDataService::MsgStoreMap& msgs, GxsMsgReq& msgIdsNotify);
void removeDeleteExistingMessages(std::list<RsNxsMsg*>& msgs, GxsMsgReq& msgIdsNotify);
RsMutex mGenMtx;
RsGxsDataAccess* mDataAccess;

View File

@ -45,6 +45,11 @@ RsGxsDataAccess::RsGxsDataAccess(RsGeneralDataService* ds) :
mDataStore(ds), mDataMutex("RsGxsDataAccess"), mNextToken(0) {}
RsGxsDataAccess::~RsGxsDataAccess()
{
for(std::map<uint32_t, GxsRequest*>::const_iterator it(mRequests.begin());it!=mRequests.end();++it)
delete it->second ;
}
bool RsGxsDataAccess::requestGroupInfo(uint32_t &token, uint32_t ansType, const RsTokReqOptions &opts,
const std::list<RsGxsGroupId> &groupIds)
{
@ -1803,8 +1808,8 @@ bool RsGxsDataAccess::addGroupData(RsNxsGrp* grp) {
RsStackMutex stack(mDataMutex);
std::map<RsNxsGrp*, RsGxsGrpMetaData*> grpM;
grpM.insert(std::make_pair(grp, grp->metaData));
std::list<RsNxsGrp*> grpM;
grpM.push_back(grp);
return mDataStore->storeGroup(grpM);
}
@ -1812,8 +1817,8 @@ bool RsGxsDataAccess::updateGroupData(RsNxsGrp* grp) {
RsStackMutex stack(mDataMutex);
std::map<RsNxsGrp*, RsGxsGrpMetaData*> grpM;
grpM.insert(std::make_pair(grp, grp->metaData));
std::list<RsNxsGrp*> grpM;
grpM.push_back(grp);
return mDataStore->updateGroup(grpM);
}
@ -1821,8 +1826,8 @@ bool RsGxsDataAccess::addMsgData(RsNxsMsg* msg) {
RsStackMutex stack(mDataMutex);
std::map<RsNxsMsg*, RsGxsMsgMetaData*> msgM;
msgM.insert(std::make_pair(msg, msg->metaData));
std::list<RsNxsMsg*> msgM;
msgM.push_back(msg);
return mDataStore->storeMessage(msgM);
}

View File

@ -38,7 +38,7 @@ class RsGxsDataAccess : public RsTokenService
{
public:
RsGxsDataAccess(RsGeneralDataService* ds);
virtual ~RsGxsDataAccess() { return ;}
virtual ~RsGxsDataAccess() ;
public:

View File

@ -71,13 +71,10 @@ class RsGroupNetworkStatsRecord
* Incoming transaction are in 3 different states
* 1. START 2. RECEIVING 3. END
*/
class RsGxsNetService : public RsNetworkExchangeService, public p3ThreadedService,
public p3Config
class RsGxsNetService : public RsNetworkExchangeService, public p3ThreadedService, public p3Config
{
public:
typedef RsSharedPtr<RsGxsNetService> pointer;
static const uint32_t FRAGMENT_SIZE;
/*!
* only one observer is allowed

View File

@ -33,26 +33,19 @@
class RsGixs ;
class RsGenExchange ;
/*!
* Handy function for cleaning out meta result containers
* @param container
*/
template <class Container, class Item>
void freeAndClearContainerResource(Container container)
{
typename Container::iterator meta_it = container.begin();
for(; meta_it != container.end(); ++meta_it)
if(meta_it->second != NULL)
delete meta_it->second;
container.clear();
}
// temporary holds a map of pointers to class T, and destroys all pointers on delete.
class non_copiable
{
public:
non_copiable() {}
private:
non_copiable& operator=(const non_copiable&) { return *this ;}
non_copiable(const non_copiable&) {}
};
template<class IdClass,class IdData>
class t_RsGxsGenericDataTemporaryMap: public std::map<IdClass,IdData *>
class t_RsGxsGenericDataTemporaryMap: public std::map<IdClass,IdData *>, public non_copiable
{
public:
virtual ~t_RsGxsGenericDataTemporaryMap()
@ -71,7 +64,7 @@ public:
};
template<class T>
class t_RsGxsGenericDataTemporaryMapVector: public std::map<RsGxsGroupId,std::vector<T*> >
class t_RsGxsGenericDataTemporaryMapVector: public std::map<RsGxsGroupId,std::vector<T*> >, public non_copiable
{
public:
virtual ~t_RsGxsGenericDataTemporaryMapVector()
@ -93,12 +86,33 @@ public:
}
};
template<class T>
class t_RsGxsGenericDataTemporaryList: public std::list<T*>, public non_copiable
{
public:
virtual ~t_RsGxsGenericDataTemporaryList()
{
clear() ;
}
virtual void clear()
{
for(typename t_RsGxsGenericDataTemporaryList<T>::iterator it = this->begin();it!=this->end();++it)
delete *it;
std::list<T*>::clear() ;
}
};
typedef t_RsGxsGenericDataTemporaryMap<RsGxsGroupId,RsGxsGrpMetaData> RsGxsGrpMetaTemporaryMap;
typedef t_RsGxsGenericDataTemporaryMap<RsGxsGroupId,RsNxsGrp> RsNxsGrpDataTemporaryMap;
typedef t_RsGxsGenericDataTemporaryMapVector<RsGxsMsgMetaData> RsGxsMsgMetaTemporaryMap ;
typedef t_RsGxsGenericDataTemporaryMapVector<RsNxsMsg> RsNxsMsgDataTemporaryMap ;
typedef t_RsGxsGenericDataTemporaryList<RsNxsGrp> RsNxsGrpDataTemporaryList ;
typedef t_RsGxsGenericDataTemporaryList<RsNxsMsg> RsNxsMsgDataTemporaryList ;
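A small usage sketch of the auto-delete containers declared above (illustrative; dataStore and serviceType are placeholder names): items pushed into a temporary list are owned by it and freed when it goes out of scope, which is what allows the store/update call sites to drop their manual cleanup loops. Deriving from non_copiable guards against accidental copies that would otherwise double-delete the pointers.

{
    RsNxsGrpDataTemporaryList grps;              // a std::list<RsNxsGrp*> that owns its items

    RsNxsGrp* grp = new RsNxsGrp(serviceType);   // serviceType: placeholder
    grp->metaData = new RsGxsGrpMetaData();      // meta data travels with the item, as for RsNxsMsg
    grps.push_back(grp);

    dataStore->storeGroup(grps);                 // takes a const ref; deletes nothing
}                                                // ~t_RsGxsGenericDataTemporaryList deletes every RsNxsGrp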
#ifdef UNUSED
template<class T>
class RsGxsMetaDataTemporaryMapVector: public std::vector<T*>

View File

@ -12,6 +12,7 @@
/*!
* Not thread safe!!
* And also has a memory leak. Do not use (csoler, 24 Jul 2017).
*/
template<class T>
class RsSharedPtr

View File

@ -36,49 +36,38 @@ void test_groupStoreAndRetrieve(){
setUp();
int nGrp = rand()%32;
std::map<RsNxsGrp*, RsGxsGrpMetaData*> grps, grps_copy;
RsNxsGrpDataTemporaryList grps, grps_copy;
RsNxsGrp* grp;
RsGxsGrpMetaData* grpMeta;
for(int i = 0; i < nGrp; i++)
{
std::pair<RsNxsGrp*, RsGxsGrpMetaData*> p;
grp = new RsNxsGrp(RS_SERVICE_TYPE_PLUGIN_SIMPLE_FORUM);
grpMeta = new RsGxsGrpMetaData();
p.first = grp;
p.second = grpMeta;
init_item(*grp);
init_item(grpMeta);
grpMeta->mGroupId = grp->grpId;
grps.insert(p);
RsNxsGrp* grp_copy = new RsNxsGrp(RS_SERVICE_TYPE_PLUGIN_SIMPLE_FORUM);
*grp_copy = *grp;
RsGxsGrpMetaData* grpMeta_copy = new RsGxsGrpMetaData();
*grpMeta_copy = *grpMeta;
grps_copy.insert(std::make_pair(grp_copy, grpMeta_copy ));
grpMeta = NULL;
grp = NULL;
}
grp = new RsNxsGrp(RS_SERVICE_TYPE_PLUGIN_SIMPLE_FORUM);
grpMeta = new RsGxsGrpMetaData();
init_item(*grp);
init_item(grpMeta);
grpMeta->mGroupId = grp->grpId;
grp->metaData = grpMeta ;
grps.push_back(grp);
}
dStore->storeGroup(grps);
//use copy, a grps are deleted in store
grps.clear();
grps = grps_copy;
RsNxsGrpDataTemporaryMap gR;
RsGxsGrpMetaTemporaryMap grpMetaR;
dStore->retrieveNxsGrps(gR, false, false);
dStore->retrieveGxsGrpMetaData(grpMetaR);
std::map<RsNxsGrp*, RsGxsGrpMetaData*>::iterator mit = grps.begin();
bool grpMatch = true, grpMetaMatch = true;
for(; mit != grps.end(); mit++)
for( std::list<RsNxsGrp*>::iterator mit = grps.begin(); mit != grps.end(); mit++)
{
const RsGxsGroupId grpId = mit->first->grpId;
const RsGxsGroupId grpId = (*mit)->metaData->mGroupId;
// check if it exists
if(gR.find(grpId) == gR.end()) {
@ -86,8 +75,8 @@ void test_groupStoreAndRetrieve(){
break;
}
RsNxsGrp *l = mit->first,
*r = gR[grpId];
RsNxsGrp *l = (*mit);
RsNxsGrp *r = gR[grpId];
// assign transaction number
// to right to as tn is not stored
@ -108,7 +97,7 @@ void test_groupStoreAndRetrieve(){
break;
}
RsGxsGrpMetaData *l_Meta = mit->second,
RsGxsGrpMetaData *l_Meta = (*mit)->metaData,
*r_Meta = grpMetaR[grpId];
// assign signSet and mGrpSize
@ -148,15 +137,18 @@ void test_messageStoresAndRetrieve()
grpV.push_back(grpId0);
grpV.push_back(grpId1);
std::map<RsNxsMsg*, RsGxsMsgMetaData*> msgs;
std::map<RsNxsMsg*, RsGxsMsgMetaData*> msgs_copy;
RsNxsMsgDataTemporaryList msgs;
RsNxsMsg* msg = NULL;
RsGxsMsgMetaData* msgMeta = NULL;
int nMsgs = rand()%120;
GxsMsgReq req;
t_RsGxsGenericDataTemporaryMap<RsGxsMessageId,RsNxsMsg> VergrpId0, VergrpId1;
t_RsGxsGenericDataTemporaryMap<RsGxsMessageId, RsGxsMsgMetaData> VerMetagrpId0, VerMetagrpId1;
// These ones are not in auto-delete structures because the data is deleted as part of the RsNxsMsg struct in the msgs list.
std::map<RsGxsMessageId,RsNxsMsg*> VergrpId0 ;
std::map<RsGxsMessageId,RsNxsMsg*> VergrpId1 ;
std::map<RsGxsMessageId, RsGxsMsgMetaData*> VerMetagrpId0;
std::map<RsGxsMessageId, RsGxsMsgMetaData*> VerMetagrpId1;
for(int i=0; i<nMsgs; i++)
{
@ -164,6 +156,9 @@ void test_messageStoresAndRetrieve()
msgMeta = new RsGxsMsgMetaData();
init_item(*msg);
init_item(msgMeta);
msg->metaData = msgMeta ;
std::pair<RsNxsMsg*, RsGxsMsgMetaData*> p(msg, msgMeta);
int chosen = 0;
if(rand()%50 > 24){
@ -179,15 +174,9 @@ void test_messageStoresAndRetrieve()
msgMeta->mMsgId = msg->msgId;
msgMeta->mGroupId = msg->grpId = grpId;
RsNxsMsg* msg_copy = new RsNxsMsg(RS_SERVICE_TYPE_PLUGIN_SIMPLE_FORUM);
RsGxsMsgMetaData* msgMeta_copy = new RsGxsMsgMetaData();
*msg_copy = *msg;
*msgMeta_copy = *msgMeta;
// store msgs in map to use for verification
std::pair<RsGxsMessageId, RsNxsMsg*> vP(msg->msgId, msg_copy);
std::pair<RsGxsMessageId, RsGxsMsgMetaData*> vPmeta(msg->msgId, msgMeta_copy);
std::pair<RsGxsMessageId, RsNxsMsg*> vP(msg->msgId, msg);
std::pair<RsGxsMessageId, RsGxsMsgMetaData*> vPmeta(msg->msgId, msgMeta);
if(!chosen)
{
@ -201,19 +190,12 @@ void test_messageStoresAndRetrieve()
}
msg = NULL;
msgMeta = NULL;
msgs.insert(p);
msgs_copy.insert(std::make_pair(msg_copy, msgMeta_copy));
msgs.push_back(msg);
}
req[grpV[0]] = std::vector<RsGxsMessageId>(); // assign empty list for other
dStore->storeMessage(msgs);
msgs.clear();
msgs = msgs_copy;
// now retrieve msgs for comparison
// first selective retrieval

View File

@ -1,12 +1,15 @@
#include "genexchangetestservice.h"
GenExchangeTestService::GenExchangeTestService(RsGeneralDataService *dataServ, RsNetworkExchangeService * netService,
RsGixs* gixs)
: RsGenExchange(dataServ, netService, new RsDummySerialiser(), RS_SERVICE_TYPE_DUMMY, gixs, 0)
GenExchangeTestService::GenExchangeTestService(RsGeneralDataService *dataServ, RsNetworkExchangeService * netService, RsGixs* gixs)
: RsGenExchange(dataServ, netService, mSerializer = new RsDummySerialiser(), RS_SERVICE_TYPE_DUMMY, gixs, 0)
{
}
GenExchangeTestService::~GenExchangeTestService()
{
delete mSerializer ;
}
RsServiceInfo GenExchangeTestService::getServiceInfo()
{
RsServiceInfo info;

View File

@ -11,6 +11,7 @@ class GenExchangeTestService : public RsGenExchange
{
public:
GenExchangeTestService(RsGeneralDataService* dataServ, RsNetworkExchangeService* nxs, RsGixs* gixs);
virtual ~GenExchangeTestService();
void notifyChanges(std::vector<RsGxsNotify*>& changes);
@ -104,6 +105,7 @@ public:
void service_tick();
RsSerialType *mSerializer ;
};
#endif // GENEXCHANGETESTSERVICE_H

View File

@ -39,6 +39,8 @@ TEST(libretroshare_gxs, DISABLED_RsGenExchange)
//GxsPublishMsgTest testMsgPublishing(&testService, dataStore);
//testMsgPublishing.runTests();
//delete dataStore ; // deleted as a member of RsGenExchange
}
TEST(libretroshare_gxs, GetStats)
@ -54,4 +56,6 @@ TEST(libretroshare_gxs, GetStats)
//GxsPublishMsgTest testMsgPublishing(&testService, dataStore);
//testMsgPublishing.runTests();
//delete dataStore ; // deleted as a member of RsGenExchange
}

View File

@ -82,8 +82,8 @@ NxsGrpSync::NxsGrpSync(RsGcxs* circle, RsGixsReputation* reputation):
RsGxsGroupId grpId = grp->grpId;
RsGeneralDataService::GrpStoreMap gsp;
gsp.insert(std::make_pair(grp, meta));
RsNxsGrpDataTemporaryList gsp;
gsp.push_back(grp);
mit->second->storeGroup(gsp);
// the expected result is that each peer has the group of the others

View File

@ -17,6 +17,20 @@
using namespace rs_nxs_test;
rs_nxs_test::NxsMsgSync::~NxsMsgSync()
{
for(std::map<RsPeerId,RsNxsNetMgr*>::const_iterator it(mNxsNetMgrs.begin());it!=mNxsNetMgrs.end();++it)
delete it->second ;
for(DataMap::const_iterator it(mDataServices.begin());it!=mDataServices.end();++it)
delete it->second ;
delete mRep ;
delete mCircles;
delete mPgpUtils;
}
rs_nxs_test::NxsMsgSync::NxsMsgSync()
: mPgpUtils(NULL), mServType(0) {
int numPeers = 2;
@ -79,8 +93,8 @@ rs_nxs_test::NxsMsgSync::NxsMsgSync()
// first store grp
RsGeneralDataService* ds = mit->second;
RsNxsGrp* grp_clone = grp->clone();
RsGeneralDataService::GrpStoreMap gsp;
gsp.insert(std::make_pair(grp_clone, grp_clone->metaData));
RsNxsGrpDataTemporaryList gsp;
gsp.push_back(grp_clone);
ds->storeGroup(gsp);
RsGxsGroupId grpId = grp->grpId;
@ -95,10 +109,12 @@ rs_nxs_test::NxsMsgSync::NxsMsgSync()
msg->grpId = grp->grpId;
RsGxsMsgMetaData* msgMeta = new RsGxsMsgMetaData();
init_item(msgMeta);
msg->metaData = msgMeta;
msgMeta->mGroupId = grp->grpId;
msgMeta->mMsgId = msg->msgId;
RsGeneralDataService::MsgStoreMap msm;
msm.insert(std::make_pair(msg , msgMeta));
RsNxsMsgDataTemporaryList msm;
msm.push_back(msg);
RsGxsMessageId msgId = msg->msgId;
ds->storeMessage(msm);

View File

@ -17,6 +17,7 @@ namespace rs_nxs_test {
public:
NxsMsgSync();
virtual ~NxsMsgSync();
void getPeers(std::list<RsPeerId>& peerIds);
RsGeneralDataService* getDataService(const RsPeerId& peerId);
RsNxsNetMgr* getDummyNetManager(const RsPeerId& peerId);

View File

@ -60,7 +60,7 @@ private:
};
rs_nxs_test::NxsTestHub::NxsTestHub(NxsTestScenario::pointer testScenario)
rs_nxs_test::NxsTestHub::NxsTestHub(NxsTestScenario *testScenario)
: mTestScenario(testScenario), mMtx("NxsTestHub Mutex")
{
std::list<RsPeerId> peers;
@ -73,31 +73,43 @@ rs_nxs_test::NxsTestHub::NxsTestHub(NxsTestScenario::pointer testScenario)
for(; cit != peers.end(); cit++)
{
RsGxsNetService::pointer ns = RsGxsNetService::pointer(
new RsGxsNetService(
mTestScenario->getServiceType(),
mTestScenario->getDataService(*cit),
mTestScenario->getDummyNetManager(*cit),
new NotifyWithPeerId(*cit, *this),
mTestScenario->getServiceInfo(),
mTestScenario->getDummyReputations(*cit),
mTestScenario->getDummyCircles(*cit),
NULL,
mTestScenario->getDummyPgpUtils(),
true
)
);
NotifyWithPeerId *noti = new NotifyWithPeerId(*cit, *this) ;
NxsTestHubConnection *connection =
new NxsTestHubConnection(*cit, this);
mNotifys.push_back(noti) ;
RsGxsNetService *ns = new RsGxsNetService(
mTestScenario->getServiceType(),
mTestScenario->getDataService(*cit),
mTestScenario->getDummyNetManager(*cit),
noti,
mTestScenario->getServiceInfo(),
mTestScenario->getDummyReputations(*cit),
mTestScenario->getDummyCircles(*cit),
NULL,
mTestScenario->getDummyPgpUtils(),
true
);
NxsTestHubConnection *connection = new NxsTestHubConnection(*cit, this);
ns->setServiceServer(connection);
mConnections.push_back(connection) ;
mPeerNxsMap.insert(std::make_pair(*cit, ns));
}
}
rs_nxs_test::NxsTestHub::~NxsTestHub() {
rs_nxs_test::NxsTestHub::~NxsTestHub()
{
for(PeerNxsMap::const_iterator it(mPeerNxsMap.begin());it!=mPeerNxsMap.end();++it)
delete it->second ;
for(std::list<NotifyWithPeerId*>::const_iterator it(mNotifys.begin());it!=mNotifys.end();++it)
delete *it ;
for(std::list<NxsTestHubConnection*>::const_iterator it(mConnections.begin());it!=mConnections.end();++it)
delete *it ;
}
@ -137,7 +149,7 @@ void rs_nxs_test::NxsTestHub::notifyNewMessages(const RsPeerId& pid,
{
RS_STACK_MUTEX(mMtx); /***** MTX LOCKED *****/
std::map<RsNxsMsg*, RsGxsMsgMetaData*> toStore;
RsNxsMsgDataTemporaryList toStore;
std::vector<RsNxsMsg*>::iterator it = messages.begin();
for(; it != messages.end(); it++)
{
@ -145,13 +157,17 @@ void rs_nxs_test::NxsTestHub::notifyNewMessages(const RsPeerId& pid,
RsGxsMsgMetaData* meta = new RsGxsMsgMetaData();
// local meta is not touched by the deserialisation routine
// have to initialise it
msg->metaData = meta ;
meta->mMsgStatus = 0;
meta->mMsgSize = 0;
meta->mChildTs = 0;
meta->recvTS = 0;
meta->validated = false;
meta->deserialise(msg->meta.bin_data, &(msg->meta.bin_len));
toStore.insert(std::make_pair(msg, meta));
toStore.push_back(msg);
}
RsGeneralDataService* ds = mTestScenario->getDataService(pid);
@ -163,14 +179,15 @@ void rs_nxs_test::NxsTestHub::notifyNewGroups(const RsPeerId& pid, std::vector<R
{
RS_STACK_MUTEX(mMtx); /***** MTX LOCKED *****/
std::map<RsNxsGrp*, RsGxsGrpMetaData*> toStore;
RsNxsGrpDataTemporaryList toStore;
std::vector<RsNxsGrp*>::iterator it = groups.begin();
for(; it != groups.end(); it++)
{
RsNxsGrp* grp = *it;
RsGxsGrpMetaData* meta = new RsGxsGrpMetaData();
grp->metaData = meta ;
meta->deserialise(grp->meta.bin_data, grp->meta.bin_len);
toStore.insert(std::make_pair(grp, meta));
toStore.push_back(grp);
}
RsGeneralDataService* ds = mTestScenario->getDataService(pid);
@ -227,7 +244,7 @@ void rs_nxs_test::NxsTestHub::data_tick()
// then tick net services
for(; it != mPeerNxsMap.end(); it++)
{
RsGxsNetService::pointer s = it->second;
RsGxsNetService *s = it->second;
s->tick();
}

View File

@ -11,7 +11,8 @@
// hence one could envision synchronising between an arbitrary number
// of peers
class NotifyWithPeerId;
class NxsTestHubConnection ;
namespace rs_nxs_test
{
@ -44,7 +45,7 @@ namespace rs_nxs_test
* This constructs the test hub
* for a give scenario in mind
*/
NxsTestHub(NxsTestScenario::pointer testScenario);
NxsTestHub(NxsTestScenario* testScenario);
/*!
* This cleans up what ever testing resources are left
@ -101,13 +102,14 @@ namespace rs_nxs_test
typedef std::pair<RsPeerId, RsRawItem*> PayLoad;
typedef std::map<RsPeerId, RsGxsNetService::pointer > PeerNxsMap ;
typedef std::map<RsPeerId, RsGxsNetService* > PeerNxsMap ;
NxsTestScenario::pointer mTestScenario;
NxsTestScenario *mTestScenario;
RsMutex mMtx;
PeerNxsMap mPeerNxsMap;
std::queue<PayLoad> mPayLoad;
std::list<NotifyWithPeerId*> mNotifys;
std::list<NxsTestHubConnection *> mConnections;
};
}
#endif // NXSTESTHUB_H

View File

@ -15,8 +15,7 @@
// disabled, because it fails after rebase to current master (did not fail in 2015, fails in 2016)
TEST(libretroshare_gxs, DISABLED_gxs_grp_sync)
{
rs_nxs_test::NxsTestScenario::pointer gsync_test = rs_nxs_test::NxsTestScenario::pointer(
new rs_nxs_test::NxsGrpSync());
rs_nxs_test::NxsTestScenario *gsync_test = new rs_nxs_test::NxsGrpSync();
rs_nxs_test::NxsTestHub tHub(gsync_test);
tHub.StartTest();
@ -28,13 +27,13 @@ TEST(libretroshare_gxs, DISABLED_gxs_grp_sync)
ASSERT_TRUE(tHub.testsPassed());
tHub.CleanUpTest();
delete gsync_test ;
}
// disabled, not implemented (does currently the same as NxsGrpSync)
TEST(libretroshare_gxs, DISABLED_gxs_grp_sync_delayed)
{
rs_nxs_test::NxsTestScenario::pointer gsync_test = rs_nxs_test::NxsTestScenario::pointer(
new rs_nxs_test::NxsGrpSyncDelayed());
rs_nxs_test::NxsTestScenario *gsync_test = new rs_nxs_test::NxsGrpSyncDelayed();
rs_nxs_test::NxsTestHub tHub(gsync_test);
tHub.StartTest();
@ -47,12 +46,12 @@ TEST(libretroshare_gxs, DISABLED_gxs_grp_sync_delayed)
tHub.CleanUpTest();
delete gsync_test ;
}
TEST(libretroshare_gxs, gxs_msg_sync)
{
rs_nxs_test::NxsTestScenario::pointer gsync_test = rs_nxs_test::NxsTestScenario::pointer(
new rs_nxs_test::NxsMsgSync);
rs_nxs_test::NxsTestScenario *gsync_test = new rs_nxs_test::NxsMsgSync();
rs_nxs_test::NxsTestHub tHub(gsync_test);
tHub.StartTest();
@ -64,6 +63,7 @@ TEST(libretroshare_gxs, gxs_msg_sync)
ASSERT_TRUE(tHub.testsPassed());
tHub.CleanUpTest();
delete gsync_test ;
}
TEST(libretroshare_gxs, gxs_msg_sync_delayed)

View File

@ -104,7 +104,7 @@ void init_item(RsChatAvatarItem& cai)
{
std::string image_data;
randString(LARGE_STR, image_data);
cai.image_data = new unsigned char[image_data.size()];
cai.image_data = (unsigned char*)malloc(image_data.size());
memcpy(cai.image_data, image_data.c_str(), image_data.size());
cai.image_size = image_data.size();
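The switch above from new[] to malloc presumably matches the deallocation side: an allocation must be released by its matching function (new[] with delete[], malloc with free), otherwise the behavior is undefined. A minimal sketch of the paired pattern, assuming the owning item releases image_data with free() (len and src stand in for the values used above):

unsigned char* buf = (unsigned char*)malloc(len);  // allocated with malloc ...
memcpy(buf, src.c_str(), len);
// ... later, in the owner's cleanup:
free(buf);                                         // ... so it is released with free(), not delete[]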