Fixed three bugs in GXS sync: (1) the group publish TS is now accounted for in mServerGrpSyncMap, so that modifications of group metadata are propagated through the network; (2) mClientSyncMessageMap is now stamped when the server cannot feed any new messages; (3) group info for services with auto-sync disabled is still sent, but only for groups that are already present locally and available in a newer version. This fixes the propagation of GXS avatars.
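For orientation, here is a minimal sketch (simplified struct, hypothetical helper name; the real logic is in RsGxsNetService::updateServerSyncTS() in the diff below) of the server-side rule that fix (1) installs: the advertised group update TS must track the publish TS as well as the receive TS, otherwise edited group metadata (e.g. a new avatar) never looks new to peers.

    #include <ctime>

    // Field names follow RsGxsGrpMetaData in the diff below.
    struct GrpMeta { time_t mPublishTs; time_t mRecvTS; };

    // Hypothetical helper: before fix (1), only mRecvTS was considered, so a
    // metadata edit (which bumps mPublishTs) never advanced the server-side
    // update TS and was therefore never advertised to peers.
    time_t bumpServerGrpUpdateTS(time_t currentTS, const GrpMeta& meta)
    {
        if (meta.mPublishTs > currentTS) currentTS = meta.mPublishTs; // metadata was republished
        if (meta.mRecvTS    > currentTS) currentTS = meta.mRecvTS;    // new group data was received
        return currentTS;
    }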

git-svn-id: http://svn.code.sf.net/p/retroshare/code/trunk@7889 b45a01b8-16f6-495d-af2f-9b41ad6348cc
csoler 2015-01-30 20:46:58 +00:00
parent be6c17fe5f
commit 327c369d5c
2 changed files with 134 additions and 111 deletions

libretroshare/src/gxs/rsgxsnetservice.cc

@@ -37,6 +37,7 @@
 /***
  * #define NXS_NET_DEBUG 1
  ***/
+//#define NXS_NET_DEBUG 1
 //#define NXS_NET_DEBUG_0 1
 // #define NXS_NET_DEBUG_1 1
@@ -236,33 +237,30 @@ void RsGxsNetService::syncWithPeers()
     std::set<RsPeerId>::iterator sit = peers.begin();
-    if(mGrpAutoSync)
+    // for now just grps
+    for(; sit != peers.end(); ++sit)
     {
-        // for now just grps
-        for(; sit != peers.end(); ++sit)
-        {
-            const RsPeerId peerId = *sit;
-            ClientGrpMap::const_iterator cit = mClientGrpUpdateMap.find(peerId);
-            uint32_t updateTS = 0;
-            if(cit != mClientGrpUpdateMap.end())
-            {
-                const RsGxsGrpUpdateItem *gui = cit->second;
-                updateTS = gui->grpUpdateTS;
-            }
-            RsNxsSyncGrp *grp = new RsNxsSyncGrp(mServType);
-            grp->clear();
-            grp->PeerId(*sit);
-            grp->updateTS = updateTS;
-            NxsBandwidthRecorder::recordEvent(mServType,grp) ;
+        const RsPeerId peerId = *sit;
+        ClientGrpMap::const_iterator cit = mClientGrpUpdateMap.find(peerId);
+        uint32_t updateTS = 0;
+        if(cit != mClientGrpUpdateMap.end())
+        {
+            const RsGxsGrpUpdateItem *gui = cit->second;
+            updateTS = gui->grpUpdateTS;
+        }
+        RsNxsSyncGrp *grp = new RsNxsSyncGrp(mServType);
+        grp->clear();
+        grp->PeerId(*sit);
+        grp->updateTS = updateTS;
+        NxsBandwidthRecorder::recordEvent(mServType,grp) ;
 #ifdef NXS_NET_DEBUG_0
         std::cerr << "  sending RsNxsSyncGrp item to peer id: " << *sit << " ts=" << updateTS << std::endl;
 #endif
         sendItem(grp);
-        }
     }

 #ifndef GXS_DISABLE_SYNC_MSGS
@@ -1173,19 +1171,25 @@ void RsGxsNetService::updateServerSyncTS()
     // as a grp list server also note this is the latest item you have
     if(mGrpServerUpdateItem == NULL)
+    {
         mGrpServerUpdateItem = new RsGxsServerGrpUpdateItem(mServType);
+    }
     bool change = false;
     for(; mit != gxsMap.end(); ++mit)
     {
         const RsGxsGroupId& grpId = mit->first;
         RsGxsGrpMetaData* grpMeta = mit->second;
         ServerMsgMap::iterator mapIT = mServerMsgUpdateMap.find(grpId);
         RsGxsServerMsgUpdateItem* msui = NULL;
+        // That accounts for modification of the meta data.
+        if(mGrpServerUpdateItem->grpUpdateTS < grpMeta->mPublishTs)
+        {
+            std::cerr << "publish time stamp of group " << grpId << " has changed to " << time(NULL)-grpMeta->mPublishTs << " secs ago. updating!" << std::endl;
+            mGrpServerUpdateItem->grpUpdateTS = grpMeta->mPublishTs;
+        }
         if(mapIT == mServerMsgUpdateMap.end())
         {
             msui = new RsGxsServerMsgUpdateItem(mServType);
@@ -1205,7 +1209,7 @@ void RsGxsNetService::updateServerSyncTS()
         // this might be very inefficient with time
         if(grpMeta->mRecvTS > mGrpServerUpdateItem->grpUpdateTS)
         {
             mGrpServerUpdateItem->grpUpdateTS = grpMeta->mRecvTS;
             change = true;
         }
     }
@@ -1805,7 +1809,7 @@ void RsGxsNetService::locked_pushMsgTransactionFromList(std::list<RsNxsItem*>& r
 void RsGxsNetService::locked_genReqMsgTransaction(NxsTransaction* tr)
 {
-#ifdef NXS_NET_DEBUG_0
+#ifdef NXS_NET_DEBUG
     std::cerr << "RsGxsNetService::genReqMsgTransaction()" << std::endl;
 #endif
@@ -1831,7 +1835,7 @@ void RsGxsNetService::locked_genReqMsgTransaction(NxsTransaction* tr)
 #endif
         }
     }
-#ifdef NXS_NET_DEBUG_0
+#ifdef NXS_NET_DEBUG
     std::cerr << "  found " << msgItemL.size() << " messages in this transaction." << std::endl;
 #endif
@@ -1866,15 +1870,7 @@ void RsGxsNetService::locked_genReqMsgTransaction(NxsTransaction* tr)
         // peer again, unless the peer has new info about it.
         // That needs of course to reset that time to 0 when we subscribe.
-        RsGxsMsgUpdateItem *& pitem(mClientMsgUpdateMap[pid]) ;
-        if(pitem == NULL)
-        {
-            pitem = new RsGxsMsgUpdateItem(mServType) ;
-            pitem->peerId = pid ;
-        }
-        pitem->msgUpdateTS[grpId] = time(NULL) ;
+        locked_stampPeerGroupUpdateTime(pid,grpId,time(NULL)) ;
         return ;
     }
@@ -2046,6 +2042,27 @@ void RsGxsNetService::locked_genReqMsgTransaction(NxsTransaction* tr)
             mPartialMsgUpdates[tr->mTransaction->PeerId()].erase(item->grpId) ;
         }
     }
+    else
+    {
+        // The list to req is empty. That means we already have all messages that this peer can
+        // provide. So we can stamp the group from this peer to be up to date.
+        locked_stampPeerGroupUpdateTime(pid,grpId,time(NULL)) ;
+    }
+}
+
+void RsGxsNetService::locked_stampPeerGroupUpdateTime(const RsPeerId& pid,const RsGxsGroupId& grpId,time_t tm)
+{
+    RsGxsMsgUpdateItem *& pitem(mClientMsgUpdateMap[pid]) ;
+
+    if(pitem == NULL)
+    {
+        pitem = new RsGxsMsgUpdateItem(mServType) ;
+        pitem->peerId = pid ;
+    }
+
+    pitem->msgUpdateTS[grpId] = tm ;
 }

 void RsGxsNetService::locked_pushGrpTransactionFromList(
@@ -2093,73 +2110,72 @@ void RsGxsNetService::addGroupItemToList(NxsTransaction*& tr,
 void RsGxsNetService::locked_genReqGrpTransaction(NxsTransaction* tr)
 {
     // to create a transaction you need to know who you are transacting with
     // then what grps to request
     // then add an active Transaction for request
 #ifdef NXS_NET_DEBUG
     std::cerr << "locked_genReqGrpTransaction(): " << std::endl;
 #endif
     std::list<RsNxsSyncGrpItem*> grpItemL;
     std::list<RsNxsItem*>::iterator lit = tr->mItems.begin();
     for(; lit != tr->mItems.end(); ++lit)
     {
         RsNxsSyncGrpItem* item = dynamic_cast<RsNxsSyncGrpItem*>(*lit);
         if(item)
         {
             grpItemL.push_back(item);
         }
         else
         {
 #ifdef NXS_NET_DEBUG
             std::cerr << "RsGxsNetService::genReqGrpTransaction(): item failed to cast to RsNxsSyncGrpItem*"
                       << std::endl;
 #endif
         }
     }
     std::map<RsGxsGroupId, RsGxsGrpMetaData*> grpMetaMap;
     std::map<RsGxsGroupId, RsGxsGrpMetaData*>::const_iterator metaIter;
     mDataStore->retrieveGxsGrpMetaData(grpMetaMap);
     // now do compare and add loop
     std::list<RsNxsSyncGrpItem*>::iterator llit = grpItemL.begin();
     std::list<RsNxsItem*> reqList;
     uint32_t transN = locked_getTransactionId();
     GrpAuthorV toVet;
     std::list<RsPeerId> peers;
     peers.push_back(tr->mTransaction->PeerId());
     for(; llit != grpItemL.end(); ++llit)
     {
         RsNxsSyncGrpItem*& grpSyncItem = *llit;
         const RsGxsGroupId& grpId = grpSyncItem->grpId;
         metaIter = grpMetaMap.find(grpId);
         bool haveItem = false;
         bool latestVersion = false;
         if (metaIter != grpMetaMap.end())
         {
             haveItem = true;
             latestVersion = grpSyncItem->publishTs > metaIter->second->mPublishTs;
         }
-        if(!haveItem || latestVersion)
+        if( (mGrpAutoSync && !haveItem) || latestVersion)
         {
             // determine if you need to check reputation
             bool checkRep = !grpSyncItem->authorId.isNull();
             // check if you have reputation, if you don't then
             // place in holding pen
             if(checkRep)
             {
                 if(mReputations->haveReputation(grpSyncItem->authorId))
                 {
                     GixsReputation rep;
                     mReputations->getReputation(grpSyncItem->authorId, rep);
                     if(rep.score >= GIXS_CUT_OFF)
                     {
@@ -2169,42 +2185,42 @@ void RsGxsNetService::locked_genReqGrpTransaction(NxsTransaction* tr)
                     else
                         std::cerr << "  reputation cut off: limit=" << GIXS_CUT_OFF << " value=" << rep.score << ": you shall not pass." << std::endl;
                 }
                 else
                 {
                     // preload reputation for later
                     mReputations->loadReputation(grpSyncItem->authorId, peers);
                     GrpAuthEntry entry;
                     entry.mAuthorId = grpSyncItem->authorId;
                     entry.mGrpId = grpSyncItem->grpId;
                     toVet.push_back(entry);
                 }
             }
             else
             {
                 addGroupItemToList(tr, grpId, transN, reqList);
             }
         }
     }

     if(!toVet.empty())
     {
         RsPeerId peerId = tr->mTransaction->PeerId();
         GrpRespPending* grp = new GrpRespPending(mReputations, peerId, toVet);
         mPendingResp.push_back(grp);
     }

     if(!reqList.empty())
     {
         locked_pushGrpTransactionFromList(reqList, tr->mTransaction->PeerId(), transN);
     }

     // clean up meta data
     std::map<RsGxsGroupId, RsGxsGrpMetaData*>::iterator mit = grpMetaMap.begin();
     for(; mit != grpMetaMap.end(); ++mit)
         delete mit->second;
 }

 void RsGxsNetService::locked_genSendGrpsTransaction(NxsTransaction* tr)
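Fix (3) is the one-line change to the request condition above. The resulting rule, as a sketch (shouldRequestGroup is a hypothetical name; the two flags are computed in the loop as shown):

    // haveItem:      the advertised group is already in the local store
    // latestVersion: the peer advertises a more recent publish TS for it
    bool shouldRequestGroup(bool grpAutoSync, bool haveItem, bool latestVersion)
    {
        return (grpAutoSync && !haveItem) // new groups: only when auto-sync is on
               || latestVersion;          // newer version of a known group: always
    }

Since GXS avatars travel in group metadata, requesting newer versions of already-known groups even when auto-sync is off is what makes them propagate.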

libretroshare/src/gxs/rsgxsnetservice.h

@@ -428,6 +428,13 @@ private:
      * @param partFragments the partitioned fragments (into message ids)
      */
     void collateGrpFragments(GrpFragments fragments, std::map<RsGxsGroupId, GrpFragments>& partFragments) const;

+    /*!
+     * Stamps the group info from that particular peer at the given time.
+     */
+    void locked_stampPeerGroupUpdateTime(const RsPeerId& pid, const RsGxsGroupId& grpId, time_t tm) ;
+
 private:
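This declaration completes the refactor in rsgxsnetservice.cc above: the stamping code that used to be inlined in locked_genReqMsgTransaction() now lives in a single helper, called from both places that conclude a peer has nothing new for a group, e.g.

    locked_stampPeerGroupUpdateTime(pid, grpId, time(NULL)) ;

It creates the mClientMsgUpdateMap entry on demand and records the time after which this peer does not need to be asked again for that group.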