Mirror of https://github.com/RetroShare/RetroShare.git (synced 2025-01-24 14:23:36 -05:00)

Commit 8a8f9fc528 (parent a6c4e430f6)

Added the number of posts at friends to saveList/loadList; improves visibility of the number of posts.

git-svn-id: http://svn.code.sf.net/p/retroshare/code/trunk@7896 b45a01b8-16f6-495d-af2f-9b41ad6348cc
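The diff below boils down to one data-model change: the per-friend map of plain timestamps, std::map<RsGxsGroupId, uint32_t> msgUpdateTS, becomes a map of MsgUpdateInfo records that carry both the update time stamp and the number of posts the friend reports for each group. A minimal sketch of the new shape, using a std::string stand-in for RsGxsGroupId so it compiles on its own (the stand-in type is an illustrative assumption, not RetroShare code):

#include <cstdint>
#include <map>
#include <string>

// Stand-in for RsGxsGroupId, only to keep the sketch self-contained.
typedef std::string GroupId;

// Mirrors RsGxsMsgUpdateItem::MsgUpdateInfo from the diff below.
struct MsgUpdateInfo
{
    uint32_t time_stamp;    // when the friend last gave a full update for the group
    uint32_t message_count; // how many posts the friend reported for the group
};

// Old per-friend bookkeeping: group -> last update time only.
typedef std::map<GroupId, uint32_t> OldMsgUpdateMap;

// New per-friend bookkeeping: group -> (last update time, visible post count).
typedef std::map<GroupId, MsgUpdateInfo> NewMsgUpdateMap;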
@@ -430,7 +430,7 @@ uint8_t RsGenExchange::createGroup(RsNxsGrp *grp, RsTlvSecurityKeySet& privateKe

if (!ok)
{
std::cerr << "RsGenExchange::createGroup() ERROR !okay (getSignature error)";
std::cerr << "RsGenExchange::createGroup() ERROR !okay (getSignature error)";
std::cerr << std::endl;
return CREATE_FAIL;
}

@@ -1240,9 +1240,10 @@ bool RsGenExchange::getGroupData(const uint32_t &token, std::vector<RsGxsGrpItem
bool ok = mDataAccess->getGroupData(token, nxsGrps);

std::list<RsNxsGrp*>::iterator lit = nxsGrps.begin();

#ifdef GEN_EXCH_DEBUG
std::cerr << "RsGenExchange::getGroupData() RsNxsGrp::len: " << nxsGrps.size();
std::cerr << std::endl;
std::cerr << std::endl;
#endif

if(ok)
{

@@ -1542,7 +1543,7 @@ void RsGenExchange::deleteGroup(uint32_t& token, RsGxsGrpItem* grpItem)
mGroupDeletePublish.push_back(GroupDeletePublish(grpItem, token));

#ifdef GEN_EXCH_DEBUG
std::cerr << "RsGenExchange::deleteGroup() token: " << token;
std::cerr << "RsGenExchange::deleteGroup() token: " << token;
std::cerr << std::endl;
#endif
}
@@ -331,10 +331,10 @@ void RsGxsNetService::syncWithPeers()

if(mui)
{
std::map<RsGxsGroupId, uint32_t>::const_iterator cit2 = mui->msgUpdateTS.find(grpId);
std::map<RsGxsGroupId, RsGxsMsgUpdateItem::MsgUpdateInfo>::const_iterator cit2 = mui->msgUpdateInfos.find(grpId);

if(cit2 != mui->msgUpdateTS.end())
updateTS = cit2->second;
if(cit2 != mui->msgUpdateInfos.end())
updateTS = cit2->second.time_stamp;
}

RsNxsSyncMsg* msg = new RsNxsSyncMsg(mServType);

@@ -385,10 +385,10 @@ void RsGxsNetService::subscribeStatusChanged(const RsGxsGroupId& grpId,bool subs
#endif
for(ClientMsgMap::iterator it(mClientMsgUpdateMap.begin());it!=mClientMsgUpdateMap.end();++it)
{
std::map<RsGxsGroupId,uint32_t>::iterator it2 = it->second->msgUpdateTS.find(grpId) ;
std::map<RsGxsGroupId,RsGxsMsgUpdateItem::MsgUpdateInfo>::iterator it2 = it->second->msgUpdateInfos.find(grpId) ;

if(it2 != it->second->msgUpdateTS.end())
it->second->msgUpdateTS.erase(it2) ;
if(it2 != it->second->msgUpdateInfos.end())
it->second->msgUpdateInfos.erase(it2) ;
}
}

@@ -848,7 +848,18 @@ private:

bool RsGxsNetService::loadList(std::list<RsItem *> &load)
{
RS_STACK_MUTEX(mNxsMutex) ;

std::for_each(load.begin(), load.end(), StoreHere(mClientGrpUpdateMap, mClientMsgUpdateMap, mServerMsgUpdateMap, mGrpServerUpdateItem));

for(ClientMsgMap::iterator it = mClientMsgUpdateMap.begin();it!=mClientMsgUpdateMap.end();++it)
for(std::map<RsGxsGroupId,RsGxsMsgUpdateItem::MsgUpdateInfo>::const_iterator it2(it->second->msgUpdateInfos.begin());it2!=it->second->msgUpdateInfos.end();++it2)
{
RsGroupNetworkStatsRecord& gnsr = mGroupNetworkStats[it2->first] ;

gnsr.suppliers.insert(it->first) ;
gnsr.max_visible_count = std::max(it2->second.message_count,gnsr.max_visible_count) ;
}
return true;
}
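With the extra field in place, loadList() no longer just repopulates the update maps: it also folds every friend's advertised post counts into mGroupNetworkStats, recording each friend as a supplier of the group and keeping the largest reported count as max_visible_count. A standalone sketch of that aggregation, with std::string standing in for RsPeerId and RsGxsGroupId and a simplified stats record (the type names here are placeholders, not the RetroShare ones):

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <map>
#include <set>
#include <string>

struct MsgUpdateInfo { uint32_t time_stamp; uint32_t message_count; };

// Simplified stand-in for RsGroupNetworkStatsRecord.
struct GroupNetworkStats
{
    std::set<std::string> suppliers; // peers known to carry the group
    uint32_t max_visible_count = 0;  // highest post count any of them reported
};

int main()
{
    // peer -> (group -> update info), i.e. the per-friend msgUpdateInfos maps.
    std::map<std::string, std::map<std::string, MsgUpdateInfo> > clientMsgUpdateMap = {
        {"peerA", {{"forum1", {1417000000, 12}}, {"forum2", {1417000100, 3}}}},
        {"peerB", {{"forum1", {1417000200, 40}}}},
    };

    std::map<std::string, GroupNetworkStats> groupNetworkStats;

    // Same folding as in RsGxsNetService::loadList(): every peer becomes a
    // supplier of the groups it reports, and the visible count per group is
    // the maximum over all suppliers.
    for (const auto& peerEntry : clientMsgUpdateMap)
        for (const auto& grpEntry : peerEntry.second)
        {
            GroupNetworkStats& gnsr = groupNetworkStats[grpEntry.first];
            gnsr.suppliers.insert(peerEntry.first);
            gnsr.max_visible_count = std::max(grpEntry.second.message_count, gnsr.max_visible_count);
        }

    for (const auto& g : groupNetworkStats)
        std::cout << g.first << ": " << g.second.suppliers.size()
                  << " suppliers, up to " << g.second.max_visible_count << " posts\n";
    return 0;
}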
@@ -1130,20 +1141,23 @@ bool RsGxsNetService::locked_processTransac(RsNxsTransac* item)
return false;
}

void RsGxsNetService::run(){
void RsGxsNetService::run()
{
double timeDelta = 0.5;
int updateCounter = 0;

while(isRunning()){
while(isRunning())
{
//Start waiting as nothing to do in runup
usleep((int) (timeDelta * 1000 * 1000)); // timeDelta sec

if(updateCounter >= 20) {
updateServerSyncTS();
updateCounter = 0;
} else {//if(updateCounter >= 20)
updateCounter++;
}//else (updateCounter >= 20)
if(updateCounter >= 20)
{
updateServerSyncTS();
updateCounter = 0;
}
else
updateCounter++;

// process active transactions
processTransactions();

@@ -1156,7 +1170,7 @@ void RsGxsNetService::run(){

processExplicitGroupRequests();

}//while(isRunning())
}
}

void RsGxsNetService::updateServerSyncTS()
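The run() loop is only reformatted by this commit, but its timing is easy to miss in the diff: each iteration sleeps timeDelta = 0.5 s, and updateServerSyncTS() fires once every 20 iterations, i.e. roughly every 10 seconds, while processTransactions() and the other handlers run on every pass. A rough sketch of that scheduling pattern (tick() and housekeeping() are placeholders for the per-pass and periodic work, not RetroShare functions):

#include <chrono>
#include <iostream>
#include <thread>

// Placeholder work functions; in RsGxsNetService::run() these correspond to
// processTransactions()/processExplicitGroupRequests()/... and updateServerSyncTS().
static void tick()         { std::cout << "tick\n"; }
static void housekeeping() { std::cout << "housekeeping (~every 10 s)\n"; }

int main()
{
    const double timeDelta = 0.5; // seconds per loop, as in the diff
    int updateCounter = 0;

    for (int i = 0; i < 50; ++i) // bounded here so the sketch terminates
    {
        std::this_thread::sleep_for(std::chrono::duration<double>(timeDelta));

        if (updateCounter >= 20)  // 20 * 0.5 s = ~10 s between housekeeping runs
        {
            housekeeping();
            updateCounter = 0;
        }
        else
            updateCounter++;

        tick(); // per-iteration work, always runs
    }
    return 0;
}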
@@ -1170,7 +1184,7 @@ void RsGxsNetService::updateServerSyncTS()
std::map<RsGxsGroupId, RsGxsGrpMetaData*>::iterator mit = gxsMap.begin();

// as a grp list server also note this is the latest item you have
if(mGrpServerUpdateItem == NULL)
if(mGrpServerUpdateItem == NULL)
mGrpServerUpdateItem = new RsGxsServerGrpUpdateItem(mServType);

bool change = false;

@@ -1200,7 +1214,7 @@ void RsGxsNetService::updateServerSyncTS()
msui = mapIT->second;
}

if(grpMeta->mLastPost > msui->msgUpdateTS )
if(grpMeta->mLastPost > msui->msgUpdateTS )
{
change = true;
msui->msgUpdateTS = grpMeta->mLastPost;

@@ -1595,8 +1609,7 @@ void RsGxsNetService::locked_processCompletedIncomingTrans(NxsTransaction* tr)
}else
{
item = new RsGxsGrpUpdateItem(mServType);
mClientGrpUpdateMap.insert(
std::make_pair(peerFrom, item));
mClientGrpUpdateMap.insert(std::make_pair(peerFrom, item));
}

item->grpUpdateTS = updateTS;

@@ -1713,7 +1726,7 @@ void RsGxsNetService::locked_doMsgUpdateWork(const RsNxsTransac *nxsTrans, const
#ifdef NXS_NET_DEBUG
std::cerr << " this is a full update. Updating time stamp." << std::endl;
#endif
mui->msgUpdateTS[grpId] = nxsTrans->updateTS;
mui->msgUpdateInfos[grpId].time_stamp = nxsTrans->updateTS;
IndicateConfigChanged();
}
}

@@ -1870,7 +1883,7 @@ void RsGxsNetService::locked_genReqMsgTransaction(NxsTransaction* tr)
// peer again, unless the peer has new info about it.
// That needs of course to reset that time to 0 when we subscribe.

locked_stampPeerGroupUpdateTime(pid,grpId,time(NULL)) ;
locked_stampPeerGroupUpdateTime(pid,grpId,time(NULL),msgItemL.size()) ;
return ;
}

@@ -2047,11 +2060,11 @@ void RsGxsNetService::locked_genReqMsgTransaction(NxsTransaction* tr)
// The list to req is empty. That means we already have all messages that this peer can
// provide. So we can stamp the group from this peer to be up to date.

locked_stampPeerGroupUpdateTime(pid,grpId,time(NULL)) ;
locked_stampPeerGroupUpdateTime(pid,grpId,time(NULL),msgItemL.size()) ;
}
}

void RsGxsNetService::locked_stampPeerGroupUpdateTime(const RsPeerId& pid,const RsGxsGroupId& grpId,time_t tm)
void RsGxsNetService::locked_stampPeerGroupUpdateTime(const RsPeerId& pid,const RsGxsGroupId& grpId,time_t tm,uint32_t n_messages)
{
RsGxsMsgUpdateItem *& pitem(mClientMsgUpdateMap[pid]) ;

@@ -2061,8 +2074,10 @@ void RsGxsNetService::locked_stampPeerGroupUpdateTime(const RsPeerId& pid,const
pitem->peerId = pid ;
}

pitem->msgUpdateTS[grpId] = time(NULL) ;
return ;
pitem->msgUpdateInfos[grpId].time_stamp = time(NULL) ;
pitem->msgUpdateInfos[grpId].message_count = n_messages ;

IndicateConfigChanged();
}

void RsGxsNetService::locked_pushGrpTransactionFromList(
@@ -433,7 +433,7 @@ private:
* stamp the group info from that particular peer at the given time.
*/

void locked_stampPeerGroupUpdateTime(const RsPeerId& pid,const RsGxsGroupId& grpId,time_t tm) ;
void locked_stampPeerGroupUpdateTime(const RsPeerId& pid,const RsGxsGroupId& grpId,time_t tm,uint32_t n_messages) ;

private:
@@ -26,16 +26,11 @@
#include "rsgxsupdateitems.h"
#include "rsbaseserial.h"

void RsGxsGrpUpdateItem::clear()
{
grpUpdateTS = 0;
peerId.clear();
}

std::ostream& RsGxsGrpUpdateItem::print(std::ostream& out, uint16_t indent)
{
printRsItemBase(out, "RsGxsGrpUpdateItem", indent);

@@ -44,14 +39,14 @@ std::ostream& RsGxsGrpUpdateItem::print(std::ostream& out, uint16_t indent)
printIndent(out, int_Indent);
out << "grpUpdateTS: " << grpUpdateTS << std::endl;
printIndent(out, int_Indent);
return out ;
return out ;
}

void RsGxsMsgUpdateItem::clear()
{
msgUpdateTS.clear();
msgUpdateInfos.clear();
peerId.clear();
}

@@ -65,15 +60,17 @@ std::ostream& RsGxsMsgUpdateItem::print(std::ostream& out, uint16_t indent)
out << "peerId: " << peerId << std::endl;
printIndent(out, int_Indent);

std::map<RsGxsGroupId, uint32_t>::const_iterator cit = msgUpdateTS.begin();
std::map<RsGxsGroupId, MsgUpdateInfo>::const_iterator cit = msgUpdateInfos.begin();
out << "msgUpdateTS map:" << std::endl;
int_Indent += 2;
for(; cit != msgUpdateTS.end(); ++cit)
for(; cit != msgUpdateInfos.end(); ++cit)
{
out << "grpId: " << cit->first << std::endl;
printIndent(out, int_Indent);
out << "Msg time stamp: " << cit->second << std::endl;
printIndent(out, int_Indent);
out << "grpId: " << cit->first << std::endl;
printIndent(out, int_Indent);
out << "Msg time stamp: " << cit->second.time_stamp << std::endl;
printIndent(out, int_Indent);
out << "posts available: " << cit->second.message_count << std::endl;
printIndent(out, int_Indent);
}

return out;

@@ -213,8 +210,8 @@ uint32_t RsGxsUpdateSerialiser::sizeGxsGrpUpdate(RsGxsGrpUpdateItem* item)
{
uint32_t s = 8; // header size
s += item->peerId.serial_size();
s += 4;
return s;
s += 4; // mUpdateTS
return s;
}

uint32_t RsGxsUpdateSerialiser::sizeGxsServerGrpUpdate(RsGxsServerGrpUpdateItem* /* item */)

@@ -319,9 +316,7 @@ bool RsGxsUpdateSerialiser::serialiseGxsServerGrpUpdate(RsGxsServerGrpUpdateItem

return ok;
}

RsGxsGrpUpdateItem* RsGxsUpdateSerialiser::deserialGxsGrpUpddate(void* data,
uint32_t* size)
RsGxsGrpUpdateItem* RsGxsUpdateSerialiser::deserialGxsGrpUpddate(void* data, uint32_t* size)
{
#ifdef RSSERIAL_DEBUG
std::cerr << "RsGxsUpdateSerialiser::deserialGxsServerGrpUpdate()" << std::endl;

@@ -453,21 +448,13 @@ RsGxsServerGrpUpdateItem* RsGxsUpdateSerialiser::deserialGxsServerGrpUpddate(voi

uint32_t RsGxsUpdateSerialiser::sizeGxsMsgUpdate(RsGxsMsgUpdateItem* item)
{
uint32_t s = 8; // header size
uint32_t s = 8; // header size
s += item->peerId.serial_size() ;//GetTlvStringSize(item->peerId);

const std::map<RsGxsGroupId, uint32_t>& msgUpdateTS = item->msgUpdateTS;
std::map<RsGxsGroupId, uint32_t>::const_iterator cit = msgUpdateTS.begin();
s += item->msgUpdateInfos.size() * (4 + 4 + RsGxsGroupId::serial_size());
s += 4; // number of map items

for(; cit != msgUpdateTS.end(); ++cit)
{
s += cit->first.serial_size();
s += 4;
}

s += 4; // number of map items

return s;
return s;
}

uint32_t RsGxsUpdateSerialiser::sizeGxsServerMsgUpdate(RsGxsServerMsgUpdateItem* item)

@@ -510,16 +497,16 @@ bool RsGxsUpdateSerialiser::serialiseGxsMsgUpdate(RsGxsMsgUpdateItem* item,

ok &= item->peerId.serialise(data, *size, offset) ;

const std::map<RsGxsGroupId, uint32_t>& msgUpdateTS = item->msgUpdateTS;
std::map<RsGxsGroupId, uint32_t>::const_iterator cit = msgUpdateTS.begin();
std::map<RsGxsGroupId, RsGxsMsgUpdateItem::MsgUpdateInfo>::const_iterator cit(item->msgUpdateInfos.begin());

uint32_t numItems = msgUpdateTS.size();
uint32_t numItems = item->msgUpdateInfos.size();
ok &= setRawUInt32(data, *size, &offset, numItems);

for(; cit != msgUpdateTS.end(); ++cit)
for(; cit != item->msgUpdateInfos.end(); ++cit)
{
ok &= cit->first.serialise(data, *size, offset);
ok &= setRawUInt32(data, *size, &offset, cit->second);
ok &= setRawUInt32(data, *size, &offset, cit->second.time_stamp);
ok &= setRawUInt32(data, *size, &offset, cit->second.message_count);
}

if(offset != tlvsize){

@@ -632,9 +619,11 @@ RsGxsMsgUpdateItem* RsGxsUpdateSerialiser::deserialGxsMsgUpdate(void* data,
ok &= item->peerId.deserialise(data, *size, offset) ;
uint32_t numUpdateItems;
ok &= getRawUInt32(data, *size, &offset, &(numUpdateItems));
std::map<RsGxsGroupId, uint32_t>& msgUpdateItem = item->msgUpdateTS;
std::map<RsGxsGroupId, RsGxsMsgUpdateItem::MsgUpdateInfo>& msgUpdateInfos = item->msgUpdateInfos;
RsGxsGroupId pId;
uint32_t updateTS;

RsGxsMsgUpdateItem::MsgUpdateInfo info ;

for(uint32_t i = 0; i < numUpdateItems; i++)
{
ok &= pId.deserialise(data, *size, offset);

@@ -642,12 +631,13 @@ RsGxsMsgUpdateItem* RsGxsUpdateSerialiser::deserialGxsMsgUpdate(void* data,
if(!ok)
break;

ok &= getRawUInt32(data, *size, &offset, &(updateTS));
ok &= getRawUInt32(data, *size, &offset, &(info.time_stamp));
ok &= getRawUInt32(data, *size, &offset, &(info.message_count));

if(!ok)
break;

msgUpdateItem.insert(std::make_pair(pId, updateTS));
msgUpdateInfos.insert(std::make_pair(pId, info));
}

if (offset != rssize)
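Taken together, the serialiser changes define a new wire layout for the msg-update item, which is presumably why its packet subtype is bumped from 0x0002 to 0x0003 in the header below: after the item header and the peer id comes a uint32 entry count, then one record per group made of the serialised group id plus two uint32 fields, time_stamp and message_count. sizeGxsMsgUpdate() accounts for this as 4 + msgUpdateInfos.size() * (RsGxsGroupId::serial_size() + 4 + 4) on top of the header and peer id. A hedged round-trip sketch of just the map part, with a fixed 16-byte fake group id and hand-rolled big-endian helpers standing in for setRawUInt32()/getRawUInt32() (all of that is illustrative, not the RetroShare TLV code):

#include <cstdint>
#include <iostream>
#include <map>
#include <string>
#include <vector>

struct MsgUpdateInfo { uint32_t time_stamp; uint32_t message_count; };

const size_t GROUP_ID_SIZE = 16; // illustrative fixed width, not RsGxsGroupId::serial_size()

// Illustrative big-endian packing, standing in for setRawUInt32()/getRawUInt32().
static void putU32(std::vector<uint8_t>& buf, uint32_t v)
{
    for (int shift = 24; shift >= 0; shift -= 8)
        buf.push_back(static_cast<uint8_t>(v >> shift));
}
static uint32_t getU32(const std::vector<uint8_t>& buf, size_t& off)
{
    uint32_t v = 0;
    for (int i = 0; i < 4; ++i) v = (v << 8) | buf[off++];
    return v;
}

int main()
{
    std::map<std::string, MsgUpdateInfo> msgUpdateInfos = {
        {std::string(GROUP_ID_SIZE, 'a'), {1417000000, 12}},
        {std::string(GROUP_ID_SIZE, 'b'), {1417000200, 40}},
    };

    // Serialise: entry count, then (group id, time_stamp, message_count) per entry.
    // (The real item also carries an 8-byte header and the serialised peer id, omitted here.)
    std::vector<uint8_t> buf;
    putU32(buf, static_cast<uint32_t>(msgUpdateInfos.size()));
    for (const auto& e : msgUpdateInfos)
    {
        buf.insert(buf.end(), e.first.begin(), e.first.end());
        putU32(buf, e.second.time_stamp);
        putU32(buf, e.second.message_count);
    }

    // Size check mirroring the map part of sizeGxsMsgUpdate(): 4 + n * (id + 4 + 4).
    std::cout << "bytes: " << buf.size() << " expected: "
              << 4 + msgUpdateInfos.size() * (GROUP_ID_SIZE + 4 + 4) << "\n";

    // Deserialise back.
    size_t off = 0;
    uint32_t n = getU32(buf, off);
    for (uint32_t i = 0; i < n; ++i)
    {
        std::string id(buf.begin() + off, buf.begin() + off + GROUP_ID_SIZE);
        off += GROUP_ID_SIZE;
        MsgUpdateInfo info;
        info.time_stamp    = getU32(buf, off);
        info.message_count = getU32(buf, off);
        std::cout << id[0] << ": ts=" << info.time_stamp
                  << " posts=" << info.message_count << "\n";
    }
    return 0;
}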
@@ -41,7 +41,8 @@

const uint8_t RS_PKT_SUBTYPE_GXS_GRP_UPDATE = 0x0001;
const uint8_t RS_PKT_SUBTYPE_GXS_MSG_UPDATE = 0x0002;
const uint8_t RS_PKT_SUBTYPE_GXS_MSG_UPDATE_deprecated = 0x0002;
const uint8_t RS_PKT_SUBTYPE_GXS_MSG_UPDATE = 0x0003;
const uint8_t RS_PKT_SUBTYPE_GXS_SERVER_GRP_UPDATE = 0x0004;
const uint8_t RS_PKT_SUBTYPE_GXS_SERVER_MSG_UPDATE = 0x0008;

@@ -50,13 +51,13 @@ public:
RsGxsGrpUpdateItem(uint16_t servType) : RsItem(RS_PKT_VERSION_SERVICE, servType,
RS_PKT_SUBTYPE_GXS_GRP_UPDATE)
{clear();}
virtual ~RsGxsGrpUpdateItem() {}
virtual ~RsGxsGrpUpdateItem() {}

virtual void clear();
virtual std::ostream &print(std::ostream &out, uint16_t indent);
virtual void clear();
virtual std::ostream &print(std::ostream &out, uint16_t indent);

RsPeerId peerId;
uint32_t grpUpdateTS;
uint32_t grpUpdateTS;
};

class RsGxsServerGrpUpdateItem : public RsItem {

@@ -82,8 +83,14 @@ public:
virtual void clear();
virtual std::ostream &print(std::ostream &out, uint16_t indent);

struct MsgUpdateInfo
{
uint32_t time_stamp ;
uint32_t message_count ;
};

RsPeerId peerId;
std::map<RsGxsGroupId, uint32_t> msgUpdateTS;
std::map<RsGxsGroupId, MsgUpdateInfo> msgUpdateInfos;
};

class RsGxsServerMsgUpdateItem : public RsItem
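One detail of the new MsgUpdateInfo struct: it has no constructor, and several call sites above write through operator[], e.g. mui->msgUpdateInfos[grpId].time_stamp = nxsTrans->updateTS. That works because std::map::operator[] value-initialises the mapped object, which zeroes both uint32_t members before the assignment. A minimal standalone check of that behaviour (std::string again stands in for RsGxsGroupId):

#include <cassert>
#include <cstdint>
#include <iostream>
#include <map>
#include <string>

// Same shape as RsGxsMsgUpdateItem::MsgUpdateInfo in the diff: a plain
// aggregate with no user-provided constructor.
struct MsgUpdateInfo
{
    uint32_t time_stamp;
    uint32_t message_count;
};

int main()
{
    std::map<std::string, MsgUpdateInfo> msgUpdateInfos;

    // operator[] value-initialises the mapped MsgUpdateInfo, so both counters
    // start at 0 even though the struct has no constructor.
    msgUpdateInfos["some_group"].time_stamp = 1417000000;

    assert(msgUpdateInfos["some_group"].message_count == 0);
    std::cout << "posts=" << msgUpdateInfos["some_group"].message_count
              << " ts=" << msgUpdateInfos["some_group"].time_stamp << "\n";
    return 0;
}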
@@ -802,7 +802,7 @@ bool p3IdService::createGroup(uint32_t& token, RsGxsIdGroup &group)

bool p3IdService::updateGroup(uint32_t& token, RsGxsIdGroup &group)
{
RsGxsId id = RsGxsId(group.mMeta.mGroupId.toStdString());
RsGxsId id(group.mMeta.mGroupId);
RsGxsIdGroupItem* item = new RsGxsIdGroupItem();

item->fromGxsIdGroup(group,false) ;

@@ -812,7 +812,7 @@ bool p3IdService::updateGroup(uint32_t& token, RsGxsIdGroup &group)
std::cerr << std::endl;
#endif

RsGenExchange::updateGroup(token, item);
RsGenExchange::updateGroup(token, item);

// if its in the cache - clear it.
{