Added the number of posts available at friends to saveList/loadList. This improves the visibility of the number of posts.
git-svn-id: http://svn.code.sf.net/p/retroshare/code/trunk@7896 b45a01b8-16f6-495d-af2f-9b41ad6348cc
This commit is contained in:
parent a6c4e430f6
commit 8a8f9fc528

6 changed files with 90 additions and 77 deletions
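
In short: the per-friend update record kept for each GXS group no longer stores only a time stamp; it now also stores how many messages (posts) that friend reported for the group, and the count travels through saveList/loadList so it survives restarts and can be shown as "posts available at friends". The central declarations, as introduced in the header hunk near the end of the diff, with explanatory comments added here for orientation (the comments are mine, not part of the commit):

// Nested in RsGxsMsgUpdateItem (see the header hunk below):
struct MsgUpdateInfo
{
    uint32_t time_stamp ;     // last time this friend sent us a full message update for the group
    uint32_t message_count ;  // number of messages this friend reported as available for the group
};

// Replaces the old std::map<RsGxsGroupId, uint32_t> msgUpdateTS:
std::map<RsGxsGroupId, MsgUpdateInfo> msgUpdateInfos ;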
@@ -1240,9 +1240,10 @@ bool RsGenExchange::getGroupData(const uint32_t &token, std::vector<RsGxsGrpItem
     bool ok = mDataAccess->getGroupData(token, nxsGrps);

     std::list<RsNxsGrp*>::iterator lit = nxsGrps.begin();

+#ifdef GEN_EXCH_DEBUG
     std::cerr << "RsGenExchange::getGroupData() RsNxsGrp::len: " << nxsGrps.size();
     std::cerr << std::endl;
+#endif

     if(ok)
     {
@@ -331,10 +331,10 @@ void RsGxsNetService::syncWithPeers()

         if(mui)
         {
-            std::map<RsGxsGroupId, uint32_t>::const_iterator cit2 = mui->msgUpdateTS.find(grpId);
+            std::map<RsGxsGroupId, RsGxsMsgUpdateItem::MsgUpdateInfo>::const_iterator cit2 = mui->msgUpdateInfos.find(grpId);

-            if(cit2 != mui->msgUpdateTS.end())
-                updateTS = cit2->second;
+            if(cit2 != mui->msgUpdateInfos.end())
+                updateTS = cit2->second.time_stamp;
         }

         RsNxsSyncMsg* msg = new RsNxsSyncMsg(mServType);
@@ -385,10 +385,10 @@ void RsGxsNetService::subscribeStatusChanged(const RsGxsGroupId& grpId,bool subs
 #endif
     for(ClientMsgMap::iterator it(mClientMsgUpdateMap.begin());it!=mClientMsgUpdateMap.end();++it)
     {
-        std::map<RsGxsGroupId,uint32_t>::iterator it2 = it->second->msgUpdateTS.find(grpId) ;
+        std::map<RsGxsGroupId,RsGxsMsgUpdateItem::MsgUpdateInfo>::iterator it2 = it->second->msgUpdateInfos.find(grpId) ;

-        if(it2 != it->second->msgUpdateTS.end())
-            it->second->msgUpdateTS.erase(it2) ;
+        if(it2 != it->second->msgUpdateInfos.end())
+            it->second->msgUpdateInfos.erase(it2) ;
     }
 }

@@ -848,7 +848,18 @@ private:

 bool RsGxsNetService::loadList(std::list<RsItem *> &load)
 {
+    RS_STACK_MUTEX(mNxsMutex) ;
+
     std::for_each(load.begin(), load.end(), StoreHere(mClientGrpUpdateMap, mClientMsgUpdateMap, mServerMsgUpdateMap, mGrpServerUpdateItem));

+    for(ClientMsgMap::iterator it = mClientMsgUpdateMap.begin();it!=mClientMsgUpdateMap.end();++it)
+        for(std::map<RsGxsGroupId,RsGxsMsgUpdateItem::MsgUpdateInfo>::const_iterator it2(it->second->msgUpdateInfos.begin());it2!=it->second->msgUpdateInfos.end();++it2)
+        {
+            RsGroupNetworkStatsRecord& gnsr = mGroupNetworkStats[it2->first] ;
+
+            gnsr.suppliers.insert(it->first) ;
+            gnsr.max_visible_count = std::max(it2->second.message_count,gnsr.max_visible_count) ;
+        }
+
     return true;
 }
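
For orientation: loadList() now rebuilds the in-memory per-group statistics from the saved per-friend records. Each friend that reported posts for a group is recorded as a supplier of that group, and the group's visible post count becomes the maximum reported by any friend. A minimal standalone sketch of that aggregation, using std::string stand-ins for RsPeerId/RsGxsGroupId and a GroupStatsSketch struct that mirrors the suppliers/max_visible_count fields used above:

#include <algorithm>
#include <cstdint>
#include <map>
#include <set>
#include <string>

// Sketch only: mirrors the loop added to loadList() above.
struct GroupStatsSketch
{
    std::set<std::string> suppliers;     // friends known to carry posts for this group
    uint32_t max_visible_count = 0;      // highest post count reported by any friend
};

int main()
{
    std::map<std::string, GroupStatsSketch> stats;   // stand-in for mGroupNetworkStats

    const struct { const char *peer, *group; uint32_t count; } records[] = {
        { "peerA", "forum1", 12 }, { "peerB", "forum1", 30 }, { "peerA", "forum2", 5 },
    };

    for (const auto& r : records)                     // one record per (friend, group) pair
    {
        GroupStatsSketch& g = stats[r.group];
        g.suppliers.insert(r.peer);
        g.max_visible_count = std::max(r.count, g.max_visible_count);
    }
    // forum1 -> 2 suppliers, max_visible_count == 30; forum2 -> 1 supplier, count 5.
    return 0;
}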
@@ -1130,20 +1141,23 @@ bool RsGxsNetService::locked_processTransac(RsNxsTransac* item)
         return false;
     }

-void RsGxsNetService::run(){
+void RsGxsNetService::run()
+{
     double timeDelta = 0.5;
     int updateCounter = 0;

-    while(isRunning()){
+    while(isRunning())
+    {
         //Start waiting as nothing to do in runup
         usleep((int) (timeDelta * 1000 * 1000)); // timeDelta sec

-        if(updateCounter >= 20) {
+        if(updateCounter >= 20)
+        {
             updateServerSyncTS();
             updateCounter = 0;
-        } else {//if(updateCounter >= 20)
+        }
+        else
             updateCounter++;
-        }//else (updateCounter >= 20)

         // process active transactions
         processTransactions();
@@ -1156,7 +1170,7 @@ void RsGxsNetService::run(){

         processExplicitGroupRequests();

-    }//while(isRunning())
+    }
 }

 void RsGxsNetService::updateServerSyncTS()
@@ -1595,8 +1609,7 @@ void RsGxsNetService::locked_processCompletedIncomingTrans(NxsTransaction* tr)
     }else
     {
         item = new RsGxsGrpUpdateItem(mServType);
-        mClientGrpUpdateMap.insert(
-                std::make_pair(peerFrom, item));
+        mClientGrpUpdateMap.insert(std::make_pair(peerFrom, item));
     }

     item->grpUpdateTS = updateTS;
@@ -1713,7 +1726,7 @@ void RsGxsNetService::locked_doMsgUpdateWork(const RsNxsTransac *nxsTrans, const
 #ifdef NXS_NET_DEBUG
             std::cerr << " this is a full update. Updating time stamp." << std::endl;
 #endif
-            mui->msgUpdateTS[grpId] = nxsTrans->updateTS;
+            mui->msgUpdateInfos[grpId].time_stamp = nxsTrans->updateTS;
             IndicateConfigChanged();
         }
     }
@@ -1870,7 +1883,7 @@ void RsGxsNetService::locked_genReqMsgTransaction(NxsTransaction* tr)
         // peer again, unless the peer has new info about it.
         // That needs of course to reset that time to 0 when we subscribe.

-        locked_stampPeerGroupUpdateTime(pid,grpId,time(NULL)) ;
+        locked_stampPeerGroupUpdateTime(pid,grpId,time(NULL),msgItemL.size()) ;
         return ;
     }

@@ -2047,11 +2060,11 @@ void RsGxsNetService::locked_genReqMsgTransaction(NxsTransaction* tr)
         // The list to req is empty. That means we already have all messages that this peer can
         // provide. So we can stamp the group from this peer to be up to date.

-        locked_stampPeerGroupUpdateTime(pid,grpId,time(NULL)) ;
+        locked_stampPeerGroupUpdateTime(pid,grpId,time(NULL),msgItemL.size()) ;
     }
 }

-void RsGxsNetService::locked_stampPeerGroupUpdateTime(const RsPeerId& pid,const RsGxsGroupId& grpId,time_t tm)
+void RsGxsNetService::locked_stampPeerGroupUpdateTime(const RsPeerId& pid,const RsGxsGroupId& grpId,time_t tm,uint32_t n_messages)
 {
     RsGxsMsgUpdateItem *& pitem(mClientMsgUpdateMap[pid]) ;

@@ -2061,8 +2074,10 @@ void RsGxsNetService::locked_stampPeerGroupUpdateTime(const RsPeerId& pid,const
         pitem->peerId = pid ;
     }

-    pitem->msgUpdateTS[grpId] = time(NULL) ;
-    return ;
+    pitem->msgUpdateInfos[grpId].time_stamp = time(NULL) ;
+    pitem->msgUpdateInfos[grpId].message_count = n_messages ;
+
+    IndicateConfigChanged();
 }

 void RsGxsNetService::locked_pushGrpTransactionFromList(
@@ -433,7 +433,7 @@ private:
      * stamp the group info from that particular peer at the given time.
      */

-    void locked_stampPeerGroupUpdateTime(const RsPeerId& pid,const RsGxsGroupId& grpId,time_t tm) ;
+    void locked_stampPeerGroupUpdateTime(const RsPeerId& pid,const RsGxsGroupId& grpId,time_t tm,uint32_t n_messages) ;

 private:

@@ -26,16 +26,11 @@
 #include "rsgxsupdateitems.h"
 #include "rsbaseserial.h"
-
-
-
-
-

 void RsGxsGrpUpdateItem::clear()
 {
     grpUpdateTS = 0;
     peerId.clear();
 }

 std::ostream& RsGxsGrpUpdateItem::print(std::ostream& out, uint16_t indent)
 {
     printRsItemBase(out, "RsGxsGrpUpdateItem", indent);
@@ -51,7 +46,7 @@ std::ostream& RsGxsGrpUpdateItem::print(std::ostream& out, uint16_t indent)

 void RsGxsMsgUpdateItem::clear()
 {
-    msgUpdateTS.clear();
+    msgUpdateInfos.clear();
     peerId.clear();
 }

@@ -65,14 +60,16 @@ std::ostream& RsGxsMsgUpdateItem::print(std::ostream& out, uint16_t indent)
     out << "peerId: " << peerId << std::endl;
     printIndent(out, int_Indent);

-    std::map<RsGxsGroupId, uint32_t>::const_iterator cit = msgUpdateTS.begin();
+    std::map<RsGxsGroupId, MsgUpdateInfo>::const_iterator cit = msgUpdateInfos.begin();
     out << "msgUpdateTS map:" << std::endl;
     int_Indent += 2;
-    for(; cit != msgUpdateTS.end(); ++cit)
+    for(; cit != msgUpdateInfos.end(); ++cit)
     {
         out << "grpId: " << cit->first << std::endl;
         printIndent(out, int_Indent);
-        out << "Msg time stamp: " << cit->second << std::endl;
+        out << "Msg time stamp: " << cit->second.time_stamp << std::endl;
+        printIndent(out, int_Indent);
+        out << "posts available: " << cit->second.message_count << std::endl;
         printIndent(out, int_Indent);
     }

@@ -213,7 +210,7 @@ uint32_t RsGxsUpdateSerialiser::sizeGxsGrpUpdate(RsGxsGrpUpdateItem* item)
 {
     uint32_t s = 8; // header size
     s += item->peerId.serial_size();
-    s += 4;
+    s += 4; // mUpdateTS
     return s;
 }

@@ -319,9 +316,7 @@ bool RsGxsUpdateSerialiser::serialiseGxsServerGrpUpdate(RsGxsServerGrpUpdateItem

     return ok;
 }
-RsGxsGrpUpdateItem* RsGxsUpdateSerialiser::deserialGxsGrpUpddate(void* data,
-        uint32_t* size)
+RsGxsGrpUpdateItem* RsGxsUpdateSerialiser::deserialGxsGrpUpddate(void* data, uint32_t* size)
 {
 #ifdef RSSERIAL_DEBUG
     std::cerr << "RsGxsUpdateSerialiser::deserialGxsServerGrpUpdate()" << std::endl;
@@ -456,15 +451,7 @@ uint32_t RsGxsUpdateSerialiser::sizeGxsMsgUpdate(RsGxsMsgUpdateItem* item)
     uint32_t s = 8; // header size
     s += item->peerId.serial_size() ;//GetTlvStringSize(item->peerId);

-    const std::map<RsGxsGroupId, uint32_t>& msgUpdateTS = item->msgUpdateTS;
-    std::map<RsGxsGroupId, uint32_t>::const_iterator cit = msgUpdateTS.begin();
-
-    for(; cit != msgUpdateTS.end(); ++cit)
-    {
-        s += cit->first.serial_size();
-        s += 4;
-    }
-
+    s += item->msgUpdateInfos.size() * (4 + 4 + RsGxsGroupId::serial_size());
     s += 4; // number of map items

     return s;
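
As a cross-check on the new size computation above (assuming the item header stays 8 bytes and RsGxsGroupId::serial_size() is constant per group id), the serialised size of one RsGxsMsgUpdateItem now decomposes as sketched below; each map entry costs one group id plus two 32-bit fields instead of one:

#include <cstdint>

// Illustrative helper, not part of the commit: same arithmetic as sizeGxsMsgUpdate() above.
uint32_t sketch_sizeGxsMsgUpdate(uint32_t n_groups, uint32_t grpid_size, uint32_t peerid_size)
{
    uint32_t s = 8;                           // item header
    s += peerid_size;                         // serialised peerId
    s += n_groups * (4 + 4 + grpid_size);     // per group: time_stamp + message_count + group id
    s += 4;                                   // number of map entries
    return s;
}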
@@ -510,16 +497,16 @@ bool RsGxsUpdateSerialiser::serialiseGxsMsgUpdate(RsGxsMsgUpdateItem* item,

     ok &= item->peerId.serialise(data, *size, offset) ;

-    const std::map<RsGxsGroupId, uint32_t>& msgUpdateTS = item->msgUpdateTS;
-    std::map<RsGxsGroupId, uint32_t>::const_iterator cit = msgUpdateTS.begin();
+    std::map<RsGxsGroupId, RsGxsMsgUpdateItem::MsgUpdateInfo>::const_iterator cit(item->msgUpdateInfos.begin());

-    uint32_t numItems = msgUpdateTS.size();
+    uint32_t numItems = item->msgUpdateInfos.size();
     ok &= setRawUInt32(data, *size, &offset, numItems);

-    for(; cit != msgUpdateTS.end(); ++cit)
+    for(; cit != item->msgUpdateInfos.end(); ++cit)
     {
         ok &= cit->first.serialise(data, *size, offset);
-        ok &= setRawUInt32(data, *size, &offset, cit->second);
+        ok &= setRawUInt32(data, *size, &offset, cit->second.time_stamp);
+        ok &= setRawUInt32(data, *size, &offset, cit->second.message_count);
     }

     if(offset != tlvsize){
@@ -632,9 +619,11 @@ RsGxsMsgUpdateItem* RsGxsUpdateSerialiser::deserialGxsMsgUpdate(void* data,
     ok &= item->peerId.deserialise(data, *size, offset) ;
     uint32_t numUpdateItems;
     ok &= getRawUInt32(data, *size, &offset, &(numUpdateItems));
-    std::map<RsGxsGroupId, uint32_t>& msgUpdateItem = item->msgUpdateTS;
+    std::map<RsGxsGroupId, RsGxsMsgUpdateItem::MsgUpdateInfo>& msgUpdateInfos = item->msgUpdateInfos;
     RsGxsGroupId pId;
-    uint32_t updateTS;
+    RsGxsMsgUpdateItem::MsgUpdateInfo info ;

     for(uint32_t i = 0; i < numUpdateItems; i++)
     {
         ok &= pId.deserialise(data, *size, offset);
@@ -642,12 +631,13 @@ RsGxsMsgUpdateItem* RsGxsUpdateSerialiser::deserialGxsMsgUpdate(void* data,
         if(!ok)
             break;

-        ok &= getRawUInt32(data, *size, &offset, &(updateTS));
+        ok &= getRawUInt32(data, *size, &offset, &(info.time_stamp));
+        ok &= getRawUInt32(data, *size, &offset, &(info.message_count));

         if(!ok)
             break;

-        msgUpdateItem.insert(std::make_pair(pId, updateTS));
+        msgUpdateInfos.insert(std::make_pair(pId, info));
     }

     if (offset != rssize)
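
The writer (serialiseGxsMsgUpdate) and reader (deserialGxsMsgUpdate) above must agree on the per-entry field order: group id, then time_stamp, then message_count. A standalone illustration of that symmetry, using plain memcpy in place of RetroShare's setRawUInt32/getRawUInt32 (which additionally take the buffer size and handle bounds and byte-order concerns):

#include <cassert>
#include <cstdint>
#include <cstring>

// Write a 32-bit value at 'offset' and advance it, mimicking the
// offset-advancing style of the serialiser above.
static void put_u32(uint8_t* buf, uint32_t& offset, uint32_t v)
{
    std::memcpy(buf + offset, &v, 4);
    offset += 4;
}

static uint32_t get_u32(const uint8_t* buf, uint32_t& offset)
{
    uint32_t v;
    std::memcpy(&v, buf + offset, 4);
    offset += 4;
    return v;
}

int main()
{
    uint8_t buf[8];
    uint32_t woff = 0, roff = 0;

    put_u32(buf, woff, 1417000000u);   // time_stamp is written first...
    put_u32(buf, woff, 42u);           // ...message_count second

    assert(get_u32(buf, roff) == 1417000000u);   // ...and read back in the same order
    assert(get_u32(buf, roff) == 42u);
    return 0;
}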
@@ -41,7 +41,8 @@


 const uint8_t RS_PKT_SUBTYPE_GXS_GRP_UPDATE = 0x0001;
-const uint8_t RS_PKT_SUBTYPE_GXS_MSG_UPDATE = 0x0002;
+const uint8_t RS_PKT_SUBTYPE_GXS_MSG_UPDATE_deprecated = 0x0002;
+const uint8_t RS_PKT_SUBTYPE_GXS_MSG_UPDATE = 0x0003;
 const uint8_t RS_PKT_SUBTYPE_GXS_SERVER_GRP_UPDATE = 0x0004;
 const uint8_t RS_PKT_SUBTYPE_GXS_SERVER_MSG_UPDATE = 0x0008;

@@ -82,8 +83,14 @@ public:
     virtual void clear();
     virtual std::ostream &print(std::ostream &out, uint16_t indent);

+    struct MsgUpdateInfo
+    {
+        uint32_t time_stamp ;
+        uint32_t message_count ;
+    };
+
     RsPeerId peerId;
-    std::map<RsGxsGroupId, uint32_t> msgUpdateTS;
+    std::map<RsGxsGroupId, MsgUpdateInfo> msgUpdateInfos;
 };

 class RsGxsServerMsgUpdateItem : public RsItem
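
Two side notes on the header changes above. First, because each serialised map entry now carries an extra 32-bit field, the on-wire format of the msg-update item changes; presumably that is why the old packet subtype 0x0002 is kept only as RS_PKT_SUBTYPE_GXS_MSG_UPDATE_deprecated and the new format gets its own subtype 0x0003, so items saved in the old format are not misread as new ones. Second, MsgUpdateInfo is a plain aggregate with no constructor, so std::map::operator[], as used by locked_stampPeerGroupUpdateTime() and locked_doMsgUpdateWork(), value-initialises a fresh entry to all zeros before the fields are assigned. A minimal standalone check of that behaviour (std::string standing in for RsGxsGroupId):

#include <cassert>
#include <cstdint>
#include <map>
#include <string>

// Same shape as the MsgUpdateInfo introduced above.
struct MsgUpdateInfo { uint32_t time_stamp; uint32_t message_count; };

int main()
{
    std::map<std::string, MsgUpdateInfo> infos;   // stand-in for msgUpdateInfos

    MsgUpdateInfo& fresh = infos["group-1"];      // operator[] value-initialises the new entry
    assert(fresh.time_stamp == 0 && fresh.message_count == 0);

    fresh.time_stamp = 1417000000u;               // then both fields are set explicitly,
    fresh.message_count = 42u;                    // as locked_stampPeerGroupUpdateTime() does
    return 0;
}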
@@ -802,7 +802,7 @@ bool p3IdService::createGroup(uint32_t& token, RsGxsIdGroup &group)

 bool p3IdService::updateGroup(uint32_t& token, RsGxsIdGroup &group)
 {
-    RsGxsId id = RsGxsId(group.mMeta.mGroupId.toStdString());
+    RsGxsId id(group.mMeta.mGroupId);
     RsGxsIdGroupItem* item = new RsGxsIdGroupItem();

     item->fromGxsIdGroup(group,false) ;