Merge branch 'master' into gxs_mail_experiments

Gioacchino Mazzurco 2017-03-13 22:57:33 +01:00
commit 2630ed4573
223 changed files with 123466 additions and 98194 deletions

View file

@@ -545,6 +545,8 @@ bool AEAD_chacha20_sha256(uint8_t key[32], uint8_t nonce[12],uint8_t *data,uint3
HMAC_Update(&hmac_ctx,aad,aad_size) ;
HMAC_Update(&hmac_ctx,data,data_size) ;
HMAC_Final(&hmac_ctx,computed_tag,&md_size) ;
HMAC_CTX_cleanup(&hmac_ctx) ;
#else
HMAC_CTX *hmac_ctx = HMAC_CTX_new();
@@ -576,6 +578,8 @@ bool AEAD_chacha20_sha256(uint8_t key[32], uint8_t nonce[12],uint8_t *data,uint3
HMAC_Update(&hmac_ctx,aad,aad_size) ;
HMAC_Update(&hmac_ctx,data,data_size) ;
HMAC_Final(&hmac_ctx,computed_tag,&md_size) ;
HMAC_CTX_cleanup(&hmac_ctx) ;
#else
HMAC_CTX *hmac_ctx = HMAC_CTX_new();
@@ -1227,6 +1231,8 @@ bool perform_tests()
uint32_t SIZE = 1*1024*1024 ;
uint8_t *ten_megabyte_data = (uint8_t*)malloc(SIZE) ;
memset(ten_megabyte_data,0x37,SIZE) ; // put something. We don't really care here.
uint8_t key[32] = { 0x1c,0x92,0x40,0xa5,0xeb,0x55,0xd3,0x8a,0xf3,0x33,0x88,0x86,0x04,0xf6,0xb5,0xf0,
0x47,0x39,0x17,0xc1,0x40,0x2b,0x80,0x09,0x9d,0xca,0x5c,0xbc,0x20,0x70,0x75,0xc0 };
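
The #else branches in the two hunks above exist because OpenSSL 1.1.0 turned HMAC_CTX into an opaque type. A minimal standalone sketch of that version split (a hypothetical helper, not the RetroShare function itself):

#include <openssl/hmac.h>
#include <openssl/evp.h>
#include <openssl/opensslv.h>
#include <stdint.h>

// Hypothetical helper: stack-allocated pre-1.1.0 API vs heap-allocated 1.1.0+ API,
// the same split the #if/#else blocks above handle.
static void hmac_sha256_compat(const uint8_t key[32], const uint8_t *data, uint32_t data_size, uint8_t tag[32])
{
    unsigned int md_size = 32 ;
#if OPENSSL_VERSION_NUMBER < 0x10100000L
    HMAC_CTX hmac_ctx ;                      // struct is public: lives on the stack
    HMAC_CTX_init(&hmac_ctx) ;
    HMAC_Init_ex(&hmac_ctx, key, 32, EVP_sha256(), NULL) ;
    HMAC_Update(&hmac_ctx, data, data_size) ;
    HMAC_Final(&hmac_ctx, tag, &md_size) ;
    HMAC_CTX_cleanup(&hmac_ctx) ;
#else
    HMAC_CTX *hmac_ctx = HMAC_CTX_new() ;    // struct is opaque: must be heap-allocated
    HMAC_Init_ex(hmac_ctx, key, 32, EVP_sha256(), NULL) ;
    HMAC_Update(hmac_ctx, data, data_size) ;
    HMAC_Final(hmac_ctx, tag, &md_size) ;
    HMAC_CTX_free(hmac_ctx) ;
#endif
}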

View file

@@ -306,6 +306,12 @@ void DirectoryStorage::checkSave()
/* Local Directory Storage */
/******************************************************************************************************************/
LocalDirectoryStorage::LocalDirectoryStorage(const std::string& fname,const RsPeerId& own_id)
: DirectoryStorage(own_id,fname)
{
mTSChanged = false ;
}
RsFileHash LocalDirectoryStorage::makeEncryptedHash(const RsFileHash& hash)
{
return RsDirUtil::sha1sum(hash.toByteArray(),hash.SIZE_IN_BYTES);

View file

@@ -210,7 +210,7 @@ private:
class LocalDirectoryStorage: public DirectoryStorage
{
public:
LocalDirectoryStorage(const std::string& fname,const RsPeerId& own_id) : DirectoryStorage(own_id,fname) {}
LocalDirectoryStorage(const std::string& fname,const RsPeerId& own_id);
virtual ~LocalDirectoryStorage() {}
/*!

View file

@@ -48,6 +48,8 @@ LocalDirectoryUpdater::LocalDirectoryUpdater(HashStorage *hc,LocalDirectoryStora
// Can be left to false, but setting it to true will force a re-hash of any file that was left unhashed in the last session.
mNeedsFullRecheck = true ;
mIsChecking = false ;
mForceUpdate = false ;
}
bool LocalDirectoryUpdater::isEnabled() const
@@ -70,9 +72,9 @@ void LocalDirectoryUpdater::setEnabled(bool b)
void LocalDirectoryUpdater::data_tick()
{
time_t now = time(NULL) ;
if (mIsEnabled)
{
if (mIsEnabled || mForceUpdate)
{
if(now > mDelayBetweenDirectoryUpdates + mLastSweepTime)
{
if(sweepSharedDirectories())
@@ -80,6 +82,7 @@ void LocalDirectoryUpdater::data_tick()
mNeedsFullRecheck = false;
mLastSweepTime = now ;
mSharedDirectories->notifyTSChanged();
mForceUpdate = false ;
}
else
std::cerr << "(WW) sweepSharedDirectories() failed. Will do it again in a short time." << std::endl;
@@ -97,7 +100,8 @@ void LocalDirectoryUpdater::data_tick()
void LocalDirectoryUpdater::forceUpdate()
{
mLastSweepTime = 0;
mForceUpdate = true ;
mLastSweepTime = 0 ;
}
bool LocalDirectoryUpdater::sweepSharedDirectories()
@@ -108,6 +112,8 @@ bool LocalDirectoryUpdater::sweepSharedDirectories()
return false;
}
mIsChecking = true ;
RsServer::notify()->notifyListPreChange(NOTIFY_LIST_DIRLIST_LOCAL, 0);
#ifdef DEBUG_LOCAL_DIR_UPDATER
std::cerr << "[directory storage] LocalDirectoryUpdater::sweep()" << std::endl;
@@ -149,6 +155,8 @@ bool LocalDirectoryUpdater::sweepSharedDirectories()
}
RsServer::notify()->notifyListChange(NOTIFY_LIST_DIRLIST_LOCAL, 0);
mIsChecking = false ;
return true ;
}
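
Reassembled from the LocalDirectoryUpdater hunks above, the force-update flow now reads roughly as follows (a readability sketch; member declarations and the rest of data_tick() are omitted):

void LocalDirectoryUpdater::forceUpdate()
{
    mForceUpdate = true ;    // honoured by data_tick() even while the updater is disabled
    mLastSweepTime = 0 ;     // so the next tick passes the delay check immediately
}

void LocalDirectoryUpdater::data_tick()
{
    time_t now = time(NULL) ;

    if((mIsEnabled || mForceUpdate) && now > mDelayBetweenDirectoryUpdates + mLastSweepTime)
    {
        if(sweepSharedDirectories())
        {
            mNeedsFullRecheck = false ;
            mLastSweepTime = now ;
            mSharedDirectories->notifyTSChanged() ;
            mForceUpdate = false ;   // one-shot flag: cleared only after a successful sweep
        }
        else
            std::cerr << "(WW) sweepSharedDirectories() failed. Will do it again in a short time." << std::endl;
    }
}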

View file

@@ -75,5 +75,7 @@ private:
bool mIsEnabled ;
bool mFollowSymLinks;
bool mNeedsFullRecheck ;
bool mIsChecking ;
bool mForceUpdate ;
};

View file

@@ -179,7 +179,7 @@ int p3FileDatabase::tick()
#endif
last_print_time = now ;
#warning mr-alice 2016-08-19: This should be removed, but it's necessary atm for updating the GUI
#warning mr-alice 2016-08-19: "This should be removed, but it's necessary atm for updating the GUI"
RsServer::notify()->notifyListChange(NOTIFY_LIST_DIRLIST_LOCAL, 0);
}
@@ -1189,8 +1189,13 @@ void p3FileDatabase::tickRecv()
{
case RS_PKT_SUBTYPE_FILELISTS_SYNC_REQ_ITEM: handleDirSyncRequest( dynamic_cast<RsFileListsSyncRequestItem*>(item) ) ;
break ;
case RS_PKT_SUBTYPE_FILELISTS_SYNC_RSP_ITEM: handleDirSyncResponse( dynamic_cast<RsFileListsSyncResponseItem*>(item) ) ;
break ;
case RS_PKT_SUBTYPE_FILELISTS_SYNC_RSP_ITEM:
{
RsFileListsSyncResponseItem *sitem = dynamic_cast<RsFileListsSyncResponseItem*>(item);
handleDirSyncResponse(sitem) ;
item = sitem ;
}
break ;
default:
P3FILELISTS_ERROR() << "(EE) unhandled packet subtype " << item->PacketSubType() << " in " << __PRETTY_FUNCTION__ << std::endl;
}
@@ -1322,6 +1327,7 @@ void p3FileDatabase::splitAndSendItem(RsFileListsSyncResponseItem *ritem)
}
// This function should not take memory ownership of ritem, so it makes copies.
// The item that is returned is either created (if different from ritem) or equal to ritem.
RsFileListsSyncResponseItem *p3FileDatabase::recvAndRebuildItem(RsFileListsSyncResponseItem *ritem)
{
@@ -1393,13 +1399,25 @@ RsFileListsSyncResponseItem *p3FileDatabase::recvAndRebuildItem(RsFileListsSyncR
return NULL ;
}
void p3FileDatabase::handleDirSyncResponse(RsFileListsSyncResponseItem *sitem)
// We employ a trick in this function:
// - if recvAndRebuildItem(item) returns the same item, it has not allocated memory, so the incoming item is the one to
// delete, which the caller does in every case.
// - if it returns a different item, that item was created by recvAndRebuildItem() while collapsing the chunked items, so both must be deleted.
// To do so, we first delete the incoming item and replace the pointer with the newly created one.
void p3FileDatabase::handleDirSyncResponse(RsFileListsSyncResponseItem*& sitem)
{
RsFileListsSyncResponseItem *item = recvAndRebuildItem(sitem) ;
if(!item)
return ;
if(item != sitem)
{
delete sitem ;
sitem = item ;
}
time_t now = time(NULL);
// check the hash. If anything goes wrong (in the chunking for instance) the hash will not match
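
Putting the tickRecv() hunk and the comment above together, the caller-side ownership contract can be sketched as follows (the final delete is assumed to sit in the caller, as the comment states; it is not shown in the hunks):

// inside p3FileDatabase::tickRecv(), for a received RS_PKT_SUBTYPE_FILELISTS_SYNC_RSP_ITEM:
RsFileListsSyncResponseItem *sitem = dynamic_cast<RsFileListsSyncResponseItem*>(item) ;
handleDirSyncResponse(sitem) ;    // may delete the original and point sitem at a rebuilt item
item = sitem ;                    // refresh the caller's pointer, so that...
// ... later, unconditionally:
delete item ;                     // ...a single delete frees whichever item survived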

View file

@@ -205,7 +205,7 @@ class p3FileDatabase: public p3Service, public p3Config, public ftSearch //, pub
uint32_t locked_getFriendIndex(const RsPeerId& pid);
void handleDirSyncRequest (RsFileListsSyncRequestItem *) ;
void handleDirSyncResponse (RsFileListsSyncResponseItem *) ;
void handleDirSyncResponse (RsFileListsSyncResponseItem *&) ;
std::map<RsPeerId,uint32_t> mFriendIndexMap ;
std::vector<RsPeerId> mFriendIndexTab;

View file

@@ -30,6 +30,7 @@
#include "rsgenexchange.h"
#include "gxssecurity.h"
#include "util/contentvalue.h"
#include "util/rsprint.h"
#include "retroshare/rsgxsflags.h"
#include "retroshare/rsgxscircles.h"
#include "retroshare/rsgrouter.h"
@@ -1318,11 +1319,9 @@ bool RsGenExchange::getGroupData(const uint32_t &token, std::vector<RsGxsGrpItem
delete item;
}
}
else
{
std::cerr << "RsGenExchange::getGroupData() ERROR deserialising item";
std::cerr << std::endl;
}
else if(data.bin_len > 0)
std::cerr << "(EE) RsGenExchange::getGroupData() Item type is probably not handled. Data is: " << RsUtil::BinToHex((unsigned char*)data.bin_data,std::min(50u,data.bin_len)) << ((data.bin_len>50)?"...":"") << std::endl;
delete *lit;
}
}
@@ -1666,14 +1665,14 @@ void RsGenExchange::updateGroup(uint32_t& token, RsGxsGrpItem* grpItem)
#endif
}
void RsGenExchange::deleteGroup(uint32_t& token, RsGxsGrpItem* grpItem)
void RsGenExchange::deleteGroup(uint32_t& token, const RsGxsGroupId& grpId)
{
RS_STACK_MUTEX(mGenMtx) ;
RS_STACK_MUTEX(mGenMtx) ;
token = mDataAccess->generatePublicToken();
mGroupDeletePublish.push_back(GroupDeletePublish(grpItem, token));
mGroupDeletePublish.push_back(GroupDeletePublish(grpId, token));
#ifdef GEN_EXCH_DEBUG
std::cerr << "RsGenExchange::deleteGroup() token: " << token;
std::cerr << "RsGenExchange::deleteGroup() token: " << token;
std::cerr << std::endl;
#endif
}
@@ -2317,14 +2316,10 @@ void RsGenExchange::processGroupDelete()
std::vector<GroupDeletePublish>::iterator vit = mGroupDeletePublish.begin();
for(; vit != mGroupDeletePublish.end(); ++vit)
{
GroupDeletePublish& gdp = *vit;
uint32_t token = gdp.mToken;
const RsGxsGroupId& groupId = gdp.grpItem->meta.mGroupId;
std::vector<RsGxsGroupId> gprIds;
gprIds.push_back(groupId);
gprIds.push_back(vit->mGroupId);
mDataStore->removeGroups(gprIds);
toNotify.insert(std::make_pair(
token, GrpNote(true, groupId)));
toNotify.insert(std::make_pair( vit->mToken, GrpNote(true, vit->mGroupId)));
}

View file

@@ -519,7 +519,7 @@ protected:
* @param token
* @param grpItem
*/
void deleteGroup(uint32_t& token, RsGxsGrpItem* grpItem);
void deleteGroup(uint32_t& token, const RsGxsGroupId &grpId);
public:
/*!

View file

@@ -37,7 +37,7 @@ typedef std::map<RsGxsGrpMsgIdPair, std::vector<RsGxsMsgMetaData*> > MsgRelatedM
// Default values that are used throughout GXS code
static const uint32_t RS_GXS_DEFAULT_MSG_STORE_PERIOD = 86400 * 365 ; // 1 year. Default time for which messages are kept in the database.
static const uint32_t RS_GXS_DEFAULT_MSG_STORE_PERIOD = 86400 * 372 ; // 1 year. Default time for which messages are kept in the database.
static const uint32_t RS_GXS_DEFAULT_MSG_SEND_PERIOD = 86400 * 30 * 1 ; // one month. Default delay after which we don't send messages
static const uint32_t RS_GXS_DEFAULT_MSG_REQ_PERIOD = 86400 * 30 * 1 ; // one month. Default Delay after which we don't request messages

View file

@@ -50,7 +50,7 @@ bool RsGxsDataAccess::requestGroupInfo(uint32_t &token, uint32_t ansType, const
{
if(groupIds.empty())
{
std::cerr << "Group Id list is empty" << std::endl;
std::cerr << "(WW) Group Id list is empty" << std::endl;
return false;
}

View file

@@ -1451,7 +1451,9 @@ bool RsGxsNetService::saveList(bool& cleanup, std::list<RsItem*>& save)
{
RS_STACK_MUTEX(mNxsMutex) ;
#ifdef NXS_NET_DEBUG_0
std::cerr << "RsGxsNetService::saveList()..." << std::endl;
#endif
// hardcore templates
std::transform(mClientGrpUpdateMap.begin(), mClientGrpUpdateMap.end(), std::back_inserter(save), get_second<ClientGrpMap,RsGxsGrpUpdateItem>(mServType,&RsGxsGrpUpdateItem::peerID));
@@ -3818,10 +3820,7 @@ void RsGxsNetService::handleRecvSyncGroup(RsNxsSyncGrpReqItem *item)
uint32_t status = RS_NXS_ITEM_ENCRYPTION_STATUS_UNKNOWN ;
if(encryptSingleNxsItem(gItem, grpMeta->mCircleId,mit->first, encrypted_item,status))
{
itemL.push_back(encrypted_item) ;
delete gItem ;
}
else
{
switch(status)
@@ -3836,6 +3835,7 @@ void RsGxsNetService::handleRecvSyncGroup(RsNxsSyncGrpReqItem *item)
std::cerr << " Could not encrypt item for grpId " << grpMeta->mGroupId << " for circle " << grpMeta->mCircleId << ". Not sending it." << std::endl;
}
}
delete gItem ;
}
else
itemL.push_back(gItem);

View file

@@ -195,7 +195,9 @@ bool RsGxsIntegrityCheck::check()
if(stats.mSuppliers == 0 && stats.mMaxVisibleCount == 0 && stats.mGrpAutoSync)
{
#ifdef DEBUG_GXSUTIL
GXSUTIL_DEBUG() << "Scheduling group \"" << grp->metaData->mGroupName << "\" ID=" << grp->grpId << " in service " << std::hex << mGenExchangeClient->serviceType() << std::dec << " for deletion because it has no suppliers nor any visible data at friends." << std::endl;
#endif
grpsToDel.push_back(grp->grpId);
}
}

View file

@@ -222,9 +222,9 @@ public:
class GroupDeletePublish
{
public:
GroupDeletePublish(RsGxsGrpItem* item, uint32_t token)
: grpItem(item), mToken(token) {}
RsGxsGrpItem* grpItem;
GroupDeletePublish(const RsGxsGroupId& grpId, uint32_t token)
: mGroupId(grpId), mToken(token) {}
RsGxsGroupId mGroupId;
uint32_t mToken;
};

View file

@@ -2259,7 +2259,10 @@ bool p3PeerMgrIMPL::loadList(std::list<RsItem *>& load)
}
else if (kit->key == kConfigKeyProxyServerPortTor)
{
proxyPortTor = atoi(kit->value.c_str());
uint16_t p = atoi(kit->value.c_str());
if(p >= 1024)
proxyPortTor = p;
#ifdef PEER_DEBUG
std::cerr << "Loaded proxyPort for Tor: " << proxyPortTor;
std::cerr << std::endl ;
@@ -2276,7 +2279,10 @@ bool p3PeerMgrIMPL::loadList(std::list<RsItem *>& load)
}
else if (kit->key == kConfigKeyProxyServerPortI2P)
{
proxyPortI2P = atoi(kit->value.c_str());
uint16_t p = atoi(kit->value.c_str());
if(p >= 1024)
proxyPortI2P = p;
#ifdef PEER_DEBUG
std::cerr << "Loaded proxyPort for I2P: " << proxyPortI2P;
std::cerr << std::endl ;
@@ -2784,7 +2790,9 @@ bool p3PeerMgrIMPL::removeBannedIps()
{
RsStackMutex stack(mPeerMtx); /****** STACK LOCK MUTEX *******/
#ifdef PEER_DEBUG
std::cerr << "Cleaning known IPs for all peers." << std::endl;
#endif
bool changed = false ;
for( std::map<RsPeerId, peerState>::iterator it = mFriendList.begin(); it != mFriendList.end(); ++it)

View file

@@ -78,7 +78,7 @@ void *pqiQoSstreamer::locked_pop_out_data(uint32_t max_slice_size, uint32_t& siz
if(out != NULL)
{
_total_item_size -= getRsItemSize(out) ;
_total_item_size -= size ;
if(ends)
--_total_item_count ;

View file

@@ -1,6 +1,6 @@
#define RS_MAJOR_VERSION 0
#define RS_MINOR_VERSION 6
#define RS_BUILD_NUMBER 1
#define RS_BUILD_NUMBER 2
#define RS_BUILD_NUMBER_ADD "x" // <-- do we need this?
// The revision number should be the 4 first bytes of the git revision hash, which is obtained using:
// git log --pretty="%H" | head -1 | cut -c1-8

View file

@@ -1,6 +1,6 @@
#define RS_MAJOR_VERSION 0
#define RS_MINOR_VERSION 6
#define RS_BUILD_NUMBER 1
#define RS_BUILD_NUMBER 2
#define RS_BUILD_NUMBER_ADD ""
// The revision number should be the 4 first bytes of the git revision hash, which is obtained using:

View file

@@ -901,6 +901,12 @@ bool p3Peers::setLocalAddress(const RsPeerId &id, const std::string &addr_str,
std::cerr << "p3Peers::setLocalAddress() " << id << std::endl;
#endif
if(port < 1024)
{
std::cerr << "(EE) attempt to use a port that is reserved to the system: " << port << std::endl;
return false ;
}
struct sockaddr_storage addr;
struct sockaddr_in *addrv4p = (struct sockaddr_in *) &addr;
addrv4p->sin_family = AF_INET;
@@ -926,6 +932,12 @@ bool p3Peers::setExtAddress(const RsPeerId &id, const std::string &addr_str, ui
#ifdef P3PEERS_DEBUG
std::cerr << "p3Peers::setExtAddress() " << id << std::endl;
#endif
if(port < 1024)
{
std::cerr << "(EE) attempt to use a port that is reserved to the system: " << port << std::endl;
return false ;
}
// NOTE THIS IS IPV4 FOR NOW.
struct sockaddr_storage addr;
@@ -1020,6 +1032,11 @@ bool p3Peers::setProxyServer(const uint32_t type, const std::string &addr_str, c
std::cerr << "p3Peers::setProxyServer() " << std::endl;
#endif
if(port < 1024)
{
std::cerr << "(EE) attempt to set proxy server address to something not allowed: " << addr_str << ":" << port << std::endl;
return false ;
}
struct sockaddr_storage addr;
struct sockaddr_in *addrv4p = (struct sockaddr_in *) &addr;
addrv4p->sin_family = AF_INET;
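
The three checks added above, like the Tor/I2P proxy-port clamps in p3PeerMgrIMPL::loadList() earlier in this commit, all reject ports below 1024 because that range is reserved for system services (and a failed atoi() parse yields 0, which the same test catches). A hypothetical shared helper, not part of the commit, could centralise the rule:

#include <cstdint>
#include <iostream>

// Hypothetical helper (not in the diff): the "no privileged ports" rule used by
// setLocalAddress(), setExtAddress(), setProxyServer() and the proxy-port loading code.
static bool isUsablePort(uint16_t port)
{
    if(port < 1024)
    {
        std::cerr << "(EE) attempt to use a port that is reserved to the system: " << port << std::endl;
        return false ;
    }
    return true ;
}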

View file

@@ -220,7 +220,13 @@ void p3GxsChannels::notifyChanges(std::vector<RsGxsNotify *> &changes)
std::list<RsGxsGroupId>::iterator git;
for (git = grpList.begin(); git != grpList.end(); ++git)
{
notify->AddFeedItem(RS_FEED_ITEM_CHANNEL_NEW, git->toStdString());
if(mKnownChannels.find(*git) == mKnownChannels.end())
{
notify->AddFeedItem(RS_FEED_ITEM_CHANNEL_NEW, git->toStdString());
mKnownChannels.insert(*git) ;
}
else
std::cerr << "(II) Not notifying already known channel " << *git << std::endl;
}
break;
}
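
The new mKnownChannels set above (and the matching mKnownForums set further down) suppresses duplicate NEW-feed entries for groups that were already announced. The same idiom can be written slightly more compactly using the bool returned by std::set::insert(); a sketch, not the committed code:

if(mKnownChannels.insert(*git).second)      // true only the first time this group id is seen
    notify->AddFeedItem(RS_FEED_ITEM_CHANNEL_NEW, git->toStdString());
else
    std::cerr << "(II) Not notifying already known channel " << *git << std::endl;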

View file

@@ -218,6 +218,7 @@ bool generateGroup(uint32_t &token, std::string groupName);
RsGxsMessageId mGenThreadId;
p3GxsCommentService *mCommentService;
std::set<RsGxsGroupId> mKnownChannels;
};
#endif

View file

@@ -142,9 +142,16 @@ void p3GxsForums::notifyChanges(std::vector<RsGxsNotify *> &changes)
/* group received */
std::list<RsGxsGroupId> &grpList = grpChange->mGrpIdList;
std::list<RsGxsGroupId>::iterator git;
for (git = grpList.begin(); git != grpList.end(); ++git)
{
notify->AddFeedItem(RS_FEED_ITEM_FORUM_NEW, git->toStdString());
if(mKnownForums.find(*git) == mKnownForums.end())
{
notify->AddFeedItem(RS_FEED_ITEM_FORUM_NEW, git->toStdString());
mKnownForums.insert(*git) ;
}
else
std::cerr << "(II) Not notifying already known forum " << *git << std::endl;
}
break;
}

View file

@@ -117,6 +117,7 @@ bool generateGroup(uint32_t &token, std::string groupName);
int mGenCount;
std::vector<ForumDummyRef> mGenRefs;
RsGxsMessageId mGenThreadId;
std::set<RsGxsGroupId> mKnownForums ;
};

View file

@@ -166,6 +166,7 @@ p3GxsReputation::p3GxsReputation(p3LinkMgr *lm)
mLastReputationConfigSaved = 0;
mChanged = false ;
mMaxPreventReloadBannedIds = 0 ; // default is "never"
mLastCleanUp = time(NULL) ;
}
const std::string GXS_REPUTATION_APP_NAME = "gxsreputation";
@@ -1095,7 +1096,9 @@ bool p3GxsReputation::saveList(bool& cleanup, std::list<RsItem*> &savelist)
cleanup = true;
RsStackMutex stack(mReputationMtx); /****** LOCKED MUTEX *******/
#ifdef DEBUG_REPUTATION
std::cerr << "p3GxsReputation::saveList()" << std::endl;
#endif
/* save */
std::map<RsPeerId, ReputationConfig>::iterator it;

View file

@@ -407,11 +407,15 @@ public:
bool is_signed_id = (bool)(entry.details.mFlags & RS_IDENTITY_FLAGS_PGP_LINKED) ;
bool is_a_contact = (bool)(entry.details.mFlags & RS_IDENTITY_FLAGS_IS_A_CONTACT) ;
#ifdef DEBUG_IDS
std::cerr << "Identity: " << gxs_id << ": banned: " << is_id_banned << ", own: " << is_own_id << ", contact: " << is_a_contact << ", signed: " << is_signed_id << ", known: " << is_known_id;
#endif
if(is_own_id || is_a_contact)
{
#ifdef DEBUG_IDS
std::cerr << " => kept" << std::endl;
#endif
return true ;
}
@@ -439,15 +443,21 @@ public:
else
max_keep_time = MAX_KEEP_KEYS_DEFAULT ;
#ifdef DEBUG_IDS
std::cerr << ". Max keep = " << max_keep_time/86400 << " days. Unused for " << (now - last_usage_ts + 86399)/86400 << " days " ;
#endif
if(should_check && now > last_usage_ts + max_keep_time)
{
#ifdef DEBUG_IDS
std::cerr << " => delete " << std::endl;
#endif
ids_to_delete.push_back(gxs_id) ;
}
#ifdef DEBUG_IDS
else
std::cerr << " => keep " << std::endl;
#endif
return true;
}
@@ -484,7 +494,9 @@ void p3IdService::cleanUnusedKeys()
for(std::list<RsGxsId>::const_iterator it(ids_to_delete.begin());it!=ids_to_delete.end();++it)
{
#ifdef DEBUG_IDS
std::cerr << "Deleting identity " << *it << " which is too old." << std::endl;
#endif
uint32_t token ;
RsGxsIdGroup group;
group.mMeta.mGroupId=RsGxsGroupId(*it);
@@ -518,7 +530,9 @@ bool p3IdService::acceptNewGroup(const RsGxsGrpMetaData *grpMeta)
{
bool res = !rsReputations->isIdentityBanned(RsGxsId(grpMeta->mGroupId)) ;
#ifdef DEBUG_IDS
std::cerr << "p3IdService::acceptNewGroup: ID=" << grpMeta->mGroupId << ": " << (res?"ACCEPTED":"DENIED") << std::endl;
#endif
return res ;
}
@@ -866,7 +880,9 @@ bool p3IdService::requestKey(const RsGxsId &id, const std::list<RsPeerId>& peers
// Normally we should call getIdDetails(), but since the key is not known, we need to dig up possibly outdated information
// from the reputation system, which keeps its own list of banned keys. Of course, the owner ID is not known at this point.
#ifdef DEBUG_IDS
std::cerr << "p3IdService::requesting key " << id <<std::endl;
#endif
RsReputations::ReputationInfo info ;
rsReputations->getReputationInfo(id,RsPgpId(),info) ;
@@ -1625,17 +1641,14 @@ bool p3IdService::updateGroup(uint32_t& token, RsGxsIdGroup &group)
bool p3IdService::deleteGroup(uint32_t& token, RsGxsIdGroup &group)
{
RsGxsId id = RsGxsId(group.mMeta.mGroupId.toStdString());
RsGxsIdGroupItem* item = new RsGxsIdGroupItem();
item->fromGxsIdGroup(group,false) ;
RsGxsId id(group.mMeta.mGroupId);
#ifdef DEBUG_IDS
std::cerr << "p3IdService::deleteGroup() Deleting RsGxsId: " << id;
std::cerr << std::endl;
#endif
RsGenExchange::deleteGroup(token, item);
RsGenExchange::deleteGroup(token,group.mMeta.mGroupId);
// if its in the cache - clear it.
{

View file

@@ -610,7 +610,6 @@ bool p3MsgService::loadList(std::list<RsItem*>& load)
// load items and calculate next unique msgId
for(it = load.begin(); it != load.end(); ++it)
{
if (NULL != (mitem = dynamic_cast<RsMsgItem *>(*it)))
{
/* STORE MsgID */
@@ -621,9 +620,12 @@ bool p3MsgService::loadList(std::list<RsItem*>& load)
}
else if (NULL != (grm = dynamic_cast<RsMsgGRouterMap *>(*it)))
{
// merge.
for(std::map<GRouterMsgPropagationId,uint32_t>::const_iterator it(grm->ongoing_msgs.begin());it!=grm->ongoing_msgs.end();++it)
_ongoing_messages.insert(*it) ;
typedef std::map<GRouterMsgPropagationId,uint32_t> tT;
for( tT::const_iterator bit = grm->ongoing_msgs.begin();
bit != grm->ongoing_msgs.end(); ++bit )
_ongoing_messages.insert(*bit);
delete *it;
continue;
}
else if(NULL != (ghm = dynamic_cast<RsMsgDistantMessagesHashMap*>(*it)))
{
@@ -631,15 +633,16 @@ bool p3MsgService::loadList(std::list<RsItem*>& load)
RS_STACK_MUTEX(recentlyReceivedMutex);
mRecentlyReceivedMessageHashes = ghm->hash_map;
}
#ifdef DEBUG_DISTANT_MSG
std::cerr << " loaded recently received message map: " << std::endl;
for(std::map<Sha1CheckSum,uint32_t>::const_iterator it(mRecentlyReceivedDistantMessageHashes.begin());it!=mRecentlyReceivedDistantMessageHashes.end();++it)
std::cerr << " " << it->first << " received " << time(NULL)-it->second << " secs ago." << std::endl;
#endif
}
else if(NULL != (mtt = dynamic_cast<RsMsgTagType *>(*it)))
delete *it ;
continue ;
}
else if(NULL != (mtt = dynamic_cast<RsMsgTagType *>(*it)))
{
// delete standard tags as they are now save in config
if(mTags.end() == (tagIt = mTags.find(mtt->tagId)))

View file

@@ -2047,7 +2047,7 @@ void p3turtle::getInfo( std::vector<std::vector<std::string> >& hashes_info,
tunnel.push_back(it->second.hash.toStdString()) ;
tunnel.push_back(printNumber(now-it->second.time_stamp) + " secs ago") ;
tunnel.push_back(printFloatNumber(it->second.speed_Bps,true)) ;
tunnel.push_back(printFloatNumber(it->second.speed_Bps,false)) ; //
}
search_reqs_info.clear();

View file

@@ -159,6 +159,7 @@ class RsTurtleGenericTunnelItem: public RsTurtleItem
{
public:
RsTurtleGenericTunnelItem(uint8_t sub_packet_id) : RsTurtleItem(sub_packet_id), direction(0), tunnel_id(0) { setPriorityLevel(QOS_PRIORITY_RS_TURTLE_GENERIC_ITEM);}
virtual ~RsTurtleGenericTunnelItem() {}
typedef uint32_t Direction ;
static const Direction DIRECTION_CLIENT = 0x001 ;

View file

@@ -124,9 +124,9 @@ bool FolderIterator::updateFileInfo(bool& should_skip)
return true ;
}
}
else if( ent->d_type != DT_DIR && ent->d_type != DT_REG)
else if( ent->d_type != DT_DIR && ent->d_type != DT_REG && ent->d_type != DT_UNKNOWN) // DT_UNKNOWN is reported for mounted dirs, e.g. by services such as sshfs.
{
std::cerr << "(II) Skipping file of unknown type " << ent->d_type << ": " << mFullPath << std::endl;
std::cerr << "(II) Skipping file of unknown type " << std::dec << (int)ent->d_type << std::dec << ": " << mFullPath << std::endl;
should_skip = true ;
return true ;
}
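
The relaxed test above is needed because readdir() reports DT_UNKNOWN on filesystems that do not fill in d_type (sshfs and some other mounted or network filesystems), so such entries must not be skipped as "unknown type". A common way to resolve them, shown here only as a hedged sketch and not necessarily what FolderIterator does, is to fall back to lstat():

#include <dirent.h>
#include <sys/stat.h>
#include <string>

// Sketch (hypothetical helper): map a DT_UNKNOWN directory entry to DT_DIR/DT_REG via lstat().
static unsigned char resolve_d_type(const std::string& full_path, unsigned char d_type)
{
    if(d_type != DT_UNKNOWN)
        return d_type ;

    struct stat st ;
    if(lstat(full_path.c_str(), &st) != 0)
        return DT_UNKNOWN ;                 // cannot stat it: leave it unknown

    if(S_ISDIR(st.st_mode)) return DT_DIR ;
    if(S_ISREG(st.st_mode)) return DT_REG ;
    return DT_UNKNOWN ;                     // sockets, fifos, devices, etc.
}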