turtle tunnel management
git-svn-id: http://svn.code.sf.net/p/retroshare/code/trunk@1289 b45a01b8-16f6-495d-af2f-9b41ad6348cc
commit 48218e98cb (parent 2bd3899c05)
@@ -119,7 +119,32 @@ void ftController::addFileSource(const std::string& hash,const std::string& peer
     std::cerr << "... not added: hash not found." << std::endl ;
 #endif
 }
+
+void ftController::removeFileSource(const std::string& hash,const std::string& peer_id)
+{
+    RsStackMutex stack(ctrlMutex); /******* LOCKED ********/
+
+    std::map<std::string, ftFileControl>::iterator it;
+    std::map<std::string, ftFileControl> currentDownloads = *(&mDownloads);
+
+#ifdef CONTROL_DEBUG
+    std::cerr << "ftController: Adding source " << peer_id << " to current download hash=" << hash ;
+#endif
+    for(it = currentDownloads.begin(); it != currentDownloads.end(); it++)
+        if(it->first == hash)
+        {
+            it->second.mTransfer->removeFileSource(peer_id);
+            // setPeerState(it->second.mTransfer, peer_id, rate, mConnMgr->isOnline(peer_id));
+#ifdef CONTROL_DEBUG
+            std::cerr << "... added." << std::endl ;
+#endif
+            return ;
+        }
+#ifdef CONTROL_DEBUG
+    std::cerr << "... not added: hash not found." << std::endl ;
+#endif
+}

 void ftController::run()
 {
@@ -162,6 +162,7 @@ virtual bool CancelCacheFile(RsPeerId id, std::string path, std::string hash, ui
 public:
     virtual void statusChange(const std::list<pqipeer> &plist);
     void addFileSource(const std::string& hash,const std::string& peer_id) ;
+    void removeFileSource(const std::string& hash,const std::string& peer_id) ;

     /* p3Config Interface */
@@ -151,6 +151,27 @@ bool ftTransferModule::addFileSource(std::string peerId)
     return true;
 }

+bool ftTransferModule::removeFileSource(std::string peerId)
+{
+    RsStackMutex stack(tfMtx); /******* STACK LOCKED ******/
+    std::map<std::string,peerInfo>::iterator mit;
+    mit = mFileSources.find(peerId);
+
+    if (mit != mFileSources.end())
+    {
+        /* add in new source */
+        mFileSources.erase(mit) ;
+#ifdef FT_DEBUG
+        std::cerr << "ftTransferModule::addFileSource(): removing peer: " << peerId << " from sourceList" << std::cerr << std::endl;
+#endif
+    }
+#ifdef FT_DEBUG
+    else
+        std::cerr << "ftTransferModule::addFileSource(): Should remove peer: " << peerId << ", but it's not in the source list. " << std::cerr << std::endl;
+#endif
+
+    return true;
+}

 bool ftTransferModule::setPeerState(std::string peerId,uint32_t state,uint32_t maxRate)
 {
@@ -131,6 +131,7 @@ public:
     //interface to download controller
     bool setFileSources(std::list<std::string> peerIds);
     bool addFileSource(std::string peerId);
+    bool removeFileSource(std::string peerId);
     bool setPeerState(std::string peerId,uint32_t state,uint32_t maxRate); //state = ONLINE/OFFLINE
     bool getFileSources(std::list<std::string> &peerIds);
     bool getPeerState(std::string peerId,uint32_t &state,uint32_t &tfRate);
@@ -69,6 +69,9 @@ const uint32_t CONFIG_TYPE_FT_SHARED = 0x0007;
 const uint32_t CONFIG_TYPE_FT_EXTRA_LIST= 0x0008;
 const uint32_t CONFIG_TYPE_FT_CONTROL = 0x0009;

+/* turtle router */
+const uint32_t CONFIG_TYPE_TURTLE = 0x0020;
+
 /* wish these ids where higher...
  * may move when switch to v0.5
  */
@@ -67,11 +67,18 @@ class RsTurtle
     //
     virtual TurtleRequestId turtleSearch(const std::string& match_string) = 0 ;

-    // Launches a complete download file operation: diggs one or more
-    // tunnels. Launches an exception if an error occurs during the
-    // initialization process.
+    // Initiates tunnel handling for the given file hash. tunnels. Launches
+    // an exception if an error occurs during the initialization process. The
+    // turtle router itself does not initiate downloads, it only maintains
+    // tunnels for the given hash. The download should be driven by the file
+    // transfer module by calling ftServer::FileRequest().
     //
-    virtual void turtleDownload(const std::string& name,const std::string& file_hash,uint64_t size) = 0 ;
+    virtual void monitorFileTunnels(const std::string& name,const std::string& file_hash,uint64_t size) = 0 ;
+
+    // Tells the turtle router to stop handling tunnels for the given file hash. Traditionally this should
+    // be called after calling ftServer::fileCancel().
+    //
+    virtual void stopMonitoringFileTunnels(const std::string& file_hash) = 0 ;

     // Sets the file sharing strategy. It concerns all local files. It would
     // be better to handle this for each file, of course.
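Taken together, the comments above describe a two step protocol: the turtle router only digs and maintains tunnels for a hash, while the transfer itself is driven by the file transfer service. A minimal caller-side sketch (illustration only, not code from this commit), assuming the global rsTurtle and rsFiles interface pointers that the GUI code further down in this diff already uses:

    // Hypothetical helpers, for illustration only. rsTurtle and rsFiles are assumed
    // to be the global interface pointers used by the GUI code in this commit.
    #include <list>
    #include <string>
    #include <stdint.h>

    void startTurtleDownload(const std::string& name,const std::string& hash,uint64_t size)
    {
        std::list<std::string> srcIds ;                      // no direct sources: peers appear as tunnels come up
        rsTurtle->monitorFileTunnels(name,hash,size) ;       // ask the router to dig and maintain tunnels for this hash
        rsFiles->FileRequest(name,hash,size,"",0,srcIds) ;   // the actual transfer is driven by the file transfer module
    }

    void stopTurtleDownload(const std::string& hash)
    {
        rsFiles->FileCancel(hash) ;                          // cancel the transfer first,
        rsTurtle->stopMonitoringFileTunnels(hash) ;          // then drop the tunnels kept for this hash
    }

This mirrors the sequence used by TurtleSearchDialog::download() later in this commit.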
@@ -813,6 +813,7 @@ int RsServer::StartupRetroShare()
     mConfigMgr->addConfiguration("ranklink.cfg", mRanking);
     mConfigMgr->addConfiguration("forums.cfg", mForums);
     mConfigMgr->addConfiguration("channels.cfg", mChannels);
+    mConfigMgr->addConfiguration("turtle.cfg", tr);

 #ifndef RS_RELEASE
 #else
@@ -53,17 +53,20 @@

 #include "util/rsdebug.h"
 #include "util/rsprint.h"

-// Operating System specific includes.
 #include "pqi/pqinetwork.h"

-static const unsigned int TUNNEL_REQUESTS_LIFE_TIME = 120 ; // life time for requests in the cache.
-static const unsigned int SEARCH_REQUESTS_LIFE_TIME = 120 ;
-
-/* TURTLE FLAGS */
+// These number may be quite important. I setup them with sensible values, but
+// an in-depth test would be better to get an idea of what the ideal values
+// could ever be.
+//
+static const time_t TUNNEL_REQUESTS_LIFE_TIME = 120 ; /// life time for tunnel requests in the cache.
+static const time_t SEARCH_REQUESTS_LIFE_TIME = 120 ; /// life time for search requests in the cache
+static const time_t REGULAR_TUNNEL_DIGGING_TIME = 300 ; /// maximum interval between two tunnel digging campaigns.
+static const time_t MAXIMUM_TUNNEL_IDLE_TIME = 600 ; /// maximum life time of an unused tunnel.
+static const time_t MAXIMUM_HASH_IDLE_TIME = 300 ; /// maximum life time of an unused file hash.

 p3turtle::p3turtle(p3ConnectMgr *cm,ftServer *fs)
-    :p3Service(RS_SERVICE_TYPE_TURTLE), mConnMgr(cm)
+    :p3Service(RS_SERVICE_TYPE_TURTLE), mConnMgr(cm), p3Config(CONFIG_TYPE_TURTLE)
 {
     RsStackMutex stack(mTurtleMtx); /********** STACK LOCKED MTX ******/

|
|||||||
|
|
||||||
int p3turtle::tick()
|
int p3turtle::tick()
|
||||||
{
|
{
|
||||||
|
// Handle tunnel trafic
|
||||||
|
//
|
||||||
handleIncoming(); // handle incoming packets
|
handleIncoming(); // handle incoming packets
|
||||||
|
|
||||||
time_t now = time(NULL) ;
|
time_t now = time(NULL) ;
|
||||||
|
|
||||||
// Manage tunnels every 10 sec.
|
// Tunnel management:
|
||||||
|
// - we digg new tunnels at least every 5 min (300 sec).
|
||||||
|
// - we digg new tunnels each time a new peer connects
|
||||||
|
// - we digg new tunnels each time a new hash is asked for
|
||||||
//
|
//
|
||||||
if(now > 10+_last_tunnel_management_time)
|
if(now > REGULAR_TUNNEL_DIGGING_TIME+_last_tunnel_management_time || _force_digg_new_tunnels)
|
||||||
{
|
{
|
||||||
#ifdef P3TURTLE_DEBUG
|
#ifdef P3TURTLE_DEBUG
|
||||||
std::cerr << "Calling tunnel management." << std::endl ;
|
std::cerr << "Calling tunnel management." << std::endl ;
|
||||||
#endif
|
#endif
|
||||||
manageTunnels() ;
|
manageTunnels() ;
|
||||||
|
|
||||||
|
RsStackMutex stack(mTurtleMtx); /********** STACK LOCKED MTX ******/
|
||||||
|
|
||||||
_last_tunnel_management_time = now ;
|
_last_tunnel_management_time = now ;
|
||||||
|
_force_digg_new_tunnels = false ;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Clean every 10 sec.
|
// Clean every 10 sec.
|
||||||
@ -101,12 +113,12 @@ int p3turtle::tick()
|
|||||||
#ifdef P3TURTLE_DEBUG
|
#ifdef P3TURTLE_DEBUG
|
||||||
std::cerr << "Calling autowash." << std::endl ;
|
std::cerr << "Calling autowash." << std::endl ;
|
||||||
#endif
|
#endif
|
||||||
autoWash() ; // clean old/unused tunnels and search requests.
|
autoWash() ; // clean old/unused tunnels and file hashes, as well as search and tunnel requests.
|
||||||
_last_clean_time = now ;
|
_last_clean_time = now ;
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifdef P3TURTLE_DEBUG
|
#ifdef P3TURTLE_DEBUG
|
||||||
// Dump state every 20 sec.
|
// Dump state for debugging, every 20 sec.
|
||||||
//
|
//
|
||||||
static time_t last_dump = time(NULL) ;
|
static time_t last_dump = time(NULL) ;
|
||||||
|
|
||||||
@ -132,38 +144,27 @@ void p3turtle::statusChange(const std::list<pqipeer> &plist) // derived from pqi
|
|||||||
{
|
{
|
||||||
RsStackMutex stack(mTurtleMtx); /********** STACK LOCKED MTX ******/
|
RsStackMutex stack(mTurtleMtx); /********** STACK LOCKED MTX ******/
|
||||||
|
|
||||||
// Do we shutdown tunnels whne peers are down, or automatically find a new tunnel ?
|
// We actually do not shut down tunnels when peers get down: Tunnels that
|
||||||
// I'll see that later...
|
// are not working properly get automatically removed after some time.
|
||||||
|
|
||||||
// save the list of active peers. This is useful for notifying the ftContoller
|
// save the list of active peers. This is useful for notifying the ftContoller
|
||||||
_online_peers = plist ;
|
_online_peers = plist ;
|
||||||
|
|
||||||
std::cerr << "p3turtle: status change triggered. Saving list of " << plist.size() << " peers." << std::endl ;
|
std::cerr << "p3turtle: status change triggered. Saving list of " << plist.size() << " peers." << std::endl ;
|
||||||
#ifdef TO_DO
|
|
||||||
/* get a list of all online peers */
|
|
||||||
std::list<std::string> onlineIds;
|
|
||||||
mConnMgr->getOnlineList(onlineIds);
|
|
||||||
|
|
||||||
std::list<pqipeer>::const_iterator pit;
|
/* if any have switched to 'connected' then we force digging new tunnels */
|
||||||
/* if any have switched to 'connected' then we notify */
|
|
||||||
for(pit = plist.begin(); pit != plist.end(); pit++)
|
for(std::list<pqipeer>::const_iterator pit = plist.begin(); pit != plist.end(); pit++)
|
||||||
{
|
if ((pit->state & RS_PEER_S_FRIEND) && (pit->actions & RS_PEER_CONNECTED))
|
||||||
if ((pit->state & RS_PEER_S_FRIEND) &&
|
_force_digg_new_tunnels = true ;
|
||||||
(pit->actions & RS_PEER_CONNECTED))
|
|
||||||
{
|
|
||||||
/* send our details to them */
|
|
||||||
sendOwnDetails(pit->id);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
#endif
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// adds a virtual peer to the list that is communicated ot ftController.
|
// adds a virtual peer to the list that is communicated ot ftController.
|
||||||
//
|
//
|
||||||
void p3turtle::addDistantPeer(const TurtleFileHash& hash,TurtleTunnelId tid)
|
void p3turtle::addDistantPeer(const TurtleFileHash&,TurtleTunnelId tid)
|
||||||
{
|
{
|
||||||
char buff[400] ;
|
char buff[400] ;
|
||||||
sprintf(buff,"%s_%8x",hash.c_str(),tid) ;
|
sprintf(buff,"Turtle tunnel %8x",tid) ;
|
||||||
|
|
||||||
{
|
{
|
||||||
_virtual_peers[TurtleVirtualPeerId(buff)] = tid ;
|
_virtual_peers[TurtleVirtualPeerId(buff)] = tid ;
|
||||||
@ -198,18 +199,17 @@ void p3turtle::getVirtualPeersList(std::list<pqipeer>& list)
|
|||||||
//
|
//
|
||||||
void p3turtle::manageTunnels()
|
void p3turtle::manageTunnels()
|
||||||
{
|
{
|
||||||
// Digg new tunnels for waiting file hashes
|
// Digg new tunnels for all file hashes
|
||||||
|
|
||||||
std::vector<std::string> hashes_to_digg ;
|
std::vector<TurtleFileHash> hashes_to_digg ;
|
||||||
|
|
||||||
{
|
{
|
||||||
RsStackMutex stack(mTurtleMtx); /********** STACK LOCKED MTX ******/
|
RsStackMutex stack(mTurtleMtx); /********** STACK LOCKED MTX ******/
|
||||||
|
|
||||||
for(std::map<TurtleFileHash,TurtleFileHashInfo>::const_iterator it(_incoming_file_hashes.begin());it!=_incoming_file_hashes.end();++it)
|
for(std::map<TurtleFileHash,TurtleFileHashInfo>::const_iterator it(_incoming_file_hashes.begin());it!=_incoming_file_hashes.end();++it)
|
||||||
if(it->second.tunnels.empty())
|
|
||||||
{
|
{
|
||||||
#ifdef P3TURTLE_DEBUG
|
#ifdef P3TURTLE_DEBUG
|
||||||
std::cerr << "Tunnel management: No tunnels for hash " << it->first << ", digging new ones." << std::endl ;
|
std::cerr << "Tunnel management: digging new tunnels for hash " << it->first << "." << std::endl ;
|
||||||
#endif
|
#endif
|
||||||
hashes_to_digg.push_back(it->first) ;
|
hashes_to_digg.push_back(it->first) ;
|
||||||
}
|
}
|
||||||
@ -221,8 +221,6 @@ void p3turtle::manageTunnels()
|
|||||||
|
|
||||||
void p3turtle::autoWash()
|
void p3turtle::autoWash()
|
||||||
{
|
{
|
||||||
RsStackMutex stack(mTurtleMtx); /********** STACK LOCKED MTX ******/
|
|
||||||
|
|
||||||
#ifdef P3TURTLE_DEBUG
|
#ifdef P3TURTLE_DEBUG
|
||||||
std::cerr << " In autowash." << std::endl ;
|
std::cerr << " In autowash." << std::endl ;
|
||||||
#endif
|
#endif
|
||||||
@ -231,6 +229,11 @@ void p3turtle::autoWash()
|
|||||||
|
|
||||||
time_t now = time(NULL) ;
|
time_t now = time(NULL) ;
|
||||||
|
|
||||||
|
// Search requests
|
||||||
|
//
|
||||||
|
{
|
||||||
|
RsStackMutex stack(mTurtleMtx); /********** STACK LOCKED MTX ******/
|
||||||
|
|
||||||
for(std::map<TurtleSearchRequestId,TurtleRequestInfo>::iterator it(_search_requests_origins.begin());it!=_search_requests_origins.end();++it)
|
for(std::map<TurtleSearchRequestId,TurtleRequestInfo>::iterator it(_search_requests_origins.begin());it!=_search_requests_origins.end();++it)
|
||||||
if(now > (time_t)(it->second.time_stamp + SEARCH_REQUESTS_LIFE_TIME))
|
if(now > (time_t)(it->second.time_stamp + SEARCH_REQUESTS_LIFE_TIME))
|
||||||
{
|
{
|
||||||
@ -239,6 +242,12 @@ void p3turtle::autoWash()
|
|||||||
#endif
|
#endif
|
||||||
_search_requests_origins.erase(it) ;
|
_search_requests_origins.erase(it) ;
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tunnel requests
|
||||||
|
//
|
||||||
|
{
|
||||||
|
RsStackMutex stack(mTurtleMtx); /********** STACK LOCKED MTX ******/
|
||||||
|
|
||||||
for(std::map<TurtleTunnelRequestId,TurtleRequestInfo>::iterator it(_tunnel_requests_origins.begin());it!=_tunnel_requests_origins.end();++it)
|
for(std::map<TurtleTunnelRequestId,TurtleRequestInfo>::iterator it(_tunnel_requests_origins.begin());it!=_tunnel_requests_origins.end();++it)
|
||||||
if(now > (time_t)(it->second.time_stamp + TUNNEL_REQUESTS_LIFE_TIME))
|
if(now > (time_t)(it->second.time_stamp + TUNNEL_REQUESTS_LIFE_TIME))
|
||||||
@ -250,6 +259,127 @@ void p3turtle::autoWash()
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Tunnels.
|
||||||
|
std::vector<TurtleTunnelId> tunnels_to_close ;
|
||||||
|
{
|
||||||
|
RsStackMutex stack(mTurtleMtx); /********** STACK LOCKED MTX ******/
|
||||||
|
|
||||||
|
for(std::map<TurtleTunnelId,TurtleTunnel>::iterator it(_local_tunnels.begin());it!=_local_tunnels.end();++it)
|
||||||
|
if(now > (time_t)(it->second.time_stamp + MAXIMUM_TUNNEL_IDLE_TIME))
|
||||||
|
{
|
||||||
|
#ifdef P3TURTLE_DEBUG
|
||||||
|
std::cerr << " removing tunnel " << (void *)it->first << ": timeout." << std::endl ;
|
||||||
|
#endif
|
||||||
|
tunnels_to_close.push_back(it->first) ;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for(uint i=0;i<tunnels_to_close.size();++i)
|
||||||
|
closeTunnel(tunnels_to_close[i]) ;
|
||||||
|
|
||||||
|
// File hashes can only be removed by calling the 'stopMonitoringFileTunnels()' command.
|
||||||
|
}
|
||||||
|
|
||||||
|
void p3turtle::closeTunnel(TurtleTunnelId tid)
|
||||||
|
{
|
||||||
|
// This is closing a given tunnel, removing it from file sources, and from the list of tunnels of its
|
||||||
|
// corresponding file hash. In the original turtle4privacy paradigm, they also send back and forward
|
||||||
|
// tunnel closing commands. I'm not sure this is necessary, because if a tunnel is closed somewhere, it's
|
||||||
|
// source is not going to be used and the tunnel will eventually disappear.
|
||||||
|
//
|
||||||
|
RsStackMutex stack(mTurtleMtx); /********** STACK LOCKED MTX ******/
|
||||||
|
|
||||||
|
std::map<TurtleTunnelId,TurtleTunnel>::iterator it(_local_tunnels.find(tid)) ;
|
||||||
|
|
||||||
|
if(it == _local_tunnels.end())
|
||||||
|
{
|
||||||
|
std::cerr << "p3turtle: was asked to close tunnel 0x" << (void*)tid << ", which actually doesn't exist." << std::endl ;
|
||||||
|
return ;
|
||||||
|
}
|
||||||
|
std::cerr << "p3turtle: Closing tunnel 0x" << (void*)tid << std::endl ;
|
||||||
|
|
||||||
|
if(it->second.local_src == mConnMgr->getOwnId()) // this is a starting tunnel. We thus remove
|
||||||
|
// - the virtual peer from the vpid list
|
||||||
|
// - the tunnel id from the file hash
|
||||||
|
// - the virtual peer from the file sources in the file transfer controller.
|
||||||
|
{
|
||||||
|
TurtleTunnelId tid = it->first ;
|
||||||
|
TurtleVirtualPeerId vpid = it->second.vpid ;
|
||||||
|
TurtleFileHash hash = it->second.hash ;
|
||||||
|
|
||||||
|
#ifdef P3TURTLE_DEBUG
|
||||||
|
std::cerr << " Tunnel is a starting point. Also removing " ;
|
||||||
|
std::cerr << " Virtual Peer Id " << vpid << std::endl ;
|
||||||
|
std::cerr << " Associated file source." << std::endl ;
|
||||||
|
#endif
|
||||||
|
_ft_controller->removeFileSource(hash,vpid) ;
|
||||||
|
_virtual_peers.erase(_virtual_peers.find(vpid)) ;
|
||||||
|
|
||||||
|
std::vector<TurtleTunnelId>& tunnels(_incoming_file_hashes[hash].tunnels) ;
|
||||||
|
|
||||||
|
for(uint i=0;i<tunnels.size();++i)
|
||||||
|
if(tunnels[i] == tid)
|
||||||
|
{
|
||||||
|
tunnels[i] = tunnels.back() ;
|
||||||
|
tunnels.pop_back() ;
|
||||||
|
break ;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
else if(it->second.local_dst == mConnMgr->getOwnId()) // This is a ending tunnel. We also remove the virtual peer id
|
||||||
|
{
|
||||||
|
#ifdef P3TURTLE_DEBUG
|
||||||
|
std::cerr << " Tunnel is a ending point. Also removing associated outgoing hash." ;
|
||||||
|
#endif
|
||||||
|
_outgoing_file_hashes.erase(_outgoing_file_hashes.find(it->second.hash)) ;
|
||||||
|
}
|
||||||
|
|
||||||
|
_local_tunnels.erase(it) ;
|
||||||
|
}
|
||||||
|
|
||||||
|
void p3turtle::stopMonitoringFileTunnels(const std::string& hash)
|
||||||
|
{
|
||||||
|
RsStackMutex stack(mTurtleMtx); /********** STACK LOCKED MTX ******/
|
||||||
|
|
||||||
|
std::map<TurtleFileHash,TurtleFileHashInfo>::iterator it(_incoming_file_hashes.find(hash)) ;
|
||||||
|
|
||||||
|
if(it == _incoming_file_hashes.end())
|
||||||
|
{
|
||||||
|
std::cerr << "p3turtle: asked to stop monitoring file hash " << hash << ", but this hash is actually not handled by the turtle router." << std::endl ;
|
||||||
|
return ;
|
||||||
|
}
|
||||||
|
|
||||||
|
// copy the list of tunnels to remove.
|
||||||
|
std::vector<TurtleTunnelId> tunnels_to_remove = it->second.tunnels ;
|
||||||
|
#ifdef P3TURTLE_DEBUG
|
||||||
|
std::cerr << "p3turtle: stopping monitoring for file hash " << hash << ", and closing " << tunnels_to_remove.size() << " tunnels." << std::endl ;
|
||||||
|
#endif
|
||||||
|
|
||||||
|
for(uint i=0;i<tunnels_to_remove.size();++i)
|
||||||
|
closeTunnel(tunnels_to_remove[i]) ;
|
||||||
|
|
||||||
|
_incoming_file_hashes.erase(it) ;
|
||||||
|
}
|
||||||
|
|
||||||
|
// -----------------------------------------------------------------------------------//
|
||||||
|
// -------------------------------- Config functions ------------------------------ //
|
||||||
|
// -----------------------------------------------------------------------------------//
|
||||||
|
//
|
||||||
|
RsSerialiser *p3turtle::setupSerialiser()
|
||||||
|
{
|
||||||
|
RsSerialiser *rss = new RsSerialiser();
|
||||||
|
|
||||||
|
return rss ;
|
||||||
|
}
|
||||||
|
std::list<RsItem*> p3turtle::saveList(bool& cleanup)
|
||||||
|
{
|
||||||
|
cleanup = true ;
|
||||||
|
return std::list<RsItem*>() ;
|
||||||
|
}
|
||||||
|
bool p3turtle::loadList(std::list<RsItem*> load)
|
||||||
|
{
|
||||||
|
return true ;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
// -----------------------------------------------------------------------------------//
|
// -----------------------------------------------------------------------------------//
|
||||||
// -------------------------------- Helper functions ------------------------------ //
|
// -------------------------------- Helper functions ------------------------------ //
|
||||||
// -----------------------------------------------------------------------------------//
|
// -----------------------------------------------------------------------------------//
|
||||||
@ -481,6 +611,7 @@ void p3turtle::handleSearchResult(RsTurtleSearchResultItem *item)
|
|||||||
// normally here, we should setup the forward adress, so that the owner's of the files found can be further reached by a tunnel.
|
// normally here, we should setup the forward adress, so that the owner's of the files found can be further reached by a tunnel.
|
||||||
|
|
||||||
fwd_item->PeerId(it->second.origin) ;
|
fwd_item->PeerId(it->second.origin) ;
|
||||||
|
fwd_item->depth = 2 + (rand() % 256) ; // obfuscate the depth for non immediate friends.
|
||||||
|
|
||||||
sendItem(fwd_item) ;
|
sendItem(fwd_item) ;
|
||||||
}
|
}
|
||||||
@ -638,6 +769,8 @@ void p3turtle::sendDataRequest(const std::string& peerId, const std::string& has
|
|||||||
TurtleTunnelId tunnel_id = it->second ;
|
TurtleTunnelId tunnel_id = it->second ;
|
||||||
TurtleTunnel& tunnel(_local_tunnels[tunnel_id]) ;
|
TurtleTunnel& tunnel(_local_tunnels[tunnel_id]) ;
|
||||||
|
|
||||||
|
tunnel.time_stamp = time(NULL) ;
|
||||||
|
|
||||||
#ifdef P3TURTLE_DEBUG
|
#ifdef P3TURTLE_DEBUG
|
||||||
assert(hash == tunnel.hash) ;
|
assert(hash == tunnel.hash) ;
|
||||||
#endif
|
#endif
|
||||||
@ -669,11 +802,12 @@ void p3turtle::sendFileData(const std::string& peerId, const std::string& hash,
|
|||||||
TurtleTunnelId tunnel_id = it->second ;
|
TurtleTunnelId tunnel_id = it->second ;
|
||||||
TurtleTunnel& tunnel(_local_tunnels[tunnel_id]) ;
|
TurtleTunnel& tunnel(_local_tunnels[tunnel_id]) ;
|
||||||
|
|
||||||
|
tunnel.time_stamp = time(NULL) ;
|
||||||
#ifdef P3TURTLE_DEBUG
|
#ifdef P3TURTLE_DEBUG
|
||||||
assert(hash == tunnel.hash) ;
|
assert(hash == tunnel.hash) ;
|
||||||
#endif
|
#endif
|
||||||
RsTurtleFileDataItem *item = new RsTurtleFileDataItem ;
|
RsTurtleFileDataItem *item = new RsTurtleFileDataItem ;
|
||||||
item->tunnel_id = tunnel_id ; // we should randomly select a tunnel, or something more clever.
|
item->tunnel_id = tunnel_id ;
|
||||||
item->chunk_offset = offset ;
|
item->chunk_offset = offset ;
|
||||||
item->chunk_size = chunksize ;
|
item->chunk_size = chunksize ;
|
||||||
item->chunk_data = malloc(chunksize) ;
|
item->chunk_data = malloc(chunksize) ;
|
||||||
@ -747,17 +881,12 @@ std::string p3turtle::getTurtlePeerId(TurtleTunnelId tid) const
|
|||||||
|
|
||||||
std::map<TurtleTunnelId,TurtleTunnel>::const_iterator it( _local_tunnels.find(tid) ) ;
|
std::map<TurtleTunnelId,TurtleTunnel>::const_iterator it( _local_tunnels.find(tid) ) ;
|
||||||
|
|
||||||
|
#ifdef P3TURTLE_DEBUG
|
||||||
assert(it!=_local_tunnels.end()) ;
|
assert(it!=_local_tunnels.end()) ;
|
||||||
assert(it->second.vpid != "") ;
|
assert(it->second.vpid != "") ;
|
||||||
|
#endif
|
||||||
|
|
||||||
return it->second.vpid ;
|
return it->second.vpid ;
|
||||||
|
|
||||||
// static std::string s="Turtle peer" ;
|
|
||||||
// unsigned char buf[100] ;
|
|
||||||
//
|
|
||||||
// sprintf(buff,"% 8d",id) ;
|
|
||||||
//
|
|
||||||
// return s+" "+buff;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
bool p3turtle::isOnline(const std::string& peer_id) const
|
bool p3turtle::isOnline(const std::string& peer_id) const
|
||||||
@ -953,7 +1082,7 @@ void p3turtle::handleTunnelResult(RsTurtleTunnelOkItem *item)
|
|||||||
|
|
||||||
// store tunnel info.
|
// store tunnel info.
|
||||||
if(_local_tunnels.find(item->tunnel_id) != _local_tunnels.end())
|
if(_local_tunnels.find(item->tunnel_id) != _local_tunnels.end())
|
||||||
std::cerr << "Tunnel already there. This is an error !!" << std::endl ;
|
std::cerr << "Tunnel id " << item->tunnel_id << " is already there. Giving up !!" << std::endl ;
|
||||||
|
|
||||||
TurtleTunnel& tunnel(_local_tunnels[item->tunnel_id]) ;
|
TurtleTunnel& tunnel(_local_tunnels[item->tunnel_id]) ;
|
||||||
|
|
||||||
@ -1086,8 +1215,7 @@ TurtleRequestId p3turtle::turtleSearch(const std::string& string_to_match)
|
|||||||
return id ;
|
return id ;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void p3turtle::monitorFileTunnels(const std::string& name,const std::string& file_hash,uint64_t size)
|
||||||
void p3turtle::turtleDownload(const std::string& name,const std::string& file_hash,uint64_t size)
|
|
||||||
{
|
{
|
||||||
{
|
{
|
||||||
RsStackMutex stack(mTurtleMtx); /********** STACK LOCKED MTX ******/
|
RsStackMutex stack(mTurtleMtx); /********** STACK LOCKED MTX ******/
|
||||||
@@ -1106,18 +1234,24 @@ void p3turtle::turtleDownload(const std::string& name,const std::string& file_ha
         // No tunnels at start, but this triggers digging new tunnels.
         //
         _incoming_file_hashes[file_hash].tunnels.clear();
+        _force_digg_new_tunnels = true ;

         // also should send associated request to the file transfer module.
         _incoming_file_hashes[file_hash].size = size ;
         _incoming_file_hashes[file_hash].name = name ;
+        _incoming_file_hashes[file_hash].time_stamp = time(NULL) ;
     }

+#ifdef TO_REMOVE
     std::list<std::string> srcIds ;

 #ifdef P3TURTLE_DEBUG
     std::cerr << "p3turtle: Calling ft server to handle the dl" << std::endl ;
 #endif
     _ft_server->FileRequest(name,file_hash, size, "", 0, srcIds) ;
+#endif
+
+    IndicateConfigChanged() ; // initiates saving of handled hashes.
 }

 void p3turtle::returnSearchResult(RsTurtleSearchResultItem *item)
@@ -1151,26 +1285,33 @@ void p3turtle::dumpState()
         std::cerr << "  hash=0x" << it->first << ", name=" << it->second.name << ", size=" << it->second.size << ", tunnel ids =" ;
         for(std::vector<TurtleTunnelId>::const_iterator it2(it->second.tunnels.begin());it2!=it->second.tunnels.end();++it2)
             std::cerr << " " << (void*)*it2 ;
-        std::cerr << ", last_req=" << (void*)it->second.last_request << std::endl ;
+        std::cerr << ", last_req=" << (void*)it->second.last_request << ", time_stamp = " << it->second.time_stamp << std::endl ;
     }
     std::cerr << "  Active outgoing file hashes: " << _outgoing_file_hashes.size() << std::endl ;
     for(std::map<TurtleFileHash,FileInfo>::const_iterator it(_outgoing_file_hashes.begin());it!=_outgoing_file_hashes.end();++it)
-        std::cerr << "  hash=0x" << it->first << ", name=" << it->second.fname << ", size=" << it->second.size ;
+        std::cerr << "  hash=0x" << it->first << ", name=" << it->second.fname << ", size=" << it->second.size << std::endl ;

     std::cerr << "  Local tunnels:" << std::endl ;
     for(std::map<TurtleTunnelId,TurtleTunnel>::const_iterator it(_local_tunnels.begin());it!=_local_tunnels.end();++it)
         std::cerr << "    " << (void*)it->first << ": from="
             << it->second.local_src << ", to=" << it->second.local_dst
             << ", hash=0x" << it->second.hash << ", ts=" << it->second.time_stamp << " (" << now-it->second.time_stamp << " secs ago)"
-            << ", peer id =" << it->second.vpid << std::endl ;
+            << ", peer id =" << it->second.vpid << ", time_stamp=" << it->second.time_stamp << std::endl ;

     std::cerr << "  buffered request origins: " << std::endl ;
     std::cerr << "    Search requests: " << _search_requests_origins.size() << std::endl ;

     for(std::map<TurtleSearchRequestId,TurtleRequestInfo>::const_iterator it(_search_requests_origins.begin());it!=_search_requests_origins.end();++it)
-        std::cerr << "      " << (void*)it->first << ": from=" << it->second.origin << ", ts=" << it->second.time_stamp << " (" << now-it->second.time_stamp << " secs ago)" << std::endl ;
+        std::cerr << "      " << (void*)it->first << ": from=" << it->second.origin
+            << ", ts=" << it->second.time_stamp << " (" << now-it->second.time_stamp
+            << " secs ago)" << std::endl ;

     std::cerr << "    Tunnel requests: " << _tunnel_requests_origins.size() << std::endl ;
     for(std::map<TurtleTunnelRequestId,TurtleRequestInfo>::const_iterator it(_tunnel_requests_origins.begin());it!=_tunnel_requests_origins.end();++it)
-        std::cerr << "      " << (void*)it->first << ": from=" << it->second.origin << ", ts=" << it->second.time_stamp << " (" << now-it->second.time_stamp << " secs ago)" << std::endl ;
+        std::cerr << "      " << (void*)it->first << ": from=" << it->second.origin
+            << ", ts=" << it->second.time_stamp << " (" << now-it->second.time_stamp
+            << " secs ago)" << std::endl ;

     std::cerr << "  Virtual peers:" << std::endl ;
     for(std::map<TurtleVirtualPeerId,TurtleTunnelId>::const_iterator it(_virtual_peers.begin());it!=_virtual_peers.end();++it)
         std::cerr << "    id=" << it->first << ", tunnel=" << (void*)(it->second) << std::endl ;
@@ -161,6 +161,7 @@ class ftServer ;
 class p3AuthMgr;
 class p3ConnectMgr;
 class ftDataMultiplex;
+class RsSerialiser;
 static const int TURTLE_MAX_SEARCH_DEPTH = 6 ;

 // This class is used to keep trace of requests (searches and tunnels).
@@ -191,10 +192,22 @@ class TurtleFileHashInfo
         TurtleRequestId last_request ; // last request for the tunnels of this hash

         TurtleFileName name ;
+        time_t time_stamp ;
         uint64_t size ;
 };

-class p3turtle: public p3Service, public pqiMonitor, public RsTurtle, public ftSearch
+// Subclassing:
+//
+// Class      | Brings what      | Usage
+// -----------+------------------+------------------------------------------------------
+// p3Service  | sendItem()       | handle packet sending/receiving to/from friend peers.
+// pqiMonitor | configChanged()  | handle who's connecting/disconnecting to dig new tunnels
+// RsTurtle   | start/stop file()| brings interface for turtle service
+// ftSearch   | search()         | used to allow searching for monitored files.
+// p3Config   | ConfigChanged()  | used to load/save .cfg file for turtle variales.
+// -----------+------------------+------------------------------------------------------
+//
+class p3turtle: public p3Service, public pqiMonitor, public RsTurtle, public ftSearch, public p3Config
 {
     public:
         p3turtle(p3ConnectMgr *cm,ftServer *m);
@@ -205,16 +218,24 @@ class p3turtle: public p3Service, public pqiMonitor, public RsTurtle, public ftS
         //
         virtual TurtleSearchRequestId turtleSearch(const std::string& string_to_match) ;

-        // Initiates tunnel handling for the given file hash.
-        // tunnels. Launches an exception if an error occurs during the
-        // initialization process. The turtle router itself does not initiate downloads,
-        // it only maintains tunnels for the given hash. The download should be
-        // driven by the file transfer module. Maybe this function can do the whole thing:
+        // Initiates tunnel handling for the given file hash. tunnels. Launches
+        // an exception if an error occurs during the initialization process. The
+        // turtle router itself does not initiate downloads, it only maintains
+        // tunnels for the given hash. The download should be driven by the file
+        // transfer module. Maybe this function can do the whole thing:
         // - initiate tunnel handling
         // - send the file request to the file transfer module
         // - populate the file transfer module with the adequate pqi interface and search module.
         //
-        virtual void turtleDownload(const std::string& name,const std::string& file_hash,uint64_t size) ;
+        // This function should be called in addition to ftServer::FileRequest() so that the turtle router
+        // automatically provide tunnels for the file to download.
+        //
+        virtual void monitorFileTunnels(const std::string& name,const std::string& file_hash,uint64_t size) ;
+
+        // This should be called when canceling a file download, so that the turtle router stops
+        // handling tunnels for this file.
+        //
+        virtual void stopMonitoringFileTunnels(const std::string& file_hash) ;

         /************* from pqiMonitor *******************/
         // Informs the turtle router that some peers are (dis)connected. This should initiate digging new tunnels,
@@ -237,6 +258,11 @@ class p3turtle: public p3Service, public pqiMonitor, public RsTurtle, public ftS
         //
         virtual bool search(std::string hash, uint64_t size, uint32_t hintflags, FileInfo &info) const ;

+        /************* from p3Config *******************/
+        virtual RsSerialiser *setupSerialiser() ;
+        virtual std::list<RsItem*> saveList(bool& cleanup) ;
+        virtual bool loadList(std::list<RsItem*> load) ;
+
         /************* Communication with ftserver *******************/
         // Does the turtle router manages tunnels to this peer ? (this is not a
         // real id, but a fake one, that the turtle router is capable of connecting with a tunnel id).
@@ -272,6 +298,7 @@ class p3turtle: public p3Service, public pqiMonitor, public RsTurtle, public ftS
         //----------------------------- Routing functions ----------------------------//

         void manageTunnels() ; /// Handle tunnel digging for current file hashes
+        void closeTunnel(TurtleTunnelId tid) ; /// closes a given tunnel
         int handleIncoming(); /// Main routing function

         void handleSearchRequest(RsTurtleSearchRequestItem *item); /// specific routing functions for handling particular packets.
|
|||||||
|
|
||||||
std::map<TurtleSearchRequestId,TurtleRequestInfo> _search_requests_origins ; /// keeps trace of who emmitted a given search request
|
std::map<TurtleSearchRequestId,TurtleRequestInfo> _search_requests_origins ; /// keeps trace of who emmitted a given search request
|
||||||
std::map<TurtleTunnelRequestId,TurtleRequestInfo> _tunnel_requests_origins ; /// keeps trace of who emmitted a tunnel request
|
std::map<TurtleTunnelRequestId,TurtleRequestInfo> _tunnel_requests_origins ; /// keeps trace of who emmitted a tunnel request
|
||||||
|
|
||||||
std::map<TurtleFileHash,TurtleFileHashInfo> _incoming_file_hashes ; /// stores adequate tunnels for each file hash locally managed
|
std::map<TurtleFileHash,TurtleFileHashInfo> _incoming_file_hashes ; /// stores adequate tunnels for each file hash locally managed
|
||||||
std::map<TurtleFileHash,FileInfo> _outgoing_file_hashes ; /// stores file info for each file we provide.
|
std::map<TurtleFileHash,FileInfo> _outgoing_file_hashes ; /// stores file info for each file we provide.
|
||||||
|
|
||||||
std::map<TurtleTunnelId,TurtleTunnel > _local_tunnels ; /// local tunnels, stored by ids (Either transiting or ending).
|
std::map<TurtleTunnelId,TurtleTunnel > _local_tunnels ; /// local tunnels, stored by ids (Either transiting or ending).
|
||||||
|
|
||||||
std::map<TurtleVirtualPeerId,TurtleTunnelId> _virtual_peers ; /// Peers corresponding to each tunnel.
|
std::map<TurtleVirtualPeerId,TurtleTunnelId> _virtual_peers ; /// Peers corresponding to each tunnel.
|
||||||
|
|
||||||
time_t _last_clean_time ;
|
time_t _last_clean_time ;
|
||||||
time_t _last_tunnel_management_time ;
|
time_t _last_tunnel_management_time ;
|
||||||
|
|
||||||
std::list<pqipeer> _online_peers;
|
std::list<pqipeer> _online_peers;
|
||||||
|
bool _force_digg_new_tunnels ; /// used to force digging new tunnels
|
||||||
#ifdef P3TURTLE_DEBUG
|
#ifdef P3TURTLE_DEBUG
|
||||||
void dumpState() ;
|
void dumpState() ;
|
||||||
#endif
|
#endif
|
||||||
|
@@ -33,10 +33,11 @@ class RsTurtleSearchResultItem: public RsTurtleItem
         RsTurtleSearchResultItem() : RsTurtleItem(RS_TURTLE_SUBTYPE_SEARCH_RESULT) {}
         RsTurtleSearchResultItem(void *data,uint32_t size) ; // deserialization

-        uint16_t depth ;
-        uint8_t peer_id[16]; // peer id. This will eventually be obfuscated in some way.
-
-        TurtleSearchRequestId request_id ; // randomly generated request id.
+        TurtleSearchRequestId request_id ; // Randomly generated request id.
+
+        uint16_t depth ; // The depth of a search result is obfuscated in this way:
+                         // If the actual depth is 1, this field will be 1.
+                         // If the actual depth is > 1, this field is a larger arbitrary integer.

         std::list<TurtleFileInfo> result ;
@@ -611,9 +611,9 @@ void TransfersDialog::insertTransfers()
             int dlPeers = 0;
             for (pit = info.peers.begin(); pit != info.peers.end(); pit++) {
                 symbol = "";
-                name = QString::fromStdString(rsPeers->getPeerName(pit->peerId));
+                name = getPeerName(pit->peerId);
                 //unique combination: fileName + peerName, variant: hash + peerName (too long)
-                coreId = QString::fromStdString(info.fname + rsPeers->getPeerName(pit->peerId));
+                coreId = QString::fromStdString(info.fname) + getPeerName(pit->peerId);
                 fileSize = info.size;
                 progress = (info.transfered * 100.0) / info.size;
                 dlspeed = pit->tfRate * 1024.0;
|
|||||||
continue;
|
continue;
|
||||||
|
|
||||||
/* if peers found in selectedIds, select again */
|
/* if peers found in selectedIds, select again */
|
||||||
if (selectedIds.end() != std::find(selectedIds.begin(), selectedIds.end(), info.fname + rsPeers->getPeerName(pit->peerId))) {
|
if (selectedIds.end() != std::find(selectedIds.begin(), selectedIds.end(), info.fname + getPeerName(pit->peerId).toStdString())) {
|
||||||
QStandardItem *dlItem = DLListModel->item(addedRow);
|
QStandardItem *dlItem = DLListModel->item(addedRow);
|
||||||
QModelIndex childIndex = DLListModel->indexFromItem(dlItem).child(dlPeers, 0);
|
QModelIndex childIndex = DLListModel->indexFromItem(dlItem).child(dlPeers, 0);
|
||||||
selection->select(childIndex, QItemSelectionModel::Rows | QItemSelectionModel::SelectCurrent);
|
selection->select(childIndex, QItemSelectionModel::Rows | QItemSelectionModel::SelectCurrent);
|
||||||
@ -669,7 +669,7 @@ void TransfersDialog::insertTransfers()
|
|||||||
symbol = "";
|
symbol = "";
|
||||||
coreId = QString::fromStdString(info.hash);
|
coreId = QString::fromStdString(info.hash);
|
||||||
name = QString::fromStdString(info.fname);
|
name = QString::fromStdString(info.fname);
|
||||||
sources = QString::fromStdString(rsPeers->getPeerName(pit->peerId));
|
sources = getPeerName(pit->peerId);
|
||||||
|
|
||||||
switch(pit->status)
|
switch(pit->status)
|
||||||
{
|
{
|
||||||
@ -749,6 +749,19 @@ void TransfersDialog::insertTransfers()
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
QString TransfersDialog::getPeerName(const std::string& id) const
|
||||||
|
{
|
||||||
|
QString res = QString::fromStdString(rsPeers->getPeerName(id)) ;
|
||||||
|
|
||||||
|
// This is because turtle tunnels have no name (I didn't want to bother with
|
||||||
|
// connect mgr). In such a case their id can suitably hold for a name.
|
||||||
|
//
|
||||||
|
if(res == "")
|
||||||
|
return QString::fromStdString(id) ;
|
||||||
|
else
|
||||||
|
return res ;
|
||||||
|
}
|
||||||
|
|
||||||
void TransfersDialog::cancel()
|
void TransfersDialog::cancel()
|
||||||
{
|
{
|
||||||
QString queryWrn2;
|
QString queryWrn2;
|
||||||
@ -764,11 +777,13 @@ void TransfersDialog::cancel()
|
|||||||
if(selection->isRowSelected(i, QModelIndex()))
|
if(selection->isRowSelected(i, QModelIndex()))
|
||||||
{
|
{
|
||||||
std::string id = getID(i, DLListModel).toStdString();
|
std::string id = getID(i, DLListModel).toStdString();
|
||||||
|
#ifdef UNUSED
|
||||||
QString qname = getFileName(i, DLListModel);
|
QString qname = getFileName(i, DLListModel);
|
||||||
/* XXX -> Should not have to 'trim' filename ... something wrong here..
|
/* XXX -> Should not have to 'trim' filename ... something wrong here..
|
||||||
* but otherwise, not exact filename .... BUG
|
* but otherwise, not exact filename .... BUG
|
||||||
*/
|
*/
|
||||||
std::string name = (qname.trimmed()).toStdString();
|
std::string name = (qname.trimmed()).toStdString();
|
||||||
|
#endif
|
||||||
rsFiles->FileCancel(id); /* hash */
|
rsFiles->FileCancel(id); /* hash */
|
||||||
|
|
||||||
}
|
}
|
||||||
|
@@ -74,6 +74,8 @@ class TransfersDialog : public MainPage
         void playFiles(QStringList files);

     private:
+        QString getPeerName(const std::string& peer_id) const ;
+
         QStandardItemModel *DLListModel;
         QStandardItemModel *ULListModel;
         QItemSelectionModel *selection;
@@ -259,12 +259,14 @@ void TurtleSearchDialog::download()
                 "", 0, srcIds);
 #endif

+        std::list<std::string> srcIds;
         std::cout << "Issuing file request from search dialog: -"
                   << (item->text(SR_NAME_COL)).toStdString() << "-"
                   << (item->text(SR_HASH_COL)).toStdString() << "-"
                   << (item->text(SR_REALSIZE_COL)).toInt() << std::endl ;

-        rsTurtle->turtleDownload(item->text(SR_NAME_COL).toStdString(),item->text(SR_HASH_COL).toStdString(),item->text(SR_REALSIZE_COL).toInt()) ;
+        rsTurtle->monitorFileTunnels(item->text(SR_NAME_COL).toStdString(),item->text(SR_HASH_COL).toStdString(),item->text(SR_REALSIZE_COL).toInt()) ;
+        rsFiles->FileRequest(item->text(SR_NAME_COL).toStdString(),item->text(SR_HASH_COL).toStdString(),item->text(SR_REALSIZE_COL).toInt(),"",0,srcIds) ;
     }
 }
