Merging branches/v0.5-peernet/libbitdht (Merging r4237 through r4353 into '.')

There are many significant improvements to the DHT here. 
See commit logs on v0.5-peernet branch for details.

This is not the final merge, but it brings over the majority of the expected v0.5-peernet/libbitdht changes.




git-svn-id: http://svn.code.sf.net/p/retroshare/code/trunk@4354 b45a01b8-16f6-495d-af2f-9b41ad6348cc
drbob 2011-06-29 10:46:11 +00:00
parent f517442989
commit fff40eceac
40 changed files with 6843 additions and 746 deletions

View File

@ -0,0 +1,137 @@
/*
* bitdht/bdaccount.cc
*
* BitDHT: A Flexible DHT library.
*
* Copyright 2011 by Robert Fernie
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License Version 3 as published by the Free Software Foundation.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA.
*
* Please report all bugs and problems to "bitdht@lunamutt.com".
*
*/
#include "bitdht/bdaccount.h"
#include <string.h>
#include <stdlib.h>
#include <iostream>
#include <iomanip>
#include <sstream>
#define LPF_FACTOR (0.90)
bdAccount::bdAccount()
:mNoStats(BDACCOUNT_NUM_ENTRIES),
mCountersOut(BDACCOUNT_NUM_ENTRIES), mCountersRecv(BDACCOUNT_NUM_ENTRIES),
mLpfOut(BDACCOUNT_NUM_ENTRIES), mLpfRecv(BDACCOUNT_NUM_ENTRIES),
mLabel(BDACCOUNT_NUM_ENTRIES)
{
mLabel[BDACCOUNT_MSG_OUTOFDATEPING] = "OUTOFDATEPING ";
mLabel[BDACCOUNT_MSG_PING] = "PING ";
mLabel[BDACCOUNT_MSG_PONG] = "PONG ";
mLabel[BDACCOUNT_MSG_QUERYNODE] = "QUERYNODE ";
mLabel[BDACCOUNT_MSG_QUERYHASH] = "QUERYHASH ";
mLabel[BDACCOUNT_MSG_REPLYFINDNODE] = "REPLYFINDNODE ";
mLabel[BDACCOUNT_MSG_REPLYQUERYHASH] = "REPLYQUERYHASH ";
mLabel[BDACCOUNT_MSG_POSTHASH] = "POSTHASH ";
mLabel[BDACCOUNT_MSG_REPLYPOSTHASH] = "REPLYPOSTHASH ";
mLabel[BDACCOUNT_MSG_CONNECTREQUEST] = "CONNECTREQUEST ";
mLabel[BDACCOUNT_MSG_CONNECTREPLY] = "CONNECTREPLY ";
mLabel[BDACCOUNT_MSG_CONNECTSTART] = "CONNECTSTART ";
mLabel[BDACCOUNT_MSG_CONNECTACK] = "CONNECTACK ";
resetStats();
}
void bdAccount::incCounter(uint32_t idx, bool out)
{
if (idx >= (uint32_t) mNoStats)
{
std::cerr << "bdAccount::incCounter() Invalid Index";
std::cerr << std::endl;
return;
}
if (out)
{
mCountersOut[idx]++;
}
else
{
mCountersRecv[idx]++;
}
return;
}
void bdAccount::doStats()
{
int i;
for(i = 0; i < mNoStats; i++)
{
mLpfOut[i] *= (LPF_FACTOR) ;
mLpfOut[i] += (1.0 - LPF_FACTOR) * mCountersOut[i];
mLpfRecv[i] *= (LPF_FACTOR) ;
mLpfRecv[i] += (1.0 - LPF_FACTOR) * mCountersRecv[i];
}
resetCounters();
}
void bdAccount::printStats(std::ostream &out)
{
int i;
out << " Send Recv: ";
out << std::endl;
for(i = 0; i < mNoStats; i++)
{
out << "Send" << mLabel[i] << " : " << std::setw(10) << mLpfOut[i];
out << " ";
out << "Recv" << mLabel[i] << " : " << std::setw(10) << mLpfRecv[i];
out << std::endl;
}
}
void bdAccount::resetCounters()
{
int i;
for(i = 0; i < mNoStats; i++)
{
mCountersOut[i] = 0;
mCountersRecv[i] = 0;
}
}
void bdAccount::resetStats()
{
int i;
for(i = 0; i < mNoStats; i++)
{
mLpfOut[i] = 0;
mLpfRecv[i] = 0;
}
resetCounters();
}

View File

@ -0,0 +1,79 @@
#ifndef BITDHT_ACCOUNT_H
#define BITDHT_ACCOUNT_H
/*
* bitdht/bdaccount.h
*
* BitDHT: A Flexible DHT library.
*
* Copyright 2011 by Robert Fernie
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License Version 3 as published by the Free Software Foundation.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA.
*
* Please report all bugs and problems to "bitdht@lunamutt.com".
*
*/
#include <vector>
#include <string>
#include <inttypes.h>
#define BDACCOUNT_MSG_OUTOFDATEPING 0
#define BDACCOUNT_MSG_PING 1
#define BDACCOUNT_MSG_PONG 2
#define BDACCOUNT_MSG_QUERYNODE 3
#define BDACCOUNT_MSG_QUERYHASH 4
#define BDACCOUNT_MSG_REPLYFINDNODE 5
#define BDACCOUNT_MSG_REPLYQUERYHASH 6
#define BDACCOUNT_MSG_POSTHASH 7
#define BDACCOUNT_MSG_REPLYPOSTHASH 8
#define BDACCOUNT_MSG_CONNECTREQUEST 9
#define BDACCOUNT_MSG_CONNECTREPLY 10
#define BDACCOUNT_MSG_CONNECTSTART 11
#define BDACCOUNT_MSG_CONNECTACK 12
#define BDACCOUNT_NUM_ENTRIES 13
class bdAccount
{
public:
bdAccount();
void incCounter(uint32_t idx, bool out);
void doStats();
void printStats(std::ostream &out);
void resetCounters();
void resetStats();
private:
int mNoStats;
std::vector<double> mCountersOut;
std::vector<double> mCountersRecv;
std::vector<double> mLpfOut;
std::vector<double> mLpfRecv;
std::vector<std::string> mLabel;
// Statistics.
};
#endif // BITDHT_ACCOUNT_H

File diff suppressed because it is too large

View File

@ -0,0 +1,274 @@
#ifndef BITDHT_CONNECTION_H
#define BITDHT_CONNECTION_H
/*
* bitdht/bdconnection.h
*
* BitDHT: A Flexible DHT library.
*
* Copyright 2011 by Robert Fernie
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License Version 3 as published by the Free Software Foundation.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA.
*
* Please report all bugs and problems to "bitdht@lunamutt.com".
*
*/
#include "bitdht/bdiface.h"
class bdQueryManager;
class bdNodePublisher;
/************************************************************************************************************
************************************** ProxyTuple + Connection State ****************************************
************************************************************************************************************/
#define BITDHT_CONNREQUEST_READY 1
#define BITDHT_CONNREQUEST_PAUSED 2
#define BITDHT_CONNREQUEST_INPROGRESS 3
#define BITDHT_CONNREQUEST_EXTCONNECT 4
#define BITDHT_CONNREQUEST_DONE 5
#define BITDHT_CONNREQUEST_TIMEOUT_CONNECT 30
#define BITDHT_CONNREQUEST_TIMEOUT_INPROGRESS 30
#define BITDHT_CONNREQUEST_MAX_AGE 60
#define BITDHT_CONNECTION_WAITING_AUTH 1
#define BITDHT_CONNECTION_WAITING_REPLY 2
#define BITDHT_CONNECTION_WAITING_START 3
#define BITDHT_CONNECTION_WAITING_ACK 4
#define BITDHT_CONNECTION_COMPLETED 5
#define BD_CONNECTION_START_RETRY_PERIOD 5 // Should only take a couple of seconds to get reply.
#define BD_CONNECTION_START_MAX_RETRY 3
#define BD_CONNECTION_MAX_TIMEOUT 30
class bdProxyTuple
{
public:
bdProxyTuple() { return; }
bdProxyTuple(bdNodeId *s, bdNodeId *p, bdNodeId *d)
:srcId(*s), proxyId(*p), destId(*d) { return; }
bdNodeId srcId;
bdNodeId proxyId;
bdNodeId destId;
};
std::ostream &operator<<(std::ostream &out, const bdProxyTuple &t);
int operator<(const bdProxyTuple &a, const bdProxyTuple &b);
int operator==(const bdProxyTuple &a, const bdProxyTuple &b);
class bdConnection
{
public:
bdConnection();
/** Functions to tweak the connection status */
// User initialised Connection.
int ConnectionSetup(bdId *proxyId, bdId *srcConnAddr, bdId *destConnAddr, int mode);
int ConnectionSetupDirect(bdId *destId, bdId *srcConnAddr);
// Initialise a new Connection. (receiving a Connection Request)
int ConnectionRequestDirect(bdId *id, bdId *srcConnAddr, bdId *destConnAddr);
int ConnectionRequestProxy(bdId *id, bdId *srcConnAddr, bdNodeId *ownId, bdId *destConnAddr, int mode);
int ConnectionRequestEnd(bdId *id, bdId *srcConnAddr, bdId *destConnAddr, int mode);
// Setup Finishing Stage, (receiving a Connection Reply).
int upgradeProxyConnectionToFinish(bdId *id, bdId *srcConnAddr, bdId *destConnAddr, int mode, int status);
int AuthoriseDirectConnection(bdId *srcId, bdId *proxyId, bdId *destId, int mode, int loc);
int AuthoriseProxyConnection(bdId *srcId, bdId *proxyId, bdId *destId, int mode, int loc);
int AuthoriseEndConnection(bdId *srcId, bdId *proxyId, bdId *destId, int mode, int loc);
int CompleteConnection(bdId *id, bdId *srcConnAddr, bdId *destConnAddr);
int checkForDefaultConnectAddress();
/* Connection State, and TimeStamp of Update */
int mState;
time_t mLastEvent;
/* Addresses of Start/Proxy/End Nodes */
bdId mSrcId;
bdId mDestId;
bdId mProxyId;
/* Where we are in the connection,
* and what connection mode.
*/
int mPoint;
int mMode;
/* must have ip:ports of connection ends (if proxied) */
bdId mSrcConnAddr;
bdId mDestConnAddr;
int mBandwidth;
/* START/ACK Finishing ****/
time_t mLastStart; /* timer for retries */
int mRetryCount; /* retry counter */
bool mSrcAck;
bool mDestAck;
// Completion TS.
time_t mCompletedTS;
};
class bdConnectionRequest
{
public:
int setupDirectConnection(struct sockaddr_in *laddr, bdNodeId *target);
int setupProxyConnection(struct sockaddr_in *laddr, bdNodeId *target, uint32_t mode);
int addGoodProxy(const bdId *srcId);
int checkGoodProxyPeer(const bdId *Id);
bdNodeId mTarget;
struct sockaddr_in mLocalAddr;
int mMode;
int mState;
time_t mStateTS;
time_t mPauseTS;
uint32_t mErrCode;
std::list<bdId> mGoodProxies;
std::list<bdId> mPotentialProxies;
int mRecycled;
bdId mCurrentAttempt;
std::list<bdId> mPeersTried;
};
std::ostream &operator<<(std::ostream &out, const bdConnectionRequest &req);
std::ostream &operator<<(std::ostream &out, const bdConnection &conn);
/*********
* The Connection Management Class.
* this encapsulates all of the functionality..
* except for a couple of message in/outs + callback.
*/
class bdConnectManager
{
public:
bdConnectManager(bdNodeId *ownid, bdSpace *space, bdQueryManager *qmgr, bdDhtFunctions *fns, bdNodePublisher *pub);
/* connection functions */
void requestConnection(bdNodeId *id, uint32_t modes);
void allowConnection(bdNodeId *id, uint32_t modes);
/* high level */
void shutdownConnections();
void printConnections();
/* Connections: Configuration */
void defaultConnectionOptions();
virtual void setConnectionOptions(uint32_t allowedModes, uint32_t flags);
/* Connections: Initiation */
int requestConnection(struct sockaddr_in *laddr, bdNodeId *target, uint32_t mode, uint32_t start);
int requestConnection_direct(struct sockaddr_in *laddr, bdNodeId *target);
int requestConnection_proxy(struct sockaddr_in *laddr, bdNodeId *target, uint32_t mode);
int killConnectionRequest(struct sockaddr_in *laddr, bdNodeId *target, uint32_t mode);
int checkExistingConnectionAttempt(bdNodeId *target);
void addPotentialConnectionProxy(const bdId *srcId, const bdId *target);
void updatePotentialConnectionProxy(const bdId *id, uint32_t mode);
int checkPeerForFlag(const bdId *id, uint32_t with_flag);
int tickConnections();
void iterateConnectionRequests();
int startConnectionAttempt(bdConnectionRequest *req);
// internal Callback -> normally continues to callbackConnect().
void callbackConnectRequest(bdId *srcId, bdId *proxyId, bdId *destId,
int mode, int point, int cbtype, int errcode);
/* Connections: Outgoing */
int startConnectionAttempt(bdId *proxyId, bdId *srcConnAddr, bdId *destConnAddr, int mode);
void AuthConnectionOk(bdId *srcId, bdId *proxyId, bdId *destId, int mode, int loc);
void AuthConnectionNo(bdId *srcId, bdId *proxyId, bdId *destId, int mode, int loc, int errcode);
void iterateConnections();
/* Connections: Utility State */
bdConnection *findExistingConnection(bdNodeId *srcId, bdNodeId *proxyId, bdNodeId *destId);
bdConnection *newConnection(bdNodeId *srcId, bdNodeId *proxyId, bdNodeId *destId);
int cleanConnection(bdNodeId *srcId, bdNodeId *proxyId, bdNodeId *destId);
int determinePosition(bdNodeId *sender, bdNodeId *src, bdNodeId *dest);
int determineProxyId(bdNodeId *sender, bdNodeId *src, bdNodeId *dest, bdNodeId *proxyId);
bdConnection *findSimilarConnection(bdNodeId *srcId, bdNodeId *destId);
bdConnection *findExistingConnectionBySender(bdId *sender, bdId *src, bdId *dest);
bdConnection *newConnectionBySender(bdId *sender, bdId *src, bdId *dest);
int cleanConnectionBySender(bdId *sender, bdId *src, bdId *dest);
// Overloaded Generalised Connection Callback.
virtual void callbackConnect(bdId *srcId, bdId *proxyId, bdId *destId,
int mode, int point, int cbtype, int errcode);
/* Connections: */
int recvedConnectionRequest(bdId *id, bdId *srcConnAddr, bdId *destConnAddr, int mode);
int recvedConnectionReply(bdId *id, bdId *srcConnAddr, bdId *destConnAddr, int mode, int status);
int recvedConnectionStart(bdId *id, bdId *srcConnAddr, bdId *destConnAddr, int mode, int bandwidth);
int recvedConnectionAck(bdId *id, bdId *srcConnAddr, bdId *destConnAddr, int mode);
private:
std::map<bdProxyTuple, bdConnection> mConnections;
std::map<bdNodeId, bdConnectionRequest> mConnectionRequests;
uint32_t mConfigAllowedModes;
bool mConfigAutoProxy;
/****************************** Connection Code (in bdconnection.cc) ****************************/
private:
bdNodeId mOwnId;
bdSpace *mNodeSpace;
bdQueryManager *mQueryMgr;
bdDhtFunctions *mFns;
bdNodePublisher *mPub;
};
#endif // BITDHT_CONNECTION_H
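
bdConnectManager keys its active connections on the (srcId, proxyId, destId) triple via std::map<bdProxyTuple, bdConnection>, which is why operator< is declared for bdProxyTuple above. The implementation lives in bdconnection.cc (whose diff is suppressed in this view); the sketch below shows one plausible lexicographic ordering and is an assumption, not the code from this commit. It relies on an operator< for bdNodeId, which the library must already provide since mConnectionRequests is keyed by bdNodeId.

// Hypothetical sketch: order tuples field by field (src, then proxy, then dest).
// The real code may instead compare raw key bytes or go through bdDhtFunctions.
int operator<(const bdProxyTuple &a, const bdProxyTuple &b)
{
    if (a.srcId < b.srcId)     return 1;
    if (b.srcId < a.srcId)     return 0;
    if (a.proxyId < b.proxyId) return 1;
    if (b.proxyId < a.proxyId) return 0;
    return (a.destId < b.destId);
}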

View File

@ -0,0 +1,174 @@
/*
* bitdht/bdfilter.cc
*
* BitDHT: A Flexible DHT library.
*
* Copyright 2010 by Robert Fernie
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License Version 3 as published by the Free Software Foundation.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA.
*
* Please report all bugs and problems to "bitdht@lunamutt.com".
*
*/
#include "bitdht/bdfilter.h"
#include <stdlib.h>
#include <stdio.h>
#include <iostream>
/**
* #define DEBUG_FILTER 1
**/
bdFilter::bdFilter(const bdNodeId *ownId, std::list<bdFilteredPeer> &startList,
uint32_t filterFlags, bdDhtFunctions *fns)
{
/* */
mOwnId = *ownId;
mFns = fns;
time_t now = time(NULL);
std::list<bdFilteredPeer>::iterator it;
for(it = startList.begin(); it != startList.end(); it++)
{
mFiltered.push_back(*it);
}
mFilterFlags = filterFlags;
}
bool bdFilter::filtered(std::list<bdFilteredPeer> &answer)
{
answer = mFiltered;
return (answer.size() > 0);
}
bool bdFilter::filteredIPs(std::list<struct sockaddr_in> &answer)
{
std::list<bdFilteredPeer>::iterator it;
for(it = mFiltered.begin(); it != mFiltered.end(); it++)
{
answer.push_back(it->mAddr);
}
return (answer.size() > 0);
}
int bdFilter::checkPeer(const bdId *id, uint32_t mode)
{
bool add = false;
uint32_t flags = 0;
if ((mFilterFlags & BITDHT_FILTER_REASON_OWNID) &&
isOwnIdWithoutBitDhtFlags(id, mode))
{
add = true;
flags |= BITDHT_FILTER_REASON_OWNID;
}
if (add)
{
bool isNew = addPeerToFilter(id, flags);
if (isNew)
{
return 1;
}
}
return 0;
}
int bdFilter::addPeerToFilter(const bdId *id, uint32_t flags)
{
std::list<bdFilteredPeer>::iterator it;
bool found = false;
for(it = mFiltered.begin(); it != mFiltered.end(); it++)
{
if (id->addr.sin_addr.s_addr == it->mAddr.sin_addr.s_addr)
{
found = true;
it->mLastSeen = time(NULL);
it->mFilterFlags |= flags;
break;
}
}
if (!found)
{
time_t now = time(NULL);
bdFilteredPeer fp;
fp.mAddr = id->addr;
fp.mAddr.sin_port = 0;
fp.mFilterFlags = flags;
fp.mFilterTS = now;
fp.mLastSeen = now;
mFiltered.push_back(fp);
uint32_t saddr = id->addr.sin_addr.s_addr;
mIpsBanned.insert(saddr);
std::cerr << "Adding New Banned Ip Address: " << inet_ntoa(id->addr.sin_addr);
std::cerr << std::endl;
return true;
}
return false;
}
/* fast check if the addr is in the structure */
int bdFilter::addrOkay(struct sockaddr_in *addr)
{
std::set<uint32_t>::const_iterator it = mIpsBanned.find(addr->sin_addr.s_addr);
if (it == mIpsBanned.end())
{
return 1; // Address is Okay!
}
std::cerr << "Detected Packet From Banned Ip Address: " << inet_ntoa(addr->sin_addr);
std::cerr << std::endl;
return 0;
}
bool bdFilter::isOwnIdWithoutBitDhtFlags(const bdId *id, uint32_t peerFlags)
{
if (peerFlags & BITDHT_PEER_STATUS_RECV_PONG)
{
if (peerFlags & BITDHT_PEER_STATUS_DHT_ENGINE)
{
/* okay! */
return false;
}
/* now check distance */
bdMetric dist;
mFns->bdDistance(&mOwnId, &(id->id), &dist);
int bucket = mFns->bdBucketDistance(&dist);
/* if they match us... kill it */
if (bucket == 0)
{
return true;
}
}
return false;
}

View File

@ -0,0 +1,82 @@
#ifndef BITDHT_FILTER_H
#define BITDHT_FILTER_H
/*
* bitdht/bdfilter.h
*
* BitDHT: A Flexible DHT library.
*
* Copyright 2010 by Robert Fernie
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License Version 3 as published by the Free Software Foundation.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA.
*
* Please report all bugs and problems to "bitdht@lunamutt.com".
*
*/
/* This class is used to detect bad peers and filter them out.
*
*/
#include "bitdht/bdiface.h"
#include <set>
/* Query result flags are in bdiface.h */
#define BITDHT_FILTER_REASON_OWNID 0x0001
class bdFilteredPeer
{
public:
struct sockaddr_in mAddr;
uint32_t mFilterFlags; /* reasons why we are filtering */
time_t mFilterTS;
time_t mLastSeen;
};
class bdFilter
{
public:
bdFilter(const bdNodeId *ownid, std::list<bdFilteredPeer> &initialFilters,
uint32_t filterFlags, bdDhtFunctions *fns);
// get the answer.
bool filtered(std::list<bdFilteredPeer> &answer);
bool filteredIPs(std::list<struct sockaddr_in> &answer);
int checkPeer(const bdId *id, uint32_t peerFlags);
int addrOkay(struct sockaddr_in *addr);
private:
int addPeerToFilter(const bdId *id, uint32_t flags);
bool isOwnIdWithoutBitDhtFlags(const bdId *id, uint32_t peerFlags);
// searching for
bdNodeId mOwnId;
uint32_t mFilterFlags;
std::list<bdFilteredPeer> mFiltered;
bdDhtFunctions *mFns;
// = addr.sin_addr.s_addr (uint32_t) stored in network order.
std::set<uint32_t> mIpsBanned;
};
#endif
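
bdFilter keeps the banned addresses in a std::set keyed on the raw s_addr, so addrOkay() is a cheap lookup intended to be consulted on every incoming packet before any parsing. A hedged call-site sketch is shown below; the function name and drop policy are illustrative assumptions, not code from this commit.

// Hypothetical call site: drop packets from banned addresses before any
// bencode parsing is attempted.
void handleIncomingPacket(bdFilter *filter, struct sockaddr_in *from,
                          char *data, int size)
{
    if (!filter->addrOkay(from))
    {
        return; // sender is on the banned-IP list; ignore the packet
    }
    // ... normal message parsing would continue here ...
    (void) data;
    (void) size;
}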

View File

@ -29,6 +29,7 @@
#include <iosfwd>
#include <map>
#include <string>
#include <list>
#include <inttypes.h>
#include "util/bdnet.h"
@ -94,7 +95,8 @@ virtual int bdDistance(const bdNodeId *n1, const bdNodeId *n2, bdMetric *metric)
virtual int bdBucketDistance(const bdNodeId *n1, const bdNodeId *n2) = 0;
virtual int bdBucketDistance(const bdMetric *metric) = 0;
virtual uint32_t bdLikelySameNode(const bdId *id1, const bdId *id2) = 0;
virtual bool bdSimilarId(const bdId *id1, const bdId *id2) = 0;
virtual bool bdUpdateSimilarId(bdId *dest, const bdId *src) = 0;
virtual void bdRandomMidId(const bdNodeId *target, const bdNodeId *other, bdNodeId *mid) = 0;
@ -105,11 +107,15 @@ virtual void bdPrintNodeId(std::ostream &out, const bdNodeId *a) = 0;
/* NODE OPTIONS */
#define BITDHT_OPTIONS_MAINTAIN_UNSTABLE_PORT 0x00000001
/* peer flags
* order is important!
* higher bits = more priority.
* BITDHT_PEER_STATUS_RECVPING
* BITDHT_PEER_STATUS_RECVPONG
* BITDHT_PEER_STATUS_RECVNODES
* BITDHT_PEER_STATUS_RECVHASHES
@ -121,14 +127,137 @@ virtual void bdPrintNodeId(std::ostream &out, const bdNodeId *a) = 0;
#define BITDHT_PEER_STATUS_MASK_RECVD 0x000000ff
#define BITDHT_PEER_STATUS_MASK_DHT 0x0000ff00
#define BITDHT_PEER_STATUS_MASK_KNOWN 0x00ff0000
#define BITDHT_PEER_STATUS_RECV_PONG 0x00000001
#define BITDHT_PEER_STATUS_RECV_NODES 0x00000002
#define BITDHT_PEER_STATUS_RECV_HASHES 0x00000004
#define BITDHT_PEER_STATUS_RECV_PING 0x00000001
#define BITDHT_PEER_STATUS_RECV_PONG 0x00000002
#define BITDHT_PEER_STATUS_RECV_NODES 0x00000004
#define BITDHT_PEER_STATUS_RECV_HASHES 0x00000008
#define BITDHT_PEER_STATUS_RECV_CONNECT_MSG 0x00000010
#define BITDHT_PEER_STATUS_DHT_ENGINE 0x00000100
#define BITDHT_PEER_STATUS_DHT_APPL 0x00000200
#define BITDHT_PEER_STATUS_DHT_VERSION 0x00000400
#define BITDHT_PEER_STATUS_DHT_ENGINE_VERSION 0x00000200
#define BITDHT_PEER_STATUS_DHT_APPL 0x00000400
#define BITDHT_PEER_STATUS_DHT_APPL_VERSION 0x00000800
#define BITDHT_PEER_STATUS_DHT_WHITELIST 0x00010000
#define BITDHT_PEER_STATUS_DHT_FOF 0x00020000
#define BITDHT_PEER_STATUS_DHT_FRIEND 0x00040000
// EXTRA FLAGS are our internal thoughts about the peer.
#define BITDHT_PEER_EXFLAG_MASK_BASIC 0x000000ff
#define BITDHT_PEER_EXFLAG_UNSTABLE 0x00000001 // Port changes.
#define BITDHT_PEER_EXFLAG_ATTACHED 0x00000002 // We will ping in heavily. (if unstable)
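
These status words are bitmasks, so a required combination of flags is tested with a mask-and-compare, the same pattern used by find_node() and find_nearest_nodes_with_flags() further down in this merge. A small illustrative check follows; the helper name is an assumption.

// Hypothetical helper: true if the peer has PONGed and identifies as a
// BitDHT engine (the "local net" criterion used elsewhere in this merge).
bool isLocalNetPeer(uint32_t peerFlags)
{
    const uint32_t required = BITDHT_PEER_STATUS_RECV_PONG |
                              BITDHT_PEER_STATUS_DHT_ENGINE;
    return (peerFlags & required) == required;
}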
#define BITDHT_CONNECT_MODE_DIRECT 0x00000001
#define BITDHT_CONNECT_MODE_PROXY 0x00000002
#define BITDHT_CONNECT_MODE_RELAY 0x00000004
#define BITDHT_CONNECT_OPTION_AUTOPROXY 0x00000001
// STATUS CODES. == 0 is okay, != 0 is error.
#define BITDHT_CONNECT_ANSWER_OKAY 0x00000000
#define BITDHT_CONNECT_ERROR_NONE (BITDHT_CONNECT_ANSWER_OKAY)
#define BITDHT_CONNECT_ERROR_MASK_TYPE 0x0000ffff
#define BITDHT_CONNECT_ERROR_MASK_SOURCE 0x00ff0000
#define BITDHT_CONNECT_ERROR_MASK_CRMOVE 0xff000000
#define BITDHT_CONNECT_ERROR_SOURCE_START 0x00010000
#define BITDHT_CONNECT_ERROR_SOURCE_MID 0x00020000
#define BITDHT_CONNECT_ERROR_SOURCE_END 0x00040000
#define BITDHT_CONNECT_ERROR_SOURCE_OTHER 0x00080000
#define BITDHT_CONNECT_ERROR_CRMOVE_FATAL 0x01000000
#define BITDHT_CONNECT_ERROR_CRMOVE_NOMOREIDS 0x02000000
#define BITDHT_CONNECT_ERROR_CRMOVE_NEXTID 0x04000000
#define BITDHT_CONNECT_ERROR_CRMOVE_PAUSED 0x08000000
// ERROR CODES.
#define BITDHT_CONNECT_ERROR_GENERIC 0x00000001
#define BITDHT_CONNECT_ERROR_PROTOCOL 0x00000002
#define BITDHT_CONNECT_ERROR_TIMEOUT 0x00000003
#define BITDHT_CONNECT_ERROR_TEMPUNAVAIL 0x00000004 // Haven't got ext address yet.
#define BITDHT_CONNECT_ERROR_NOADDRESS 0x00000005 // Can't find the peer in tables.
#define BITDHT_CONNECT_ERROR_UNREACHABLE 0x00000006 // Symmetric NAT
#define BITDHT_CONNECT_ERROR_UNSUPPORTED 0x00000007
#define BITDHT_CONNECT_ERROR_OVERLOADED 0x00000008
#define BITDHT_CONNECT_ERROR_AUTH_DENIED 0x00000009
#define BITDHT_CONNECT_ERROR_DUPLICATE 0x0000000a
// These are slightly special ones used for CB_REQUEST
#define BITDHT_CONNECT_ERROR_TOOMANYRETRY 0x0000000b
#define BITDHT_CONNECT_ERROR_OUTOFPROXY 0x0000000c
#define BITDHT_CONNECT_ERROR_USER 0x0000000d
/* Definitions of bdSpace Peer and Bucket are publicly available,
* so we can expose the bucket entries for the gui.
*/
class bdPeer
{
public:
bdPeer():mPeerFlags(0), mLastSendTime(0), mLastRecvTime(0), mFoundTime(0), mExtraFlags(0) { return; }
bdId mPeerId;
uint32_t mPeerFlags;
time_t mLastSendTime;
time_t mLastRecvTime;
time_t mFoundTime; /* time stamp that peer was found */
uint32_t mExtraFlags;
};
class bdBucket
{
public:
bdBucket();
/* list so we can queue properly */
std::list<bdPeer> entries;
};
class bdQueryStatus
{
public:
uint32_t mStatus;
uint32_t mQFlags;
std::list<bdId> mResults;
};
class bdQuerySummary
{
public:
bdNodeId mId;
bdMetric mLimit;
uint32_t mState;
time_t mQueryTS;
uint32_t mQueryFlags;
int32_t mSearchTime;
int32_t mQueryIdlePeerRetryPeriod; // seconds between retries.
// closest peers
std::multimap<bdMetric, bdPeer> mClosest;
std::multimap<bdMetric, bdPeer> mPotentialPeers;
std::list<bdPeer> mProxiesUnknown;
std::list<bdPeer> mProxiesFlagged;
};
/* Status options */
@ -140,10 +269,25 @@ virtual void bdPrintNodeId(std::ostream &out, const bdNodeId *a) = 0;
#define BITDHT_QUERY_SUCCESS 6
/* Query Flags */
#define BITDHT_QFLAGS_NONE 0
#define BITDHT_QFLAGS_DISGUISE 1
#define BITDHT_QFLAGS_DO_IDLE 2
#define BITDHT_QFLAGS_INTERNAL 4 // means it runs through startup.
#define BITDHT_QFLAGS_NONE 0x0000
#define BITDHT_QFLAGS_DISGUISE 0x0001
#define BITDHT_QFLAGS_DO_IDLE 0x0002
#define BITDHT_QFLAGS_INTERNAL 0x0004 // means it runs through startup.
#define BITDHT_QFLAGS_QUICK 0x0008 // ONE Request per peer.
#define BITDHT_QFLAGS_UPDATES 0x0010 // Do regular updates.
/* Connect Callback Flags */
#define BITDHT_CONNECT_CB_AUTH 1
#define BITDHT_CONNECT_CB_PENDING 2
#define BITDHT_CONNECT_CB_START 3
#define BITDHT_CONNECT_CB_PROXY 4
#define BITDHT_CONNECT_CB_FAILED 5
#define BITDHT_CONNECT_CB_REQUEST 6
#define BD_PROXY_CONNECTION_UNKNOWN_POINT 0
#define BD_PROXY_CONNECTION_START_POINT 1
#define BD_PROXY_CONNECTION_MID_POINT 2
#define BD_PROXY_CONNECTION_END_POINT 3
class BitDhtCallback
{
@ -156,6 +300,11 @@ virtual int dhtNodeCallback(const bdId * /*id*/, uint32_t /*peerflags*/) { ret
// must be implemented.
virtual int dhtPeerCallback(const bdId *id, uint32_t status) = 0;
virtual int dhtValueCallback(const bdNodeId *id, std::string key, uint32_t status) = 0;
// connection callback. Not required for basic behaviour, but forced for initial development.
virtual int dhtConnectCallback(const bdId *srcId, const bdId *proxyId, const bdId *destId,
uint32_t mode, uint32_t point, uint32_t cbtype, uint32_t errcode) = 0; /* { return 0; } */
};
@ -168,6 +317,12 @@ virtual void addFindNode(bdNodeId *id, uint32_t mode) = 0;
virtual void removeFindNode(bdNodeId *id) = 0;
virtual void findDhtValue(bdNodeId *id, std::string key, uint32_t mode) = 0;
/***** Connections Requests *****/
virtual void ConnectionRequest(struct sockaddr_in *laddr, bdNodeId *target, uint32_t mode, uint32_t start) = 0;
virtual void ConnectionAuth(bdId *srcId, bdId *proxyId, bdId *destId, uint32_t mode, uint32_t loc, uint32_t answer) = 0;
virtual void ConnectionOptions(uint32_t allowedModes, uint32_t flags) = 0;
/***** Add / Remove Callback Clients *****/
virtual void addCallback(BitDhtCallback *cb) = 0;
virtual void removeCallback(BitDhtCallback *cb) = 0;
@ -175,6 +330,10 @@ virtual void removeCallback(BitDhtCallback *cb) = 0;
/***** Get Results Details *****/
virtual int getDhtPeerAddress(const bdNodeId *id, struct sockaddr_in &from) = 0;
virtual int getDhtValue(const bdNodeId *id, std::string key, std::string &value) = 0;
virtual int getDhtBucket(const int idx, bdBucket &bucket) = 0;
virtual int getDhtQueries(std::map<bdNodeId, bdQueryStatus> &queries) = 0;
virtual int getDhtQueryStatus(const bdNodeId *id, bdQuerySummary &query) = 0;
/* stats and Dht state */
virtual int startDht() = 0;
@ -184,5 +343,13 @@ virtual uint32_t statsNetworkSize() = 0;
virtual uint32_t statsBDVersionSize() = 0; /* same version as us! */
};
// general helper functions for decoding error messages.
std::string decodeConnectionError(uint32_t errcode);
std::string decodeConnectionErrorCRMove(uint32_t errcode);
std::string decodeConnectionErrorSource(uint32_t errcode);
std::string decodeConnectionErrorType(uint32_t errcode);
#endif
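
The connect status codes pack three fields into one 32-bit word (error type, source, and CRMOVE action), separated by the masks defined above; the decodeConnectionError*() helpers declared just above split them back apart. The sketch below shows how the source field could be decoded with those masks; the strings and the function name are assumptions, and the real decodeConnectionErrorSource() elsewhere in libbitdht may word things differently.

// Hypothetical sketch of decoding the "source" field of an error code.
std::string exampleDecodeErrorSource(uint32_t errcode)
{
    switch (errcode & BITDHT_CONNECT_ERROR_MASK_SOURCE)
    {
        case BITDHT_CONNECT_ERROR_SOURCE_START: return "SOURCE_START";
        case BITDHT_CONNECT_ERROR_SOURCE_MID:   return "SOURCE_MID";
        case BITDHT_CONNECT_ERROR_SOURCE_END:   return "SOURCE_END";
        case BITDHT_CONNECT_ERROR_SOURCE_OTHER: return "SOURCE_OTHER";
        default:                                return "SOURCE_UNKNOWN";
    }
}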

View File

@ -42,18 +42,27 @@
#include "bitdht/bdmanager.h"
#include "bitdht/bdmsgs.h"
#include "bitdht/bencode.h"
#include "bitdht/bdquerymgr.h"
#include <algorithm>
#include <sstream>
#include <iomanip>
#include "util/bdnet.h"
#include "util/bdrandom.h"
/***
* #define DEBUG_MGR 1
* #define DEBUG_MGR_PKT 1
***/
//#define DEBUG_MGR 1
//#define LOCAL_NET_FLAG (BITDHT_PEER_STATUS_DHT_APPL)
#define LOCAL_NET_FLAG (BITDHT_PEER_STATUS_DHT_ENGINE)
// This is eventually what we want.
//#define LOCAL_NET_FLAG (BITDHT_PEER_STATUS_DHT_ENGINE_VERSION)
bdNodeManager::bdNodeManager(bdNodeId *id, std::string dhtVersion, std::string bootfile, bdDhtFunctions *fns)
:bdNode(id, dhtVersion, bootfile, fns)
@ -65,7 +74,6 @@ bdNodeManager::bdNodeManager(bdNodeId *id, std::string dhtVersion, std::string b
mNetworkSize = 0;
mBdNetworkSize = 0;
/* setup a query for self */
#ifdef DEBUG_MGR
std::cerr << "bdNodeManager::bdNodeManager() ID: ";
mFns->bdPrintNodeId(std::cerr, id);
@ -105,6 +113,10 @@ int bdNodeManager::startDht()
mMode = BITDHT_MGR_STATE_STARTUP;
mModeTS = now;
mStartTS = now;
mSearchingDone = false;
mSearchTS = now;
return 1;
}
@ -183,7 +195,7 @@ void bdNodeManager::startQueries()
it->second.mStatus = BITDHT_QUERY_QUERYING;
uint32_t qflags = it->second.mQFlags | BITDHT_QFLAGS_DISGUISE;
addQuery(&(it->first), qflags);
mQueryMgr->addQuery(&(it->first), qflags);
// add all queries at the same time!
//return;
@ -208,7 +220,7 @@ void bdNodeManager::removeFindNode(bdNodeId *id)
}
/* cleanup any actions */
clearQuery(&(it->first));
mQueryMgr->clearQuery(&(it->first));
//clearPing(&(it->first));
/* remove from map */
@ -243,7 +255,7 @@ void bdNodeManager::iteration()
#endif
bdNodeId id;
getOwnId(&id);
addQuery(&id, BITDHT_QFLAGS_DO_IDLE | BITDHT_QFLAGS_DISGUISE);
mQueryMgr->addQuery(&id, BITDHT_QFLAGS_DO_IDLE | BITDHT_QFLAGS_DISGUISE);
mMode = BITDHT_MGR_STATE_FINDSELF;
mModeTS = now;
@ -256,9 +268,11 @@ void bdNodeManager::iteration()
* if, after 60 secs, we haven't reached MIN_OP_SPACE_SIZE, restart....
*/
#define MAX_FINDSELF_TIME 60
#define TRANSITION_OP_SPACE_SIZE 100 /* 1 query / sec, should take 12-15 secs */
#define MIN_OP_SPACE_SIZE 20
//#define MAX_FINDSELF_TIME 60
//#define MIN_OP_SPACE_SIZE 20
#define MAX_FINDSELF_TIME 10
#define MIN_OP_SPACE_SIZE 2 // for testing. self + oneother.
{
uint32_t nodeSpaceSize = mNodeSpace.calcSpaceSize();
@ -277,7 +291,7 @@ void bdNodeManager::iteration()
if (modeAge > MAX_FINDSELF_TIME)
{
if (nodeSpaceSize > MIN_OP_SPACE_SIZE)
if (nodeSpaceSize >= MIN_OP_SPACE_SIZE)
{
mMode = BITDHT_MGR_STATE_REFRESH;
mModeTS = now;
@ -337,13 +351,42 @@ void bdNodeManager::iteration()
/* run a random search for ourselves, from own App DHT peer */
QueryRandomLocalNet();
#define SEARCH_MAX_SIZE 10
if (mBdNetworkSize < SEARCH_MAX_SIZE)
{
std::cerr << "Local Netsize: " << mBdNetworkSize << " to small...searching";
std::cerr << std::endl;
/* if the network size is very small */
SearchForLocalNet();
mSearchingDone = false;
}
else
{
if (!mSearchingDone)
{
mSearchingDone = true;
mSearchTS = now;
std::cerr << "Completed LocalNet Search in : " << mSearchTS-mStartTS;
std::cerr << std::endl;
}
}
#ifdef DEBUG_MGR
std::cerr << "bdNodeManager::iteration(): REFRESH ";
std::cerr << std::endl;
#endif
status();
status(); /* calculates mNetworkSize */
mAccount.printStats(std::cerr);
/* Finally, Fail, and restart if we lose all peers */
if (mNetworkSize < MIN_OP_SPACE_SIZE)
{
mMode = BITDHT_MGR_STATE_FAILED;
mModeTS = now;
}
}
break;
@ -385,37 +428,113 @@ void bdNodeManager::iteration()
/* NB: This is a bit of a hack, the code is duplicated from bdnode & bdquery.
* should use fn calls into their functions for good generality
*/
void bdNodeManager::QueryRandomLocalNet()
#define RANDOM_SEARCH_FRAC (0.1)
int bdNodeManager::QueryRandomLocalNet()
{
bdId id;
bdNodeId targetNodeId;
uint32_t withFlag = BITDHT_PEER_STATUS_DHT_APPL;
uint32_t withFlag = LOCAL_NET_FLAG;
if (mNodeSpace.findRandomPeerWithFlag(id, withFlag))
{
/* calculate mid point */
mFns->bdRandomMidId(&mOwnId, &(id.id), &targetNodeId);
/* if we've got a very small network size... then ask them about a random peer.
* (so we get their 159/158 boxes!)
*/
bool isRandom = false;
if ((mBdNetworkSize < SEARCH_MAX_SIZE) || (RANDOM_SEARCH_FRAC > bdRandom::random_f32()))
{
bdStdRandomNodeId(&targetNodeId);
isRandom = true;
}
else
{
/* calculate mid point */
mFns->bdRandomMidId(&mOwnId, &(id.id), &targetNodeId);
}
/* do standard find_peer message */
bdToken transId;
genNewTransId(&transId);
msgout_find_node(&id, &transId, &targetNodeId);
mQueryMgr->addWorthyPeerSource(&id); /* Tell BitDHT that we really want to ping their peers */
send_query(&id, &targetNodeId);
//#ifdef DEBUG_NODE_MSGS
std::cerr << "bdNodeManager::QueryRandomLocalNet() Querying : ";
mFns->bdPrintId(std::cerr, &id);
std::cerr << " searching for : ";
mFns->bdPrintNodeId(std::cerr, &targetNodeId);
std::cerr << std::endl;
//#endif
if (isRandom)
{
std::cerr << "bdNodeManager::QueryRandomLocalNet() Search is Random!";
std::cerr << std::endl;
}
#ifdef DEBUG_NODE_MSGS
#endif
return 1;
}
else
{
//#ifdef DEBUG_NODE_MSGS
#ifdef DEBUG_NODE_MSGS
#endif
std::cerr << "bdNodeManager::QueryRandomLocalNet() No LocalNet Peer Found";
std::cerr << std::endl;
//#endif
}
return 0;
}
void bdNodeManager::SearchForLocalNet()
{
#ifdef DEBUG_MGR
#endif
std::cerr << "bdNodeManager::SearchForLocalNet()";
std::cerr << std::endl;
/* Check how many "Search Queries" we've got going. */
/* check queries */
std::map<bdNodeId, bdQueryStatus>::iterator it;
std::map<bdNodeId, bdQueryStatus> queryStatus;
mQueryMgr->QueryStatus(queryStatus);
int numSearchQueries = 0;
for(it = queryStatus.begin(); it != queryStatus.end(); it++)
{
if (it->second.mQFlags & BITDHT_QFLAGS_INTERNAL)
{
std::cerr << "bdNodeManager::SearchForLocalNet() Existing Internal Search: ";
mFns->bdPrintNodeId(std::cerr, &(it->first));
std::cerr << std::endl;
numSearchQueries++;
}
}
#define MAX_SEARCH_QUERIES 5
for(;numSearchQueries < MAX_SEARCH_QUERIES; numSearchQueries++)
{
/* install a new query */
bdNodeId targetNodeId;
bdStdRandomNodeId(&targetNodeId);
uint32_t qflags = BITDHT_QFLAGS_INTERNAL | BITDHT_QFLAGS_DISGUISE;
mQueryMgr->addQuery(&targetNodeId, qflags);
#ifdef DEBUG_NODE_MSGS
#endif
std::cerr << "bdNodeManager::SearchForLocalNet() Adding New Internal Search: ";
mFns->bdPrintNodeId(std::cerr, &(targetNodeId));
std::cerr << std::endl;
}
}
@ -432,7 +551,7 @@ int bdNodeManager::status()
/* update the network numbers */
mNetworkSize = mNodeSpace.calcNetworkSize();
mBdNetworkSize = mNodeSpace.calcNetworkSizeWithFlag(
BITDHT_PEER_STATUS_DHT_APPL);
LOCAL_NET_FLAG);
#ifdef DEBUG_MGR
std::cerr << "BitDHT NetworkSize: " << mNetworkSize << std::endl;
@ -455,7 +574,7 @@ int bdNodeManager::checkStatus()
std::map<bdNodeId, bdQueryStatus> queryStatus;
QueryStatus(queryStatus);
mQueryMgr->QueryStatus(queryStatus);
for(it = queryStatus.begin(); it != queryStatus.end(); it++)
{
@ -552,7 +671,7 @@ int bdNodeManager::checkStatus()
mFns->bdPrintNodeId(std::cerr, &(it->first));
std::cerr << std::endl;
#endif
clearQuery(&(it->first));
mQueryMgr->clearQuery(&(it->first));
}
/* FIND in activePeers */
@ -794,6 +913,21 @@ int bdNodeManager::getDhtValue(const bdNodeId *id, std::string key, std::string
return 1;
}
int bdNodeManager::getDhtBucket(const int idx, bdBucket &bucket)
{
return mNodeSpace.getDhtBucket(idx, bucket);
}
int bdNodeManager::getDhtQueries(std::map<bdNodeId, bdQueryStatus> &queries)
{
mQueryMgr->QueryStatus(queries);
return 1;
}
int bdNodeManager::getDhtQueryStatus(const bdNodeId *id, bdQuerySummary &query)
{
return mQueryMgr->QuerySummary(id, query);
}
/***** Add / Remove Callback Clients *****/
@ -870,7 +1004,7 @@ void bdNodeManager::doPeerCallback(const bdId *id, uint32_t status)
#ifdef DEBUG_MGR
std::cerr << "bdNodeManager::doPeerCallback()";
mFns->bdPrintNodeId(std::cerr, id);
mFns->bdPrintId(std::cerr, id);
std::cerr << "status: " << status;
std::cerr << std::endl;
#endif
@ -900,6 +1034,8 @@ void bdNodeManager::doValueCallback(const bdNodeId *id, std::string key, uint32_
return;
}
/******************* Internals *************************/
int bdNodeManager::isBitDhtPacket(char *data, int size, struct sockaddr_in & from)
@ -1006,12 +1142,12 @@ bdDebugCallback::~bdDebugCallback()
{
}
int bdDebugCallback::dhtPeerCallback(const bdNodeId *id, uint32_t status)
int bdDebugCallback::dhtPeerCallback(const bdId *id, uint32_t status)
{
#ifdef DEBUG_MGR
std::cerr << "bdDebugCallback::dhtPeerCallback() Id: ";
#endif
bdStdPrintNodeId(std::cerr, id);
bdStdPrintId(std::cerr, id);
#ifdef DEBUG_MGR
std::cerr << " status: " << std::hex << status << std::dec << std::endl;
#endif
@ -1033,3 +1169,77 @@ int bdDebugCallback::dhtValueCallback(const bdNodeId *id, std::string key, uint3
}
/******************* Connection Stuff ********************/
void bdNodeManager::ConnectionRequest(struct sockaddr_in *laddr, bdNodeId *target, uint32_t mode, uint32_t start)
{
std::cerr << "bdNodeManager::ConnectionRequest()";
std::cerr << std::endl;
mConnMgr->requestConnection(laddr, target, mode, start);
}
void bdNodeManager::ConnectionAuth(bdId *srcId, bdId *proxyId, bdId *destId, uint32_t mode, uint32_t loc, uint32_t answer)
{
std::cerr << "bdNodeManager::ConnectionAuth()";
std::cerr << std::endl;
if (answer == BITDHT_CONNECT_ANSWER_OKAY)
{
mConnMgr->AuthConnectionOk(srcId, proxyId, destId, mode, loc);
}
else
{
mConnMgr->AuthConnectionNo(srcId, proxyId, destId, mode, loc, answer);
}
}
void bdNodeManager::ConnectionOptions(uint32_t allowedModes, uint32_t flags)
{
mConnMgr->setConnectionOptions(allowedModes, flags);
}
/***** Connections Requests *****/
// Overloaded from bdnode for external node callback.
void bdNodeManager::callbackConnect(bdId *srcId, bdId *proxyId, bdId *destId, int mode, int point, int cbtype, int errcode)
{
std::cerr << "bdNodeManager::callbackConnect()";
std::cerr << std::endl;
#ifdef DEBUG_MGR
#endif
/* search list */
std::list<BitDhtCallback *>::iterator it;
for(it = mCallbacks.begin(); it != mCallbacks.end(); it++)
{
(*it)->dhtConnectCallback(srcId, proxyId, destId, mode, point, cbtype, errcode);
}
return;
}
int bdDebugCallback::dhtConnectCallback(const bdId *srcId, const bdId *proxyId, const bdId *destId,
uint32_t mode, uint32_t point, uint32_t cbtype, uint32_t errcode)
{
#ifdef DEBUG_MGR
std::cerr << "bdDebugCallback::dhtConnectCallback() Type: " << cbtype;
std::cerr << " errCode: " << errcode;
std::cerr << " srcId: ";
bdStdPrintId(std::cerr, srcId);
std::cerr << " proxyId: ";
bdStdPrintId(std::cerr, proxyId);
std::cerr << " destId: ";
bdStdPrintId(std::cerr, destId);
std::cerr << " mode: " << mode;
std::cerr << " point: " << point << std::endl;
#endif
return 1;
}
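
ConnectionAuth() above is the hook an application uses to answer a BITDHT_CONNECT_CB_AUTH callback: answering BITDHT_CONNECT_ANSWER_OKAY routes into AuthConnectionOk(), anything else into AuthConnectionNo(). A minimal client callback is sketched below, assuming the BitDhtInterface abstract class from bdiface.h (its name is not shown in the excerpt above) and a policy of auto-accepting direct connections; the class, policy, and const_casts are illustrative assumptions rather than code from this commit.

// Hypothetical application callback: accept every AUTH request for DIRECT
// mode and deny the rest. Passing the callback's "point" as ConnectionAuth's
// "loc" parameter is an assumption about how the two map together.
class ExampleConnectCallback: public BitDhtCallback
{
public:
    ExampleConnectCallback(BitDhtInterface *dht) : mDht(dht) { return; }

    virtual int dhtPeerCallback(const bdId * /*id*/, uint32_t /*status*/) { return 0; }
    virtual int dhtValueCallback(const bdNodeId * /*id*/, std::string /*key*/, uint32_t /*status*/) { return 0; }

    virtual int dhtConnectCallback(const bdId *srcId, const bdId *proxyId, const bdId *destId,
                                   uint32_t mode, uint32_t point, uint32_t cbtype, uint32_t /*errcode*/)
    {
        if (cbtype == BITDHT_CONNECT_CB_AUTH)
        {
            uint32_t answer = (mode == BITDHT_CONNECT_MODE_DIRECT) ?
                              BITDHT_CONNECT_ANSWER_OKAY : BITDHT_CONNECT_ERROR_AUTH_DENIED;
            // const_cast needed because ConnectionAuth takes non-const bdId pointers.
            mDht->ConnectionAuth((bdId *) srcId, (bdId *) proxyId, (bdId *) destId,
                                 mode, point, answer);
        }
        return 1;
    }
private:
    BitDhtInterface *mDht;
};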

View File

@ -113,6 +113,17 @@ virtual void removeCallback(BitDhtCallback *cb);
/***** Get Results Details *****/
virtual int getDhtPeerAddress(const bdNodeId *id, struct sockaddr_in &from);
virtual int getDhtValue(const bdNodeId *id, std::string key, std::string &value);
virtual int getDhtBucket(const int idx, bdBucket &bucket);
virtual int getDhtQueries(std::map<bdNodeId, bdQueryStatus> &queries);
virtual int getDhtQueryStatus(const bdNodeId *id, bdQuerySummary &query);
/***** Connection Interface ****/
virtual void ConnectionRequest(struct sockaddr_in *laddr, bdNodeId *target, uint32_t mode, uint32_t start);
virtual void ConnectionAuth(bdId *srcId, bdId *proxyId, bdId *destId,
uint32_t mode, uint32_t loc, uint32_t answer);
virtual void ConnectionOptions(uint32_t allowedModes, uint32_t flags);
/* stats and Dht state */
virtual int startDht();
@ -124,7 +135,9 @@ virtual uint32_t statsBDVersionSize(); /* same version as us! */
// Overloaded from bdnode for external node callback.
virtual void addPeer(const bdId *id, uint32_t peerflags);
// Overloaded from bdnode for external node callback.
virtual void callbackConnect(bdId *srcId, bdId *proxyId, bdId *destId,
int mode, int point, int cbtype, int errcode);
int isBitDhtPacket(char *data, int size, struct sockaddr_in &from);
private:
@ -139,7 +152,9 @@ int checkStatus();
int checkPingStatus();
int SearchOutOfDate();
void startQueries();
void QueryRandomLocalNet();
int QueryRandomLocalNet();
void SearchForLocalNet();
std::map<bdNodeId, bdQueryPeer> mActivePeers;
std::list<BitDhtCallback *> mCallbacks;
@ -147,6 +162,10 @@ void QueryRandomLocalNet();
uint32_t mMode;
time_t mModeTS;
time_t mStartTS;
time_t mSearchTS;
bool mSearchingDone;
bdDhtFunctions *mFns;
uint32_t mNetworkSize;
@ -162,8 +181,10 @@ class bdDebugCallback: public BitDhtCallback
{
public:
~bdDebugCallback();
virtual int dhtPeerCallback(const bdNodeId *id, uint32_t status);
virtual int dhtPeerCallback(const bdId *id, uint32_t status);
virtual int dhtValueCallback(const bdNodeId *id, std::string key, uint32_t status);
virtual int dhtConnectCallback(const bdId *srcId, const bdId *proxyId, const bdId *destId,
uint32_t mode, uint32_t point, uint32_t cbtype, uint32_t errcode);
};

View File

@ -576,6 +576,13 @@ uint32_t beMsgType(be_node *n)
#endif
return BITDHT_MSG_TYPE_POST_HASH;
}
else if (beMsgMatchString(query, "connect", 7))
{
#ifdef DEBUG_MSG_TYPE
std::cerr << "bsMsgType() QUERY:connect MSG TYPE" << std::endl;
#endif
return BITDHT_MSG_TYPE_CONNECT;
}
#ifdef DEBUG_MSG_TYPE
std::cerr << "bsMsgType() QUERY:UNKNOWN MSG TYPE, dumping dict" << std::endl;
/* dump answer */
@ -735,6 +742,28 @@ int beMsgGetListBdIds(be_node *n, std::list<bdId> &nodes)
return 1;
}
int beMsgGetBdId(be_node *n, bdId &id)
{
/* extract the string pointer, and size */
/* split into parts */
if (n->type != BE_STR)
{
return 0;
}
int len = be_str_len(n);
if (len < BITDHT_COMPACTNODEID_LEN)
{
return 0;
}
if (decodeCompactNodeId(&id, n->val.s, BITDHT_COMPACTNODEID_LEN))
{
return 1;
}
return 0;
}
std::string encodeCompactNodeId(bdId *id)
{
std::string enc;
@ -831,3 +860,175 @@ int beMsgGetUInt32(be_node *n, uint32_t *port)
}
/********************************************************************************************************************
* CONNECTION EXTENSIONS
*
*/
/*
ping Query = {"t":"aa", "y":"q", "q":"ping", "a":{"id":"abcdefghij0123456789"}}
bencoded = d1:ad2:id20:abcdefghij0123456789e1:q4:ping1:t2:aa1:y1:qe
*/
/*
Response = {"t":"aa", "y":"r", "r": {"id":"mnopqrstuvwxyz123456"}}
bencoded = d1:rd2:id20:mnopqrstuvwxyz123456e1:t2:aa1:y1:re
*/
/*
find_node Query = {"t":"aa", "y":"q", "q":"find_node", "a": {"id":"abcdefghij0123456789", "target":"mnopqrstuvwxyz123456"}}
bencoded = d1:ad2:id20:abcdefghij01234567896:target20:mnopqrstuvwxyz123456e1:q9:find_node1:t2:aa1:y1:qe
*/
#if 0
int bitdht_find_node_msg(bdToken *tid, bdNodeId *id, bdNodeId *target,
char *msg, int avail)
{
#ifdef DEBUG_MSGS
fprintf(stderr, "bitdht_find_node_msg()\n");
#endif
be_node *dict = be_create_dict();
be_node *iddict = be_create_dict();
be_node *idnode = be_create_str_wlen((char *) id->data, BITDHT_KEY_LEN);
be_node *targetnode = be_create_str_wlen((char *) target->data, BITDHT_KEY_LEN);
be_node *tidnode = be_create_str_wlen((char *) tid->data, tid->len);
be_node *yqrnode = be_create_str("q");
be_node *findnode = be_create_str("find_node");
be_add_keypair(iddict, "id", idnode);
be_add_keypair(iddict, "target", targetnode);
be_add_keypair(dict, "a", iddict);
be_add_keypair(dict, "t", tidnode);
be_add_keypair(dict, "y", yqrnode);
be_add_keypair(dict, "q", findnode);
#ifdef DEBUG_MSG_DUMP
/* dump answer */
be_dump(dict);
#endif
int blen = be_encode(dict, msg, avail);
be_free(dict);
return blen;
}
#endif
/****
* Thinking about the format of this message.
* id: ownId is standard in all other messages, so should keep the same!
* src:
* target:
* mode: d,p or r
*
* A -> B -> C
* direct: A ------> B
* ---> id:A src:A target:B mode:d
* <--- id:B src:A target:B mode:d a:OK
*
* proxy: A ------> B -------> C
* A->B id:A src:A target:C mode:p q
*
* a)
* B->A id:B src:A target:C mode:p r:NOK
*
* b)
* B->C id:B src:A target:C mode:p q
* C->B id:C src:A target:C mode:p r:NOK
* B->A id:B src:A target:C mode:p r:NOK
*
* c)
* B->C id:B src:A target:C mode:p q
* C->B id:C src:A target:C mode:p r:OK
* B->A id:B src:A target:C mode:p r:OK
* connect happens.
* Dropped packets will affect this!
*
*
* REQUIRED BITS FOR A MESSAGE
* 1) DIRECT
* -> REQUEST, ownId, targetId, transId, mode.
* -> RESPONSE, ownId, targetId, transId, mode, answer.
*
* 2) PROXY
*/
int bitdht_connect_genmsg(bdToken *tid, bdNodeId *id, int msgtype, bdId *src, bdId *dest, int mode, int status, char *msg, int avail)
{
#ifdef DEBUG_MSGS
fprintf(stderr, "bitdht_connect_genmsg()\n");
#endif
be_node *dict = be_create_dict();
be_node *iddict = be_create_dict();
be_node *idnode = be_create_str_wlen((char *) id->data, BITDHT_KEY_LEN);
std::string srcEnc = encodeCompactNodeId(src);
std::string destEnc = encodeCompactNodeId(dest);
be_node *srcnode = be_create_str_wlen(srcEnc.c_str(), BITDHT_COMPACTNODEID_LEN);
be_node *destnode = be_create_str_wlen(destEnc.c_str(), BITDHT_COMPACTNODEID_LEN);
be_node *typenode = be_create_int(msgtype);
be_node *statusnode = be_create_int(status);
be_node *modenode = be_create_int(mode);
be_node *tidnode = be_create_str_wlen((char *) tid->data, tid->len);
be_node *yqrnode = be_create_str("q");
be_node *cmdnode = be_create_str("connect");
#if 0
be_node *modenode = NULL;
switch(mode)
{
case BITDHT_CONNECT_MODE_DIRECT:
modenode = be_create_str("d");
break;
case BITDHT_CONNECT_MODE_PROXY:
modenode = be_create_str("p");
break;
case BITDHT_CONNECT_MODE_RELAY:
modenode = be_create_str("r");
break;
default:
modenode = be_create_str("u");
break;
}
#endif
be_add_keypair(iddict, "id", idnode);
be_add_keypair(iddict, "src", srcnode);
be_add_keypair(iddict, "dest", destnode);
be_add_keypair(iddict, "mode", modenode);
be_add_keypair(iddict, "status", statusnode);
be_add_keypair(iddict, "type", typenode);
be_add_keypair(dict, "a", iddict);
be_add_keypair(dict, "t", tidnode);
be_add_keypair(dict, "y", yqrnode);
be_add_keypair(dict, "q", cmdnode);
#ifdef DEBUG_MSG_DUMP
/* dump answer */
be_dump(dict);
#endif
int blen = be_encode(dict, msg, avail);
be_free(dict);
return blen;
}
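
Following the convention of the ping and find_node examples earlier in this file, the connect query built by bitdht_connect_genmsg() would take roughly the shape below. The concrete id/src/dest byte strings are placeholders, "type":101 illustrates a CONNECT_REQUEST, and the key order is only indicative of the keypairs added above, not of the exact bencoded byte stream.

connect Query = {"t":"aa", "y":"q", "q":"connect",
                 "a":{"id":"<20-byte own node id>",
                      "src":"<26-byte compact node id>",
                      "dest":"<26-byte compact node id>",
                      "mode":1, "status":0, "type":101}}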

View File

@ -45,6 +45,20 @@
#define BITDHT_MSG_TYPE_POST_HASH 8
#define BITDHT_MSG_TYPE_REPLY_POST 9
// THESE ARE EXTENSIONS
#define BITDHT_MSG_TYPE_CONNECT 20
// CONNECTIONS.
#define BITDHT_MSG_TYPE_CONNECT_REQUEST 101
#define BITDHT_MSG_TYPE_CONNECT_REPLY 102
#define BITDHT_MSG_TYPE_CONNECT_START 103
#define BITDHT_MSG_TYPE_CONNECT_ACK 104
// FANCY HASHES.
#define BITDHT_COMPACTNODEID_LEN 26
#define BITDHT_COMPACTPEERID_LEN 6
@ -78,6 +92,10 @@ int bitdht_reply_announce_msg(bdToken *tid, bdNodeId *id,
char *msg, int avail);
// Extensions.
int bitdht_connect_genmsg(bdToken *tid, bdNodeId *id, int msgtype, bdId *src, bdId *dest, int mode, int status, char *msg, int avail);
//int response_peers_message()
//int response_closestnodes_message()
@ -94,6 +112,7 @@ be_node *makeCompactNodeIdString(std::list<bdId> &nodes);
int beMsgGetToken(be_node *n, bdToken &token);
int beMsgGetNodeId(be_node *n, bdNodeId &nodeId);
int beMsgGetBdId(be_node *n, bdId &id);
int beMsgGetListBdIds(be_node *n, std::list<bdId> &nodes);
int beMsgGetListStrings(be_node *n, std::list<std::string> &values);

File diff suppressed because it is too large

View File

@ -34,6 +34,11 @@
#include "bitdht/bdhash.h"
#include "bitdht/bdhistory.h"
#include "bitdht/bdconnection.h"
#include "bitdht/bdaccount.h"
class bdFilter;
#define BD_QUERY_NEIGHBOURS 1
#define BD_QUERY_HASH 2
@ -90,39 +95,70 @@ class bdNodeNetMsg
};
class bdNode
class bdNodePublisher
{
public:
/* simplified outgoing msg functions (for the managers) */
virtual void send_ping(bdId *id) = 0; /* message out */
virtual void send_query(bdId *id, bdNodeId *targetNodeId) = 0; /* message out */
virtual void send_connect_msg(bdId *id, int msgtype,
bdId *srcAddr, bdId *destAddr, int mode, int status) = 0;
// internal Callback -> normally continues to callbackConnect().
virtual void callbackConnect(bdId *srcId, bdId *proxyId, bdId *destId,
int mode, int point, int cbtype, int errcode) = 0;
};
class bdNode: public bdNodePublisher
{
public:
bdNode(bdNodeId *id, std::string dhtVersion, std::string bootfile,
bdDhtFunctions *fns);
void init(); /* sets up the self referential classes (mQueryMgr & mConnMgr) */
void setNodeOptions(uint32_t optFlags);
/* startup / shutdown node */
void restartNode();
void shutdownNode();
void getOwnId(bdNodeId *id);
// virtual so manager can do callback.
// peer flags defined in bdiface.h
virtual void addPeer(const bdId *id, uint32_t peerflags);
void printState();
void checkPotentialPeer(bdId *id);
void addPotentialPeer(bdId *id);
void addQuery(const bdNodeId *id, uint32_t qflags);
void clearQuery(const bdNodeId *id);
void QueryStatus(std::map<bdNodeId, bdQueryStatus> &statusMap);
void checkPotentialPeer(bdId *id, bdId *src);
void addPotentialPeer(bdId *id, bdId *src);
void iterationOff();
void iteration();
void processRemoteQuery();
void updateStore();
/* simplified outgoing msg functions (for the managers) */
virtual void send_ping(bdId *id); /* message out */
virtual void send_query(bdId *id, bdNodeId *targetNodeId); /* message out */
virtual void send_connect_msg(bdId *id, int msgtype,
bdId *srcAddr, bdId *destAddr, int mode, int status);
/* interaction with outside world */
// This is implemented in bdManager.
// virtual void callbackConnect(bdId *srcId, bdId *proxyId, bdId *destId,
// int mode, int point, int cbtype, int errcode);
/* interaction with outside world (Accessed by controller to deliver us msgs) */
int outgoingMsg(struct sockaddr_in *addr, char *msg, int *len);
void incomingMsg(struct sockaddr_in *addr, char *msg, int len);
// Below is internal Management of incoming / outgoing messages.
private:
/* internal interaction with network */
void sendPkt(char *msg, int len, struct sockaddr_in addr);
void recvPkt(char *msg, int len, struct sockaddr_in addr);
@ -163,6 +199,11 @@ void recvPkt(char *msg, int len, struct sockaddr_in addr);
bdNodeId *info_hash, uint32_t port, bdToken *token);
void msgin_reply_post(bdId *id, bdToken *transId);
void msgout_connect_genmsg(bdId *id, bdToken *transId, int msgtype,
bdId *srcAddr, bdId *destAddr, int mode, int status);
void msgin_connect_genmsg(bdId *id, bdToken *transId, int msgtype,
bdId *srcAddr, bdId *destAddr, int mode, int status);
/* token handling */
@ -175,72 +216,43 @@ void recvPkt(char *msg, int len, struct sockaddr_in addr);
uint32_t checkIncomingMsg(bdId *id, bdToken *transId, uint32_t msgType);
void cleanupTransIdRegister();
void getOwnId(bdNodeId *id);
void doStats();
void printStats(std::ostream &out);
void printQueries();
void resetCounters();
void resetStats();
/********** Variables **********/
private:
/**** Some Variables are Protected to allow inherited classes to use *****/
protected:
bdSpace mNodeSpace;
bdQueryManager *mQueryMgr;
bdConnectManager *mConnMgr;
bdFilter *mFilterPeers;
bdNodeId mOwnId;
bdId mLikelyOwnId; // Try to workout own id address.
bdSpace mNodeSpace;
std::string mDhtVersion;
bdAccount mAccount;
bdStore mStore;
bdDhtFunctions *mFns;
bdHashSpace mHashSpace;
private:
bdStore mStore;
std::string mDhtVersion;
uint32_t mNodeOptionFlags;
bdDhtFunctions *mFns;
bdHashSpace mHashSpace;
bdHistory mHistory; /* for understanding the DHT */
std::list<bdQuery *> mLocalQueries;
std::list<bdRemoteQuery> mRemoteQueries;
std::list<bdId> mPotentialPeers;
std::list<bdNodeNetMsg *> mOutgoingMsgs;
std::list<bdNodeNetMsg *> mIncomingMsgs;
// Statistics.
double mCounterOutOfDatePing;
double mCounterPings;
double mCounterPongs;
double mCounterQueryNode;
double mCounterQueryHash;
double mCounterReplyFindNode;
double mCounterReplyQueryHash;
double mCounterRecvPing;
double mCounterRecvPong;
double mCounterRecvQueryNode;
double mCounterRecvQueryHash;
double mCounterRecvReplyFindNode;
double mCounterRecvReplyQueryHash;
double mLpfOutOfDatePing;
double mLpfPings;
double mLpfPongs;
double mLpfQueryNode;
double mLpfQueryHash;
double mLpfReplyFindNode;
double mLpfReplyQueryHash;
double mLpfRecvPing;
double mLpfRecvPong;
double mLpfRecvQueryNode;
double mLpfRecvQueryHash;
double mLpfRecvReplyFindNode;
double mLpfRecvReplyQueryHash;
};
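
bdNodePublisher is the narrow interface that bdNode now exposes to its helper managers (bdQueryManager, bdConnectManager), so they can emit messages and raise connection callbacks without depending on the rest of bdNode. The fragment below sketches how a manager might hold and use it; the class and member names are illustrative assumptions, not code from this commit.

// Hypothetical manager fragment: keep a bdNodePublisher pointer and use it to
// emit messages back through bdNode.
class ExampleManager
{
public:
    ExampleManager(bdNodePublisher *pub) : mPub(pub) { return; }

    void pingPeer(bdId *id)
    {
        mPub->send_ping(id);          // queued and sent by bdNode
    }

    void queryPeer(bdId *id, bdNodeId *target)
    {
        mPub->send_query(id, target); // find_node towards target via this peer
    }
private:
    bdNodePublisher *mPub;
};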

View File

@ -29,7 +29,8 @@
void bdPrintTransId(std::ostream &out, bdToken *transId)
{
out << transId->data;
//out << transId->data;
bdPrintToken(out, transId);
return;
}

View File

@ -26,6 +26,8 @@
#include "bitdht/bdpeer.h"
#include "util/bdnet.h"
#include "util/bdrandom.h"
#include "bitdht/bdiface.h"
#include <stdlib.h>
#include <stdio.h>
@ -179,137 +181,6 @@ int operator==(const bdId &a, const bdId &b)
}
#if 0
void bdRandomId(bdId *id)
{
bdRandomNodeId(&(id->id));
id->addr.sin_addr.s_addr = rand();
id->addr.sin_port = rand();
return;
}
void bdRandomNodeId(bdNodeId *id)
{
uint32_t *a_data = (uint32_t *) id->data;
for(int i = 0; i < BITDHT_KEY_INTLEN; i++)
{
a_data[i] = rand();
}
return;
}
/* fills in dbNodeId r, with XOR of a and b */
int bdDistance(const bdNodeId *a, const bdNodeId *b, bdMetric *r)
{
uint8_t *a_data = (uint8_t *) a->data;
uint8_t *b_data = (uint8_t *) b->data;
uint8_t *ans = (uint8_t *) r->data;
for(int i = 0; i < BITDHT_KEY_LEN; i++)
{
*(ans++) = *(a_data++) ^ *(b_data++);
}
return 1;
}
void bdRandomMidId(const bdNodeId *target, const bdNodeId *other, bdNodeId *midId)
{
bdMetric dist;
/* get distance between a & c */
bdDistance(target, other, &dist);
/* generate Random Id */
bdRandomNodeId(midId);
/* zero bits of Random Id until under 1/2 of distance
* done in bytes for ease... matches one extra byte than distance = 0
* -> hence weird order of operations
*/
bool done = false;
for(int i = 0; i < BITDHT_KEY_LEN; i++)
{
midId->data[i] = target->data[i];
if (dist.data[i] != 0)
break;
}
}
std::string bdConvertToPrintable(std::string input)
{
std::ostringstream out;
for(uint32_t i = 0; i < input.length(); i++)
{
/* sensible chars */
if ((input[i] > 31) && (input[i] < 127))
{
out << input[i];
}
else
{
out << "[0x" << std::hex << (uint32_t) input[i] << "]";
out << std::dec;
}
}
return out.str();
}
void bdPrintNodeId(std::ostream &out, const bdNodeId *a)
{
for(int i = 0; i < BITDHT_KEY_LEN; i++)
{
out << std::setw(2) << std::setfill('0') << std::hex << (uint32_t) (a->data)[i];
}
out << std::dec;
return;
}
void bdPrintId(std::ostream &out, const bdId *a)
{
bdPrintNodeId(out, &(a->id));
out << " ip:" << inet_ntoa(a->addr.sin_addr);
out << ":" << ntohs(a->addr.sin_port);
return;
}
/* returns 0-160 depending on bucket */
int bdBucketDistance(const bdNodeId *a, const bdNodeId *b)
{
bdMetric m;
bdDistance(a, b, &m);
return bdBucketDistance(&m);
}
/* returns 0-160 depending on bucket */
int bdBucketDistance(const bdMetric *m)
{
for(int i = 0; i < BITDHT_KEY_BITLEN; i++)
{
int bit = BITDHT_KEY_BITLEN - i - 1;
int byte = i / 8;
int bbit = 7 - (i % 8);
unsigned char comp = (1 << bbit);
#ifdef BITDHT_DEBUG
fprintf(stderr, "bdBucketDistance: bit:%d byte:%d bbit:%d comp:%x, data:%x\n", bit, byte, bbit, comp, m->data[byte]);
#endif
if (comp & m->data[byte])
{
return bit;
}
}
return 0;
}
#endif
bdBucket::bdBucket()
{
@ -321,6 +192,11 @@ bdSpace::bdSpace(bdNodeId *ownId, bdDhtFunctions *fns)
{
/* make some space for data */
buckets.resize(mFns->bdNumBuckets());
mAttachTS = 0;
mAttachedFlags = 0;
mAttachedCount = 0;
return;
}
@ -336,8 +212,17 @@ int bdSpace::clear()
return 1;
}
int bdSpace::setAttachedFlag(uint32_t withflags, int count)
{
mAttachedFlags = withflags;
mAttachedCount = count;
mAttachTS = 0;
return 1;
}
int bdSpace::find_nearest_nodes(const bdNodeId *id, int number, std::list<bdId> /*excluding*/, std::multimap<bdMetric, bdId> &nearest)
int bdSpace::find_nearest_nodes_with_flags(const bdNodeId *id, int number,
std::list<bdId> /* excluding */,
std::multimap<bdMetric, bdId> &nearest, uint32_t with_flags)
{
std::multimap<bdMetric, bdId> closest;
std::multimap<bdMetric, bdId>::iterator mit;
@ -363,16 +248,19 @@ int bdSpace::find_nearest_nodes(const bdNodeId *id, int number, std::list<bdId>
{
for(eit = it->entries.begin(); eit != it->entries.end(); eit++)
{
mFns->bdDistance(id, &(eit->mPeerId.id), &dist);
closest.insert(std::pair<bdMetric, bdId>(dist, eit->mPeerId));
if ((!with_flags) || ((with_flags & eit->mPeerFlags) == with_flags))
{
mFns->bdDistance(id, &(eit->mPeerId.id), &dist);
closest.insert(std::pair<bdMetric, bdId>(dist, eit->mPeerId));
#if 0
std::cerr << "Added NodeId: ";
bdPrintNodeId(std::cerr, &(eit->mPeerId.id));
std::cerr << " Metric: ";
bdPrintNodeId(std::cerr, &(dist));
std::cerr << std::endl;
std::cerr << "Added NodeId: ";
bdPrintNodeId(std::cerr, &(eit->mPeerId.id));
std::cerr << " Metric: ";
bdPrintNodeId(std::cerr, &(dist));
std::cerr << std::endl;
#endif
}
}
}
@ -429,7 +317,124 @@ int bdSpace::find_nearest_nodes(const bdNodeId *id, int number, std::list<bdId>
return 1;
}
int bdSpace::find_nearest_nodes(const bdNodeId *id, int number,
std::multimap<bdMetric, bdId> &nearest)
{
std::list<bdId> excluding;
uint32_t with_flag = 0;
return find_nearest_nodes_with_flags(id, number, excluding, nearest, with_flag);
}
/* This is much cheaper than find nearest... we only look in the one bucket
*/
int bdSpace::find_node(const bdNodeId *id, int number, std::list<bdId> &matchIds, uint32_t with_flags)
{
bdMetric dist;
mFns->bdDistance(id, &(mOwnId), &dist);
int buckno = mFns->bdBucketDistance(&dist);
std::cerr << "bdSpace::find_node(NodeId:";
mFns->bdPrintNodeId(std::cerr, id);
std::cerr << ")";
std::cerr << " Number: " << number;
std::cerr << " Bucket #: " << buckno;
std::cerr << std::endl;
#ifdef DEBUG_BD_SPACE
#endif
bdBucket &buck = buckets[buckno];
std::list<bdPeer>::iterator eit;
int matchCount = 0;
for(eit = buck.entries.begin(); eit != buck.entries.end(); eit++)
{
std::cerr << "bdSpace::find_node() Checking Against Peer: ";
mFns->bdPrintId(std::cerr, &(eit->mPeerId));
std::cerr << " withFlags: " << eit->mPeerFlags;
std::cerr << std::endl;
if ((!with_flags) || ((with_flags & eit->mPeerFlags) == with_flags))
{
if (*id == eit->mPeerId.id)
{
matchIds.push_back(eit->mPeerId);
matchCount++;
std::cerr << "bdSpace::find_node() Found Matching Peer: ";
mFns->bdPrintId(std::cerr, &(eit->mPeerId));
std::cerr << " withFlags: " << eit->mPeerFlags;
std::cerr << std::endl;
}
}
else
{
if (*id == eit->mPeerId.id)
{
//matchIds.push_back(eit->mPeerId);
//matchCount++;
std::cerr << "bdSpace::find_node() Found (WITHOUT FLAGS) Matching Peer: ";
mFns->bdPrintId(std::cerr, &(eit->mPeerId));
std::cerr << " withFlags: " << eit->mPeerFlags;
std::cerr << std::endl;
}
}
}
std::cerr << "bdSpace::find_node() Found " << matchCount << " Matching Peers";
std::cerr << std::endl << std::endl;
#ifdef DEBUG_BD_SPACE
#endif
return matchCount;
}
/* even cheaper again... no big lists */
int bdSpace::find_exactnode(const bdId *id, bdPeer &peer)
{
bdMetric dist;
mFns->bdDistance(&(id->id), &(mOwnId), &dist);
int buckno = mFns->bdBucketDistance(&dist);
std::cerr << "bdSpace::find_exactnode(Id:";
mFns->bdPrintId(std::cerr, id);
std::cerr << ")";
std::cerr << " Bucket #: " << buckno;
std::cerr << std::endl;
#ifdef DEBUG_BD_SPACE
#endif
bdBucket &buck = buckets[buckno];
std::list<bdPeer>::iterator eit;
int matchCount = 0;
for(eit = buck.entries.begin(); eit != buck.entries.end(); eit++)
{
if (*id == eit->mPeerId)
{
std::cerr << "bdSpace::find_exactnode() Found Matching Peer: ";
mFns->bdPrintId(std::cerr, &(eit->mPeerId));
std::cerr << " withFlags: " << eit->mPeerFlags;
std::cerr << std::endl;
peer = (*eit);
return 1;
}
}
std::cerr << "bdSpace::find_exactnode() ERROR Failed to find Matching Peer: ";
std::cerr << std::endl;
return 0;
}
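/* Editor's note (not part of the patch): a minimal caller-side sketch of the two cheaper lookups,
 * assuming an existing bdSpace instance and BITDHT_PEER_STATUS_RECV_PONG as the filter flag:
 *
 *   std::list<bdId> matches;
 *   if (space.find_node(&targetNodeId, 10, matches, BITDHT_PEER_STATUS_RECV_PONG))
 *   {
 *       // at least one entry in the single candidate bucket matched the id and carried the flag
 *   }
 *
 *   bdPeer peer;
 *   if (space.find_exactnode(&fullId, peer))
 *   {
 *       // exact id + address match; peer now holds its flags and timestamps
 *   }
 */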
#if 0
int bdSpace::out_of_date_peer(bdId &id)
{
/*
@ -446,20 +451,229 @@ int bdSpace::out_of_date_peer(bdId &id)
/* iterate through the buckets, and sort by distance */
for(it = buckets.begin(); it != buckets.end(); it++)
{
for(eit = it->entries.begin(); eit != it->entries.end(); eit++)
for(eit = it->entries.begin(); eit != it->entries.end(); )
{
/* timeout on last send time! */
if (ts - eit->mLastSendTime > BITDHT_MAX_SEND_PERIOD )
{
id = eit->mPeerId;
eit->mLastSendTime = ts;
return 1;
/* We want to ping a peer iff:
* 1) They are out-of-date: mLastRecvTime is too old.
* 2) They don't have 0x0001 flag (we haven't received a PONG) and never sent.
*/
if ((ts - eit->mLastRecvTime > BITDHT_MAX_SEND_PERIOD ) ||
!(eit->mPeerFlags & BITDHT_PEER_STATUS_RECV_PONG))
{
id = eit->mPeerId;
eit->mLastSendTime = ts;
return 1;
}
}
/* we also want to remove very old entries (should it happen here?)
* which are not pushed out by newer entries (will happen for the closer buckets)
*/
bool discard = false;
/* discard very old entries */
if (ts - eit->mLastRecvTime > BITDHT_DISCARD_PERIOD)
{
discard = true;
}
/* discard peers which have not responded to anything (ie have no flags set) */
if ((ts - eit->mFoundTime > BITDHT_MAX_RESPONSE_PERIOD ) &&
(eit->mPeerFlags == 0))
{
discard = true;
}
/* INCREMENT */
if (discard)
{
eit = it->entries.erase(eit);
}
else
{
eit++;
}
}
}
return 0;
}
#endif
#define BITDHT_ATTACHED_SEND_PERIOD 17
int bdSpace::scanOutOfDatePeers(std::list<bdId> &peerIds)
{
/*
*
*/
bool doAttached = (mAttachedCount > 0);
int attachedCount = 0;
std::map<bdMetric, bdId> closest;
std::map<bdMetric, bdId>::iterator mit;
std::vector<bdBucket>::iterator it;
std::list<bdPeer>::iterator eit;
time_t ts = time(NULL);
/* iterate through the buckets, and sort by distance */
for(it = buckets.begin(); it != buckets.end(); it++)
{
for(eit = it->entries.begin(); eit != it->entries.end(); )
{
bool added = false;
if (doAttached)
{
if (eit->mExtraFlags & BITDHT_PEER_EXFLAG_ATTACHED)
{
/* add to send list, if we haven't pinged recently */
if ((ts - eit->mLastSendTime > BITDHT_ATTACHED_SEND_PERIOD ) &&
(ts - eit->mLastRecvTime > BITDHT_ATTACHED_SEND_PERIOD ))
{
peerIds.push_back(eit->mPeerId);
eit->mLastSendTime = ts;
added = true;
}
attachedCount++;
}
}
/* timeout on last send time! */
if ((!added) && (ts - eit->mLastSendTime > BITDHT_MAX_SEND_PERIOD ))
{
/* We want to ping a peer iff:
* 1) They are out-of-date: mLastRecvTime is too old.
* 2) They don't have 0x0001 flag (we haven't received a PONG) and never sent.
*/
if ((ts - eit->mLastRecvTime > BITDHT_MAX_SEND_PERIOD ) ||
!(eit->mPeerFlags & BITDHT_PEER_STATUS_RECV_PONG))
{
peerIds.push_back(eit->mPeerId);
eit->mLastSendTime = ts;
}
}
/* we also want to remove very old entries (should it happen here?)
* which are not pushed out by newer entries (will happen for the closer buckets)
*/
bool discard = false;
/* discard very old entries */
if (ts - eit->mLastRecvTime > BITDHT_DISCARD_PERIOD)
{
discard = true;
}
/* discard peers which have not responded to anything (ie have no flags set) */
/* changed to: have not identified themselves, since ping was added to the list of flags. */
if ((ts - eit->mFoundTime > BITDHT_MAX_RESPONSE_PERIOD ) &&
!(eit->mPeerFlags & BITDHT_PEER_STATUS_RECV_PONG))
{
discard = true;
}
/* INCREMENT */
if (discard)
{
eit = it->entries.erase(eit);
}
else
{
eit++;
}
}
}
#define ATTACH_UPDATE_PERIOD 600
if ((ts - mAttachTS > ATTACH_UPDATE_PERIOD) || (attachedCount != mAttachedCount))
{
//std::cerr << "Updating ATTACH Stuff";
//std::cerr << std::endl;
updateAttachedPeers(); /* XXX TEMP HACK to look at stability */
mAttachTS = ts;
}
return (peerIds.size());
}
int bdSpace::updateAttachedPeers()
{
/*
*
*/
bool doAttached = (mAttachedCount > 0);
int attachedCount = 0;
if (!doAttached)
{
return 0;
}
std::map<bdMetric, bdId> closest;
std::map<bdMetric, bdId>::iterator mit;
std::vector<bdBucket>::iterator it;
std::list<bdPeer>::reverse_iterator eit;
/* skip the first bucket, as we don't want to ping ourselves! */
it = buckets.begin();
if (it != buckets.end())
{
it++;
}
/* iterate through the buckets (sorted by distance) */
for(; it != buckets.end(); it++)
{
/* start from the back, as these are the most recently seen (and more likely to be the old ATTACHED) */
for(eit = it->entries.rbegin(); eit != it->entries.rend(); eit++)
{
bool added = false;
if (doAttached)
{
if ((eit->mPeerFlags & mAttachedFlags) == mAttachedFlags)
{
/* flag as attached */
eit->mExtraFlags |= BITDHT_PEER_EXFLAG_ATTACHED;
/* inc count, and cancel search if we've found them */
attachedCount++;
if (attachedCount >= mAttachedCount)
{
doAttached = false;
}
}
else
{
eit->mExtraFlags &= ~BITDHT_PEER_EXFLAG_ATTACHED;
}
}
else
{
eit->mExtraFlags &= ~BITDHT_PEER_EXFLAG_ATTACHED;
}
}
}
}
/* Called to add or update peer.
* sorts bucket lists by lastRecvTime.
* updates requested node.
@ -468,6 +682,7 @@ int bdSpace::out_of_date_peer(bdId &id)
/* peer flags
* order is important!
* higher bits = more priority.
* BITDHT_PEER_STATUS_RECVPING
* BITDHT_PEER_STATUS_RECVPONG
* BITDHT_PEER_STATUS_RECVNODES
* BITDHT_PEER_STATUS_RECVHASHES
@ -511,15 +726,22 @@ int bdSpace::add_peer(const bdId *id, uint32_t peerflags)
/* loop through ids, to find it */
for(it = buck.entries.begin(); it != buck.entries.end(); it++)
{
if (*id == it->mPeerId)
// should check addr too!
{
/* similar id check */
if (mFns->bdSimilarId(id, &(it->mPeerId)))
{
bdPeer peer = *it;
it = buck.entries.erase(it);
peer.mLastRecvTime = ts;
peer.mPeerFlags |= peerflags; /* must be cumulative ... so can do online, replynodes, etc */
/* also update port from incoming id, as we have definitely recved from it */
if (mFns->bdUpdateSimilarId(&(peer.mPeerId), id))
{
/* updated it... must be Unstable */
peer.mExtraFlags |= BITDHT_PEER_EXFLAG_UNSTABLE;
}
buck.entries.push_back(peer);
#ifdef DEBUG_BD_SPACE
@ -549,7 +771,7 @@ int bdSpace::add_peer(const bdId *id, uint32_t peerflags)
{
/* check head of list */
bdPeer &peer = buck.entries.front();
if (peer.mLastRecvTime - ts > BITDHT_MAX_RECV_PERIOD)
if (ts - peer.mLastRecvTime > BITDHT_MAX_RECV_PERIOD)
{
#ifdef DEBUG_BD_SPACE
std::cerr << "Dropping Out-of-Date peer in bucket" << std::endl;
@ -590,8 +812,10 @@ int bdSpace::add_peer(const bdId *id, uint32_t peerflags)
newPeer.mPeerId = *id;
newPeer.mLastRecvTime = ts;
newPeer.mLastSendTime = ts; //????
newPeer.mLastSendTime = 0; // ts; //????
newPeer.mFoundTime = ts;
newPeer.mPeerFlags = peerflags;
newPeer.mExtraFlags = 0;
buck.entries.push_back(newPeer);
@ -764,6 +988,16 @@ int bdSpace::printDHT()
}
int bdSpace::getDhtBucket(const int idx, bdBucket &bucket)
{
if ((idx < 0) || (idx > (int) buckets.size() - 1 ))
{
return 0;
}
bucket = buckets[idx];
return 1;
}
uint32_t bdSpace::calcNetworkSize()
{
std::vector<bdBucket>::iterator it;
@ -982,7 +1216,7 @@ bool bdSpace::findRandomPeerWithFlag(bdId &id, uint32_t withFlag)
if(totalcount == 0)
return false ;
uint32_t rnd = rand() % totalcount;
uint32_t rnd = bdRandom::random_u32() % totalcount;
uint32_t i = 0;
uint32_t buck = 0;
@ -1004,10 +1238,11 @@ bool bdSpace::findRandomPeerWithFlag(bdId &id, uint32_t withFlag)
{
if (i == rnd)
{
#ifdef BITDHT_DEBUG
std::cerr << "bdSpace::findRandomPeerWithFlag() found #" << i;
std::cerr << " in bucket #" << buck;
std::cerr << std::endl;
#endif
/* found */
id = lit->mPeerId;
return true;
@ -1018,6 +1253,8 @@ bool bdSpace::findRandomPeerWithFlag(bdId &id, uint32_t withFlag)
}
std::cerr << "bdSpace::findRandomPeerWithFlag() failed to find " << rnd << " / " << totalcount;
std::cerr << std::endl;
#ifdef BITDHT_DEBUG
#endif
return false;
}

View File

@ -49,8 +49,14 @@
#define BITDHT_ULLONG_BITS 64
#define BITDHT_MAX_SEND_PERIOD 600 // retry every 10 mins.
#define BITDHT_MAX_RECV_PERIOD 1500 // out-of-date
#define BITDHT_MAX_RESPONSE_PERIOD (15)
#define BITDHT_MAX_SEND_PERIOD 300 // 5 minutes.
#define BITDHT_MAX_RECV_PERIOD (BITDHT_MAX_SEND_PERIOD + BITDHT_MAX_RESPONSE_PERIOD) // didn't respond to a ping.
// Properly out of date.
#define BITDHT_DISCARD_PERIOD (2 * BITDHT_MAX_SEND_PERIOD + BITDHT_MAX_RESPONSE_PERIOD) // didn't respond to two pings.
// Must have a FLAG by this time. (Make it really quick - so we throw away the rubbish).
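// Editor's note (not part of the patch): with these values a quiet peer is re-pinged after
// 300s, counted as out-of-date after 300 + 15 = 315s (one missed reply), and discarded
// after 2*300 + 15 = 615s (two missed replies).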
#include <list>
@ -116,6 +122,10 @@ int operator==(const bdId &a, const bdId &b);
//std::string bdConvertToPrintable(std::string input);
/****
* DEFINED in bdiface.h
*
class bdPeer
{
public:
@ -124,21 +134,22 @@ class bdPeer
uint32_t mPeerFlags;
time_t mLastSendTime;
time_t mLastRecvTime;
time_t mFoundTime; /* time stamp that peer was found */
time_t mFoundTime; // time stamp that peer was found
};
class bdBucket
{
public:
bdBucket();
/* list so we can queue properly */
// list so we can queue properly
std::list<bdPeer> entries;
};
*
*
*****/
class bdSpace
{
@ -148,13 +159,28 @@ class bdSpace
int clear();
int setAttachedFlag(uint32_t withflags, int count);
/* accessors */
int find_nearest_nodes(const bdNodeId *id, int number,
std::list<bdId> excluding, std::multimap<bdMetric, bdId> &nearest);
std::multimap<bdMetric, bdId> &nearest);
int find_nearest_nodes_with_flags(const bdNodeId *id, int number,
std::list<bdId> excluding,
std::multimap<bdMetric, bdId> &nearest, uint32_t with_flag);
int find_node(const bdNodeId *id, int number,
std::list<bdId> &matchIds, uint32_t with_flag);
int find_exactnode(const bdId *id, bdPeer &peer);
// switched to more efficient single sweep.
//int out_of_date_peer(bdId &id); // side-effect updates, send flag on peer.
int scanOutOfDatePeers(std::list<bdId> &peerIds);
int updateAttachedPeers();
int out_of_date_peer(bdId &id); // side-effect updates, send flag on peer.
int add_peer(const bdId *id, uint32_t mode);
int printDHT();
int getDhtBucket(const int idx, bdBucket &bucket);
uint32_t calcNetworkSize();
uint32_t calcNetworkSizeWithFlag(uint32_t withFlag);
@ -172,6 +198,10 @@ int updateOwnId(bdNodeId *newOwnId);
std::vector<bdBucket> buckets;
bdNodeId mOwnId;
bdDhtFunctions *mFns;
uint32_t mAttachedFlags;
uint32_t mAttachedCount;
time_t mAttachTS;
};

View File

@ -37,7 +37,7 @@
**/
#define EXPECTED_REPLY 20
#define EXPECTED_REPLY 10 // Speed up queries
#define QUERY_IDLE_RETRY_PEER_PERIOD 300 // 5min = (mFns->bdNodesPerBucket() * 30)
@ -65,6 +65,7 @@ bdQuery::bdQuery(const bdNodeId *id, std::list<bdId> &startList, uint32_t queryF
bdPeer peer;
peer.mLastSendTime = 0;
peer.mLastRecvTime = 0;
peer.mPeerFlags = 0;
peer.mFoundTime = now;
peer.mPeerId = *it;
@ -81,8 +82,10 @@ bdQuery::bdQuery(const bdNodeId *id, std::list<bdId> &startList, uint32_t queryF
mQueryFlags = queryFlags;
mQueryTS = now;
mSearchTime = 0;
mClosestListSize = (int) (1.5 * mFns->bdNodesPerBucket());
mQueryIdlePeerRetryPeriod = QUERY_IDLE_RETRY_PEER_PERIOD;
mRequiredPeerFlags = BITDHT_PEER_STATUS_DHT_ENGINE_VERSION; // XXX to update later.
/* setup the limit of the search
* by default it is setup to 000000 = exact match
@ -126,7 +129,8 @@ int bdQuery::nextQuery(bdId &id, bdNodeId &targetNodeId)
bool notFinished = false;
std::multimap<bdMetric, bdPeer>::iterator it;
for(it = mClosest.begin(); it != mClosest.end(); it++)
int i = 0;
for(it = mClosest.begin(); it != mClosest.end(); it++, i++)
{
bool queryPeer = false;
@ -156,11 +160,15 @@ int bdQuery::nextQuery(bdId &id, bdNodeId &targetNodeId)
/* expecting every peer to be up-to-date is too hard...
* enough just to have received lists from each
* - replacement policy will still work.
*
* Need to wait at least EXPECTED_REPLY, to make sure their answers are pinged
*/
if (it->second.mLastRecvTime == 0)
if (((it->second.mLastRecvTime == 0) || (now - it->second.mLastRecvTime < EXPECTED_REPLY)) &&
(i < mFns->bdNodesPerBucket()))
{
#ifdef DEBUG_QUERY
fprintf(stderr, "NextQuery() Never Received: notFinished = true: ");
fprintf(stderr, "NextQuery() Never Received @Idx(%d) notFinished = true: ", i);
mFns->bdPrintId(std::cerr, &(it->second.mPeerId));
std::cerr << std::endl;
#endif
@ -241,7 +249,7 @@ int bdQuery::nextQuery(bdId &id, bdNodeId &targetNodeId)
{
mState = BITDHT_QUERY_SUCCESS;
}
else if ((mPotentialClosest.begin()->second).mPeerId.id == mId)
else if ((mPotentialPeers.begin()->second).mPeerId.id == mId)
{
mState = BITDHT_QUERY_PEER_UNREACHABLE;
}
@ -257,7 +265,7 @@ int bdQuery::nextQuery(bdId &id, bdNodeId &targetNodeId)
return 0;
}
int bdQuery::addPeer(const bdId *id, uint32_t mode)
int bdQuery::addClosestPeer(const bdId *id, uint32_t mode)
{
bdMetric dist;
time_t ts = time(NULL);
@ -276,7 +284,9 @@ int bdQuery::addPeer(const bdId *id, uint32_t mode)
int i = 0;
int actualCloser = 0;
int toDrop = 0;
for(it = mClosest.begin(); it != sit; it++, i++, actualCloser++)
// switched end condition to upper_bound to provide stability for NATTED peers.
// we will favour the older entries!
for(it = mClosest.begin(); it != eit; it++, i++, actualCloser++)
{
time_t sendts = ts - it->second.mLastSendTime;
bool hasSent = (it->second.mLastSendTime != 0);
@ -292,7 +302,8 @@ int bdQuery::addPeer(const bdId *id, uint32_t mode)
fprintf(stderr, "Searching.... %di = %d - %d peers closer than this one\n", i, actualCloser, toDrop);
#endif
if (i > mFns->bdNodesPerBucket() - 1)
if (i > mClosestListSize - 1)
{
#ifdef DEBUG_QUERY
fprintf(stderr, "Distance to far... dropping\n");
@ -304,11 +315,20 @@ int bdQuery::addPeer(const bdId *id, uint32_t mode)
for(it = sit; it != eit; it++, i++)
{
/* full id check */
if (it->second.mPeerId == *id)
if (mFns->bdSimilarId(id, &(it->second.mPeerId)))
{
#ifdef DEBUG_QUERY
fprintf(stderr, "Peer Already here!\n");
#endif
if (mode)
{
/* also update port from incoming id, as we have definitely recved from it */
if (mFns->bdUpdateSimilarId(&(it->second.mPeerId), id))
{
/* updated it... must be Unstable */
it->second.mExtraFlags |= BITDHT_PEER_EXFLAG_UNSTABLE;
}
}
if (mode & BITDHT_PEER_STATUS_RECV_NODES)
{
/* only update recvTime if sendTime > checkTime.... (then its our query) */
@ -316,6 +336,7 @@ int bdQuery::addPeer(const bdId *id, uint32_t mode)
fprintf(stderr, "Updating LastRecvTime\n");
#endif
it->second.mLastRecvTime = ts;
it->second.mPeerFlags |= mode;
}
return 1;
}
@ -353,7 +374,7 @@ int bdQuery::addPeer(const bdId *id, uint32_t mode)
}
/* trim it back */
while(mClosest.size() > (uint32_t) (mFns->bdNodesPerBucket() - 1))
while(mClosest.size() > (uint32_t) (mClosestListSize - 1))
{
std::multimap<bdMetric, bdPeer>::iterator it;
it = mClosest.end();
@ -379,6 +400,7 @@ int bdQuery::addPeer(const bdId *id, uint32_t mode)
/* add it in */
bdPeer peer;
peer.mPeerId = *id;
peer.mPeerFlags = mode;
peer.mLastSendTime = 0;
peer.mLastRecvTime = 0;
peer.mFoundTime = ts;
@ -393,17 +415,71 @@ int bdQuery::addPeer(const bdId *id, uint32_t mode)
}
/* we also want to track unreachable nodes ... this allows us
* to detect if peers are online - but uncontactable by dht.
*
* simple list of closest.
/*******************************************************************************************
********************************* Add Peer Interface *************************************
*******************************************************************************************/
/**** These functions are called by bdNode to add peers to the query
* They add/update the three sets of lists.
*
* int bdQuery::addPeer(const bdId *id, uint32_t mode)
* Proper message from a peer.
*
* int bdQuery::addPotentialPeer(const bdId *id, const bdId *src, uint32_t srcmode)
* This returns 1 if worthy of pinging, 0 to ignore.
*/
int bdQuery::addPotentialPeer(const bdId *id, uint32_t mode)
#define PEER_MESSAGE 0
#define FIND_NODE_RESPONSE 1
int bdQuery::addPeer(const bdId *id, uint32_t mode)
{
addClosestPeer(id, mode);
updatePotentialPeer(id, mode, PEER_MESSAGE);
updateProxy(id, mode);
return 1;
}
int bdQuery::addPotentialPeer(const bdId *id, const bdId *src, uint32_t srcmode)
{
// is it a Potential Proxy? Always Check This.
addProxy(id, src, srcmode);
int worthy = worthyPotentialPeer(id);
int shouldPing = 0;
if (worthy)
{
shouldPing = updatePotentialPeer(id, 0, FIND_NODE_RESPONSE);
}
return shouldPing;
}
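/* Editor's note (not part of the patch): a caller-side sketch mirroring how bdQueryManager
 * (further below) feeds these two entry points; send_ping() is a hypothetical publisher call:
 *
 *   // proper message received from a peer -> update closest / potential / proxy lists
 *   query->addPeer(&fromId, BITDHT_PEER_STATUS_RECV_NODES);
 *
 *   // id extracted from a FIND_NODE response; src identifies the responder
 *   if (query->addPotentialPeer(&candidateId, &fromId, 0))
 *   {
 *       send_ping(&candidateId);   // 1 means worth pinging; 0 means ignore
 *   }
 */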
/*******************************************************************************************
********************************* Closest Peer ********************************************
*******************************************************************************************/
/*******************************************************************************************
******************************** Potential Peer *******************************************
*******************************************************************************************/
/*******
* Potential Peers are a list of the closest answers to our queries.
* Lots of these peers will not be reachable.... so will only exist in this list.
* They will also never have their PeerFlags set ;(
*
*/
/*** utility functions ***/
int bdQuery::worthyPotentialPeer(const bdId *id)
{
bdMetric dist;
time_t ts = time(NULL);
mFns->bdDistance(&mId, &(id->id), &dist);
#ifdef DEBUG_QUERY
@ -412,7 +488,7 @@ int bdQuery::addPotentialPeer(const bdId *id, uint32_t mode)
fprintf(stderr, ", %u)\n", mode);
#endif
/* first we check if this is a worthy potential peer....
/* we check if this is a worthy potential peer....
* if it is already in mClosest -> false. old peer.
* if it is > mClosest.rbegin() -> false. too far way.
*/
@ -422,129 +498,399 @@ int bdQuery::addPotentialPeer(const bdId *id, uint32_t mode)
sit = mClosest.lower_bound(dist);
eit = mClosest.upper_bound(dist);
for(it = sit; it != eit; it++)
{
if (it->second.mPeerId == *id)
{
/* already there */
retval = 0;
#ifdef DEBUG_QUERY
fprintf(stderr, "Peer already in mClosest\n");
#endif
}
//empty loop.
}
/* check if outside range, & bucket is full */
if ((sit == mClosest.end()) && (mClosest.size() >= mFns->bdNodesPerBucket()))
{
#ifdef DEBUG_QUERY
fprintf(stderr, "Peer to far away for Potential\n");
#endif
retval = 0; /* too far away */
return 0; /* too far away */
}
/* return if false; */
if (!retval)
{
#ifdef DEBUG_QUERY
fprintf(stderr, "Flagging as Not a Potential Peer!\n");
#endif
return retval;
}
/* finally if a worthy & new peer -> add into potential closest
* and repeat existance tests with PotentialPeers
*/
sit = mPotentialClosest.lower_bound(dist);
eit = mPotentialClosest.upper_bound(dist);
int i = 0;
for(it = mPotentialClosest.begin(); it != sit; it++, i++)
for(it = sit; it != eit; it++)
{
//empty loop.
}
if (i > mFns->bdNodesPerBucket() - 1)
{
#ifdef DEBUG_QUERY
fprintf(stderr, "Distance to far... dropping\n");
fprintf(stderr, "Flagging as Potential Peer!\n");
#endif
/* outside the list - so we won't add to mPotentialClosest
* but inside mClosest still - so should still try it
*/
retval = 1;
return retval;
}
for(it = sit; it != eit; it++, i++)
{
if (it->second.mPeerId == *id)
if (mFns->bdSimilarId(id, &(it->second.mPeerId)))
{
/* this means its already been pinged */
// Not updating Full Peer Id here... as this is an inspection function.
#ifdef DEBUG_QUERY
fprintf(stderr, "Peer Already here in mPotentialClosest!\n");
fprintf(stderr, "Peer already in mClosest\n");
#endif
if (mode & BITDHT_PEER_STATUS_RECV_NODES)
return 0;
}
}
return 1; /* either within mClosest Range (but not there!), or there aren't enough peers */
}
/*****
*
* mLastSendTime ... is the last FIND_NODE_RESPONSE that we returned 1. (indicating to PING).
* mLastRecvTime ... is the last time we received an update about/from them
*
* The update is dependent on the flags passed in the function call. (saves duplicate code).
*
*
* XXX IMPORTANT TO DECIDE WHAT IS RETURNED HERE.
* original algorithm return 0 if exists in potential peers, 1 if unknown.
* This is used to limit the number of pings to non-responding potentials.
*
* MUST think about this. Need to install HISTORY tracking again. to look at the statistics.
*
* It is important that the potential Peers list extends all the way back to mClosest.end().
* Otherwise we end up with [TARGET] .... [ POTENTIAL ] ..... [ CLOSEST ] ......
* and the gap between POT and CLOSEST will get hammered with pings.
*
*/
#define MIN_PING_POTENTIAL_PERIOD 300
int bdQuery::updatePotentialPeer(const bdId *id, uint32_t mode, uint32_t addType)
{
bdMetric dist;
time_t now = time(NULL);
mFns->bdDistance(&mId, &(id->id), &dist);
std::multimap<bdMetric, bdPeer>::iterator it, sit, eit;
sit = mPotentialPeers.lower_bound(dist);
eit = mPotentialPeers.upper_bound(dist);
bool found = false;
for(it = sit; it != eit; it++)
{
if (mFns->bdSimilarId(id, &(it->second.mPeerId)))
{
found = true;
it->second.mPeerFlags |= mode;
it->second.mLastRecvTime = now;
if (addType == FIND_NODE_RESPONSE)
{
#ifdef DEBUG_QUERY
fprintf(stderr, "Updating LastRecvTime\n");
#endif
it->second.mLastRecvTime = ts;
// We could lose peers here by not updating port... but should be okay.
if (now - it->second.mLastSendTime > MIN_PING_POTENTIAL_PERIOD)
{
it->second.mLastSendTime = now;
return 1;
}
}
#ifdef DEBUG_QUERY
fprintf(stderr, "Flagging as Not a Potential Peer!\n");
#endif
retval = 0;
return retval;
else if (mode)
{
/* also update port from incoming id, as we have definitely recved from it */
if (mFns->bdUpdateSimilarId(&(it->second.mPeerId), id))
{
/* updated it... must be Unstable */
it->second.mExtraFlags |= BITDHT_PEER_EXFLAG_UNSTABLE;
}
}
return 0;
}
}
#ifdef DEBUG_QUERY
fprintf(stderr, "Peer not in Query\n");
#endif
// Removing this check - so that we can have varying length PotentialPeers.
// Peer will always be added, then probably removed straight away.
/* trim it back */
while(mPotentialClosest.size() > (uint32_t) (mFns->bdNodesPerBucket() - 1))
#if 0
/* check if outside range, & bucket is full */
if ((sit == mPotentialPeers.end()) && (mPotentialPeers.size() >= mFns->bdNodesPerBucket()))
{
std::multimap<bdMetric, bdPeer>::iterator it;
it = mPotentialClosest.end();
if(!mPotentialClosest.empty())
{
--it;
#ifdef DEBUG_QUERY
fprintf(stderr, "Removing Furthest Peer: ");
mFns->bdPrintId(std::cerr, &(it->second.mPeerId));
fprintf(stderr, "\n");
fprintf(stderr, "Peer to far away for Potential\n");
#endif
mPotentialClosest.erase(it);
}
return 0;
}
#ifdef DEBUG_QUERY
fprintf(stderr, "bdQuery::addPotentialPeer(): Closer Peer!: ");
mFns->bdPrintId(std::cerr, id);
fprintf(stderr, "\n");
#endif
/* add it in */
bdPeer peer;
peer.mPeerId = *id;
peer.mPeerFlags = mode;
peer.mFoundTime = now;
peer.mLastRecvTime = now;
peer.mLastSendTime = 0;
peer.mLastRecvTime = ts;
peer.mFoundTime = ts;
mPotentialClosest.insert(std::pair<bdMetric, bdPeer>(dist, peer));
if (addType == FIND_NODE_RESPONSE)
{
peer.mLastSendTime = now;
}
mPotentialPeers.insert(std::pair<bdMetric, bdPeer>(dist, peer));
#ifdef DEBUG_QUERY
fprintf(stderr, "Flagging as Potential Peer!\n");
#endif
retval = 1;
return retval;
trimPotentialPeers_toClosest();
return 1;
}
int bdQuery::trimPotentialPeers_FixedLength()
{
/* trim it back */
while(mPotentialPeers.size() > (uint32_t) (mFns->bdNodesPerBucket()))
{
std::multimap<bdMetric, bdPeer>::iterator it;
it = mPotentialPeers.end();
it--; // must be more than 1 peer here?
#ifdef DEBUG_QUERY
fprintf(stderr, "Removing Furthest Peer: ");
mFns->bdPrintId(std::cerr, &(it->second.mPeerId));
fprintf(stderr, "\n");
#endif
mPotentialPeers.erase(it);
}
return 1;
}
int bdQuery::trimPotentialPeers_toClosest()
{
if (mPotentialPeers.size() <= (uint32_t) (mFns->bdNodesPerBucket()))
return 1;
std::multimap<bdMetric, bdPeer>::reverse_iterator it;
it = mClosest.rbegin();
bdMetric lastClosest = it->first;
/* trim it back */
while(mPotentialPeers.size() > (uint32_t) (mFns->bdNodesPerBucket()))
{
std::multimap<bdMetric, bdPeer>::iterator it;
it = mPotentialPeers.end();
it--; // must be more than 1 peer here?
if (lastClosest < it->first)
{
#ifdef DEBUG_QUERY
fprintf(stderr, "Removing Furthest Peer: ");
mFns->bdPrintId(std::cerr, &(it->second.mPeerId));
fprintf(stderr, "\n");
#endif
mPotentialPeers.erase(it);
}
else
{
return 1;
}
}
return 1;
}
/*******************************************************************************************
******************************** Potential Proxies ****************************************
*******************************************************************************************/
/********
* Potential Proxies. a list of peers that have returned our target in response to a query.
*
* We are particularly interested in peers with specific flags...
* But all the peers have been pinged already by the time they reach this list.
* So there are two options:
* 1) Track everythings mode history - which is a waste of resources.
* 2) Store the list, and ping later.
*
* We will store these in two lists: Flags & Unknown.
* we keep the most recent of each, and move around as required.
*
* we could also check the Closest/PotentialPeer lists to grab the flags,
* for an unknown peer?
*
* All Functions manipulating PotentialProxies are here.
* We need several functions:
*
* For Extracting Proxies.
bool bdQuery::proxies(std::list<bdId> &answer)
bool bdQuery::potentialProxies(std::list<bdId> &answer)
*
* For Adding/Updating Proxies.
int bdQuery::addProxy(const bdId *id, const bdId *src, uint32_t srcmode)
int bdQuery::updateProxy(const bdId *id, uint32_t mode)
*
*/
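/* Editor's note (not part of the patch): an extraction-side sketch of the two functions declared
 * just below, preferring flagged proxies and falling back to the unknown-flag list:
 *
 *   std::list<bdId> proxyIds;
 *   if (!query->proxies(proxyIds))           // responders that also carry mRequiredPeerFlags
 *   {
 *       query->potentialProxies(proxyIds);   // responders with unknown / incomplete flags
 *   }
 */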
/*** Two Functions to extract Proxies... ***/
bool bdQuery::proxies(std::list<bdId> &answer)
{
/* get all the matches to our query */
std::list<bdPeer>::iterator it;
int i = 0;
for(it = mProxiesFlagged.begin(); it != mProxiesFlagged.end(); it++, i++)
{
answer.push_back(it->mPeerId);
}
return (i > 0);
}
bool bdQuery::potentialProxies(std::list<bdId> &answer)
{
/* get all the matches to our query */
std::list<bdPeer>::iterator it;
int i = 0;
for(it = mProxiesUnknown.begin(); it != mProxiesUnknown.end(); it++, i++)
{
answer.push_back(it->mPeerId);
}
return (i > 0);
}
int bdQuery::addProxy(const bdId *id, const bdId *src, uint32_t srcmode)
{
bdMetric dist;
time_t now = time(NULL);
mFns->bdDistance(&mId, &(id->id), &dist);
/* finally if it is an exact match, add as potential proxy */
int bucket = mFns->bdBucketDistance(&dist);
if ((bucket != 0) || (src == NULL))
{
/* not a potential proxy */
return 0;
}
#ifdef DEBUG_QUERY
fprintf(stderr, "Bucket = 0, Have Potential Proxy!\n");
#endif
bool found = false;
if (updateProxyList(src, srcmode, mProxiesUnknown))
{
found = true;
}
if (!found)
{
if (updateProxyList(src, srcmode, mProxiesFlagged))
{
found = true;
}
}
if (!found)
{
/* if we get here. its not in the list */
#ifdef DEBUG_QUERY
fprintf(stderr, "Adding Source to Proxy List:\n");
#endif
bdPeer peer;
peer.mPeerId = *src;
peer.mPeerFlags = srcmode;
peer.mLastSendTime = 0;
peer.mLastRecvTime = now;
peer.mFoundTime = now;
/* add it in */
if ((srcmode & mRequiredPeerFlags) == mRequiredPeerFlags)
{
mProxiesFlagged.push_front(peer);
}
else
{
mProxiesUnknown.push_front(peer);
}
}
trimProxies();
return 1;
}
int bdQuery::updateProxy(const bdId *id, uint32_t mode)
{
if (!updateProxyList(id, mode, mProxiesUnknown))
{
updateProxyList(id, mode, mProxiesFlagged);
}
trimProxies();
return 1;
}
/**** Utility functions that do all the work! ****/
int bdQuery::updateProxyList(const bdId *id, uint32_t mode, std::list<bdPeer> &searchProxyList)
{
std::list<bdPeer>::iterator it;
for(it = searchProxyList.begin(); it != searchProxyList.end(); it++)
{
if (mFns->bdSimilarId(id, &(it->mPeerId)))
{
/* found it ;( */
#ifdef DEBUG_QUERY
std::cerr << "bdQuery::updateProxyList() Found peer, updating";
std::cerr << std::endl;
#endif
time_t now = time(NULL);
if (mode)
{
/* also update port from incoming id, as we have definitely recved from it */
if (mFns->bdUpdateSimilarId(&(it->mPeerId), id))
{
/* updated it... must be Unstable */
it->mExtraFlags |= BITDHT_PEER_EXFLAG_UNSTABLE;
}
}
it->mPeerFlags |= mode;
it->mLastRecvTime = now;
/* now move it to the front of required list...
* note this could be exactly the same list as &searchProxyList, or a different one!
*/
bdPeer peer = *it;
it = searchProxyList.erase(it);
if ((peer.mPeerFlags & mRequiredPeerFlags) == mRequiredPeerFlags)
{
mProxiesFlagged.push_front(peer);
}
else
{
mProxiesUnknown.push_front(peer);
}
return 1;
break;
}
}
return 0;
}
#define MAX_POTENTIAL_PROXIES 10
int bdQuery::trimProxies()
{
/* drop excess Potential Proxies */
while(mProxiesUnknown.size() > MAX_POTENTIAL_PROXIES)
{
mProxiesUnknown.pop_back();
}
while(mProxiesFlagged.size() > MAX_POTENTIAL_PROXIES)
{
mProxiesFlagged.pop_back();
}
return 1;
}
/*******************************************************************************************
******************************** Potential Proxies ****************************************
*******************************************************************************************/
/* print query.
*/
@ -590,6 +936,18 @@ int bdQuery::printQuery()
fprintf(stderr," LastRecv: %ld ago", ts-it->second.mLastRecvTime);
fprintf(stderr, "\n");
}
std::list<bdPeer>::iterator lit;
fprintf(stderr, "\nPotential Proxies:\n");
for(lit = mProxiesUnknown.begin(); lit != mProxiesUnknown.end(); lit++)
{
fprintf(stderr, "ProxyId: ");
mFns->bdPrintId(std::cerr, &(lit->mPeerId));
fprintf(stderr," Found: %ld ago", ts-lit->mFoundTime);
fprintf(stderr," LastSent: %ld ago", ts-lit->mLastSendTime);
fprintf(stderr," LastRecv: %ld ago", ts-lit->mLastRecvTime);
fprintf(stderr, "\n");
}
#else
// shortened version.
fprintf(stderr, "Closest Available Peer: ");
@ -598,6 +956,7 @@ int bdQuery::printQuery()
{
mFns->bdPrintId(std::cerr, &(it->second.mPeerId));
fprintf(stderr, " Bucket: %d ", mFns->bdBucketDistance(&(it->first)));
fprintf(stderr," Flags: %x", it->second.mPeerFlags);
fprintf(stderr," Found: %ld ago", ts-it->second.mFoundTime);
fprintf(stderr," LastSent: %ld ago", ts-it->second.mLastSendTime);
fprintf(stderr," LastRecv: %ld ago", ts-it->second.mLastRecvTime);
@ -605,16 +964,42 @@ int bdQuery::printQuery()
fprintf(stderr, "\n");
fprintf(stderr, "Closest Potential Peer: ");
it = mPotentialClosest.begin();
if (it != mPotentialClosest.end())
it = mPotentialPeers.begin();
if (it != mPotentialPeers.end())
{
mFns->bdPrintId(std::cerr, &(it->second.mPeerId));
fprintf(stderr, " Bucket: %d ", mFns->bdBucketDistance(&(it->first)));
fprintf(stderr," Flags: %x", it->second.mPeerFlags);
fprintf(stderr," Found: %ld ago", ts-it->second.mFoundTime);
fprintf(stderr," LastSent: %ld ago", ts-it->second.mLastSendTime);
fprintf(stderr," LastRecv: %ld ago", ts-it->second.mLastRecvTime);
}
fprintf(stderr, "\n");
std::list<bdPeer>::iterator lit;
fprintf(stderr, "Flagged Proxies:\n");
for(lit = mProxiesFlagged.begin(); lit != mProxiesFlagged.end(); lit++)
{
fprintf(stderr, "ProxyId: ");
mFns->bdPrintId(std::cerr, &(lit->mPeerId));
fprintf(stderr," Flags: %x", it->second.mPeerFlags);
fprintf(stderr," Found: %ld ago", ts-lit->mFoundTime);
fprintf(stderr," LastSent: %ld ago", ts-lit->mLastSendTime);
fprintf(stderr," LastRecv: %ld ago", ts-lit->mLastRecvTime);
fprintf(stderr, "\n");
}
fprintf(stderr, "Potential Proxies:\n");
for(lit = mProxiesUnknown.begin(); lit != mProxiesUnknown.end(); lit++)
{
fprintf(stderr, "ProxyId: ");
mFns->bdPrintId(std::cerr, &(lit->mPeerId));
fprintf(stderr," Flags: %x", it->second.mPeerFlags);
fprintf(stderr," Found: %ld ago", ts-lit->mFoundTime);
fprintf(stderr," LastSent: %ld ago", ts-lit->mLastSendTime);
fprintf(stderr," LastRecv: %ld ago", ts-lit->mLastRecvTime);
fprintf(stderr, "\n");
}
#endif
return 1;

View File

@ -44,12 +44,14 @@ class bdQuery
// get the answer.
bool result(std::list<bdId> &answer);
bool proxies(std::list<bdId> &answer);
bool potentialProxies(std::list<bdId> &answer);
// returning results get passed to all queries.
//void addNode(const bdId *id, int mode);
int nextQuery(bdId &id, bdNodeId &targetId);
int addPeer(const bdId *id, uint32_t mode);
int addPotentialPeer(const bdId *id, uint32_t mode);
int addPotentialPeer(const bdId *id, const bdId *src, uint32_t srcmode);
int printQuery();
// searching for
@ -62,15 +64,38 @@ int printQuery();
int32_t mQueryIdlePeerRetryPeriod; // seconds between retries.
private:
//private:
// closest peers
// Closest Handling Fns.
int addClosestPeer(const bdId *id, uint32_t mode);
// Potential Handling Fns.
int worthyPotentialPeer(const bdId *id);
int updatePotentialPeer(const bdId *id, uint32_t mode, uint32_t addType);
int trimPotentialPeers_FixedLength();
int trimPotentialPeers_toClosest();
// Proxy Handling Fns.
int addProxy(const bdId *id, const bdId *src, uint32_t srcmode);
int updateProxy(const bdId *id, uint32_t mode);
int updateProxyList(const bdId *id, uint32_t mode, std::list<bdPeer> &searchProxyList);
int trimProxies();
// closest peers.
std::multimap<bdMetric, bdPeer> mClosest;
std::multimap<bdMetric, bdPeer> mPotentialClosest;
std::multimap<bdMetric, bdPeer> mPotentialPeers;
uint32_t mRequiredPeerFlags;
std::list<bdPeer> mProxiesUnknown;
std::list<bdPeer> mProxiesFlagged;
int mClosestListSize;
bdDhtFunctions *mFns;
};
#if 0
class bdQueryStatus
{
public:
@ -79,6 +104,8 @@ class bdQueryStatus
std::list<bdId> mResults;
};
#endif
/* this is just a container class.

View File

@ -0,0 +1,374 @@
/*
* bitdht/bdquerymgr.cc
*
* BitDHT: A Flexible DHT library.
*
* Copyright 2010 by Robert Fernie
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License Version 3 as published by the Free Software Foundation.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA.
*
* Please report all bugs and problems to "bitdht@lunamutt.com".
*
*/
#include "bitdht/bdquerymgr.h"
#include "bitdht/bdnode.h"
#include <string.h>
#include <stdlib.h>
#include <iostream>
#include <iomanip>
#include <sstream>
#define BITDHT_QUERY_START_PEERS 10
#define BITDHT_QUERY_NEIGHBOUR_PEERS 8
#define BITDHT_MAX_REMOTE_QUERY_AGE 10
/****
* #define DEBUG_NODE_MULTIPEER 1
* #define DEBUG_NODE_MSGS 1
* #define DEBUG_NODE_ACTIONS 1
* #define DEBUG_NODE_MSGIN 1
* #define DEBUG_NODE_MSGOUT 1
***/
//#define DEBUG_NODE_MSGS 1
bdQueryManager::bdQueryManager(bdSpace *space, bdDhtFunctions *fns, bdNodePublisher *pub)
:mNodeSpace(space), mFns(fns), mPub(pub)
{
}
/***** Startup / Shutdown ******/
void bdQueryManager::shutdownQueries()
{
/* clear the queries */
std::list<bdQuery *>::iterator it;
for(it = mLocalQueries.begin(); it != mLocalQueries.end();it++)
{
delete (*it);
}
mLocalQueries.clear();
}
void bdQueryManager::printQueries()
{
std::cerr << "bdQueryManager::printQueries()";
std::cerr << std::endl;
int i = 0;
std::list<bdQuery *>::iterator it;
for(it = mLocalQueries.begin(); it != mLocalQueries.end(); it++, i++)
{
fprintf(stderr, "Query #%d:\n", i);
(*it)->printQuery();
fprintf(stderr, "\n");
}
}
int bdQueryManager::iterateQueries(int maxQueries)
{
#ifdef DEBUG_NODE_MULTIPEER
std::cerr << "bdQueryManager::iterateQueries() of Peer: ";
mFns->bdPrintNodeId(std::cerr, &mOwnId);
std::cerr << std::endl;
#endif
/* allow each query to send up to one query... until maxMsgs has been reached */
int numQueries = mLocalQueries.size();
int sentQueries = 0;
int i = 0;
bdId id;
bdNodeId targetNodeId;
while((i < numQueries) && (sentQueries < maxQueries))
{
bdQuery *query = mLocalQueries.front();
mLocalQueries.pop_front();
mLocalQueries.push_back(query);
/* go through the possible queries */
if (query->nextQuery(id, targetNodeId))
{
#ifdef DEBUG_NODE_MSGS
std::cerr << "bdQueryManager::iteration() send_query(";
mFns->bdPrintId(std::cerr, &id);
std::cerr << ",";
mFns->bdPrintNodeId(std::cerr, &targetNodeId);
std::cerr << ")";
std::cerr << std::endl;
#endif
mPub->send_query(&id, &targetNodeId);
sentQueries++;
}
i++;
}
#ifdef DEBUG_NODE_ACTIONS
std::cerr << "bdQueryManager::iteration() maxMsgs: " << maxMsgs << " sentPings: " << sentPings;
std::cerr << " / " << allowedPings;
std::cerr << " sentQueries: " << sentQueries;
std::cerr << " / " << numQueries;
std::cerr << std::endl;
#endif
//printQueries();
return sentQueries;
}
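/* Editor's note (not part of the patch): caller-side sketch; "allowedMsgs" is a hypothetical
 * per-tick message budget maintained by the owning bdNode:
 *
 *   int sent = queryMgr->iterateQueries(allowedMsgs);  // each active query sends at most one FIND_NODE
 *   allowedMsgs -= sent;
 */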
bool bdQueryManager::checkPotentialPeer(bdId *id, bdId *src)
{
bool isWorthyPeer = false;
/* also push to queries */
std::list<bdQuery *>::iterator it;
for(it = mLocalQueries.begin(); it != mLocalQueries.end(); it++)
{
if ((*it)->addPotentialPeer(id, src, 0))
{
isWorthyPeer = true;
}
}
if (!isWorthyPeer)
{
isWorthyPeer = checkWorthyPeerSources(src);
}
return isWorthyPeer;
}
void bdQueryManager::addPeer(const bdId *id, uint32_t peerflags)
{
#ifdef DEBUG_NODE_ACTIONS
fprintf(stderr, "bdQueryManager::addPeer(");
mFns->bdPrintId(std::cerr, id);
fprintf(stderr, ")\n");
#endif
/* iterate through queries */
std::list<bdQuery *>::iterator it;
for(it = mLocalQueries.begin(); it != mLocalQueries.end(); it++)
{
(*it)->addPeer(id, peerflags);
}
}
/************************************ Query Details *************************/
void bdQueryManager::addQuery(const bdNodeId *id, uint32_t qflags)
{
std::list<bdId> startList;
std::multimap<bdMetric, bdId> nearest;
std::multimap<bdMetric, bdId>::iterator it;
mNodeSpace->find_nearest_nodes(id, BITDHT_QUERY_START_PEERS, nearest);
fprintf(stderr, "bdQueryManager::addQuery(");
mFns->bdPrintNodeId(std::cerr, id);
fprintf(stderr, ")\n");
for(it = nearest.begin(); it != nearest.end(); it++)
{
startList.push_back(it->second);
}
bdQuery *query = new bdQuery(id, startList, qflags, mFns);
mLocalQueries.push_back(query);
}
void bdQueryManager::clearQuery(const bdNodeId *rmId)
{
std::list<bdQuery *>::iterator it;
for(it = mLocalQueries.begin(); it != mLocalQueries.end();)
{
if ((*it)->mId == *rmId)
{
bdQuery *query = (*it);
it = mLocalQueries.erase(it);
delete query;
}
else
{
it++;
}
}
}
void bdQueryManager::QueryStatus(std::map<bdNodeId, bdQueryStatus> &statusMap)
{
std::list<bdQuery *>::iterator it;
for(it = mLocalQueries.begin(); it != mLocalQueries.end(); it++)
{
bdQueryStatus status;
status.mStatus = (*it)->mState;
status.mQFlags = (*it)->mQueryFlags;
(*it)->result(status.mResults);
statusMap[(*it)->mId] = status;
}
}
int bdQueryManager::QuerySummary(const bdNodeId *id, bdQuerySummary &query)
{
std::list<bdQuery *>::iterator it;
for(it = mLocalQueries.begin(); it != mLocalQueries.end(); it++)
{
if ((*it)->mId == *id)
{
query.mId = (*it)->mId;
query.mLimit = (*it)->mLimit;
query.mState = (*it)->mState;
query.mQueryTS = (*it)->mQueryTS;
query.mQueryFlags = (*it)->mQueryFlags;
query.mSearchTime = (*it)->mSearchTime;
query.mClosest = (*it)->mClosest;
query.mPotentialPeers = (*it)->mPotentialPeers;
query.mProxiesUnknown = (*it)->mProxiesUnknown;
query.mProxiesFlagged = (*it)->mProxiesFlagged;
query.mQueryIdlePeerRetryPeriod = (*it)->mQueryIdlePeerRetryPeriod;
return 1;
}
}
return 0;
}
/* Extract Results from Peer Queries */
#define BDQRYMGR_RESULTS 1
#define BDQRYMGR_PROXIES 2
#define BDQRYMGR_POTPROXIES 3
int bdQueryManager::getResults(bdNodeId *target, std::list<bdId> &answer, int querytype)
{
/* grab any peers from any existing query */
int results = 0;
std::list<bdQuery *>::iterator qit;
for(qit = mLocalQueries.begin(); qit != mLocalQueries.end(); qit++)
{
if (!((*qit)->mId == (*target)))
{
continue;
}
#ifdef DEBUG_NODE_CONNECTION
std::cerr << "bdQueryManager::getResults() Found Matching Query";
std::cerr << std::endl;
#endif
switch(querytype)
{
default:
case BDQRYMGR_RESULTS:
results = (*qit)->result(answer);
break;
case BDQRYMGR_PROXIES:
results = (*qit)->proxies(answer);
break;
case BDQRYMGR_POTPROXIES:
results = (*qit)->potentialProxies(answer);
break;
}
/* will only be one matching query.. so end loop */
return results;
}
}
int bdQueryManager::result(bdNodeId *target, std::list<bdId> &answer)
{
return getResults(target, answer, BDQRYMGR_RESULTS);
}
int bdQueryManager::proxies(bdNodeId *target, std::list<bdId> &answer)
{
return getResults(target, answer, BDQRYMGR_PROXIES);
}
int bdQueryManager::potentialProxies(bdNodeId *target, std::list<bdId> &answer)
{
return getResults(target, answer, BDQRYMGR_POTPROXIES);
}
/************ WORTHY PEERS **********/
#define MAX_WORTHY_PEER_AGE 15
void bdQueryManager::addWorthyPeerSource(bdId *src)
{
time_t now = time(NULL);
bdPeer peer;
peer.mPeerId = *src;
peer.mFoundTime = now;
std::cerr << "bdQueryManager::addWorthyPeerSource(";
mFns->bdPrintId(std::cerr, src);
std::cerr << ")" << std::endl;
#ifdef DEBUG_NODE_ACTIONS
#endif
mWorthyPeerSources.push_back(peer);
}
bool bdQueryManager::checkWorthyPeerSources(bdId *src)
{
if (!src)
return false;
time_t now = time(NULL);
std::list<bdPeer>::iterator it;
for(it = mWorthyPeerSources.begin(); it != mWorthyPeerSources.end(); )
{
if (now - it->mFoundTime > MAX_WORTHY_PEER_AGE)
{
std::cerr << "bdQueryManager::checkWorthyPeerSource() Discard old Source: ";
mFns->bdPrintId(std::cerr, &(it->mPeerId));
std::cerr << std::endl;
it = mWorthyPeerSources.erase(it);
}
else
{
if (it->mPeerId == *src)
{
//std::cerr << "bdQueryManager::checkWorthyPeerSource(";
//mFns->bdPrintId(std::cerr, src);
//std::cerr << ") = true" << std::endl;
return true;
}
it++;
}
}
return false;
}

View File

@ -0,0 +1,78 @@
#ifndef BITDHT_QUERY_MANAGER_H
#define BITDHT_QUERY_MANAGER_H
/*
* bitdht/bdquerymgr.h
*
* BitDHT: A Flexible DHT library.
*
* Copyright 2011 by Robert Fernie
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License Version 3 as published by the Free Software Foundation.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA.
*
* Please report all bugs and problems to "bitdht@lunamutt.com".
*
*/
#include "bitdht/bdquery.h"
class bdNodePublisher;
class bdQueryManager
{
public:
bdQueryManager(bdSpace *space, bdDhtFunctions *fns, bdNodePublisher *pub);
void shutdownQueries();
void printQueries();
int iterateQueries(int maxqueries);
bool checkPotentialPeer(bdId *id, bdId *src);
void addPeer(const bdId *id, uint32_t peerflags);
void addQuery(const bdNodeId *id, uint32_t qflags);
void clearQuery(const bdNodeId *id);
void QueryStatus(std::map<bdNodeId, bdQueryStatus> &statusMap);
int QuerySummary(const bdNodeId *id, bdQuerySummary &query);
int result(bdNodeId *target, std::list<bdId> &answer);
int proxies(bdNodeId *target, std::list<bdId> &answer);
int potentialProxies(bdNodeId *target, std::list<bdId> &answer);
// extra "Worthy Peers" we will want to ping.
void addWorthyPeerSource(bdId *src);
bool checkWorthyPeerSources(bdId *src);
private:
int getResults(bdNodeId *target, std::list<bdId> &answer, int querytype);
/* NB: No Mutex Protection... Single threaded, Mutex at higher level!
*/
bdSpace *mNodeSpace;
bdDhtFunctions *mFns;
bdNodePublisher *mPub;
std::list<bdQuery *> mLocalQueries;
std::list<bdPeer> mWorthyPeerSources;
};
#endif // BITDHT_QUERY_MANAGER_H

View File

@ -26,10 +26,12 @@
#include "bitdht/bdstddht.h"
#include "bitdht/bdpeer.h"
#include "util/bdrandom.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <limits.h>
#include <iostream>
#include <sstream>
@ -42,9 +44,8 @@
void bdStdRandomId(bdId *id)
{
bdStdRandomNodeId(&(id->id));
id->addr.sin_addr.s_addr = rand();
id->addr.sin_port = rand();
id->addr.sin_addr.s_addr = bdRandom::random_u32();
id->addr.sin_port = (bdRandom::random_u32() % USHRT_MAX);
return;
}
@ -54,7 +55,7 @@ void bdStdRandomNodeId(bdNodeId *id)
uint32_t *a_data = (uint32_t *) id->data;
for(int i = 0; i < BITDHT_KEY_INTLEN; i++)
{
a_data[i] = rand();
a_data[i] = bdRandom::random_u32();
}
return;
}
@ -69,13 +70,34 @@ void bdStdZeroNodeId(bdNodeId *id)
return;
}
uint32_t bdStdLikelySameNode(const bdId *n1, const bdId *n2)
// Ignore differences in port....
// must be careful which one we accept after this.
// can could end-up with the wrong port.
// However this only matters with firewalled peers anyway.
// So not too serious.
bool bdStdSimilarId(const bdId *n1, const bdId *n2)
{
if (*n1 == *n2)
if (n1->id == n2->id)
{
return 1;
if (n1->addr.sin_addr.s_addr == n2->addr.sin_addr.s_addr)
{
return true;
}
}
return 0;
return false;
}
bool bdStdUpdateSimilarId(bdId *dest, const bdId *src)
{
/* only difference that's currently allowed */
if (dest->addr.sin_port == src->addr.sin_port)
{
/* no update required */
return false;
}
dest->addr.sin_port = src->addr.sin_port;
return true;
}
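/* Editor's note (not part of the patch): a minimal sketch of how the pair above is meant to be
 * used; markUnstable() is a hypothetical stand-in for setting BITDHT_PEER_EXFLAG_UNSTABLE:
 *
 *   if (bdStdSimilarId(&knownId, &incomingId))            // same node id and IP; port may differ
 *   {
 *       if (bdStdUpdateSimilarId(&knownId, &incomingId))  // adopt the sender's current port
 *       {
 *           markUnstable(&knownId);                       // port changed -> peer looks NATed/unstable
 *       }
 *   }
 */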
@ -116,6 +138,43 @@ void bdStdRandomMidId(const bdNodeId *target, const bdNodeId *other, bdNodeId *m
}
}
int bdStdLoadNodeId(bdNodeId *id, std::string input)
{
uint8_t *a_data = (uint8_t *) id->data;
int reqlen = BITDHT_KEY_LEN * 2;
if (input.size() < reqlen)
{
return 0;
}
for(int i = 0; i < BITDHT_KEY_LEN; i++)
{
char ch1 = input[2 * i];
char ch2 = input[2 * i + 1];
uint8_t value1 = 0;
uint8_t value2 = 0;
/* do char1 */
if (ch1 >= '0' && ch1 <= '9')
value1 = (ch1 - '0');
else if (ch1 >= 'A' && ch1 <= 'F')
value1 = (ch1 - 'A' + 10);
else if (ch1 >= 'a' && ch1 <= 'f')
value1 = (ch1 - 'a' + 10);
/* do char2 */
if (ch2 >= '0' && ch2 <= '9')
value2 = (ch2 - '0');
else if (ch2 >= 'A' && ch2 <= 'F')
value2 = (ch2 - 'A' + 10);
else if (ch2 >= 'a' && ch2 <= 'f')
value2 = (ch2 - 'a' + 10);
a_data[i] = (value1 << 4) + value2;
}
return 1;
}
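/* Editor's note (not part of the patch): usage sketch; the input must supply at least
 * 2 * BITDHT_KEY_LEN hex characters (40 for a 160-bit key):
 *
 *   bdNodeId id;
 *   if (bdStdLoadNodeId(&id, "0123456789abcdef0123456789abcdef01234567"))
 *   {
 *       bdStdPrintNodeId(std::cerr, &id);   // prints back the same 40 hex digits
 *   }
 */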
std::string bdStdConvertToPrintable(std::string input)
{
std::ostringstream out;
@ -222,9 +281,15 @@ int bdStdDht::bdBucketDistance(const bdMetric *metric)
}
uint32_t bdStdDht::bdLikelySameNode(const bdId *id1, const bdId *id2)
bool bdStdDht::bdSimilarId(const bdId *id1, const bdId *id2)
{
return bdStdLikelySameNode(id1, id2);
return bdStdSimilarId(id1, id2);
}
bool bdStdDht::bdUpdateSimilarId(bdId *dest, const bdId *src)
{
return bdStdUpdateSimilarId(dest, src);
}

View File

@ -52,12 +52,14 @@ int bdStdBucketDistance(const bdNodeId *a, const bdNodeId *b);
void bdStdRandomMidId(const bdNodeId *target, const bdNodeId *other, bdNodeId *mid);
int bdStdLoadNodeId(bdNodeId *id, std::string input);
void bdStdPrintId(std::ostream &out, const bdId *a);
void bdStdPrintNodeId(std::ostream &out, const bdNodeId *a);
std::string bdStdConvertToPrintable(std::string input);
uint32_t bdStdLikelySameNode(const bdId*, const bdId*);
//uint32_t bdStdSimilarNode(const bdId*, const bdId*);
class bdStdDht: public bdDhtFunctions
@ -74,7 +76,8 @@ virtual int bdDistance(const bdNodeId *n1, const bdNodeId *n2, bdMetric *metric)
virtual int bdBucketDistance(const bdNodeId *n1, const bdNodeId *n2);
virtual int bdBucketDistance(const bdMetric *metric);
virtual uint32_t bdLikelySameNode(const bdId *id1, const bdId *id2);
virtual bool bdSimilarId(const bdId *id1, const bdId *id2);
virtual bool bdUpdateSimilarId(bdId *dest, const bdId *src); /* returns true if update was necessary */
virtual void bdRandomMidId(const bdNodeId *target, const bdNodeId *other, bdNodeId *mid);

View File

@ -101,6 +101,7 @@ int bdStore::reloadFromStore()
}
// This is a very ugly function!
int bdStore::getPeer(bdPeer *peer)
{
#ifdef DEBUG_STORE
@ -119,6 +120,34 @@ int bdStore::getPeer(bdPeer *peer)
return 0;
}
int bdStore::filterIpList(const std::list<struct sockaddr_in> &filteredIPs)
{
// Nasty O(n^2) iteration over up to 500 entries!
// hope it's not used too often.
std::list<struct sockaddr_in>::const_iterator it;
for(it = filteredIPs.begin(); it != filteredIPs.end(); it++)
{
std::list<bdPeer>::iterator sit;
for(sit = store.begin(); sit != store.end();)
{
if (it->sin_addr.s_addr == sit->mPeerId.addr.sin_addr.s_addr)
{
std::cerr << "bdStore::filterIpList() Found Bad entry in Store. Erasing!";
std::cerr << std::endl;
sit = store.erase(sit);
}
else
{
sit++;
}
}
}
return 1;
}
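/* Editor's note (not part of the patch): caller-side sketch; "bannedAddrs" is a hypothetical
 * list assembled elsewhere (e.g. from bdFilter):
 *
 *   std::list<struct sockaddr_in> bannedAddrs;
 *   // ... fill bannedAddrs ...
 *   store.filterIpList(bannedAddrs);   // removes matching IPs from the bootstrap store
 */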
#define MAX_ENTRIES 500
/* maintain a sorted list */
@ -219,3 +248,5 @@ void bdStore::writeStore()
return writeStore(mStoreFile);
}

View File

@ -38,6 +38,7 @@ class bdStore
bdStore(std::string file, bdDhtFunctions *fns);
int reloadFromStore(); /* for restarts */
int filterIpList(const std::list<struct sockaddr_in> &filteredIPs);
int clear();
int getPeer(bdPeer *peer);

View File

@ -100,9 +100,14 @@ HEADERS += \
bitdht/bdhistory.h \
util/bdnet.h \
util/bdthreads.h \
util/bdrandom.h \
udp/udplayer.h \
udp/udpstack.h \
udp/udpbitdht.h \
bitdht/bdconnection.h \
bitdht/bdfilter.h \
bitdht/bdaccount.h \
bitdht/bdquerymgr.h \
SOURCES += \
bitdht/bencode.c \
@ -116,10 +121,15 @@ SOURCES += \
bitdht/bdmanager.cc \
bitdht/bdstddht.cc \
bitdht/bdhistory.cc \
util/bdnet.cc \
util/bdnet.cc \
util/bdthreads.cc \
util/bdrandom.cc \
udp/udplayer.cc \
udp/udpstack.cc \
udp/udpbitdht.cc \
bitdht/bdconnection.cc \
bitdht/bdfilter.cc \
bitdht/bdaccount.cc \
bitdht/bdquerymgr.cc \

View File

@ -27,6 +27,7 @@
#include "bitdht/bdmanager.h"
#include "bitdht/bdstddht.h"
#include "udp/udplayer.h"
#include "util/bdrandom.h"
#include <stdlib.h>
@ -85,7 +86,7 @@ int main(int argc, char **argv)
{
for(j = 0; j < 2; j++)
{
int peeridx = rand() % n_nodes;
int peeridx = bdRandom::random_u32() % n_nodes;
for(i = 0, it = nodes.begin();
(i < peeridx) && (it != nodes.end()); i++, it++)
{
@ -93,7 +94,7 @@ int main(int argc, char **argv)
}
if (it != nodes.end())
{
nit->second->addPotentialPeer((bdId *) &(it->first));
nit->second->addPotentialPeer((bdId *) &(it->first), NULL);
}
}
}

View File

@ -26,6 +26,7 @@
#include "bitdht/bdnode.h"
#include "bitdht/bdstddht.h"
#include "util/bdrandom.h"
#include <stdlib.h>
@ -87,10 +88,10 @@ int main(int argc, char **argv)
for(j = 0; j < 5; j++)
{
int peeridx = rand() % n_nodes;
int peeridx = bdRandom::random_u32() % n_nodes;
bdId pid = portIdx[peeridx];
node->addPotentialPeer(&pid);
node->addPotentialPeer(&pid, NULL);
}
}

View File

@ -63,7 +63,7 @@ int main(int argc, char **argv)
std::multimap<bdMetric, bdId> nearest;
std::multimap<bdMetric, bdId>::iterator it;
space.find_nearest_nodes(&(queryId.id), N_PEERS_TO_START, startList, nearest);
space.find_nearest_nodes(&(queryId.id), N_PEERS_TO_START, nearest);
for(it = nearest.begin(); it != nearest.end(); it++)
{

View File

@ -56,10 +56,9 @@ int main(int argc, char **argv)
{
bdId tmpId;
bdStdRandomId(&tmpId);
std::list<bdId> list1;
std::multimap<bdMetric, bdId> list2;
space.find_nearest_nodes(&(tmpId.id), N_PEERS_TO_FIND, list1, list2);
space.find_nearest_nodes(&(tmpId.id), N_PEERS_TO_FIND, list2);
}
return 1;

View File

@ -72,8 +72,6 @@ int main(int argc, char **argv)
bool doThreadJoin = false;
int noQueries = 0;
srand(time(NULL));
while((c = getopt(argc, argv,"rjp:b:u:q:")) != -1)
{
switch (c)

View File

@ -48,6 +48,9 @@
//#define DEBUG_UDP_BITDHT 1
#define BITDHT_VERSION_IDENTIFER 1
//#define BITDHT_VERSION "01" // Original RS 0.5.0/0.5.1 version.
#define BITDHT_VERSION "02" // Connections + Full DHT implementation.
/*************************************/
UdpBitDht::UdpBitDht(UdpPublisher *pub, bdNodeId *id, std::string appVersion, std::string bootstrapfile, bdDhtFunctions *fns)
@ -57,6 +60,7 @@ UdpBitDht::UdpBitDht(UdpPublisher *pub, bdNodeId *id, std::string appVersion, st
#ifdef BITDHT_VERSION_IDENTIFER
usedVersion = "BD";
usedVersion += BITDHT_VERSION;
#endif
usedVersion += appVersion;
@ -118,6 +122,30 @@ void UdpBitDht::removeCallback(BitDhtCallback *cb)
mBitDhtManager->removeCallback(cb);
}
void UdpBitDht::ConnectionRequest(struct sockaddr_in *laddr, bdNodeId *target, uint32_t mode, uint32_t start)
{
bdStackMutex stack(dhtMtx); /********** MUTEX LOCKED *************/
mBitDhtManager->ConnectionRequest(laddr, target, mode, start);
}
void UdpBitDht::ConnectionAuth(bdId *srcId, bdId *proxyId, bdId *destId, uint32_t mode, uint32_t loc, uint32_t answer)
{
bdStackMutex stack(dhtMtx); /********** MUTEX LOCKED *************/
mBitDhtManager->ConnectionAuth(srcId, proxyId, destId, mode, loc, answer);
}
void UdpBitDht::ConnectionOptions(uint32_t allowedModes, uint32_t flags)
{
bdStackMutex stack(dhtMtx); /********** MUTEX LOCKED *************/
mBitDhtManager->ConnectionOptions(allowedModes, flags);
}
int UdpBitDht::getDhtPeerAddress(const bdNodeId *id, struct sockaddr_in &from)
{
bdStackMutex stack(dhtMtx); /********** MUTEX LOCKED *************/
@ -132,6 +160,30 @@ int UdpBitDht::getDhtValue(const bdNodeId *id, std::string key, std::string &va
return mBitDhtManager->getDhtValue(id, key, value);
}
int UdpBitDht::getDhtBucket(const int idx, bdBucket &bucket)
{
bdStackMutex stack(dhtMtx); /********** MUTEX LOCKED *************/
return mBitDhtManager->getDhtBucket(idx, bucket);
}
int UdpBitDht::getDhtQueries(std::map<bdNodeId, bdQueryStatus> &queries)
{
bdStackMutex stack(dhtMtx); /********** MUTEX LOCKED *************/
return mBitDhtManager->getDhtQueries(queries);
}
int UdpBitDht::getDhtQueryStatus(const bdNodeId *id, bdQuerySummary &query)
{
bdStackMutex stack(dhtMtx); /********** MUTEX LOCKED *************/
return mBitDhtManager->getDhtQueryStatus(id, query);
}
/* stats and Dht state */
int UdpBitDht:: startDht()

View File

@ -67,9 +67,18 @@ virtual void findDhtValue(bdNodeId *id, std::string key, uint32_t mode);
virtual void addCallback(BitDhtCallback *cb);
virtual void removeCallback(BitDhtCallback *cb);
/***** Connections Requests *****/
virtual void ConnectionRequest(struct sockaddr_in *laddr, bdNodeId *target, uint32_t mode, uint32_t start);
virtual void ConnectionAuth(bdId *srcId, bdId *proxyId, bdId *destId, uint32_t mode, uint32_t loc, uint32_t answer);
virtual void ConnectionOptions(uint32_t allowedModes, uint32_t flags);
/***** Get Results Details *****/
virtual int getDhtPeerAddress(const bdNodeId *id, struct sockaddr_in &from);
virtual int getDhtValue(const bdNodeId *id, std::string key, std::string &value);
virtual int getDhtBucket(const int idx, bdBucket &bucket);
virtual int getDhtQueries(std::map<bdNodeId, bdQueryStatus> &queries);
virtual int getDhtQueryStatus(const bdNodeId *id, bdQuerySummary &query);
/* stats and Dht state */
virtual int startDht();

View File

@ -24,6 +24,7 @@
*/
#include "udp/udplayer.h"
#include "util/bdrandom.h"
#include <iostream>
#include <sstream>
@ -310,7 +311,7 @@ void UdpLayer::recv_loop()
}
int UdpLayer::sendPkt(const void *data, int size, sockaddr_in &to, int ttl)
int UdpLayer::sendPkt(const void *data, int size, const sockaddr_in &to, int ttl)
{
/* if ttl is different -> set it */
if (ttl != getTTL())
@ -492,7 +493,7 @@ int UdpLayer::receiveUdpPacket(void *data, int *size, struct sockaddr_in &from)
return -1;
}
int UdpLayer::sendUdpPacket(const void *data, int size, struct sockaddr_in &to)
int UdpLayer::sendUdpPacket(const void *data, int size, const struct sockaddr_in &to)
{
/* send out */
#ifdef DEBUG_UDP_LAYER
@ -525,28 +526,26 @@ LossyUdpLayer::~LossyUdpLayer() { return; }
int LossyUdpLayer::receiveUdpPacket(void *data, int *size, struct sockaddr_in &from)
{
double prob = (1.0 * (rand() / (RAND_MAX + 1.0)));
if (prob < lossFraction)
if (0 < UdpLayer::receiveUdpPacket(data, size, from))
{
/* but discard */
if (0 < UdpLayer::receiveUdpPacket(data, size, from))
float prob = bdRandom::random_f32();
if (prob < lossFraction)
{
/* discard */
std::cerr << "LossyUdpLayer::receiveUdpPacket() Dropping packet!";
std::cerr << std::endl;
std::cerr << printPkt(data, *size);
std::cerr << std::endl;
std::cerr << "LossyUdpLayer::receiveUdpPacket() Packet Dropped!";
std::cerr << std::endl;
*size = 0;
return -1;
}
size = 0;
return -1;
return *size;
}
// otherwise read normally;
return UdpLayer::receiveUdpPacket(data, size, from);
return -1;
}
int LossyUdpLayer::sendUdpPacket(const void *data, int size, struct sockaddr_in &to)
@ -571,3 +570,99 @@ int LossyUdpLayer::sendUdpPacket(const void *data, int size, struct sockaddr_in
return UdpLayer::sendUdpPacket(data, size, to);
}
/**************************** LossyUdpLayer - for Testing **************/
PortRange::PortRange() :lport(0), uport(0) { return; }
PortRange::PortRange(uint16_t lp, uint16_t up) :lport(lp), uport(up) { return; }
bool PortRange::inRange(uint16_t port)
{
if (port < lport)
{
return false;
}
if (port > uport)
{
return false;
}
return true;
}
RestrictedUdpLayer::RestrictedUdpLayer(UdpReceiver *udpr,
struct sockaddr_in &local)
:UdpLayer(udpr, local)
{
return;
}
RestrictedUdpLayer::~RestrictedUdpLayer() { return; }
void RestrictedUdpLayer::addRestrictedPortRange(int lp, int up)
{
PortRange pr(lp, up);
mLostPorts.push_back(pr);
}
int RestrictedUdpLayer::receiveUdpPacket(void *data, int *size, struct sockaddr_in &from)
{
if (0 < UdpLayer::receiveUdpPacket(data, size, from))
{
/* check the port against list */
uint16_t inPort = ntohs(from.sin_port);
std::list<PortRange>::iterator it;
for(it = mLostPorts.begin(); it != mLostPorts.end(); it++)
{
if (it->inRange(inPort))
{
#ifdef DEBUG_UDP_LAYER
std::cerr << "RestrictedUdpLayer::receiveUdpPacket() Dropping packet";
std::cerr << ", Port(" << inPort << ") in restricted range!";
std::cerr << std::endl;
//std::cerr << printPkt(data, *size);
//std::cerr << std::endl;
#endif
*size = 0;
return -1;
}
}
/* acceptable port */
return *size;
}
return -1;
}
int RestrictedUdpLayer::sendUdpPacket(const void *data, int size, struct sockaddr_in &to)
{
/* check the port against list */
uint16_t outPort = ntohs(to.sin_port);
std::list<PortRange>::iterator it;
for(it = mLostPorts.begin(); it != mLostPorts.end(); it++)
{
if (it->inRange(outPort))
{
/* drop */
#ifdef DEBUG_UDP_LAYER
std::cerr << "RestrictedUdpLayer::sendUdpPacket() Dropping packet";
std::cerr << ", Port(" << outPort << ") in restricted range!";
std::cerr << std::endl;
//std::cerr << printPkt(data, *size);
//std::cerr << std::endl;
#endif
return size;
}
}
// otherwise send normally
return UdpLayer::sendUdpPacket(data, size, to);
}
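The two testing layers above are intended to be swapped in beneath a UdpStack. A minimal sketch of simulating a firewalled port band, assuming the test-mode constructor and UDP_TEST_RESTRICTED_LAYER constant from udpstack.h later in this commit; the cast and port numbers are illustrative:

#include "udp/udpstack.h"
#include "udp/udplayer.h"

UdpStack *makeFirewalledStack(struct sockaddr_in &local)
{
        /* install RestrictedUdpLayer instead of the standard socket layer */
        UdpStack *stack = new UdpStack(UDP_TEST_RESTRICTED_LAYER, local);

        /* getUdpLayer() returns the base UdpLayer*, so downcast for testing */
        RestrictedUdpLayer *rl =
                dynamic_cast<RestrictedUdpLayer *>(stack->getUdpLayer());
        if (rl)
                rl->addRestrictedPortRange(10000, 20000); /* drop this whole band */

        return stack;
}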

View File

@ -60,7 +60,7 @@ class UdpPublisher
{
public:
virtual ~UdpPublisher() {}
virtual int sendPkt(const void *data, int size, struct sockaddr_in &to, int ttl) = 0;
virtual int sendPkt(const void *data, int size, const struct sockaddr_in &to, int ttl) = 0;
};
@ -86,7 +86,7 @@ void recv_loop(); /* uses callback to UdpReceiver */
/* Higher Level Interface */
//int readPkt(void *data, int *size, struct sockaddr_in &from);
int sendPkt(const void *data, int size, struct sockaddr_in &to, int ttl);
int sendPkt(const void *data, int size, const struct sockaddr_in &to, int ttl);
/* monitoring / updates */
int okay();
@ -98,7 +98,7 @@ void recv_loop(); /* uses callback to UdpReceiver */
protected:
virtual int receiveUdpPacket(void *data, int *size, struct sockaddr_in &from);
virtual int sendUdpPacket(const void *data, int size, struct sockaddr_in &to);
virtual int sendUdpPacket(const void *data, int size, const struct sockaddr_in &to);
int setTTL(int t);
int getTTL();
@ -134,5 +134,35 @@ virtual int sendUdpPacket(const void *data, int size, struct sockaddr_in &to);
double lossFraction;
};
class PortRange
{
public:
PortRange();
PortRange(uint16_t lp, uint16_t up);
bool inRange(uint16_t port);
uint16_t lport;
uint16_t uport;
};
/* For Testing - drops packets */
class RestrictedUdpLayer: public UdpLayer
{
public:
RestrictedUdpLayer(UdpReceiver *udpr, struct sockaddr_in &local);
virtual ~RestrictedUdpLayer();
void addRestrictedPortRange(int lp, int up);
protected:
virtual int receiveUdpPacket(void *data, int *size, struct sockaddr_in &from);
virtual int sendUdpPacket(const void *data, int size, struct sockaddr_in &to);
std::list<PortRange> mLostPorts;
};
#endif

View File

@ -47,6 +47,34 @@ UdpStack::UdpStack(struct sockaddr_in &local)
return;
}
UdpStack::UdpStack(int testmode, struct sockaddr_in &local)
:udpLayer(NULL), laddr(local)
{
std::cerr << "UdpStack::UdpStack() Evoked in TestMode" << std::endl;
if (testmode == UDP_TEST_LOSSY_LAYER)
{
std::cerr << "UdpStack::UdpStack() Installing LossyUdpLayer" << std::endl;
udpLayer = new LossyUdpLayer(this, laddr, UDP_TEST_LOSSY_FRAC);
}
else if (testmode == UDP_TEST_RESTRICTED_LAYER)
{
std::cerr << "UdpStack::UdpStack() Installing RestrictedUdpLayer" << std::endl;
udpLayer = new RestrictedUdpLayer(this, laddr);
}
else
{
std::cerr << "UdpStack::UdpStack() Installing Standard UdpLayer" << std::endl;
// standard layer
openSocket();
}
return;
}
UdpLayer *UdpStack::getUdpLayer() /* for testing only */
{
return udpLayer;
}
bool UdpStack::resetAddress(struct sockaddr_in &local)
{
std::cerr << "UdpStack::resetAddress(" << local << ")";
@ -84,7 +112,7 @@ int UdpStack::recvPkt(void *data, int size, struct sockaddr_in &from)
return 1;
}
int UdpStack::sendPkt(const void *data, int size, struct sockaddr_in &to, int ttl)
int UdpStack::sendPkt(const void *data, int size, const struct sockaddr_in &to, int ttl)
{
/* print packet information */
#ifdef DEBUG_UDP_RECV
@ -198,7 +226,7 @@ UdpSubReceiver::UdpSubReceiver(UdpPublisher *pub)
return;
}
int UdpSubReceiver::sendPkt(const void *data, int size, struct sockaddr_in &to, int ttl)
int UdpSubReceiver::sendPkt(const void *data, int size, const struct sockaddr_in &to, int ttl)
{
/* print packet information */
#ifdef DEBUG_UDP_RECV

View File

@ -55,7 +55,7 @@ class UdpSubReceiver: public UdpReceiver
UdpSubReceiver(UdpPublisher *pub);
/* calls mPublisher->sendPkt */
virtual int sendPkt(const void *data, int size, struct sockaddr_in &to, int ttl);
virtual int sendPkt(const void *data, int size, const struct sockaddr_in &to, int ttl);
/* callback for recved data (overloaded from UdpReceiver) */
//virtual int recvPkt(void *data, int size, struct sockaddr_in &from) = 0;
@ -63,13 +63,21 @@ virtual int sendPkt(const void *data, int size, struct sockaddr_in &to, int ttl)
};
#define UDP_TEST_LOSSY_LAYER 1
#define UDP_TEST_RESTRICTED_LAYER 2
#define UDP_TEST_LOSSY_FRAC (0.10)
class UdpStack: public UdpReceiver, public UdpPublisher
{
public:
UdpStack(struct sockaddr_in &local);
UdpStack(int testmode, struct sockaddr_in &local);
virtual ~UdpStack() { return; }
UdpLayer *getUdpLayer(); /* for testing only */
bool resetAddress(struct sockaddr_in &local);
@ -79,7 +87,7 @@ int removeReceiver(UdpReceiver *recv);
/* Packet IO */
/* pass-through send packets */
virtual int sendPkt(const void *data, int size, struct sockaddr_in &to, int ttl);
virtual int sendPkt(const void *data, int size, const struct sockaddr_in &to, int ttl);
/* callback for recved data (overloaded from UdpReceiver) */
virtual int recvPkt(void *data, int size, struct sockaddr_in &from);
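For the lossy variant, a sketch along the same lines. UDP_TEST_LOSSY_FRAC (0.10) above is the lossFraction handed to LossyUdpLayer, so roughly one incoming packet in ten is discarded; the function name here is an illustrative assumption:

#include "udp/udpstack.h"

/* Hedged sketch: a stack that randomly drops ~10% of incoming packets,
 * useful for exercising retransmission and timeout paths in tests. */
UdpStack *makeLossyStack(struct sockaddr_in &local)
{
        return new UdpStack(UDP_TEST_LOSSY_LAYER, local);
}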

View File

@ -256,7 +256,11 @@ int bdnet_w2u_errno(int err)
break;
*
***/
case WSANOTINITIALISED:
std::cerr << "tou_net_w2u_errno(" << err << ") WSANOTINITIALISED. Fix Your Code!";
std::cerr << std::endl;
break;
default:
std::cerr << "tou_net_w2u_errno(" << err << ") Unknown";
std::cerr << std::endl;
@ -346,3 +350,10 @@ ssize_t bdnet_sendto(int s, const void *buf, size_t len, int flags,
#endif
/********************************** WINDOWS/UNIX SPECIFIC PART ******************/
void bdsockaddr_clear(struct sockaddr_in *addr)
{
memset(addr, 0, sizeof(*addr));
}
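A small sketch of the intended use of the helper above together with bdnet_inet_aton declared in bdnet.h; the header path, port, and address are arbitrary examples:

#include "util/bdnet.h"   /* header path assumed */

void fillLoopbackAddress(struct sockaddr_in *addr)
{
        bdsockaddr_clear(addr);                       /* zero every field first */
        addr->sin_family = AF_INET;
        addr->sin_port = htons(6775);                 /* example port, arbitrary */
        bdnet_inet_aton("127.0.0.1", &addr->sin_addr);
}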

View File

@ -103,7 +103,7 @@ int bdnet_inet_aton(const char *name, struct in_addr *addr);
/* check if we can modify the TTL on a UDP packet */
int bdnet_checkTTL(int fd);
void bdsockaddr_clear(struct sockaddr_in *addr);
/* Extra stuff to declare for windows error handling (mimics unix errno)
*/

View File

@ -0,0 +1,93 @@
#include <stdlib.h>
#include <string>
#include <unistd.h>
#include "util/bdrandom.h"
uint32_t bdRandom::index = 0 ;
std::vector<uint32_t> bdRandom::MT(bdRandom::N,0u) ;
bdMutex bdRandom::rndMtx ;
#if defined(_WIN32) || defined(__MINGW32__)
static bool auto_seed = bdRandom::seed( (time(NULL) + ((uint32_t) pthread_self().p)*0x1293fe)^0x18e34a12 ) ;
#else
#ifdef __APPLE__
static bool auto_seed = bdRandom::seed( (time(NULL) + pthread_mach_thread_np(pthread_self())*0x1293fe + (getpid()^0x113ef76b))^0x18e34a12 ) ;
#else
static bool auto_seed = bdRandom::seed( (time(NULL) + pthread_self()*0x1293fe + (getpid()^0x113ef76b))^0x18e34a12 ) ;
#endif
#endif
bool bdRandom::seed(uint32_t s)
{
bdStackMutex mtx(rndMtx) ;
MT.resize(N,0) ; // in case MT has not been resized yet (static initialisation order)
uint32_t j ;
MT[0]= s & 0xffffffffUL;
for (j=1; j<N; j++)
MT[j] = (1812433253UL * (MT[j-1] ^ (MT[j-1] >> 30)) + j) & 0xffffffffUL ;
return true ;
}
void bdRandom::locked_next_state()
{
for(uint32_t i=0;i<N;++i)
{
uint32_t y = ((MT[i]) & UMASK) | ((MT[(i+1)%(int)N]) & LMASK) ;
MT[i] = MT[(i + M) % (int)N] ^ (y >> 1) ;
if((y & 1) == 1)
MT[i] = MT[i] ^ 0x9908b0df ;
}
index = 0 ;
}
uint32_t bdRandom::random_u32()
{
uint32_t y;
{
bdStackMutex mtx(rndMtx) ;
y = MT[index++] ;
if(index == N)
locked_next_state();
}
// Tempering
y ^= (y >> 11);
y ^= (y << 7 ) & 0x9d2c5680UL;
y ^= (y << 15) & 0xefc60000UL;
y ^= (y >> 18);
return y;
}
uint64_t bdRandom::random_u64()
{
return ((uint64_t)random_u32() << 32ul) + random_u32() ;
}
float bdRandom::random_f32()
{
return random_u32() / (float)(~(uint32_t)0) ;
}
double bdRandom::random_f64()
{
return random_u64() / (double)(~(uint64_t)0) ;
}
std::string bdRandom::random_alphaNumericString(uint32_t len)
{
std::string s = "" ;
for(uint32_t i=0;i<len;++i)
s += (char)( (random_u32()%94) + 33) ; // printable ASCII 33..126, not strictly alphanumeric
return s ;
}
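A short usage sketch of the generator implemented above; the explicit seed value is arbitrary and optional, since the static auto_seed initialiser already seeds the state at startup:

#include <iostream>
#include <string>
#include "util/bdrandom.h"

int main()
{
        bdRandom::seed(0x12345678);                   /* optional re-seed, arbitrary value */

        uint32_t v      = bdRandom::random_u32();     /* uniform 32-bit value */
        float    frac   = bdRandom::random_f32();     /* uniform in [0, 1] */
        std::string tag = bdRandom::random_alphaNumericString(8);

        std::cout << v << " " << frac << " " << tag << std::endl;
        return 0;
}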

View File

@ -0,0 +1,71 @@
#ifndef BITDHT_UTILS_BDRANDOM_H
#define BITDHT_UTILS_BDRANDOM_H
/****************************************************************
* libbitdht is distributed under the following license:
*
* Copyright (C) 2010 Cyril Soler <csoler@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA 02110-1301, USA.
****************************************************************/
/* This Source Code is basically a direct copy of libretroshare's RsRandom.
* the function names have just been renamed. drbob
*/
// bdRandom contains a random number generator that is
// - thread safe
// - system independent
// - fast
// - NOT cryptographically secure (Mersenne Twister output is predictable)
//
// The implementation is adapted from the Mersenne Twister page of Wikipedia.
//
// http://en.wikipedia.org/wiki/Mersenne_twister
#include <vector>
#include "util/bdthreads.h"
class bdRandom
{
public:
static uint32_t random_u32() ;
static uint64_t random_u64() ;
static float random_f32() ;
static double random_f64() ;
static bool seed(uint32_t s) ;
static std::string random_alphaNumericString(uint32_t length) ;
private:
static bdMutex rndMtx ;
static const uint32_t N = 624;
static const uint32_t M = 397;
static const uint32_t MATRIX_A = 0x9908b0dfUL;
static const uint32_t UMASK = 0x80000000UL;
static const uint32_t LMASK = 0x7fffffffUL;
static void locked_next_state() ;
static uint32_t index ;
static std::vector<uint32_t> MT ;
};
#endif