Fixed up bugs in the Recogn system - basics work now.

- Added missing decrement of mDataCount in RsMemCache::erase().
- Added missing note_event_locked() call in RsTickEvent, so previous tick info can now be used.
- Revamped recogn scheduling: on demand rather than periodic, via the new function recogn_schedule().
- Deferred loading of the PGP id list (getPgpIdList()) for pgphashes until we know it will be needed.

git-svn-id: http://svn.code.sf.net/p/retroshare/code/branches/v0.5-gxs_finale@6893 b45a01b8-16f6-495d-af2f-9b41ad6348cc
Commit 6871f9875a (parent cf8a63888b)
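The scheduling rework replaces the fixed periodic GXSID_EVENT_RECOGN event with on-demand scheduling: recogn_schedule() (added in the diff below) skips scheduling when an event is already queued and otherwise derives the delay from the age of the previous run. A minimal sketch of that delay computation follows; it is not part of the commit, and the free function and the RECOGN_PERIOD value used here are illustrative assumptions.

#include <cstdint>

// Illustrative sketch only: compute how long to wait before the next
// recogn pass, given how many seconds ago the previous pass ran.
// The real logic lives in p3IdService::recogn_schedule() in the diff below.
static const int32_t RECOGN_PERIOD = 90;   // assumed value for the sketch

int32_t recogn_delay(int32_t prev_age_secs)
{
	int32_t next_event = RECOGN_PERIOD - prev_age_secs;
	if (next_event < 0)
	{
		next_event = 0;   // overdue: schedule immediately
	}
	return next_event;
}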
@@ -148,7 +148,6 @@ p3IdService::p3IdService(RsGeneralDataService *gds, RsNetworkExchangeService *ne
 	// Kick off Cache Testing, + Others.
 	RsTickEvent::schedule_in(GXSID_EVENT_PGPHASH, PGPHASH_PERIOD);
-	RsTickEvent::schedule_in(GXSID_EVENT_RECOGN, RECOGN_PERIOD);
 	RsTickEvent::schedule_in(GXSID_EVENT_REPUTATION, REPUTATION_PERIOD);
 	RsTickEvent::schedule_now(GXSID_EVENT_CACHEOWNIDS);
 
@@ -1232,6 +1231,7 @@ bool p3IdService::cache_process_recogntaginfo(const RsGxsIdGroupItem *item, std:
 		std::cerr << std::endl;
 #endif // DEBUG_RECOGN
 
+		recogn_schedule();
 	}
 
 	return true;
@@ -2054,8 +2054,6 @@ bool p3IdService::pgphash_start()
 	std::cerr << std::endl;
 #endif // DEBUG_IDS
 
-	getPgpIdList();
-
 	// ACTUALLY only need summary - but have written code for data.
 	// Also need to use opts.groupFlags to filter stuff properly to REALID's only.
 	// TODO
@@ -2089,7 +2087,7 @@ bool p3IdService::pgphash_handlerequest(uint32_t token)
 	// We Will do this later!
 
 	std::vector<RsGxsIdGroup> groups;
-	std::vector<RsGxsIdGroup> groupsToProcess;
+	bool groupsToProcess = false;
 	bool ok = getGroupData(token, groups);
 
 	if(ok)
@@ -2162,6 +2160,7 @@ bool p3IdService::pgphash_handlerequest(uint32_t token)
 
 			RsStackMutex stack(mIdMtx); /********** STACK LOCKED MTX ******/
 			mGroupsToProcess.push_back(*vit);
+			groupsToProcess = true;
 		}
 	}
 	else
@@ -2170,6 +2169,12 @@ bool p3IdService::pgphash_handlerequest(uint32_t token)
 		std::cerr << std::endl;
 	}
 
+	if (groupsToProcess)
+	{
+		// update PgpIdList -> if there are groups to process.
+		getPgpIdList();
+	}
+
 	// Schedule Processing.
 	RsTickEvent::schedule_in(GXSID_EVENT_PGPHASH_PROC, PGPHASH_PROC_PERIOD);
 	return true;
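The pgphash changes above remove the unconditional getPgpIdList() call from pgphash_start() and instead set a boolean while queuing groups in pgphash_handlerequest(), so the PGP id list is only loaded when at least one group actually needs hashing. A rough sketch of this defer-until-needed pattern follows, with hypothetical stand-in types and a placeholder loader; it is not the real service code.

#include <list>
#include <string>
#include <vector>

// Hypothetical stand-ins for the sketch.
struct Group { std::string id; };

static std::list<Group> groupQueue;

static void loadPgpIdList()
{
	// Placeholder for the expensive PGP id list load (getPgpIdList() in
	// the real code); only worth doing when something was queued.
}

void handleGroups(const std::vector<Group> &groups)
{
	bool groupsToProcess = false;

	for (size_t i = 0; i < groups.size(); i++)
	{
		groupQueue.push_back(groups[i]);   // queue group for later hashing
		groupsToProcess = true;
	}

	if (groupsToProcess)
	{
		loadPgpIdList();   // deferred: skipped entirely when nothing was queued
	}
}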
@@ -2431,6 +2436,37 @@ void calcPGPHash(const RsGxsId &id, const PGPFingerprintType &pgp, GxsIdPgpHash
  * Info to be stored in GroupServiceString + Cache.
  **/
 
+bool p3IdService::recogn_schedule()
+{
+	std::cerr << "p3IdService::recogn_schedule()";
+	std::cerr << std::endl;
+
+	int32_t age = 0;
+	int32_t next_event = 0;
+
+	if (RsTickEvent::event_count(GXSID_EVENT_RECOGN) > 0)
+	{
+		std::cerr << "p3IdService::recogn_schedule() Skipping GXSIS_EVENT_RECOGN already scheduled";
+		std::cerr << std::endl;
+		return false;
+	}
+
+	if (RsTickEvent::prev_event_ago(GXSID_EVENT_RECOGN, age))
+	{
+		std::cerr << "p3IdService::recogn_schedule() previous event " << age << " secs ago";
+		std::cerr << std::endl;
+
+		next_event = RECOGN_PERIOD - age;
+		if (next_event < 0)
+		{
+			next_event = 0;
+		}
+	}
+
+	RsTickEvent::schedule_in(GXSID_EVENT_RECOGN, next_event);
+	return true;
+}
+
 
 bool p3IdService::recogn_start()
 {
@@ -2446,9 +2482,7 @@ bool p3IdService::recogn_start()
 		return false;
 	}
 
-	// SCHEDULE NEXT ONE.
-	RsTickEvent::schedule_in(GXSID_EVENT_RECOGN, RECOGN_PERIOD);
-
+	// NEXT EVENT is scheduled via recogn_schedule.
 
 #ifdef DEBUG_RECOGN
 	std::cerr << "p3IdService::recogn_start() making request";
@@ -2545,10 +2579,10 @@ bool p3IdService::recogn_process()
 	bool isDone = false;
 	{
 		RsStackMutex stack(mIdMtx); /********** STACK LOCKED MTX ******/
-		if (!mRecognGroupsToProcess.empty() && !mGroupsToProcess.empty())
+		if (!mRecognGroupsToProcess.empty())
 		{
 			item = mRecognGroupsToProcess.front();
-			mGroupsToProcess.pop_front();
+			mRecognGroupsToProcess.pop_front();
 
 #ifdef DEBUG_RECOGN
 			std::cerr << "p3IdService::recogn_process() Popped Group: " << item->meta.mGroupId;
@@ -356,6 +356,7 @@ virtual void handle_event(uint32_t event_type, const std::string &elabel);
  * recogn processing.
  *
  */
+	bool recogn_schedule();
 	bool recogn_start();
 	bool recogn_handlerequest(uint32_t token);
 	bool recogn_process();
@@ -204,6 +204,7 @@ template<class Key, class Value> bool RsMemCache<Key, Value>::erase(const Key &k
 	// remove from lru.
 	mDataMap.erase(it);
 	update_lrumap(key, old_ts, new_ts);
+	mDataCount--;
 
 	mStats_access++;
 	return true;
@@ -362,6 +363,12 @@ template<class Key, class Value> bool RsMemCache<Key, Value>::resize()
 		// ERROR.
 		std::cerr << "RsMemCache::resize() CONSISTENCY ERROR";
 		std::cerr << std::endl;
+		std::cerr << "\tmDataMap.size() = " << mDataMap.size();
+		std::cerr << std::endl;
+		std::cerr << "\tmLruMap.size() = " << mLruMap.size();
+		std::cerr << std::endl;
+		std::cerr << "\tmDataCount = " << mDataCount;
+		std::cerr << std::endl;
 	}
 
 	if (mDataCount > mMaxSize)
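The single mDataCount-- added to RsMemCache::erase() is what the extra diagnostics in resize() are guarding: resize() reports a consistency error whenever the counter and the map sizes disagree. A simplified sketch of a counted erase that keeps the counter in step with the map follows; the class below is an illustrative stand-in, not the real RsMemCache template.

#include <cassert>
#include <cstdint>
#include <map>
#include <string>
#include <utility>

// Illustrative stand-in: a data map plus an explicit element counter,
// mirroring the mDataMap / mDataCount pair that resize() cross-checks.
class CountedCache
{
public:
	CountedCache() : mDataCount(0) {}

	void store(const std::string &key, int value)
	{
		if (mDataMap.insert(std::make_pair(key, value)).second)
		{
			mDataCount++;          // count only genuinely new entries
		}
		else
		{
			mDataMap[key] = value; // overwrite; count unchanged
		}
	}

	bool erase(const std::string &key)
	{
		std::map<std::string, int>::iterator it = mDataMap.find(key);
		if (it == mDataMap.end())
		{
			return false;
		}
		mDataMap.erase(it);
		mDataCount--;              // without this, the counter drifts upwards
		assert(mDataCount == mDataMap.size());
		return true;
	}

private:
	std::map<std::string, int> mDataMap;
	uint32_t mDataCount;
};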
@@ -80,6 +80,7 @@ void RsTickEvent::tick_events()
 			mEvents.erase(it);
 
 			count_adjust_locked(event_type, -1);
+			note_event_locked(event_type);
 		}
 	}
 
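The note_event_locked() call added to RsTickEvent::tick_events() records when each event type last ran, which is what lets recogn_schedule() call prev_event_ago() and compute the age of the previous pass. A simplified model of that bookkeeping follows; the map member and the omission of locking are assumptions for the sketch, not the real RsTickEvent internals.

#include <cstdint>
#include <ctime>
#include <map>

// Simplified model: remember the completion time of each event type so
// callers can ask how long ago it last ran.
class TickEventModel
{
public:
	// Called when an event of this type has just been handled
	// (the role note_event_locked() plays in the real class).
	void note_event(uint32_t event_type)
	{
		mPrevEventTs[event_type] = time(NULL);
	}

	// Returns true and fills `age` (seconds) if the event type has run before.
	bool prev_event_ago(uint32_t event_type, int32_t &age) const
	{
		std::map<uint32_t, time_t>::const_iterator it = mPrevEventTs.find(event_type);
		if (it == mPrevEventTs.end())
		{
			return false;
		}
		age = (int32_t)(time(NULL) - it->second);
		return true;
	}

private:
	std::map<uint32_t, time_t> mPrevEventTs;   // event type -> last run time
};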