Cleaned up p3Distrib.cc and removed the getAllMsgList() request from getMessageCount() to prevent cache loading for subscribed/own forums.

git-svn-id: http://svn.code.sf.net/p/retroshare/code/branches/v0.5-cacheopt@4386 b45a01b8-16f6-495d-af2f-9b41ad6348cc
parent 2eae48b23e
commit f7e3a3f1b3

2 changed files with 26 additions and 48 deletions
@@ -316,7 +316,7 @@ void p3GroupDistrib::updateCacheDocument()
     std::vector<grpNodePair> grpNodes;
     std::string failedCacheId = FAILED_CACHE_CONT;

-    // failed cache content node is has not been created add to doc
+    // failed cache content node has not been created, so add to doc
     if(mCacheTable.find(failedCacheId) == mCacheTable.end()){

         mCacheDoc.append_child("group");
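For readers unfamiliar with the cache document touched above: updateCacheDocument() keeps a lookup table (mCacheTable) alongside a pugixml document (mCacheDoc), and the hunk lazily appends a catch-all "group" node for failed cache content the first time it is needed. Below is a minimal standalone sketch of that pattern using plain pugixml; the map value type and the FAILED_CACHE_CONT value are simplifications for illustration, not the real RetroShare definitions.

    // Minimal sketch (not RetroShare code): lazily create a "group" node for the
    // failed-cache bucket and remember it in a lookup table.
    #include <pugixml.hpp>
    #include <iostream>
    #include <map>
    #include <string>

    static const std::string FAILED_CACHE_CONT = "failedcachecont"; // assumed value

    int main()
    {
        pugi::xml_document cacheDoc;                        // stands in for mCacheDoc
        std::map<std::string, pugi::xml_node> cacheTable;   // simplified mCacheTable

        const std::string failedCacheId = FAILED_CACHE_CONT;

        // failed cache content node has not been created, so add to doc
        if (cacheTable.find(failedCacheId) == cacheTable.end())
        {
            pugi::xml_node grp = cacheDoc.append_child("group");
            grp.append_attribute("id") = failedCacheId.c_str();
            grp.append_child("messages");                   // container filled later
            cacheTable[failedCacheId] = grp;
        }

        cacheDoc.save(std::cout);                           // prints the <group> node
        return 0;
    }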
@@ -384,7 +384,7 @@ void p3GroupDistrib::updateCacheDocument()

     // add groups to cache table
     locked_updateCacheTableGrp(grpNodes, false);
-    //grpNodeIter.clear();
+

     std::map<std::string, std::set<pCacheId> > msgCacheMap;
     pugi::xml_node nodeIter;
@@ -402,7 +402,6 @@ void p3GroupDistrib::updateCacheDocument()
     pCacheId pCid;

     int count = 0;
-    // int count2 = 0, count3 = 0;

     for(; msgIt != mMsgHistPending.end(); msgIt++)
     {
@@ -415,17 +414,6 @@ void p3GroupDistrib::updateCacheDocument()
         pCid = pCacheId(msgIt->second.first,
                 msgIt->second.second);

-        // ensure you don't add cache ids twice to same group
-        // // by checking cache table and current msg additions
-        // if(nodeCache_iter->second.cIdSet.find(pCid) !=
-        //        nodeCache_iter->second.cIdSet.end())
-        //    count2++;
-        //
-        // if(msgCacheMap[msgIt->first].find(pCid) != msgCacheMap[msgIt->first].end())
-        //    count3++;
-
-
-
         nodeIter = nodeCache_iter->second.node;
         messages_node = nodeIter.child("messages");

@@ -446,7 +434,6 @@ void p3GroupDistrib::updateCacheDocument()

             // add msg to grp set
             msgCacheMap[msgIt->first].insert(pCid);
             count++;
-
         }
         else{
@@ -462,8 +449,6 @@ void p3GroupDistrib::updateCacheDocument()
         }
     }

-
-
     // now update cache table by tagging msg cache ids to their
     // respective groups
     locked_updateCacheTableMsg(msgCacheMap);
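The hunks above accumulate, for every group id, the set of cache ids that carry pending messages for that group (msgCacheMap), and then hand that map to locked_updateCacheTableMsg(). Below is a rough standalone sketch of that accumulation step; pCacheId is assumed here to be a (source peer id, cache sub-id) pair, and the pending message history is replaced by a hypothetical stand-in container, since the real typedefs live in the p3distrib headers.

    #include <cstdint>
    #include <iostream>
    #include <map>
    #include <set>
    #include <string>
    #include <utility>
    #include <vector>

    // Assumption for this sketch: a cache id is a (source peer id, cache sub-id) pair.
    typedef std::pair<std::string, uint16_t> pCacheId;

    int main()
    {
        // Stand-in for the pending message history: group id -> cache the msg arrived in.
        std::vector<std::pair<std::string, pCacheId> > pendingMsgs = {
            { "grpA", pCacheId("peer1", 3) },
            { "grpA", pCacheId("peer2", 3) },
            { "grpB", pCacheId("peer1", 4) },
            { "grpA", pCacheId("peer1", 3) }   // duplicate, absorbed by the set below
        };

        // group id -> set of cache ids that carry messages for that group
        std::map<std::string, std::set<pCacheId> > msgCacheMap;

        for (const std::pair<std::string, pCacheId> &p : pendingMsgs)
            msgCacheMap[p.first].insert(p.second);   // std::set drops duplicates

        // locked_updateCacheTableMsg(msgCacheMap) would consume this map in the real
        // code; here we just print how many caches contribute messages to each group.
        for (std::map<std::string, std::set<pCacheId> >::const_iterator it = msgCacheMap.begin();
             it != msgCacheMap.end(); ++it)
            std::cout << it->first << ": " << it->second.size() << " cache id(s)\n";

        return 0;
    }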
@@ -1127,6 +1112,8 @@ void p3GroupDistrib::loadFileMsgs(const std::string &filename, uint16_t cacheSub
     // link grp to cache id (only one cache id, so doesn't matter if one grp comes out twice
     // with same cache id)
     std::map<std::string, pCacheId> msgCacheMap;

+    // if message loaded before check failed cache
+    pCacheId failedCache = pCacheId(src, cacheSubId);
     /* create the serialiser to load msgs */
     BinInterface *bio = new BinFileInterface(filename.c_str(), BIN_FLAGS_READABLE);

(second changed file)
@@ -76,7 +76,7 @@ RsForums *rsForums = NULL;
  * remember 2^16 = 64K max units in store period.
  * PUBPERIOD * 2^16 = max STORE PERIOD */
 #define FORUM_STOREPERIOD (365*24*3600)    /* 365 * 24 * 3600 - secs in a year */
-#define FORUM_PUBPERIOD    600             /* 10 minutes ... (max = 455 days) */
+#define FORUM_PUBPERIOD    10              /* 10 minutes ... (max = 455 days) */

 p3Forums::p3Forums(uint16_t type, CacheStrapper *cs, CacheTransfer *cft,
         std::string srcdir, std::string storedir, std::string forumDir)
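A quick check of the arithmetic in the comment block above: the store period is capped at PUBPERIOD * 2^16. With FORUM_PUBPERIOD = 600 s the cap is 600 * 65536 = 39,321,600 s, roughly 455 days, which comfortably covers FORUM_STOREPERIOD = 365*24*3600 = 31,536,000 s (one year). With the branch value of 10 s the cap drops to 10 * 65536 = 655,360 s, about 7.6 days, so the "(max = 455 days)" remark on the new line no longer reflects the defined value.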
@@ -549,42 +549,33 @@ bool p3Forums::getMessageCount(const std::string &fId, unsigned int &newCount, u
     } /******* UNLOCKED ********/

     if (grpFlags & (RS_DISTRIB_ADMIN | RS_DISTRIB_SUBSCRIBED)) {
-        std::list<std::string> msgIds;
-        if (getAllMsgList(fId, msgIds)) {

-            RsStackMutex stack(distribMtx); /***** STACK LOCKED MUTEX *****/
+        RsStackMutex stack(distribMtx); /***** STACK LOCKED MUTEX *****/

-            std::map<std::string, RsForumReadStatus*>::iterator fit = mReadStatus.find(fId);
-            if (fit == mReadStatus.end()) {
-                // no status available -> all messages are new
-                newCount += msgIds.size();
-                unreadCount += msgIds.size();
-                continue;
-            }
+        std::map<std::string, RsForumReadStatus*>::iterator fit = mReadStatus.find(fId);
+        if (fit == mReadStatus.end()) {
+            // not status available
+            continue;
+        }

-            std::list<std::string>::iterator mit;
-            for (mit = msgIds.begin(); mit != msgIds.end(); mit++) {
-                std::map<std::string, uint32_t >::iterator rit = fit->second->msgReadStatus.find(*mit);
-
-                if (rit == fit->second->msgReadStatus.end()) {
-                    // no status available -> message is new
-                    newCount++;
-                    unreadCount++;
-                    continue;
-                }
-
-                if (rit->second & FORUM_MSG_STATUS_READ) {
-                    // message is not new
-                    if (rit->second & FORUM_MSG_STATUS_UNREAD_BY_USER) {
-                        // message is unread
-                        unreadCount++;
-                    }
-                } else {
-                    newCount++;
+        // iterator through read status map to determine number of new and old
+        std::map<std::string, uint32_t >::iterator rit = fit->second->msgReadStatus.begin();
+        for(; rit != fit->second->msgReadStatus.end(); rit++)
+        {
+            if(rit->second & FORUM_MSG_STATUS_READ)
+            {
+                if (rit->second & FORUM_MSG_STATUS_UNREAD_BY_USER) {
+                    // message is unread
                     unreadCount++;
                 }
             }
-        } /******* UNLOCKED ********/
+            else
+            {
+                newCount++;
+                unreadCount++;
+            }
+        }
+        /******* UNLOCKED ********/
     }
 }

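The effect of the hunk above is that getMessageCount() no longer calls getAllMsgList(), which would trigger loading of the forum's caches, and instead walks the in-memory read-status map directly. Below is a minimal standalone sketch of that counting loop; the identifier names mirror the diff, but the two status bit values are placeholders assumed for this sketch, the real constants live in the forums headers.

    #include <cstdint>
    #include <iostream>
    #include <map>
    #include <string>

    static const uint32_t FORUM_MSG_STATUS_READ           = 0x01; // assumed bit value
    static const uint32_t FORUM_MSG_STATUS_UNREAD_BY_USER = 0x02; // assumed bit value

    static void countMessages(const std::map<std::string, uint32_t> &msgReadStatus,
                              unsigned int &newCount, unsigned int &unreadCount)
    {
        std::map<std::string, uint32_t>::const_iterator rit = msgReadStatus.begin();
        for (; rit != msgReadStatus.end(); ++rit)
        {
            if (rit->second & FORUM_MSG_STATUS_READ)
            {
                // message has been seen; it may still be flagged unread by the user
                if (rit->second & FORUM_MSG_STATUS_UNREAD_BY_USER)
                    unreadCount++;
            }
            else
            {
                // never marked read -> counts as both new and unread
                newCount++;
                unreadCount++;
            }
        }
    }

    int main()
    {
        std::map<std::string, uint32_t> status;
        status["msg1"] = FORUM_MSG_STATUS_READ;
        status["msg2"] = 0;                                                    // new
        status["msg3"] = FORUM_MSG_STATUS_READ | FORUM_MSG_STATUS_UNREAD_BY_USER;

        unsigned int newCount = 0, unreadCount = 0;
        countMessages(status, newCount, unreadCount);
        std::cout << "new=" << newCount << " unread=" << unreadCount << "\n"; // new=1 unread=2
        return 0;
    }

One behavioural difference visible in the diff: the old path also counted messages that had no read-status entry at all (it fetched the full id list via getAllMsgList()), whereas the new path only sees messages that already appear in msgReadStatus and skips the forum entirely (continue) when no read-status record exists for it.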