Mirror of https://github.com/RetroShare/RetroShare.git (synced 2024-12-26 07:59:35 -05:00)
Added comments in the file lists code. Removed unused functions. Added a few missing mutexes.
This commit is contained in:
parent 7f99bc2b70
commit 0cc4ebd89c
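
The "missing mutexes" added in this commit all use RetroShare's RS_STACK_MUTEX scoped-lock idiom from util/rsthreads.h: the lock is taken where the macro appears and released when the enclosing scope exits, which is why a bare RS_STACK_MUTEX(mFLSMtx) ; at the top of a function is enough. A minimal sketch of the same idea, using std::mutex as a stand-in so it compiles on its own (the real code uses RsMutex/RsStackMutex):

    #include <mutex>

    // Illustrative only: std::mutex plays the role of the p3FileDatabase mutex here.
    struct SketchDatabase
    {
        std::mutex mFLSMtx;
        bool mInCheck = false;

        bool inDirectoryCheck()
        {
            std::lock_guard<std::mutex> lock(mFLSMtx);   // same idea as RS_STACK_MUTEX(mFLSMtx) ;
            return mInCheck;                             // lock released automatically on return
        }
    };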
@@ -142,14 +142,18 @@ bool InternalFileHierarchyStorage::updateSubDirectoryList(const DirectoryStorage
for(uint32_t i=0;i<d.subdirs.size();)
if(subdirs.find(static_cast<DirEntry*>(mNodes[d.subdirs[i]])->dir_name) == subdirs.end())
{
#ifdef DEBUG_DIRECTORY_STORAGE
std::cerr << "[directory storage] Removing subdirectory " << static_cast<DirEntry*>(mNodes[d.subdirs[i]])->dir_name << " with index " << d.subdirs[i] << std::endl;
#endif

if( !removeDirectory(d.subdirs[i]))
i++ ;
}
else
{
#ifdef DEBUG_DIRECTORY_STORAGE
std::cerr << "[directory storage] Keeping existing subdirectory " << static_cast<DirEntry*>(mNodes[d.subdirs[i]])->dir_name << " with index " << d.subdirs[i] << std::endl;
#endif

should_create.erase(static_cast<DirEntry*>(mNodes[d.subdirs[i]])->dir_name) ;
++i;
@@ -157,7 +161,9 @@ bool InternalFileHierarchyStorage::updateSubDirectoryList(const DirectoryStorage

for(std::map<std::string,time_t>::const_iterator it(should_create.begin());it!=should_create.end();++it)
{
#ifdef DEBUG_DIRECTORY_STORAGE
std::cerr << "[directory storage] adding new subdirectory " << it->first << " at index " << mNodes.size() << std::endl;
#endif

DirEntry *de = new DirEntry(it->first) ;

@@ -258,7 +264,9 @@ bool InternalFileHierarchyStorage::updateSubFilesList(const DirectoryStorage::En

if(it == subfiles.end()) // file does not exist anymore => delete
{
#ifdef DEBUG_DIRECTORY_STORAGE
std::cerr << "[directory storage] removing non existing file " << f.file_name << " at index " << d.subfiles[i] << std::endl;
#endif

delete mNodes[d.subfiles[i]] ;
mNodes[d.subfiles[i]] = NULL ;
@@ -281,7 +289,9 @@ bool InternalFileHierarchyStorage::updateSubFilesList(const DirectoryStorage::En

for(std::map<std::string,DirectoryStorage::FileTS>::const_iterator it(new_files.begin());it!=new_files.end();++it)
{
#ifdef DEBUG_DIRECTORY_STORAGE
std::cerr << "[directory storage] adding new file " << it->first << " at index " << mNodes.size() << std::endl;
#endif

d.subfiles.push_back(mNodes.size()) ;
mNodes.push_back(new FileEntry(it->first,it->second.size,it->second.modtime));
@@ -297,8 +307,9 @@ bool InternalFileHierarchyStorage::updateHash(const DirectoryStorage::EntryIndex
std::cerr << "[directory storage] (EE) cannot update file at index " << file_index << ". Not a valid index, or not a file." << std::endl;
return false;
}

#ifdef DEBUG_DIRECTORY_STORAGE
std::cerr << "[directory storage] updating hash at index " << file_index << ", hash=" << hash << std::endl;
#endif

RsFileHash& old_hash (static_cast<FileEntry*>(mNodes[file_index])->file_hash) ;
mFileHashes[hash] = file_index ;
@@ -317,7 +328,9 @@ bool InternalFileHierarchyStorage::updateFile(const DirectoryStorage::EntryIndex

FileEntry& fe(*static_cast<FileEntry*>(mNodes[file_index])) ;

#ifdef DEBUG_DIRECTORY_STORAGE
std::cerr << "[directory storage] updating file entry at index " << file_index << ", name=" << fe.file_name << " size=" << fe.file_size << ", hash=" << fe.file_hash << std::endl;
#endif

fe.file_hash = hash;
fe.file_size = size;
@@ -358,7 +371,9 @@ bool InternalFileHierarchyStorage::updateDirEntry(const DirectoryStorage::EntryI
}
DirEntry& d(*static_cast<DirEntry*>(mNodes[indx])) ;

#ifdef DEBUG_DIRECTORY_STORAGE
std::cerr << "Updating dir entry: name=\"" << dir_name << "\", most_recent_time=" << most_recent_time << ", modtime=" << dir_modtime << std::endl;
#endif

d.dir_most_recent_time = most_recent_time;
d.dir_modtime = dir_modtime;
@@ -375,7 +390,9 @@ bool InternalFileHierarchyStorage::updateDirEntry(const DirectoryStorage::EntryI
// check that all subdirs already exist. If not, create.
for(uint32_t i=0;i<subdirs_hash.size();++i)
{
#ifdef DEBUG_DIRECTORY_STORAGE
std::cerr << "  subdir hash = " << subdirs_hash[i] << ": " ;
#endif

std::map<RsFileHash,DirectoryStorage::EntryIndex>::iterator it = existing_subdirs.find(subdirs_hash[i]) ;
DirectoryStorage::EntryIndex dir_index = 0;
@@ -384,7 +401,9 @@ bool InternalFileHierarchyStorage::updateDirEntry(const DirectoryStorage::EntryI
{
dir_index = it->second ;

#ifdef DEBUG_DIRECTORY_STORAGE
std::cerr << "  already exists, at index " << dir_index << std::endl;
#endif

existing_subdirs.erase(it) ;
}
@@ -401,7 +420,9 @@ bool InternalFileHierarchyStorage::updateDirEntry(const DirectoryStorage::EntryI

mDirHashes[subdirs_hash[i]] = dir_index ;

#ifdef DEBUG_DIRECTORY_STORAGE
std::cerr << "  created, at new index " << dir_index << std::endl;
#endif
}

d.subdirs.push_back(dir_index) ;
@@ -411,7 +432,9 @@ bool InternalFileHierarchyStorage::updateDirEntry(const DirectoryStorage::EntryI

for(std::map<RsFileHash,DirectoryStorage::EntryIndex>::const_iterator it = existing_subdirs.begin();it!=existing_subdirs.end();++it)
{
#ifdef DEBUG_DIRECTORY_STORAGE
std::cerr << "  removing existing subfile that is not in the directory anymore: name=" << it->first << " index=" << it->second << std::endl;
#endif

if(!checkIndex(it->second,FileStorageNode::TYPE_DIR))
{
@@ -436,13 +459,17 @@ bool InternalFileHierarchyStorage::updateDirEntry(const DirectoryStorage::EntryI
const FileEntry& f(subfiles_array[i]) ;
DirectoryStorage::EntryIndex file_index ;

#ifdef DEBUG_DIRECTORY_STORAGE
std::cerr << "  subfile name = " << subfiles_array[i].file_name << ": " ;
#endif

if(it != existing_subfiles.end() && mNodes[it->second] != NULL && mNodes[it->second]->type() == FileStorageNode::TYPE_FILE)
{
file_index = it->second ;

#ifdef DEBUG_DIRECTORY_STORAGE
std::cerr << "  already exists, at index " << file_index << std::endl;
#endif

if(!updateFile(file_index,f.file_hash,f.file_name,f.file_size,f.file_modtime))
std::cerr << "(EE) Cannot update file with index " << it->second <<" and hash " << f.file_hash << ". This is very weird. Entry should have just been created and therefore should exist. Skipping." << std::endl;
@@ -456,7 +483,9 @@ bool InternalFileHierarchyStorage::updateDirEntry(const DirectoryStorage::EntryI
mNodes[file_index] = new FileEntry(f.file_name,f.file_size,f.file_modtime,f.file_hash) ;
mFileHashes[f.file_hash] = file_index ;

#ifdef DEBUG_DIRECTORY_STORAGE
std::cerr << "  created, at new index " << file_index << std::endl;
#endif
}

d.subfiles.push_back(file_index) ;
@@ -465,7 +494,9 @@ bool InternalFileHierarchyStorage::updateDirEntry(const DirectoryStorage::EntryI

for(std::map<std::string,DirectoryStorage::EntryIndex>::const_iterator it = existing_subfiles.begin();it!=existing_subfiles.end();++it)
{
#ifdef DEBUG_DIRECTORY_STORAGE
std::cerr << "  removing existing subfile that is not in the directory anymore: name=" << it->first << " index=" << it->second << std::endl;
#endif

if(!checkIndex(it->second,FileStorageNode::TYPE_FILE))
{
@@ -173,21 +173,6 @@ int DirectoryStorage::searchHash(const RsFileHash& hash, std::list<EntryIndex> &
return mFileHierarchy->searchHash(hash,results);
}

// static const uint8_t DIRECTORY_STORAGE_TAG_FILE_HASH = 0x01 ;
// static const uint8_t DIRECTORY_STORAGE_TAG_FILE_NAME = 0x02 ;
// static const uint8_t DIRECTORY_STORAGE_TAG_FILE_SIZE = 0x03 ;
// static const uint8_t DIRECTORY_STORAGE_TAG_DIR_NAME = 0x04 ;
// static const uint8_t DIRECTORY_STORAGE_TAG_MODIF_TS = 0x05 ;
// static const uint8_t DIRECTORY_STORAGE_TAG_RECURS_MODIF_TS = 0x06 ;

void DirectoryStorage::loadNextTag(const unsigned char *data,uint32_t& offset,uint8_t& entry_tag,uint32_t& entry_size)
{
entry_tag = data[offset++] ;
}
void DirectoryStorage::saveNextTag(unsigned char *data, uint32_t& offset, uint8_t entry_tag, uint32_t entry_size)
{
}

void DirectoryStorage::load(const std::string& local_file_name)
{
RS_STACK_MUTEX(mDirStorageMtx) ;
@@ -49,23 +49,28 @@ class DirectoryStorage

void save() const ;

// These functions are to be used by file transfer and file search.

virtual int searchTerms(const std::list<std::string>& terms, std::list<EntryIndex> &results) const ;
virtual int searchBoolExp(RsRegularExpression::Expression * exp, std::list<EntryIndex> &results) const ;
virtual int searchHash(const RsFileHash& hash, std::list<EntryIndex> &results) const ;

bool getDirectoryRecursModTime(EntryIndex index,time_t& recurs_max_modf_TS) const ;
bool getDirectoryLocalModTime (EntryIndex index,time_t& motime_TS) const ;
bool getDirectoryUpdateTime (EntryIndex index,time_t& update_TS) const ;
// gets/sets the various time stamps:
//
bool getDirectoryRecursModTime(EntryIndex index,time_t& recurs_max_modf_TS) const ; // last modification time, computed recursively over all subfiles and directories
bool getDirectoryLocalModTime (EntryIndex index,time_t& motime_TS) const ; // last modification time for that index only
bool getDirectoryUpdateTime (EntryIndex index,time_t& update_TS) const ; // last time the entry was updated. This is only used on the RemoteDirectoryStorage side.

bool setDirectoryRecursModTime(EntryIndex index,time_t recurs_max_modf_TS) ;
bool setDirectoryLocalModTime (EntryIndex index,time_t modtime_TS) ;
bool setDirectoryUpdateTime (EntryIndex index,time_t update_TS) ;

uint32_t getEntryType(const EntryIndex& indx) ; // returns DIR_TYPE_*, not the internal directory storage stuff.
uint32_t getEntryType(const EntryIndex& indx) ; // WARNING: returns DIR_TYPE_*, not the internal directory storage stuff.
virtual bool extractData(const EntryIndex& indx,DirDetails& d);

// This class allows to abstractly browse the stored directory hierarchy in a depth-first manner.
// It gives access to sub-files and sub-directories below.
// It gives access to sub-files and sub-directories below. When using it, the client should make sure
// that the DirectoryStorage is properly locked, since the iterator cannot lock it.
//
class DirIterator
{
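
The new comment on DirIterator spells out a locking contract: the iterator never takes the DirectoryStorage lock, so the caller must hold it for the whole traversal. A minimal sketch of that contract with stand-in std types (not the RetroShare classes, whose exact iterator API is not shown in this hunk):

    #include <iostream>
    #include <mutex>
    #include <vector>

    // Stand-ins: a container whose iterator does not lock, so the caller
    // holds the mutex for the whole walk, as the comment above requires.
    struct SketchStorage
    {
        std::mutex mtx;
        std::vector<int> subdirIndexes;   // plays the role of one directory's child entries
    };

    void listSubDirs(SketchStorage& s)
    {
        std::lock_guard<std::mutex> lock(s.mtx);   // caller-side lock; the iterator itself never locks

        for(std::vector<int>::const_iterator it = s.subdirIndexes.begin(); it != s.subdirIndexes.end(); ++it)
            std::cout << "subdir index " << *it << std::endl;
    }   // lock released here, after iteration is finished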
@@ -120,20 +125,28 @@ class DirectoryStorage
time_t modtime;
};

EntryIndex root() const ; // returns the index of the root directory entry.
const RsPeerId& peerId() const { return mPeerId ; }
int parentRow(EntryIndex e) const ;
bool getChildIndex(EntryIndex e,int row,EntryIndex& c) const;
EntryIndex root() const ; // returns the index of the root directory entry. This is generally 0.
const RsPeerId& peerId() const { return mPeerId ; } // peer ID of who owns that file list.
int parentRow(EntryIndex e) const ; // position of the current node, in the array of children at its parent node. Used by GUI for display.
bool getChildIndex(EntryIndex e,int row,EntryIndex& c) const; // returns the index of the children node at position "row" in the children nodes. Used by GUI for display.

// Sets the subdirectory/subfiles list of entry indx to the supplied one, possibly adding and removing directories (resp. files). New directories are set empty with
// just a name and need to be updated later on. New files are returned in a list so that they can be sent to the hash cache.
//
bool updateSubDirectoryList(const EntryIndex& indx, const std::map<std::string, time_t> &subdirs) ;
bool updateSubFilesList(const EntryIndex& indx, const std::map<std::string, FileTS> &subfiles, std::map<std::string, FileTS> &new_files) ;
bool removeDirectory(const EntryIndex& indx) ;

// Updates relevant information for the file at the given index.

bool updateFile(const EntryIndex& index,const RsFileHash& hash, const std::string& fname, uint64_t size, time_t modf_time) ;
bool updateHash(const EntryIndex& index,const RsFileHash& hash);

bool getDirHashFromIndex(const EntryIndex& index,RsFileHash& hash) const ;
bool getIndexFromDirHash(const RsFileHash& hash,EntryIndex& index) const ;
// Returns the hash of the directory at the given index, and the reverse. This hash is set at random the first time it is used (when updating directories). It will be
// used by the sync system to designate the directory without referring to the index (the index could be used to figure out the existence of hidden directories).

bool getDirHashFromIndex(const EntryIndex& index,RsFileHash& hash) const ; // constant cost
bool getIndexFromDirHash(const RsFileHash& hash,EntryIndex& index) const ; // log cost.

void print();
void cleanup();
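
The "constant cost" / "log cost" notes above suggest the usual asymmetric layout: index-to-hash can be a direct array lookup, while hash-to-index goes through an ordered map, like the mDirHashes map seen earlier in this diff. A small sketch of that layout with illustrative names (the real member types are not shown here):

    #include <cstdint>
    #include <map>
    #include <string>
    #include <vector>

    typedef uint32_t EntryIndex;
    typedef std::string Hash;                    // stand-in for RsFileHash

    struct SketchHashIndexMap
    {
        std::vector<Hash>          indexToHash;  // indexed by EntryIndex: O(1) lookup
        std::map<Hash, EntryIndex> hashToIndex;  // ordered map: O(log n) lookup

        bool getDirHashFromIndex(EntryIndex i, Hash& h) const
        {
            if(i >= indexToHash.size()) return false;
            h = indexToHash[i];                  // constant cost
            return true;
        }
        bool getIndexFromDirHash(const Hash& h, EntryIndex& i) const
        {
            std::map<Hash, EntryIndex>::const_iterator it = hashToIndex.find(h);
            if(it == hashToIndex.end()) return false;
            i = it->second;                      // log cost
            return true;
        }
    };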
@@ -144,9 +157,6 @@ class DirectoryStorage

private:

void loadNextTag(const unsigned char *data, uint32_t& offset, uint8_t& entry_tag, uint32_t& entry_size) ;
void saveNextTag(unsigned char *data,uint32_t& offset,uint8_t entry_tag,uint32_t entry_size) ;

// debug
void locked_check();

@@ -195,6 +205,11 @@ public:
LocalDirectoryStorage(const std::string& fname,const RsPeerId& own_id) : DirectoryStorage(own_id),mFileName(fname) {}
virtual ~LocalDirectoryStorage() {}

/*!
* \brief [gs]etSharedDirectoryList
* Gets/sets the list of shared directories. Each directory is supplied with a virtual name (the name the friends will see), and sharing flags/groups.
* \param lst
*/
void setSharedDirectoryList(const std::list<SharedDirInfo>& lst) ;
void getSharedDirectoryList(std::list<SharedDirInfo>& lst) ;
@@ -30,6 +30,11 @@
#include "util/rsthreads.h"
#include "retroshare/rsfiles.h"

/*!
* \brief The HashStorageClient class
* Used by clients of the hash cache for receiving hash results when done. This is asynchronous of course, since hashing
* might be quite costly.
*/
class HashStorageClient
{
public:
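
Since results come back asynchronously once the hashing thread is done, a client derives from this interface and gets called back later. A sketch of what such a client might look like; the base class and callback signature below are assumptions for illustration only, as the real virtual method is not part of this hunk:

    #include <cstdint>
    #include <iostream>
    #include <string>

    // Stand-in base class; the real HashStorageClient interface is not shown in this diff.
    class SketchHashClient
    {
    public:
        virtual ~SketchHashClient() {}
        virtual void hash_callback(uint32_t client_param, const std::string& name,
                                   const std::string& hash, uint64_t size) = 0;
    };

    // A client that just records the result; in RetroShare the directory code would
    // store the hash back into its file entry at this point.
    class PrintingHashClient : public SketchHashClient
    {
    public:
        virtual void hash_callback(uint32_t client_param, const std::string& name,
                                   const std::string& hash, uint64_t size)
        {
            std::cerr << "hashed " << name << " (" << size << " bytes, param " << client_param
                      << "): " << hash << std::endl;
        }
    };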
@@ -59,7 +64,7 @@ public:
* \param mod_time Actual file modification time
* \param known_hash Returned hash for the file.
* \param c Hash cache client to which the hash should be sent once calculated
* \param client_param Param to be passed to the client callback
* \param client_param Param to be passed to the client callback. Useful if the client needs a file ID.
*
* \return true if the supplied hash info is up to date.
*/
@@ -75,9 +80,9 @@ public:
} ;

// interaction with GUI, called from p3FileLists
void setRememberHashFilesDuration(uint32_t days) { mMaxStorageDurationDays = days ; }
void setRememberHashFilesDuration(uint32_t days) { mMaxStorageDurationDays = days ; } // duration for which the hash is kept even if the file is not shared anymore
uint32_t rememberHashFilesDuration() const { return mMaxStorageDurationDays ; }
void clear() { mFiles.clear(); mChanged=true; }
void clear() { mFiles.clear(); mChanged=true; } // drop all known hashes. Not something to do, except if you want to rehash the entire database
bool empty() const { return mFiles.empty() ; }

// Functions called by the thread
@@ -86,8 +91,14 @@ public:

friend std::ostream& operator<<(std::ostream& o,const HashStorageInfo& info) ;
private:
/*!
* \brief clean
* This function is responsible for removing old hashes, etc
*/
void clean() ;

// loading/saving the entire hash database to a file

void locked_save() ;
void locked_load() ;

@@ -98,7 +109,7 @@ private:

uint32_t mMaxStorageDurationDays ; // maximum duration of un-requested cache entries
std::map<std::string, HashStorageInfo> mFiles ; // stored as (full_path, hash_info)
std::string mFilePath ;
std::string mFilePath ; // file where the hash database is stored
bool mChanged ;

struct FileHashJob
@@ -223,8 +223,10 @@ int p3FileDatabase::tick()

mLastRemoteDirSweepTS = now;

#warning hack to make loaded directories show up in the GUI, because the GUI isn_t ready at the time they are actually loaded up.
RsServer::notify()->notifyListChange(NOTIFY_LIST_DIRLIST_FRIENDS, 0);
// This is a hack to make loaded directories show up in the GUI, because the GUI generally isn't ready at the time they are actually loaded up,
// so the first notify is ignored, and no other notify will happen next.

RsServer::notify()->notifyListChange(NOTIFY_LIST_DIRLIST_FRIENDS, 0);
}

return 0;
@@ -614,7 +616,9 @@ void p3FileDatabase::requestDirUpdate(void *ref)
P3FILELISTS_DEBUG() << "Trying to force sync of entry index " << e << " to friend " << mRemoteDirectories[fi-1]->peerId() << std::endl;
#endif

if(generateAndSendSyncRequest(mRemoteDirectories[fi-1],e))
RS_STACK_MUTEX(mFLSMtx) ;

if(locked_generateAndSendSyncRequest(mRemoteDirectories[fi-1],e))
{
#ifdef DEBUG_P3FILELISTS
P3FILELISTS_DEBUG() << "  Succeeded." << std::endl;
@@ -638,7 +642,7 @@ bool p3FileDatabase::findChildPointer(void *ref, int row, void *& result, FileSe

return true ;
}
else if(row < mRemoteDirectories.size())
else if((uint32_t)row < mRemoteDirectories.size())
{
convertEntryIndexToPointer(mRemoteDirectories[row]->root(),row+1,result);
return true;
@@ -752,7 +756,11 @@ int p3FileDatabase::RequestDirDetails(void *ref, DirDetails& d, FileSearchFlags

// Case where the index is the top of a single person. Can be us, or a friend.

bool res = storage->extractData(e,d);
if(!storage->extractData(e,d))
{
P3FILELISTS_ERROR() << "(EE) request on index " << e << ", for directory ID=" << storage->peerId() << " failed. This should not happen." << std::endl;
return false ;
}

// update indexes. This is a bit hacky, but does the job. The cast to intptr_t is the proper way to convert
// a pointer into an int.
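
The comment above refers to packing a small (friend, entry-index) pair into the void* that the GUI model carries around. The exact bit layout used by p3FileDatabase is not shown here; the sketch below only illustrates the intptr_t round-trip the comment is talking about, with an arbitrary split chosen for the example:

    #include <cassert>
    #include <cstdint>

    // Illustrative encoding only: friend index in the top bits, entry index in the low bits.
    static void convertEntryIndexToPointerSketch(uint32_t entry_index, uint32_t friend_index, void *& ref)
    {
        intptr_t v = (static_cast<intptr_t>(friend_index) << 28) | static_cast<intptr_t>(entry_index & 0x0fffffff);
        ref = reinterpret_cast<void*>(v);
    }

    static void convertPointerToEntryIndexSketch(void *ref, uint32_t& entry_index, uint32_t& friend_index)
    {
        intptr_t v = reinterpret_cast<intptr_t>(ref);
        friend_index = static_cast<uint32_t>(v >> 28);
        entry_index  = static_cast<uint32_t>(v & 0x0fffffff);
    }

    int main()
    {
        void *ref = 0;
        convertEntryIndexToPointerSketch(12345, 3, ref);
        uint32_t e = 0, fi = 0;
        convertPointerToEntryIndexSketch(ref, e, fi);
        assert(e == 12345 && fi == 3);   // round-trip is lossless for small values
        return 0;
    }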
@@ -824,6 +832,7 @@ void p3FileDatabase::forceDirectoryCheck()              // Force re-sweep the di
}
bool p3FileDatabase::inDirectoryCheck()
{
RS_STACK_MUTEX(mFLSMtx) ;
return mLocalDirWatcher->inDirectoryCheck();
}
void p3FileDatabase::setWatchEnabled(bool b)
@@ -975,8 +984,9 @@ bool p3FileDatabase::convertSharedFilePath(const std::string& path,std::string&
// - because the hash is performed late, the last modf time upward is updated only when the hash is obtained.
//
// Remote dirs store
// 1 - recursive modif time of the files/dir in the friend's time
// 2 - update TS in the local time
// 1 - recursive max modif time of the files/dir in the friend's time
// 2 - modif time of the files/dir in the friend's time
// 3 - update TS in the local time
//
// The update algorithm is the following:
//
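
Combined with the sweep code further down (the DELAY_BETWEEN_REMOTE_DIRECTORY_SYNC_REQ test and the locked_generateAndSendSyncRequest call), those stored timestamps drive a simple per-entry decision. A condensed sketch of that decision with stand-in types, using the same two local-time conditions that appear in the diff below:

    #include <ctime>

    // Stand-ins for the sketch; the real code lives in p3FileDatabase::locked_recursSweepRemoteDirectory().
    static const time_t DELAY_BETWEEN_REMOTE_DIRECTORY_SYNC_REQ_SKETCH = 60;

    struct RemoteEntrySketch
    {
        time_t local_update_TS;   // last time this entry was synced, in local time
        bool   is_root;           // entry index 0 in the real code
    };

    // Returns true when a sync request should be generated for this entry.
    // Only local timestamps are compared: local "now" cannot be compared with the friend's clock.
    static bool needsSyncRequest(const RemoteEntrySketch& e, time_t now)
    {
        if(e.local_update_TS == 0)   // never synced yet
            return true;

        return e.is_root && now > e.local_update_TS + DELAY_BETWEEN_REMOTE_DIRECTORY_SYNC_REQ_SKETCH;
    }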
@@ -1275,20 +1285,29 @@ void p3FileDatabase::handleDirSyncResponse(RsFileListsSyncResponseItem *sitem)

if(item->flags & RsFileListsItem::FLAGS_ENTRY_WAS_REMOVED)
{
RS_STACK_MUTEX(mFLSMtx) ;
#ifdef DEBUG_P3FILELISTS
std::cerr << "  removing directory with index " << entry_index << " because it does not exist." << std::endl;
#endif
mRemoteDirectories[fi]->removeDirectory(entry_index);

mRemoteDirectories[fi]->print();
}
else if(item->flags & RsFileListsItem::FLAGS_ENTRY_UP_TO_DATE)
{
RS_STACK_MUTEX(mFLSMtx) ;
#ifdef DEBUG_P3FILELISTS
std::cerr << "  Directory is up to date. Setting local TS." << std::endl;
#endif

mRemoteDirectories[fi]->setDirectoryUpdateTime(entry_index,time(NULL)) ;
}
else if(item->flags & RsFileListsItem::FLAGS_SYNC_DIR_CONTENT)
{
#ifdef DEBUG_P3FILELISTS
std::cerr << "  Item contains directory data. Deserialising/Updating." << std::endl;
#endif
RS_STACK_MUTEX(mFLSMtx) ;

if(mRemoteDirectories[fi]->deserialiseUpdateDirEntry(entry_index,item->directory_content_data))
RsServer::notify()->notifyListChange(NOTIFY_LIST_DIRLIST_FRIENDS, 0); // notify the GUI if the hierarchy has changed
@@ -1328,7 +1347,7 @@ void p3FileDatabase::locked_recursSweepRemoteDirectory(RemoteDirectoryStorage *r
// compare TS

if((e == 0 && now > local_update_TS + DELAY_BETWEEN_REMOTE_DIRECTORY_SYNC_REQ) || local_update_TS == 0) // we need to compare local times only. We cannot compare local (now) with remote time.
if(generateAndSendSyncRequest(rds,e))
if(locked_generateAndSendSyncRequest(rds,e))
{
#ifdef DEBUG_P3FILELISTS
P3FILELISTS_DEBUG() << "  Asking for sync of directory " << e << " to peer " << rds->peerId() << " because it's " << (now - local_update_TS) << " secs old since last check." << std::endl;
@@ -1363,7 +1382,7 @@ p3FileDatabase::DirSyncRequestId p3FileDatabase::makeDirSyncReqId(const RsPeerId
return r ^ random_bias;
}

bool p3FileDatabase::generateAndSendSyncRequest(RemoteDirectoryStorage *rds,const DirectoryStorage::EntryIndex& e)
bool p3FileDatabase::locked_generateAndSendSyncRequest(RemoteDirectoryStorage *rds,const DirectoryStorage::EntryIndex& e)
{
RsFileHash entry_hash ;
time_t now = time(NULL) ;

@@ -117,7 +117,7 @@ class p3FileDatabase: public p3Service, public p3Config, public ftSearch //, pub
int RequestDirDetails(void *, DirDetails&, FileSearchFlags) const ;
uint32_t getType(void *) const ;

// proxy method used by the web UI.
// proxy method used by the web UI. Don't delete!
int RequestDirDetails(const RsPeerId& uid, const std::string& path, DirDetails &details)const;

// set/update shared directories
@@ -173,7 +173,7 @@ class p3FileDatabase: public p3Service, public p3Config, public ftSearch //, pub
* \param e Entry index to update
* \return true if the request is correctly sent.
*/
bool generateAndSendSyncRequest(RemoteDirectoryStorage *rds,const DirectoryStorage::EntryIndex& e);
bool locked_generateAndSendSyncRequest(RemoteDirectoryStorage *rds,const DirectoryStorage::EntryIndex& e);

// File sync request queues. The fast one is used for online browsing when friends are connected.
// The slow one is used for background update of file lists.
|
||||
|
||||
void RemoteSharedFilesDialog::expanded(const QModelIndex& indx)
|
||||
{
|
||||
#ifdef DEBUG_SHARED_FILES_DIALOG
|
||||
std::cerr << "Expanding at " << indx.row() << " and " << indx.column() << " ref=" << indx.internalPointer() << ", pointer at 1: " << proxyModel->mapToSource(indx).internalPointer() << std::endl;
|
||||
#endif
|
||||
|
||||
model->updateRef(proxyModel->mapToSource(indx)) ;
|
||||
}
|
||||
|