Mirror of https://github.com/RetroShare/RetroShare.git (synced 2024-10-01 02:35:48 -04:00)

commit ca8f95c7f7 (parent 2668b6da1c)

    fixed a few bugs in internal directory storage structure
@@ -101,17 +101,23 @@ class InternalFileHierarchyStorage
     for(uint32_t i=0;i<d.subdirs.size();)
         if(subdirs.find(static_cast<DirEntry*>(mNodes[d.subdirs[i]])->dir_name) == subdirs.end())
         {
+            std::cerr << "[directory storage] Removing subdirectory " << static_cast<DirEntry*>(mNodes[d.subdirs[i]])->dir_name << " with index " << d.subdirs[i] << std::endl;
+
             if( !removeDirectory(d.subdirs[i]))
                 i++ ;
         }
         else
         {
+            std::cerr << "[directory storage] Keeping existing subdirectory " << static_cast<DirEntry*>(mNodes[d.subdirs[i]])->dir_name << " with index " << d.subdirs[i] << std::endl;
+
             should_create.erase(static_cast<DirEntry*>(mNodes[d.subdirs[i]])->dir_name) ;
             ++i;
         }
 
     for(std::set<std::string>::const_iterator it(should_create.begin());it!=should_create.end();++it)
     {
+        std::cerr << "[directory storage] adding new subdirectory " << *it << " at index " << mNodes.size() << std::endl;
+
         d.subdirs.push_back(mNodes.size()) ;
         mNodes.push_back(new DirEntry(*it));
         mNodes.back()->row = mNodes.size()-1;
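The hunk above follows a two-pass reconcile: stored subdirectory entries whose names are no longer on disk are removed, names that are still present are erased from should_create, and whatever remains in should_create afterwards is appended as a new entry. A minimal standalone sketch of that pattern, with simplified placeholder types (Node, reconcile and the containers below are illustrative, not RetroShare's classes):

#include <cstdint>
#include <set>
#include <string>
#include <vector>

struct Node { std::string name; };

// 'stored' holds indices into 'nodes' for subdirectories we already track;
// 'on_disk' is the set of names found by the current filesystem scan.
void reconcile(std::vector<Node*>& nodes, std::vector<uint32_t>& stored, std::set<std::string> on_disk)
{
    // Pass 1: drop stored entries whose name is gone; erase matched names from
    // on_disk so that only the names still to be created remain in it.
    for(uint32_t i = 0; i < stored.size();)
        if(on_disk.find(nodes[stored[i]]->name) == on_disk.end())
        {
            delete nodes[stored[i]];
            nodes[stored[i]] = nullptr;
            stored.erase(stored.begin() + i);   // do not advance i: the next entry slid into slot i
        }
        else
        {
            on_disk.erase(nodes[stored[i]]->name);
            ++i;
        }

    // Pass 2: append an entry for every name that was on disk but not yet stored.
    for(std::set<std::string>::const_iterator it = on_disk.begin(); it != on_disk.end(); ++it)
    {
        stored.push_back(uint32_t(nodes.size()));
        nodes.push_back(new Node{*it});
    }
}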
@@ -170,9 +176,13 @@ class InternalFileHierarchyStorage
 
     if(it == subfiles.end())    // file does not exist anymore => delete
     {
+        std::cerr << "[directory storage] removing non existing file " << f.file_name << " at index " << d.subfiles[i] << std::endl;
+
+        delete mNodes[d.subfiles[i]] ;
+        mNodes[d.subfiles[i]] = NULL ;
 
         d.subfiles[i] = d.subfiles[d.subfiles.size()-1] ;
         d.subfiles.pop_back();
 
         continue;
     }
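The removal branch uses the classic swap-and-pop idiom: delete the node, null its slot, then overwrite d.subfiles[i] with the last index and pop_back, so nothing is shifted and the loop index is not advanced. A minimal sketch of the same idiom with placeholder types (Entry and still_exists are illustrative):

#include <cstdint>
#include <string>
#include <vector>

struct Entry { std::string name; bool still_exists; };

// 'subfiles' holds indices into 'nodes'; entries whose file disappeared are
// deleted in place and their index slot is refilled from the back of the vector.
void prune(std::vector<Entry*>& nodes, std::vector<uint32_t>& subfiles)
{
    for(uint32_t i = 0; i < subfiles.size();)
    {
        if(!nodes[subfiles[i]]->still_exists)
        {
            delete nodes[subfiles[i]];
            nodes[subfiles[i]] = nullptr;               // slot in 'nodes' stays, pointer is cleared

            subfiles[i] = subfiles[subfiles.size()-1];  // O(1) removal: copy last index over slot i
            subfiles.pop_back();
            continue;                                   // re-check slot i, which now holds a new index
        }
        ++i;
    }
}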
@@ -190,6 +200,8 @@ class InternalFileHierarchyStorage
 
     for(std::map<std::string,DirectoryStorage::FileTS>::const_iterator it(new_files.begin());it!=new_files.end();++it)
     {
+        std::cerr << "[directory storage] adding new file " << it->first << " at index " << mNodes.size() << std::endl;
+
         d.subfiles.push_back(mNodes.size()) ;
         mNodes.push_back(new FileEntry(it->first,it->second.size,it->second.modtime));
         mNodes.back()->row = mNodes.size()-1;
@@ -200,7 +212,12 @@ class InternalFileHierarchyStorage
     bool updateHash(const DirectoryStorage::EntryIndex& file_index,const RsFileHash& hash)
     {
         if(!checkIndex(file_index,FileStorageNode::TYPE_FILE))
+        {
+            std::cerr << "[directory storage] (EE) cannot update file at index " << file_index << ". Not a valid index, or not a file." << std::endl;
             return false;
+        }
+
+        std::cerr << "[directory storage] updating hash at index " << file_index << ", hash=" << hash << std::endl;
 
         static_cast<FileEntry*>(mNodes[file_index])->file_hash = hash ;
         return true;
@@ -208,10 +225,15 @@ class InternalFileHierarchyStorage
     bool updateFile(const DirectoryStorage::EntryIndex& file_index,const RsFileHash& hash, const std::string& fname,uint64_t size, const time_t modf_time)
     {
         if(!checkIndex(file_index,FileStorageNode::TYPE_FILE))
+        {
+            std::cerr << "[directory storage] (EE) cannot update file at index " << file_index << ". Not a valid index, or not a file." << std::endl;
             return false;
+        }
 
         FileEntry& fe(*static_cast<FileEntry*>(mNodes[file_index])) ;
 
+        std::cerr << "[directory storage] updating file entry at index " << file_index << ", name=" << fe.file_name << " size=" << fe.file_size << ", hash=" << fe.file_hash << std::endl;
+
         fe.file_hash = hash;
         fe.file_size = size;
         fe.file_modtime = modf_time;
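Both setters above now share the same guard: checkIndex() must confirm that the index is in range and refers to a file node before the static_cast is performed, and a failure is logged instead of being silently swallowed. A simplified, self-contained sketch of that guard (the node hierarchy and updateSize() below are illustrative stand-ins, not the actual classes):

#include <cstdint>
#include <iostream>
#include <vector>

struct FileStorageNode
{
    enum { TYPE_DIR = 1, TYPE_FILE = 2 };
    virtual int type() const = 0;
    virtual ~FileStorageNode() {}
};

struct FileEntry : FileStorageNode
{
    int type() const override { return TYPE_FILE; }
    uint64_t file_size = 0;
};

struct Storage
{
    std::vector<FileStorageNode*> mNodes;

    // Reject out-of-range indices, freed (null) slots and nodes of the wrong type.
    bool checkIndex(uint32_t i, int expected_type) const
    {
        return i < mNodes.size() && mNodes[i] != nullptr && mNodes[i]->type() == expected_type;
    }

    bool updateSize(uint32_t file_index, uint64_t size)
    {
        if(!checkIndex(file_index, FileStorageNode::TYPE_FILE))
        {
            std::cerr << "(EE) cannot update file at index " << file_index
                      << ". Not a valid index, or not a file." << std::endl;
            return false;
        }

        static_cast<FileEntry*>(mNodes[file_index])->file_size = size;  // cast is safe after the check
        return true;
    }
};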
@@ -321,6 +343,12 @@ private:
     void recursPrint(int depth,DirectoryStorage::EntryIndex node) const
     {
         std::string indent(2*depth,' ');
+
+        if(mNodes[node] == NULL)
+        {
+            std::cerr << "EMPTY NODE !!" << std::endl;
+            return ;
+        }
         DirEntry& d(*static_cast<DirEntry*>(mNodes[node]));
 
         std::cerr << indent << "dir:" << d.dir_name << std::endl;
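The added check makes the debug printer robust against slots that were nulled out by the pruning code above. A tiny sketch of the same defensive pattern on a generic index-based tree (TreeNode and the traversal are illustrative):

#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

struct TreeNode { std::vector<size_t> children; };

// Walks an index-based tree; a null slot (e.g. one freed by a pruning pass)
// is reported and skipped instead of being dereferenced.
void recursPrint(const std::vector<TreeNode*>& nodes, size_t node, int depth)
{
    if(nodes[node] == nullptr)
    {
        std::cerr << "EMPTY NODE !!" << std::endl;
        return;
    }

    std::cerr << std::string(2*depth, ' ') << "node " << node << std::endl;

    for(size_t c : nodes[node]->children)
        recursPrint(nodes, c, depth+1);
}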
@@ -83,12 +83,6 @@ void LocalDirectoryUpdater::recursUpdateSharedDir(const std::string& cumulated_p
 
     librs::util::FolderIterator dirIt(cumulated_path);
 
-    if(!dirIt.isValid())
-    {
-        mSharedDirectories->removeDirectory(indx) ;    // this is a complex operation since it needs to *update* it so that it is kept consistent.
-        return ;
-    }
-
     // collect subdirs and subfiles
 
     std::map<std::string,DirectoryStorage::FileTS> subfiles ;
@@ -12,7 +12,9 @@ static const uint32_t MAX_INACTIVITY_SLEEP_TIME = 2*1000*1000;
 HashStorage::HashStorage(const std::string& save_file_name)
     : mFilePath(save_file_name), mHashMtx("Hash Storage mutex")
 {
+    mInactivitySleepTime = DEFAULT_INACTIVITY_SLEEP_TIME;
     mRunning = false ;
+
     load() ;
 }
 
@@ -63,7 +65,11 @@ void HashStorage::data_tick()
     }
     mInactivitySleepTime = DEFAULT_INACTIVITY_SLEEP_TIME;
 
+    {
+        RS_STACK_MUTEX(mHashMtx) ;
         job = mFilesToHash.begin()->second ;
+        mFilesToHash.erase(mFilesToHash.begin()) ;
+    }
 
     std::cerr << "Hashing file " << job.full_path << "..." ; std::cerr.flush();
 
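Together with the removal of the later erase in the next hunk, this change makes the job both read and removed from mFilesToHash inside one RS_STACK_MUTEX scope, so the queue is never observed still holding a job that has already been picked up, and the slow hashing happens outside the lock. A sketch of the same pattern using std::mutex/std::lock_guard as stand-ins for RetroShare's RS_STACK_MUTEX macro:

#include <iostream>
#include <map>
#include <mutex>
#include <string>

struct HashJob { std::string full_path; };

std::mutex queueMtx;
std::map<std::string, HashJob> filesToHash;

// Take the next job and remove it from the queue in one critical section,
// then do the (slow) hashing work outside the lock.
bool pickNextJob(HashJob& job)
{
    {
        std::lock_guard<std::mutex> lock(queueMtx);   // scoped lock, released at the closing brace

        if(filesToHash.empty())
            return false;

        job = filesToHash.begin()->second;
        filesToHash.erase(filesToHash.begin());       // erase while still holding the lock
    }

    std::cerr << "Hashing file " << job.full_path << "..." << std::endl;
    return true;
}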
@@ -77,8 +83,6 @@ void HashStorage::data_tick()
     else
         std::cerr << "done."<< std::endl;
 
-    mFilesToHash.erase(mFilesToHash.begin()) ;
-
     // store the result
 
     HashStorageInfo& info(mFiles[job.full_path]);
@@ -84,6 +84,6 @@ private:
 
     RsMutex mHashMtx ;
     bool mRunning;
-    uint32_t mInactivitySleepTime = 50*1000 ;
+    uint32_t mInactivitySleepTime ;
};
 
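The header-side change drops the in-class default ("= 50*1000") so that mInactivitySleepTime is now set from DEFAULT_INACTIVITY_SLEEP_TIME in the constructor (see the constructor hunk above), keeping the default value in a single place. A minimal before/after sketch, with placeholder names and values:

#include <cstdint>

static const uint32_t DEFAULT_INACTIVITY_SLEEP_TIME = 50*1000;   // placeholder value

// Before: the default lived in the header, next to the member declaration.
class BeforeSketch
{
    uint32_t mInactivitySleepTime = 50*1000;
};

// After: the member is plain in the header and the constructor assigns the
// named constant, so the implementation file owns the default.
class AfterSketch
{
public:
    AfterSketch() { mInactivitySleepTime = DEFAULT_INACTIVITY_SLEEP_TIME; }
private:
    uint32_t mInactivitySleepTime;
};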
@@ -203,21 +203,16 @@ void p3FileDatabase::cleanup()
     //
     for(std::set<RsPeerId>::const_iterator it(friend_set.begin());it!=friend_set.end();++it)
     {
-        P3FILELISTS_DEBUG() << " adding missing remote dir entry for friend " << *it << std::endl;
+        // Check if a remote directory exists for that friend, possibly creating the index.
 
-        uint32_t i;
-        for(i=0;i<mDirectories.size() && mDirectories[i] != NULL;++i);
-
-        if(i == mDirectories.size())
-            mDirectories.push_back(NULL) ;
+        uint32_t friend_index = getFriendIndex(*it) ;
 
-        mDirectories[i] = new RemoteDirectoryStorage(*it,makeRemoteFileName(*it));
+        if(mDirectories.size() > friend_index && mDirectories[friend_index] != NULL)
+            continue ;
 
-        if(mFriendIndexTab.size() <= i)
-            mFriendIndexTab.resize(i+1) ;
-
-        mFriendIndexTab[i] = *it ;
-        mFriendIndexMap[*it] = i;
+        P3FILELISTS_DEBUG() << " adding missing remote dir entry for friend " << *it << ", with index " << friend_index << std::endl;
+
+        mDirectories[friend_index] = new RemoteDirectoryStorage(*it,makeRemoteFileName(*it));
 
         mUpdateFlags |= P3FILELISTS_UPDATE_FLAG_REMOTE_MAP_CHANGED ;
     }
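cleanup() no longer scans mDirectories for the first free slot; it asks getFriendIndex() for the friend's stable index and only creates the RemoteDirectoryStorage if that slot is still empty, so index assignment lives in one place. A simplified sketch of the new flow (Db, RemoteDir and the trivial getFriendIndex below are illustrative stand-ins, not the real classes):

#include <cstdint>
#include <iostream>
#include <map>
#include <set>
#include <string>
#include <vector>

struct RemoteDir { std::string peer; };

struct Db
{
    std::vector<RemoteDir*> mDirectories;
    std::map<std::string, uint32_t> mFriendIndexMap;

    // Returns the friend's stable index, allocating a new one (and growing
    // mDirectories) if the peer has not been seen before.
    uint32_t getFriendIndex(const std::string& pid)
    {
        auto it = mFriendIndexMap.find(pid);
        if(it != mFriendIndexMap.end())
            return it->second;

        uint32_t idx = (uint32_t)mFriendIndexMap.size();
        mFriendIndexMap[pid] = idx;
        if(mDirectories.size() <= idx)
            mDirectories.resize(idx+1, nullptr);
        return idx;
    }

    void cleanup(const std::set<std::string>& friend_set)
    {
        for(const std::string& pid : friend_set)
        {
            uint32_t friend_index = getFriendIndex(pid);

            if(mDirectories.size() > friend_index && mDirectories[friend_index] != nullptr)
                continue;                               // entry already exists for this friend

            std::cerr << "adding missing remote dir entry for friend " << pid
                      << ", with index " << friend_index << std::endl;
            mDirectories[friend_index] = new RemoteDir{pid};
        }
    }
};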
@@ -249,19 +244,33 @@ uint32_t p3FileDatabase::getFriendIndex(const RsPeerId& pid)
 
         if(!found)
         {
-            std::cerr << "(EE) FriendIndexTab is full. This is weird. Do you really have more than 1024 friends??" << std::endl;
-            return 1024 ;
+            found = mFriendIndexTab.size();
+            mFriendIndexTab.push_back(pid);
+        }
+
+        if(mFriendIndexTab.size() >= (1 << NB_FRIEND_INDEX_BITS) )
+        {
+            std::cerr << "(EE) FriendIndexTab is full. This is weird. Do you really have more than " << (1<<NB_FRIEND_INDEX_BITS) << " friends??" << std::endl;
+            return 1 << NB_FRIEND_INDEX_BITS ;
         }
 
         mFriendIndexTab[found] = pid ;
         mFriendIndexMap[pid] = found;
 
+        if(mDirectories.size() <= found)
+            mDirectories.resize(found+1,NULL) ;
+
         IndicateConfigChanged();
 
         return found ;
     }
     else
+    {
+        if(mDirectories.size() <= it->second)
+            mDirectories.resize(it->second+1,NULL) ;
+
         return it->second;
+    }
}
 
const RsPeerId& p3FileDatabase::getFriendFromIndex(uint32_t indx) const
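getFriendIndex() now grows the table by appending the new peer, refuses to go past 2^NB_FRIEND_INDEX_BITS entries (instead of the old hard-coded 1024), and resizes mDirectories on both paths so the returned index always refers to an existing slot. A condensed, self-contained sketch of that allocation logic (the constant value and container types here are assumptions for illustration only):

#include <cstdint>
#include <iostream>
#include <map>
#include <string>
#include <vector>

static const uint32_t NB_FRIEND_INDEX_BITS = 10;   // placeholder value

struct FriendIndex
{
    std::vector<std::string> mFriendIndexTab;
    std::map<std::string, uint32_t> mFriendIndexMap;
    std::vector<void*> mDirectories;

    uint32_t get(const std::string& pid)
    {
        auto it = mFriendIndexMap.find(pid);
        if(it == mFriendIndexMap.end())
        {
            uint32_t found = (uint32_t)mFriendIndexTab.size();   // append at the end
            mFriendIndexTab.push_back(pid);

            if(mFriendIndexTab.size() >= (1u << NB_FRIEND_INDEX_BITS))
            {
                std::cerr << "(EE) FriendIndexTab is full." << std::endl;
                return 1u << NB_FRIEND_INDEX_BITS;               // out-of-range sentinel
            }

            mFriendIndexMap[pid] = found;

            if(mDirectories.size() <= found)
                mDirectories.resize(found+1, nullptr);           // keep the returned slot valid

            return found;
        }

        if(mDirectories.size() <= it->second)
            mDirectories.resize(it->second+1, nullptr);

        return it->second;
    }
};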