Mirror of https://github.com/RetroShare/RetroShare.git (synced 2024-12-27 08:29:26 -05:00)
added count of total number of files and total file size on top of shared directories
This commit is contained in: parent 61131e4bf5, commit 5ef4e752d9
@@ -62,6 +62,9 @@ InternalFileHierarchyStorage::InternalFileHierarchyStorage() : mRoot(0)
     mNodes.push_back(de) ;
     mDirHashes[de->dir_hash] = 0 ;
+
+    mTotalSize = 0 ;
+    mTotalFiles = 0 ;
 }
 
 bool InternalFileHierarchyStorage::getDirHashFromIndex(const DirectoryStorage::EntryIndex& index,RsFileHash& hash) const

@@ -299,7 +302,7 @@ bool InternalFileHierarchyStorage::updateSubFilesList(const DirectoryStorage::En
         std::cerr << "[directory storage] removing non existing file " << f.file_name << " at index " << d.subfiles[i] << std::endl;
 #endif
 
-        deleteNode(d.subfiles[i]) ;
+        deleteFileNode(d.subfiles[i]) ;
 
         d.subfiles[i] = d.subfiles[d.subfiles.size()-1] ;
         d.subfiles.pop_back();

@@ -311,6 +314,9 @@ bool InternalFileHierarchyStorage::updateSubFilesList(const DirectoryStorage::En
             f.file_hash.clear(); // hash needs recomputing
             f.file_modtime = it->second.modtime;
             f.file_size = it->second.size;
+
+            mTotalSize -= f.file_size ;
+            mTotalSize += it->second.size ;
         }
         new_files.erase(f.file_name) ;
 

@@ -327,6 +333,9 @@ bool InternalFileHierarchyStorage::updateSubFilesList(const DirectoryStorage::En
         mNodes.push_back(new FileEntry(it->first,it->second.size,it->second.modtime));
         mNodes.back()->row = mNodes.size()-1;
         mNodes.back()->parent_index = indx;
+
+        mTotalSize += it->second.size;
+        mTotalFiles += 1;
     }
     return true;
 }

@@ -361,6 +370,10 @@ bool InternalFileHierarchyStorage::updateFile(const DirectoryStorage::EntryIndex
 #ifdef DEBUG_DIRECTORY_STORAGE
     std::cerr << "[directory storage] updating file entry at index " << file_index << ", name=" << fe.file_name << " size=" << fe.file_size << ", hash=" << fe.file_hash << std::endl;
 #endif
+    if(mTotalSize >= fe.file_size)
+        mTotalSize -= fe.file_size;
+
+    mTotalSize += size ;
 
     fe.file_hash = hash;
     fe.file_size = size;

@@ -373,6 +386,23 @@ bool InternalFileHierarchyStorage::updateFile(const DirectoryStorage::EntryIndex
     return true;
 }
 
+void InternalFileHierarchyStorage::deleteFileNode(uint32_t index)
+{
+    if(mNodes[index] != NULL)
+    {
+        FileEntry& fe(*static_cast<FileEntry*>(mNodes[index])) ;
+
+        if(mTotalSize >= fe.file_size)
+            mTotalSize -= fe.file_size ;
+
+        if(mTotalFiles > 0)
+            mTotalFiles -= 1;
+
+        delete mNodes[index] ;
+        mFreeNodes.push_back(index) ;
+        mNodes[index] = NULL ;
+    }
+}
 void InternalFileHierarchyStorage::deleteNode(uint32_t index)
 {
     if(mNodes[index] != NULL)

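The hunks above add the two counters and keep them consistent: creating a file entry adds to both, updateFile() swaps the old size for the new one, and the new deleteFileNode() subtracts with underflow guards. Below is a minimal standalone sketch of that bookkeeping pattern; the class and method names (ShareCounters, addFile, ...) are illustrative only and not RetroShare code.

// Minimal standalone sketch of the counter bookkeeping added by this commit.
// Names here are illustrative, not part of the RetroShare API.
#include <cstdint>
#include <iostream>

class ShareCounters
{
public:
    ShareCounters() : mTotalFiles(0), mTotalSize(0) {}

    // Mirrors the "new file entry" paths: both counters grow.
    void addFile(uint64_t size) { mTotalSize += size; mTotalFiles += 1; }

    // Mirrors updateFile(): subtract the old size (guarded), then add the new one.
    void updateFile(uint64_t old_size, uint64_t new_size)
    {
        if(mTotalSize >= old_size) mTotalSize -= old_size;
        mTotalSize += new_size;
    }

    // Mirrors deleteFileNode(): both counters are guarded against underflow.
    void removeFile(uint64_t size)
    {
        if(mTotalSize >= size) mTotalSize -= size;
        if(mTotalFiles > 0)    mTotalFiles -= 1;
    }

    uint32_t totalFiles() const { return mTotalFiles; }
    uint64_t totalSize()  const { return mTotalSize; }

private:
    uint32_t mTotalFiles;
    uint64_t mTotalSize;
};

int main()
{
    ShareCounters c;
    c.addFile(1000);
    c.addFile(2500);
    c.updateFile(2500, 3000);
    c.removeFile(1000);
    std::cout << c.totalFiles() << " files, " << c.totalSize() << " bytes" << std::endl; // 1 files, 3000 bytes
    return 0;
}
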
@@ -518,6 +548,8 @@ bool InternalFileHierarchyStorage::updateDirEntry(const DirectoryStorage::EntryI
 
             mNodes[file_index] = new FileEntry(f.file_name,f.file_size,f.file_modtime,f.file_hash) ;
             mFileHashes[f.file_hash] = file_index ;
+            mTotalSize += f.file_size ;
+            mTotalFiles++;
 
 #ifdef DEBUG_DIRECTORY_STORAGE
             std::cerr << " created, at new index " << file_index << std::endl;

@@ -539,7 +571,7 @@ bool InternalFileHierarchyStorage::updateDirEntry(const DirectoryStorage::EntryI
                 std::cerr << "(EE) Cannot delete node of index " << it->second << " because it is not a file. Inconsistency error!" << std::endl;
                 continue ;
             }
-            deleteNode(it->second) ;
+            deleteFileNode(it->second) ;
         }
 
     // now update row and parent index for all subnodes

@@ -562,6 +594,12 @@ bool InternalFileHierarchyStorage::updateDirEntry(const DirectoryStorage::EntryI
     return true;
 }
 
+void InternalFileHierarchyStorage::getStatistics(SharedDirStats& stats) const
+{
+    stats.total_number_of_files = mTotalFiles ;
+    stats.total_shared_size = mTotalSize ;
+}
+
 bool InternalFileHierarchyStorage::getTS(const DirectoryStorage::EntryIndex& index,time_t& TS,time_t DirEntry::* m) const
 {
     if(!checkIndex(index,FileStorageNode::TYPE_DIR))

@@ -806,7 +844,7 @@ bool InternalFileHierarchyStorage::check(std::string& error_string) // checks co
         {
             error_string += " - Orphean node!" ;
 
-            deleteNode(i) ;
+            deleteNode(i) ; // we don't care if it's a file or a dir.
         }
 
     return error_string.empty();;

@@ -887,8 +925,7 @@ bool InternalFileHierarchyStorage::nodeAccessError(const std::string& s)
     return false ;
 }
 
-// Removes the given subdirectory from the parent node and all its pendign subdirs. Files are kept, and will go during the cleaning
-// phase. That allows to keep file information when moving them around.
+// Removes the given subdirectory from the parent node and all its pendign subdirs and files.
 
 bool InternalFileHierarchyStorage::recursRemoveDirectory(DirectoryStorage::EntryIndex dir)
 {

@@ -900,7 +937,7 @@ bool InternalFileHierarchyStorage::recursRemoveDirectory(DirectoryStorage::Entry
         recursRemoveDirectory(d.subdirs[i]);
 
     for(uint32_t i=0;i<d.subfiles.size();++i)
-        deleteNode(d.subfiles[i]);
+        deleteFileNode(d.subfiles[i]);
 
     deleteNode(dir) ;
 

@@ -1016,6 +1053,8 @@ bool InternalFileHierarchyStorage::load(const std::string& fname)
     uint32_t buffer_offset = 0 ;
 
     mFreeNodes.clear();
+    mTotalFiles = 0;
+    mTotalSize = 0;
 
     try
     {

@@ -1077,6 +1116,9 @@ bool InternalFileHierarchyStorage::load(const std::string& fname)
 
                 mNodes[node_index] = fe ;
                 mFileHashes[fe->file_hash] = node_index ;
+
+                mTotalFiles++ ;
+                mTotalSize += file_size ;
             }
             else if(FileListIO::readField(buffer,buffer_size,buffer_offset,FILE_LIST_IO_TAG_LOCAL_DIR_ENTRY,node_section_data,node_section_size))
             {

@@ -1143,6 +1185,7 @@ bool InternalFileHierarchyStorage::load(const std::string& fname)
+            free(node_section_data) ;
         }
         free(buffer) ;
 
         return true ;
     }
     catch(read_error& e)

@@ -82,7 +82,7 @@ public:
     std::vector<DirectoryStorage::EntryIndex> subfiles ;
 
     time_t dir_modtime;
-    time_t dir_most_recent_time; // recursive most recent modification time, including files and subdirs in the entire hierarchy below.
+    time_t dir_most_recent_time;// recursive most recent modification time, including files and subdirs in the entire hierarchy below.
     time_t dir_update_time; // last time the information was updated for that directory. Includes subdirs indexes and subfile info.
 };
 

@@ -109,6 +109,7 @@ public:
     bool setTS(const DirectoryStorage::EntryIndex& index,time_t& TS,time_t DirEntry::* ) ;
 
     // Do a complete recursive sweep over sub-directories and files, and update the lst modf TS. This could be also performed by a cleanup method.
+    // Also keeps the high level statistics up to date.
 
     time_t recursUpdateLastModfTime(const DirectoryStorage::EntryIndex& dir_index, bool &unfinished_files_present);
 

@@ -151,6 +152,10 @@ public:
 
     void print() const;
 
+    // gets statistics about share files
+
+    void getStatistics(SharedDirStats& stats) const ;
+
 private:
     void recursPrint(int depth,DirectoryStorage::EntryIndex node) const;
     static bool nodeAccessError(const std::string& s);

@@ -163,6 +168,7 @@ private:
     // Deletes an existing entry in mNodes, and keeps record of the indices that get freed.
 
     void deleteNode(DirectoryStorage::EntryIndex);
+    void deleteFileNode(DirectoryStorage::EntryIndex);
 
     // Removes the given subdirectory from the parent node and all its pendign subdirs. Files are kept, and will go during the cleaning
     // phase. That allows to keep file information when moving them around.

@@ -184,5 +190,10 @@ private:
     // in very different ways.
     //
     std::map<RsFileHash,DirectoryStorage::EntryIndex> mDirHashes ;
+
+    // high level statistics on the full hierarchy. Should be kept up to date.
+
+    uint32_t mTotalFiles ;
+    uint64_t mTotalSize ;
 };

@@ -168,6 +168,12 @@ bool DirectoryStorage::updateHash(const EntryIndex& index,const RsFileHash& hash
     return mFileHierarchy->updateHash(index,hash);
 }
 
+void DirectoryStorage::getStatistics(SharedDirStats& stats)
+{
+    RS_STACK_MUTEX(mDirStorageMtx) ;
+    mFileHierarchy->getStatistics(stats);
+}
+
 bool DirectoryStorage::load(const std::string& local_file_name)
 {
     RS_STACK_MUTEX(mDirStorageMtx) ;

@@ -149,6 +149,10 @@ class DirectoryStorage
     bool getDirHashFromIndex(const EntryIndex& index,RsFileHash& hash) const ; // constant cost
     bool getIndexFromDirHash(const RsFileHash& hash,EntryIndex& index) const ; // log cost.
 
+    // gathers statistics from the internal directory structure
+
+    void getStatistics(SharedDirStats& stats) ;
+
     void print();
     void cleanup();
 

@@ -681,6 +681,26 @@ bool p3FileDatabase::findChildPointer( void *ref, int row, void *& result,
     return res;
 }
 
+// This function returns statistics about the entire directory
+
+int p3FileDatabase::getSharedDirStatistics(const RsPeerId& pid,SharedDirStats& stats)
+{
+    RS_STACK_MUTEX(mFLSMtx) ;
+
+    if(pid == mOwnId)
+    {
+        mLocalSharedDirs->getStatistics(stats) ;
+        return true ;
+    }
+    else
+    {
+        uint32_t fi = locked_getFriendIndex(pid);
+        mRemoteDirectories[fi]->getStatistics(stats) ;
+
+        return true ;
+    }
+}
+
 // This function converts a pointer into directory details, to be used by the AbstractItemModel for browsing the files.
 int p3FileDatabase::RequestDirDetails(void *ref, DirDetails& d, FileSearchFlags flags) const
 {

@@ -127,6 +127,10 @@ class p3FileDatabase: public p3Service, public p3Config, public ftSearch //, pub
     void updateShareFlags(const SharedDirInfo& info) ;
     bool convertSharedFilePath(const std::string& path,std::string& fullpath);
 
+    // computes/gathers statistics about shared directories
+
+    int getSharedDirStatistics(const RsPeerId& pid,SharedDirStats& stats);
+
     // interface for hash caching
 
     void setWatchPeriod(uint32_t seconds);

@@ -700,6 +700,10 @@ int ftServer::SearchBoolExp(RsRegularExpression::Expression * exp, std::list<Dir
 {
     return mFileDatabase->SearchBoolExp(exp,results,flags,peer_id) ;
 }
+int ftServer::getSharedDirStatistics(const RsPeerId& pid, SharedDirStats& stats)
+{
+    return mFileDatabase->getSharedDirStatistics(pid,stats) ;
+}
 
 /***************************************************************/
 /*************** Local Shared Dir Interface ********************/

@@ -182,6 +182,7 @@ public:
     virtual int SearchKeywords(std::list<std::string> keywords, std::list<DirDetails> &results,FileSearchFlags flags,const RsPeerId& peer_id);
     virtual int SearchBoolExp(RsRegularExpression::Expression * exp, std::list<DirDetails> &results,FileSearchFlags flags);
     virtual int SearchBoolExp(RsRegularExpression::Expression * exp, std::list<DirDetails> &results,FileSearchFlags flags,const RsPeerId& peer_id);
+    virtual int getSharedDirStatistics(const RsPeerId& pid, SharedDirStats& stats) ;
 
     /***
     * Utility Functions

@@ -106,6 +106,12 @@ struct SharedDirInfo
     std::list<RsNodeGroupId> parent_groups ;
 };
 
+struct SharedDirStats
+{
+    uint32_t total_number_of_files ;
+    uint64_t total_shared_size ;
+};
+
 class RsFiles
 {
 public:

@@ -198,6 +204,7 @@ class RsFiles
     virtual int SearchKeywords(std::list<std::string> keywords, std::list<DirDetails> &results,FileSearchFlags flags,const RsPeerId& peer_id) = 0;
     virtual int SearchBoolExp(RsRegularExpression::Expression * exp, std::list<DirDetails> &results,FileSearchFlags flags) = 0;
    virtual int SearchBoolExp(RsRegularExpression::Expression * exp, std::list<DirDetails> &results,FileSearchFlags flags,const RsPeerId& peer_id) = 0;
+    virtual int getSharedDirStatistics(const RsPeerId& pid, SharedDirStats& stats) =0;
 
     /***
     * Utility Functions.

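The new RsFiles::getSharedDirStatistics() virtual is what client code calls to obtain these totals. A hedged usage sketch follows; it assumes the RetroShare headers and the global rsFiles / rsPeers instances they declare, so it only compiles inside the RetroShare tree and is not part of this commit.

// Usage sketch (assumes the RetroShare tree; not part of this commit).
#include <retroshare/rsfiles.h>
#include <retroshare/rspeers.h>
#include <iostream>

void printOwnShareSummary()
{
    SharedDirStats stats ;
    rsFiles->getSharedDirStatistics(rsPeers->getOwnId(), stats) ;

    std::cout << "Sharing " << stats.total_number_of_files << " files, "
              << stats.total_shared_size << " bytes in total." << std::endl;
}
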
@@ -385,8 +385,27 @@ QVariant TreeStyle_RDM::displayRole(const DirDetails& details,int coln) const
     {
         switch(coln)
         {
-        case 0:
-            return (RemoteMode)?(QString::fromUtf8(rsPeers->getPeerName(details.id).c_str())):tr("My files");
+        case 0: {
+            SharedDirStats stats ;
+            QString res ;
+
+            if(RemoteMode)
+            {
+                res = QString::fromUtf8(rsPeers->getPeerName(details.id).c_str());
+                rsFiles->getSharedDirStatistics(details.id,stats) ;
+            }
+            else
+            {
+                res = tr("My files");
+                rsFiles->getSharedDirStatistics(rsPeers->getOwnId(),stats) ;
+            }
+
+            if(stats.total_number_of_files > 0)
+                res += " - " + QString::number(stats.total_number_of_files) + " files, " + misc::friendlyUnit(stats.total_shared_size) ;
+
+            return res ;
+        }
+
         case 1:
             return QString() ;
         case 2: if(!isNewerThanEpoque(details.min_age))

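The GUI hunk above appends "N files, <size>" to the root row using misc::friendlyUnit(). Below is a self-contained sketch of that formatting step; the friendlyUnit() stand-in is only an approximation of RetroShare's own helper, whose exact output format may differ.

// Standalone sketch of the label built in the GUI hunk above.
// friendlyUnit() here only approximates misc::friendlyUnit().
#include <cstdint>
#include <cstdio>
#include <string>
#include <iostream>

static std::string friendlyUnit(uint64_t bytes)
{
    const char *units[] = { "B", "KiB", "MiB", "GiB", "TiB" };
    double value = static_cast<double>(bytes);
    int i = 0;
    while(value >= 1024.0 && i < 4) { value /= 1024.0; ++i; }

    char buf[32];
    std::snprintf(buf, sizeof(buf), "%.2f %s", value, units[i]);
    return buf;
}

int main()
{
    uint32_t total_number_of_files = 1234;      // e.g. SharedDirStats::total_number_of_files
    uint64_t total_shared_size = 5368709120ULL; // e.g. SharedDirStats::total_shared_size (5 GiB)

    std::cout << "My files - " << total_number_of_files << " files, "
              << friendlyUnit(total_shared_size) << std::endl;
    return 0;
}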