Mirror of https://github.com/RetroShare/RetroShare.git
fixed updated ts in hash cache files
commit aeb0595301
parent 2aac88464d
@@ -155,7 +155,11 @@ class InternalFileHierarchyStorage
         }
 
         if(it->second.modtime != f.file_modtime || it->second.size != f.file_size)    // file is newer and/or has different size
+        {
             f.file_hash.clear();    // hash needs recomputing
+            f.file_modtime = it->second.modtime;
+            f.file_size = it->second.size;
+        }
         else
             new_files.erase(f.file_name) ;
 
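The hunk above changes the stale-file test so that, besides clearing the hash, the stored modification time and size are refreshed in the same pass. A minimal sketch of that rule follows; EntrySketch, OnDiskSketch and refreshEntry are simplified stand-ins for illustration, not RetroShare's actual FileEntry types.

    #include <ctime>
    #include <cstdint>
    #include <iostream>
    #include <string>

    struct EntrySketch            // hypothetical stand-in for the stored file record
    {
        std::string file_hash;    // empty means "needs (re)hashing"
        uint64_t    file_size;
        time_t      file_modtime;
    };

    struct OnDiskSketch { uint64_t size; time_t modtime; };

    void refreshEntry(EntrySketch& f, const OnDiskSketch& d)
    {
        if(d.modtime != f.file_modtime || d.size != f.file_size)   // file is newer and/or has different size
        {
            f.file_hash.clear();          // hash needs recomputing
            f.file_modtime = d.modtime;   // refresh stored metadata right away,
            f.file_size    = d.size;      // so the entry is only flagged once
        }
    }

    int main()
    {
        EntrySketch f = { "deadbeef", 100, 1000 };
        refreshEntry(f, OnDiskSketch{ 120, 2000 });
        std::cout << (f.file_hash.empty() ? "re-hash needed" : "up to date") << std::endl;
        return 0;
    }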
@@ -292,7 +296,7 @@ private:
         for(int i=0;i<d.subfiles.size();++i)
         {
             FileEntry& f(*static_cast<FileEntry*>(mNodes[d.subfiles[i]]));
-            std::cerr << indent << " hash:" << f.file_hash << " ts:" << std::fill(8) << (uint64_t)f.file_modtime << " " << f.file_size << " " << f.file_name << std::endl;
+            std::cerr << indent << " hash:" << f.file_hash << " ts:" << (uint64_t)f.file_modtime << " " << f.file_size << " " << f.file_name << std::endl;
         }
     }
 
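The removed debug line passed std::fill(8) into the stream; std::fill is the range algorithm from <algorithm>, not an output manipulator, so the commit simply drops it. If fixed-width timestamps were wanted in that trace, std::setw / std::setfill from <iomanip> would be the usual way. The snippet below is a hypothetical variant, not part of the commit:

    #include <cstdint>
    #include <iomanip>
    #include <iostream>

    int main()
    {
        uint64_t file_modtime = 1450000000;   // example value only
        // pad the timestamp to a fixed width instead of the invalid std::fill(8)
        std::cerr << "ts:" << std::setw(12) << std::setfill(' ') << file_modtime << std::endl;
        return 0;
    }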
@@ -5,6 +5,8 @@
 
 #define DEBUG_LOCAL_DIR_UPDATER 1
 
+static const uint32_t DELAY_BETWEEN_DIRECTORY_UPDATES = 10 ; // 10 seconds for testing. Should be much more!!
+
 void RemoteDirectoryUpdater::tick()
 {
     // use the stored iterator
@@ -13,11 +15,23 @@ void RemoteDirectoryUpdater::tick()
 LocalDirectoryUpdater::LocalDirectoryUpdater(HashStorage *hc,LocalDirectoryStorage *lds)
     : mHashCache(hc),mSharedDirectories(lds)
 {
+    mLastSweepTime = 0;
 }
 
-void LocalDirectoryUpdater::tick()
+void LocalDirectoryUpdater::data_tick()
 {
-    std::cerr << "LocalDirectoryUpdater::tick()" << std::endl;
+    time_t now = time(NULL) ;
 
+    if(now > DELAY_BETWEEN_DIRECTORY_UPDATES + mLastSweepTime)
+    {
+        sweepSharedDirectories() ;
+        mLastSweepTime = now;
+    }
+}
+
+void LocalDirectoryUpdater::sweepSharedDirectories()
+{
+    std::cerr << "LocalDirectoryUpdater::sweep()" << std::endl;
+
     // recursive update algorithm works that way:
     // - the external loop starts on the shared directory list and goes through sub-directories
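With the move to data_tick(), the updater only performs a sweep once the configured delay has elapsed since the previous one. Below is a minimal sketch of that throttling pattern; the surrounding thread loop (RsTickingThread in RetroShare) is assumed to call tick() repeatedly, and SweeperSketch is a stand-in class, not the real LocalDirectoryUpdater.

    #include <ctime>
    #include <cstdint>
    #include <iostream>

    static const uint32_t DELAY_BETWEEN_DIRECTORY_UPDATES = 10 ; // seconds, matching the test value above

    class SweeperSketch
    {
    public:
        SweeperSketch() : mLastSweepTime(0) {}

        // called over and over by the owning thread loop
        void tick()
        {
            time_t now = time(NULL);

            if(now > DELAY_BETWEEN_DIRECTORY_UPDATES + mLastSweepTime)
            {
                sweepSharedDirectories();
                mLastSweepTime = now;      // re-arm the delay
            }
        }

    private:
        void sweepSharedDirectories() { std::cerr << "sweeping shared directories" << std::endl; }
        time_t mLastSweepTime;
    };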
@@ -11,30 +11,26 @@ class DirectoryUpdater
     public:
         DirectoryUpdater() {}
         virtual ~DirectoryUpdater(){}
-        // Does some updating job. Crawls the existing directories and checks whether it has been updated
-        // recently enough. If not, calls the directory source.
-        //
-        virtual void tick() =0;
-
-        //
 };
 
-class LocalDirectoryUpdater: public DirectoryUpdater, public HashStorageClient
+class LocalDirectoryUpdater: public DirectoryUpdater, public HashStorageClient, public RsTickingThread
 {
     public:
         LocalDirectoryUpdater(HashStorage *hash_cache,LocalDirectoryStorage *lds) ;
         virtual ~LocalDirectoryUpdater() {}
 
-        virtual void tick() ;
+        virtual void data_tick() ;
 
     protected:
         virtual void hash_callback(uint32_t client_param, const std::string& name, const RsFileHash& hash, uint64_t size);
         void recursUpdateSharedDir(const std::string& cumulated_path,DirectoryStorage::EntryIndex indx);
+        void sweepSharedDirectories();
 
     private:
         HashStorage *mHashCache ;
         LocalDirectoryStorage *mSharedDirectories ;
 
+        time_t mLastSweepTime;
 };
 
 class RemoteDirectoryUpdater: public DirectoryUpdater
@@ -40,6 +40,16 @@ void HashStorage::data_tick()
             mRunning = false ;
             std::cerr << "done." << std::endl;
         }
+
+        // store the result
+
+        HashStorageInfo& info(mFiles[job.full_path]);
+
+        info.filename = job.full_path ;
+        info.size = size ;
+        info.modf_stamp = job.ts ;
+        info.time_stamp = time(NULL);
+        info.hash = hash;
     }
     // call the client
 
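This hunk is where the commit title applies: when a hashing job finishes, the file's modification time carried in job.ts is written into the cache entry's modf_stamp, while time_stamp records when the entry itself was written. A simplified sketch of that distinction; JobSketch, InfoSketch and storeResult are stand-ins, not the real HashStorage types.

    #include <ctime>
    #include <cstdint>
    #include <map>
    #include <string>

    struct JobSketch  { std::string full_path; time_t ts; };          // ts = file modification time
    struct InfoSketch { std::string filename; uint64_t size;
                        uint64_t modf_stamp; time_t time_stamp; std::string hash; };

    void storeResult(std::map<std::string, InfoSketch>& files,
                     const JobSketch& job, uint64_t size, const std::string& hash)
    {
        InfoSketch& info(files[job.full_path]);

        info.filename   = job.full_path;
        info.size       = size;
        info.modf_stamp = job.ts;        // the file's own modification time, taken from the job
        info.time_stamp = time(NULL);    // when this cache entry was (re)written
        info.hash       = hash;
    }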
@@ -51,6 +61,9 @@ bool HashStorage::requestHash(const std::string& full_path,uint64_t size,time_t
 {
     // check if the hash is up to date w.r.t. cache.
 
+#ifdef HASHSTORAGE_DEBUG
+    std::cerr << "HASH Requested for file " << full_path << ": ";
+#endif
     RS_STACK_MUTEX(mHashMtx) ;
 
     time_t now = time(NULL) ;
@@ -59,11 +72,15 @@ bool HashStorage::requestHash(const std::string& full_path,uint64_t size,time_t
     if(it != mFiles.end() && (uint64_t)mod_time == it->second.modf_stamp && size == it->second.size)
     {
         it->second.time_stamp = now ;
-#ifdef HASHCACHE_DEBUG
+        known_hash = it->second.hash;
+#ifdef HASHSTORAGE_DEBUG
         std::cerr << "Found in cache." << std::endl ;
 #endif
         return true ;
     }
+#ifdef HASHSTORAGE_DEBUG
+    std::cerr << "Not in cache. Scheduling for re-hash." << std::endl ;
+#endif
 
     // we need to schedule a re-hashing
 
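On the lookup side, a cache hit now also hands the stored hash back through known_hash, and a hit requires both the recorded modification time and the size to match. A sketch of that test follows; CacheEntrySketch and lookupHash are simplified stand-ins for the real HashStorage internals.

    #include <ctime>
    #include <cstdint>
    #include <map>
    #include <string>

    struct CacheEntrySketch { uint64_t modf_stamp; uint64_t size; time_t time_stamp; std::string hash; };

    // returns true when the cached hash is still valid for (full_path, size, mod_time)
    bool lookupHash(std::map<std::string, CacheEntrySketch>& files,
                    const std::string& full_path, uint64_t size, time_t mod_time,
                    std::string& known_hash)
    {
        time_t now = time(NULL);
        std::map<std::string, CacheEntrySketch>::iterator it = files.find(full_path);

        if(it != files.end() && (uint64_t)mod_time == it->second.modf_stamp && size == it->second.size)
        {
            it->second.time_stamp = now;      // refresh the entry's last-use time
            known_hash = it->second.hash;     // give the cached hash back to the caller
            return true;                      // up to date: no re-hash needed
        }
        return false;                         // unknown or stale: caller schedules a re-hash
    }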
@@ -75,6 +92,7 @@ bool HashStorage::requestHash(const std::string& full_path,uint64_t size,time_t
         job.client = c ;
         job.client_param = client_param ;
         job.full_path = full_path ;
+        job.ts = mod_time ;
 
         mFilesToHash[full_path] = job;
 
@@ -68,6 +68,7 @@ private:
         std::string full_path;
         HashStorageClient *client;
         uint32_t client_param ;
+        time_t ts;
     };
 
     // current work
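The new time_t ts field is what ties the two sides together: requestHash() records the caller-supplied modification time in the job, and the hashing thread later copies it into modf_stamp when the result is stored. A short, hypothetical illustration of that flow; FileHashJobSketch and makeJob are stand-in names.

    #include <ctime>
    #include <cstdint>
    #include <string>

    struct FileHashJobSketch        // stand-in for the job record above
    {
        std::string full_path;
        uint32_t    client_param;
        time_t      ts;             // file modification time, set when the job is queued
    };

    // requestHash() side: remember the caller-supplied mod_time in the job ...
    FileHashJobSketch makeJob(const std::string& path, uint32_t param, time_t mod_time)
    {
        FileHashJobSketch job;
        job.full_path    = path;
        job.client_param = param;
        job.ts           = mod_time;
        return job;
    }
    // ... the hashing thread later copies job.ts into the stored entry's modf_stamp.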