Added a check in CacheStrapper to see if a duplicate cache has been requested.

In this case, don't cancel the old one - just keep going!

This should fix the issues associated with cache restarts and frequent peer disconnects.



git-svn-id: http://svn.code.sf.net/p/retroshare/code/trunk@4808 b45a01b8-16f6-495d-af2f-9b41ad6348cc
This commit is contained in:
drbob 2012-01-17 22:01:04 +00:00
parent bd9cd22387
commit df741f7c64

View File

@ -1069,6 +1069,28 @@ bool CacheTransfer::RequestCache(CacheData &data, CacheStore *cbStore)
((dit->second).cid.type == data.cid.type) &&
((dit->second).cid.subid == data.cid.subid))
{
sit = cbStores.find(dit->second.hash);
/* if identical to previous request, then we don't want to cancel
* a partially transferred cache file
*
* We wouldn't expect to have to request it again, however the feedback loop
* from ftController is not completed (it should callback and tell us if it cancels
* the cache file. XXX TO FIX.
*/
if ((data.hash == dit->second.hash) &&
(data.path == dit->second.path) &&
(data.size == dit->second.size) &&
(cbStore == cbStore))
{
std::cerr << "Re-request duplicate cache... let it continue";
std::cerr << std::endl;
/* request data */
RequestCacheFile(data.pid, data.path, data.hash, data.size);
return true;
}
/* cancel old transfer */
CancelCacheFile(dit->second.pid, dit->second.path,
		dit->second.hash, dit->second.size);