Mirror of https://github.com/haveno-dex/haveno.git
Fix missing sequence nr and signature updates at refresh offers

commit bb6334f6a0 (parent ca20de64d9)
24 changed files with 720 additions and 506 deletions
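For orientation before the diff: the refresh path this commit fixes sends only the minimum needed to extend an offer's stored TTL rather than the whole offer, and each refresh must carry a freshly incremented sequence number and a matching signature or peers drop it. A minimal sketch of that shape, using illustrative names that are assumptions rather than the project's actual types:

```java
// Illustrative sketch only - type and field names are assumptions, not the real Bitsquare/Haveno API.
import java.util.Arrays;

public final class RefreshTtlSketch {
    private final byte[] payloadHash;    // identifies the stored offer entry to refresh
    private final int sequenceNumber;    // must be higher than the last one peers have seen
    private final byte[] signature;      // owner's signature covering hash and sequence number

    public RefreshTtlSketch(byte[] payloadHash, int sequenceNumber, byte[] signature) {
        this.payloadHash = Arrays.copyOf(payloadHash, payloadHash.length);
        this.sequenceNumber = sequenceNumber;
        this.signature = Arrays.copyOf(signature, signature.length);
    }

    // A follow-up refresh reuses the hash but must carry a new sequence number and a new signature.
    public RefreshTtlSketch next(byte[] newSignature) {
        return new RefreshTtlSketch(payloadHash, sequenceNumber + 1, newSignature);
    }
}
```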
@@ -80,7 +80,7 @@ public class Storage<T extends Serializable> {
     public T initAndGetPersisted(String fileName) {
         this.fileName = fileName;
         storageFile = new File(dir, fileName);
-        fileManager = new FileManager<>(dir, storageFile, 600);
+        fileManager = new FileManager<>(dir, storageFile, 300);

         return getPersisted();
     }

@@ -17,15 +17,14 @@

 package io.bitsquare.arbitration;

-import com.google.common.util.concurrent.MoreExecutors;
 import com.google.inject.Inject;
 import com.google.inject.name.Named;
 import io.bitsquare.app.ProgramArguments;
+import io.bitsquare.common.Timer;
 import io.bitsquare.common.UserThread;
 import io.bitsquare.common.crypto.KeyRing;
 import io.bitsquare.common.handlers.ErrorMessageHandler;
 import io.bitsquare.common.handlers.ResultHandler;
-import io.bitsquare.common.util.Utilities;
 import io.bitsquare.p2p.BootstrapListener;
 import io.bitsquare.p2p.NodeAddress;
 import io.bitsquare.p2p.P2PService;
@@ -47,7 +46,6 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
-import java.util.concurrent.ScheduledThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import java.util.function.Function;
 import java.util.stream.Collectors;
@@ -57,10 +55,14 @@ import static org.bitcoinj.core.Utils.HEX;
 public class ArbitratorManager {
     private static final Logger log = LoggerFactory.getLogger(ArbitratorManager.class);

-    private final KeyRing keyRing;
-    private final ArbitratorService arbitratorService;
-    private final User user;
-    private final ObservableMap<NodeAddress, Arbitrator> arbitratorsObservableMap = FXCollections.observableHashMap();
+    ///////////////////////////////////////////////////////////////////////////////////////////
+    // Static
+    ///////////////////////////////////////////////////////////////////////////////////////////
+    private static final long REPUBLISH_MILLIS = Arbitrator.TTL / 2;
+    private static final long RETRY_REPUBLISH_SEC = 5;
+
+    private static final String publicKeyForTesting = "027a381b5333a56e1cc3d90d3a7d07f26509adf7029ed06fc997c656621f8da1ee";
+
     // Keys for invited arbitrators in bootstrapping phase (before registration is open to anyone and security payment is implemented)
     // For testing purpose here is a private key so anyone can setup an arbitrator for now.
@@ -87,10 +89,24 @@ public class ArbitratorManager {
             "0274f772a98d23e7a0251ab30d7121897b5aebd11a2f1e45ab654aa57503173245",
             "036d8a1dfcb406886037d2381da006358722823e1940acc2598c844bbc0fd1026f"
     ));
-    private static final String publicKeyForTesting = "027a381b5333a56e1cc3d90d3a7d07f26509adf7029ed06fc997c656621f8da1ee";
+
+    ///////////////////////////////////////////////////////////////////////////////////////////
+    // Instance fields
+    ///////////////////////////////////////////////////////////////////////////////////////////
+
+    private final KeyRing keyRing;
+    private final ArbitratorService arbitratorService;
+    private final User user;
+    private final ObservableMap<NodeAddress, Arbitrator> arbitratorsObservableMap = FXCollections.observableHashMap();
     private final boolean isDevTest;
     private BootstrapListener bootstrapListener;
-    private ScheduledThreadPoolExecutor republishArbitratorExecutor;
+    private Timer republishArbitratorTimer, retryRepublishArbitratorTimer;
+
+
+    ///////////////////////////////////////////////////////////////////////////////////////////
+    // Constructor
+    ///////////////////////////////////////////////////////////////////////////////////////////
+
     @Inject
     public ArbitratorManager(@Named(ProgramArguments.DEV_TEST) boolean isDevTest, KeyRing keyRing, ArbitratorService arbitratorService, User user) {
@@ -113,19 +129,26 @@ public class ArbitratorManager {
     }

     public void shutDown() {
-        if (republishArbitratorExecutor != null)
-            MoreExecutors.shutdownAndAwaitTermination(republishArbitratorExecutor, 500, TimeUnit.MILLISECONDS);
+        stopRepublishArbitratorTimer();
+        stopRetryRepublishArbitratorTimer();
+        if (bootstrapListener != null)
+            arbitratorService.getP2PService().removeP2PServiceListener(bootstrapListener);
     }


+    ///////////////////////////////////////////////////////////////////////////////////////////
+    // API
+    ///////////////////////////////////////////////////////////////////////////////////////////
+
     public void onAllServicesInitialized() {
         if (user.getRegisteredArbitrator() != null) {
             P2PService p2PService = arbitratorService.getP2PService();
             if (!p2PService.isBootstrapped()) {
                 bootstrapListener = new BootstrapListener() {
                     @Override
                     public void onBootstrapComplete() {
-                        republishArbitrator();
+                        ArbitratorManager.this.onBootstrapComplete();
                     }
                 };
                 p2PService.addP2PServiceListener(bootstrapListener);
@@ -133,29 +156,13 @@
             } else {
                 republishArbitrator();
             }

-            // re-publish periodically
-            republishArbitratorExecutor = Utilities.getScheduledThreadPoolExecutor("republishArbitrator", 1, 5, 5);
-            long delay = Arbitrator.TTL / 2;
-            republishArbitratorExecutor.scheduleAtFixedRate(this::republishArbitrator, delay, delay, TimeUnit.MILLISECONDS);
         }
+
+        republishArbitratorTimer = UserThread.runPeriodically(this::republishArbitrator, REPUBLISH_MILLIS, TimeUnit.MILLISECONDS);

         applyArbitrators();
     }

-    private void republishArbitrator() {
-        if (bootstrapListener != null)
-            arbitratorService.getP2PService().removeP2PServiceListener(bootstrapListener);
-
-        Arbitrator registeredArbitrator = user.getRegisteredArbitrator();
-        if (registeredArbitrator != null) {
-            addArbitrator(registeredArbitrator,
-                    this::applyArbitrators,
-                    log::error
-            );
-        }
-    }
-
     public void applyArbitrators() {
         Map<NodeAddress, Arbitrator> map = arbitratorService.getArbitrators();
         log.trace("Arbitrators . size=" + map.values().size());
@@ -222,18 +229,6 @@
         return key.signMessage(keyToSignAsHex);
     }

-    private boolean verifySignature(PublicKey storageSignaturePubKey, byte[] registrationPubKey, String signature) {
-        String keyToSignAsHex = Utils.HEX.encode(storageSignaturePubKey.getEncoded());
-        try {
-            ECKey key = ECKey.fromPublicOnly(registrationPubKey);
-            key.verifyMessage(keyToSignAsHex, signature);
-            return true;
-        } catch (SignatureException e) {
-            log.warn("verifySignature failed");
-            return false;
-        }
-    }
-
     @Nullable
     public ECKey getRegistrationKey(String privKeyBigIntString) {
         try {
@@ -246,4 +241,61 @@
     public boolean isPublicKeyInList(String pubKeyAsHex) {
         return isDevTest && pubKeyAsHex.equals(publicKeyForTesting) || publicKeys.contains(pubKeyAsHex);
     }
+
+
+    ///////////////////////////////////////////////////////////////////////////////////////////
+    // Private
+    ///////////////////////////////////////////////////////////////////////////////////////////
+
+    private void onBootstrapComplete() {
+        if (bootstrapListener != null) {
+            arbitratorService.getP2PService().removeP2PServiceListener(bootstrapListener);
+            bootstrapListener = null;
+        }
+
+        republishArbitrator();
+    }
+
+    private void republishArbitrator() {
+        Arbitrator registeredArbitrator = user.getRegisteredArbitrator();
+        if (registeredArbitrator != null) {
+            addArbitrator(registeredArbitrator,
+                    this::applyArbitrators,
+                    errorMessage -> {
+                        if (retryRepublishArbitratorTimer == null)
+                            retryRepublishArbitratorTimer = UserThread.runPeriodically(() -> {
+                                stopRetryRepublishArbitratorTimer();
+                                republishArbitrator();
+                            }, RETRY_REPUBLISH_SEC);
+                    }
+            );
+        }
+    }
+
+    private boolean verifySignature(PublicKey storageSignaturePubKey, byte[] registrationPubKey, String signature) {
+        String keyToSignAsHex = Utils.HEX.encode(storageSignaturePubKey.getEncoded());
+        try {
+            ECKey key = ECKey.fromPublicOnly(registrationPubKey);
+            key.verifyMessage(keyToSignAsHex, signature);
+            return true;
+        } catch (SignatureException e) {
+            log.warn("verifySignature failed");
+            return false;
+        }
+    }
+
+
+    private void stopRetryRepublishArbitratorTimer() {
+        if (retryRepublishArbitratorTimer != null) {
+            retryRepublishArbitratorTimer.stop();
+            retryRepublishArbitratorTimer = null;
+        }
+    }
+
+    private void stopRepublishArbitratorTimer() {
+        if (republishArbitratorTimer != null) {
+            republishArbitratorTimer.stop();
+            republishArbitratorTimer = null;
+        }
+    }
 }

@@ -54,8 +54,8 @@ public final class Offer implements StoragePayload, RequiresOwnerIsOnlinePayload
     @JsonExclude
     private static final Logger log = LoggerFactory.getLogger(Offer.class);

-    public static final long TTL = TimeUnit.SECONDS.toMillis(60);
-    // public static final long TTL = TimeUnit.SECONDS.toMillis(10); //TODO
+    // public static final long TTL = TimeUnit.SECONDS.toMillis(60);
+    public static final long TTL = TimeUnit.SECONDS.toMillis(10); //TODO

     public final static String TAC_OFFERER = "When placing that offer I accept that anyone who fulfills my conditions can " +
             "take that offer.";

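With Offer.TTL temporarily set to 10 seconds above, the timing constants introduced for OpenOfferManager in the next hunks work out as below; a quick arithmetic check using only values taken from this diff:

```java
import java.util.concurrent.TimeUnit;

public class OfferTimingCheck {
    public static void main(String[] args) {
        long ttl = TimeUnit.SECONDS.toMillis(10);        // Offer.TTL as set in this commit (temporary TODO value)
        long refreshInterval = (long) (ttl * 0.5);       // REFRESH_INTERVAL_MILLIS -> 5000 ms
        long republishInterval = 10 * ttl;               // REPUBLISH_INTERVAL_MILLIS -> 100000 ms

        // Refresh fires well before the stored entry expires; a full republish is 20x rarer than a refresh.
        System.out.println("refresh every " + refreshInterval + " ms, republish every " + republishInterval + " ms");
    }
}
```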
@@ -60,7 +60,10 @@ import static io.bitsquare.util.Validator.nonEmptyStringOf;
 public class OpenOfferManager implements PeerManager.Listener, DecryptedDirectMessageListener {
     private static final Logger log = LoggerFactory.getLogger(OpenOfferManager.class);

-    private static final long RETRY_DELAY_AFTER_ALL_CON_LOST_SEC = 5;
+    private static final long RETRY_REPUBLISH_DELAY_SEC = 5;
+    private static final long REPUBLISH_AGAIN_AT_STARTUP_DELAY_SEC = 10;
+    private static final long REPUBLISH_INTERVAL_MILLIS = 10 * Offer.TTL;
+    private static final long REFRESH_INTERVAL_MILLIS = (long) (Offer.TTL * 0.5);

     private final KeyRing keyRing;
     private final User user;
@@ -73,7 +76,7 @@ public class OpenOfferManager implements PeerManager.Listener, DecryptedDirectMe
     private final TradableList<OpenOffer> openOffers;
     private final Storage<TradableList<OpenOffer>> openOffersStorage;
     private boolean stopped;
-    private Timer periodicRepublishOffersTimer, periodicRefreshOffersTimer, republishOffersTimer;
+    private Timer periodicRepublishOffersTimer, periodicRefreshOffersTimer, retryRepublishOffersTimer;
     private BootstrapListener bootstrapListener;


@@ -131,6 +134,7 @@ public class OpenOfferManager implements PeerManager.Listener, DecryptedDirectMe

         stopPeriodicRefreshOffersTimer();
         stopPeriodicRepublishOffersTimer();
+        stopRetryRepublishOffersTimer();

         log.info("remove all open offers at shutDown");
         // we remove own offers from offerbook when we go offline
@@ -167,10 +171,21 @@ public class OpenOfferManager implements PeerManager.Listener, DecryptedDirectMe

         // Republish means we send the complete offer object
         republishOffers();
-        startRepublishOffersThread();
+        startPeriodicRepublishOffersTimer();

         // Refresh is started once we get a success from republish

+        // We republish after a bit as it might be that our connected node still has the offer in the data map
+        // but other peers have it already removed because of expired TTL.
+        // Those other not directly connected peers would not get the broadcast of the new offer, as the first
+        // connected peer (seed node) does not broadcast if it has the data in the map.
+        // To update quickly to the whole network we repeat the republishOffers call after a few seconds when we
+        // are better connected to the network. There is no guarantee that all peers will receive it but we have
+        // also our periodic timer, so after that longer interval the offer should be available to all peers.
+        if (retryRepublishOffersTimer == null)
+            retryRepublishOffersTimer = UserThread.runAfter(OpenOfferManager.this::republishOffers,
+                    REPUBLISH_AGAIN_AT_STARTUP_DELAY_SEC);
+
         p2PService.getPeerManager().addListener(this);
     }

@@ -184,90 +199,25 @@ public class OpenOfferManager implements PeerManager.Listener, DecryptedDirectMe
         stopped = true;
         stopPeriodicRefreshOffersTimer();
         stopPeriodicRepublishOffersTimer();
+        stopRetryRepublishOffersTimer();
+
+        restart();
     }

     @Override
     public void onNewConnectionAfterAllConnectionsLost() {
+        stopped = false;
         restart();
     }

     @Override
     public void onAwakeFromStandby() {
+        stopped = false;
         if (!p2PService.getNetworkNode().getAllConnections().isEmpty())
             restart();
     }


-    ///////////////////////////////////////////////////////////////////////////////////////////
-    // RepublishOffers, refreshOffers
-    ///////////////////////////////////////////////////////////////////////////////////////////
-
-    private void startRepublishOffersThread() {
-        stopped = false;
-        if (periodicRepublishOffersTimer == null)
-            periodicRepublishOffersTimer = UserThread.runPeriodically(OpenOfferManager.this::republishOffers,
-                    Offer.TTL * 10,
-                    TimeUnit.MILLISECONDS);
-    }
-
-    private void republishOffers() {
-        Log.traceCall("Number of offer for republish: " + openOffers.size());
-        if (!stopped) {
-            stopPeriodicRefreshOffersTimer();
-
-            for (OpenOffer openOffer : openOffers) {
-                offerBookService.republishOffers(openOffer.getOffer(),
-                        () -> {
-                            log.debug("Successful added offer to P2P network");
-                            // Refresh means we send only the dat needed to refresh the TTL (hash, signature and sequence nr.)
-                            startRefreshOffersThread();
-                        },
-                        errorMessage -> {
-                            //TODO handle with retry
-                            log.error("Add offer to P2P network failed. " + errorMessage);
-                            stopRepublishOffersTimer();
-                            republishOffersTimer = UserThread.runAfter(OpenOfferManager.this::republishOffers,
-                                    RETRY_DELAY_AFTER_ALL_CON_LOST_SEC);
-                        });
-                openOffer.setStorage(openOffersStorage);
-            }
-        } else {
-            log.warn("We have stopped already. We ignore that republishOffers call.");
-        }
-    }
-
-    private void startRefreshOffersThread() {
-        stopped = false;
-        // refresh sufficiently before offer would expire
-        if (periodicRefreshOffersTimer == null)
-            periodicRefreshOffersTimer = UserThread.runPeriodically(OpenOfferManager.this::refreshOffers,
-                    (long) (Offer.TTL * 0.5),
-                    TimeUnit.MILLISECONDS);
-    }
-
-    private void refreshOffers() {
-        if (!stopped) {
-            Log.traceCall("Number of offer for refresh: " + openOffers.size());
-            for (OpenOffer openOffer : openOffers) {
-                offerBookService.refreshOffer(openOffer.getOffer(),
-                        () -> log.debug("Successful refreshed TTL for offer"),
-                        errorMessage -> log.error("Refresh TTL for offer failed. " + errorMessage));
-            }
-        } else {
-            log.warn("We have stopped already. We ignore that refreshOffers call.");
-        }
-    }
-
-    private void restart() {
-        startRepublishOffersThread();
-        startRefreshOffersThread();
-        if (republishOffersTimer == null) {
-            stopped = false;
-            republishOffersTimer = UserThread.runAfter(OpenOfferManager.this::republishOffers, RETRY_DELAY_AFTER_ALL_CON_LOST_SEC);
-        }
-    }
-
     ///////////////////////////////////////////////////////////////////////////////////////////
     // API
     ///////////////////////////////////////////////////////////////////////////////////////////
@@ -282,6 +232,12 @@ public class OpenOfferManager implements PeerManager.Listener, DecryptedDirectMe
                     openOffers.add(openOffer);
                     openOffersStorage.queueUpForSave();
                     resultHandler.handleResult(transaction);
+                    if (!stopped) {
+                        startPeriodicRepublishOffersTimer();
+                        startPeriodicRefreshOffersTimer();
+                    } else {
+                        log.warn("We have stopped already. We ignore that placeOfferProtocol.placeOffer.onResult call.");
+                    }
                 }
         );
         placeOfferProtocol.placeOffer();
@@ -395,6 +351,97 @@ public class OpenOfferManager implements PeerManager.Listener, DecryptedDirectMe
     }


+    ///////////////////////////////////////////////////////////////////////////////////////////
+    // RepublishOffers, refreshOffers
+    ///////////////////////////////////////////////////////////////////////////////////////////
+
+    private void republishOffers() {
+        Log.traceCall("Number of offer for republish: " + openOffers.size());
+        if (!stopped) {
+            stopPeriodicRefreshOffersTimer();
+
+            openOffers.stream().forEach(openOffer -> {
+                offerBookService.republishOffers(openOffer.getOffer(),
+                        () -> {
+                            if (!stopped) {
+                                log.debug("Successful added offer to P2P network");
+                                // Refresh means we send only the dat needed to refresh the TTL (hash, signature and sequence nr.)
+                                if (periodicRefreshOffersTimer == null)
+                                    startPeriodicRefreshOffersTimer();
+                            } else {
+                                log.warn("We have stopped already. We ignore that offerBookService.republishOffers.onSuccess call.");
+                            }
+                        },
+                        errorMessage -> {
+                            if (!stopped) {
+                                log.error("Add offer to P2P network failed. " + errorMessage);
+                                stopRetryRepublishOffersTimer();
+                                retryRepublishOffersTimer = UserThread.runAfter(OpenOfferManager.this::republishOffers,
+                                        RETRY_REPUBLISH_DELAY_SEC);
+                            } else {
+                                log.warn("We have stopped already. We ignore that offerBookService.republishOffers.onFault call.");
+                            }
+                        });
+                openOffer.setStorage(openOffersStorage);
+            });
+        } else {
+            log.warn("We have stopped already. We ignore that republishOffers call.");
+        }
+    }
+
+    private void startPeriodicRepublishOffersTimer() {
+        Log.traceCall();
+        stopped = false;
+        if (periodicRepublishOffersTimer == null)
+            periodicRepublishOffersTimer = UserThread.runPeriodically(() -> {
+                        if (!stopped) {
+                            republishOffers();
+                        } else {
+                            log.warn("We have stopped already. We ignore that periodicRepublishOffersTimer.run call.");
+                        }
+                    },
+                    REPUBLISH_INTERVAL_MILLIS,
+                    TimeUnit.MILLISECONDS);
+        else
+            log.warn("periodicRepublishOffersTimer already started");
+    }
+
+    private void startPeriodicRefreshOffersTimer() {
+        Log.traceCall();
+        stopped = false;
+        // refresh sufficiently before offer would expire
+        if (periodicRefreshOffersTimer == null)
+            periodicRefreshOffersTimer = UserThread.runPeriodically(() -> {
+                        if (!stopped) {
+                            Log.traceCall("Number of offer for refresh: " + openOffers.size());
+                            openOffers.stream().forEach(openOffer -> {
+                                offerBookService.refreshOffer(openOffer.getOffer(),
+                                        () -> log.debug("Successful refreshed TTL for offer"),
+                                        errorMessage -> log.error("Refresh TTL for offer failed. " + errorMessage));
+                            });
+                        } else {
+                            log.warn("We have stopped already. We ignore that periodicRefreshOffersTimer.run call.");
+                        }
+                    },
+                    REFRESH_INTERVAL_MILLIS,
+                    TimeUnit.MILLISECONDS);
+        else
+            log.warn("periodicRefreshOffersTimer already started");
+    }
+
+    private void restart() {
+        Log.traceCall();
+        if (retryRepublishOffersTimer == null)
+            retryRepublishOffersTimer = UserThread.runAfter(() -> {
+                stopped = false;
+                stopRetryRepublishOffersTimer();
+                republishOffers();
+            }, RETRY_REPUBLISH_DELAY_SEC);
+
+        startPeriodicRepublishOffersTimer();
+    }
+

     ///////////////////////////////////////////////////////////////////////////////////////////
     // Private
     ///////////////////////////////////////////////////////////////////////////////////////////
@@ -413,10 +460,10 @@ public class OpenOfferManager implements PeerManager.Listener, DecryptedDirectMe
         }
     }

-    private void stopRepublishOffersTimer() {
-        if (republishOffersTimer != null) {
-            republishOffersTimer.stop();
-            republishOffersTimer = null;
+    private void stopRetryRepublishOffersTimer() {
+        if (retryRepublishOffersTimer != null) {
+            retryRepublishOffersTimer.stop();
+            retryRepublishOffersTimer = null;
         }
     }
 }

@@ -696,6 +696,8 @@ public class CreateOfferView extends ActivatableViewAndModel<AnchorPane, CreateO
         createOfferButton.setVisible(false);
         createOfferButton.setOnAction(e -> onPlaceOffer());
         createOfferButton.setMinHeight(40);
+        createOfferButton.setPadding(new Insets(0, 20, 0, 20));
+
         placeOfferSpinner = placeOfferTuple.second;
         placeOfferSpinner.setPrefSize(18, 18);
         placeOfferSpinnerInfoLabel = placeOfferTuple.third;

@@ -579,6 +579,8 @@ public class TakeOfferView extends ActivatableViewAndModel<AnchorPane, TakeOffer
         takeOfferButton.setVisible(false);
         takeOfferButton.setOnAction(e -> onTakeOffer());
         takeOfferButton.setMinHeight(40);
+        takeOfferButton.setPadding(new Insets(0, 20, 0, 20));
+
         takeOfferSpinner = takeOfferTuple.second;
         takeOfferSpinner.setPrefSize(18, 18);
         takeOfferSpinnerInfoLabel = takeOfferTuple.third;

@@ -124,14 +124,14 @@ public class P2PService implements SetupListener, MessageListener, ConnectionLis
         networkNode.addConnectionListener(this);
         networkNode.addMessageListener(this);

-        broadcaster = new Broadcaster(networkNode);
+        Set<NodeAddress> seedNodeAddresses = seedNodesRepository.getSeedNodeAddresses(useLocalhost, networkId);
+        peerManager = new PeerManager(networkNode, seedNodeAddresses, storageDir, clock);
+
+        broadcaster = new Broadcaster(networkNode, peerManager);
+
         p2PDataStorage = new P2PDataStorage(broadcaster, networkNode, storageDir);
         p2PDataStorage.addHashMapChangedListener(this);

-        Set<NodeAddress> seedNodeAddresses = seedNodesRepository.getSeedNodeAddresses(useLocalhost, networkId);
-        peerManager = new PeerManager(networkNode, seedNodeAddresses, storageDir, clock);
-
         requestDataManager = new RequestDataManager(networkNode, p2PDataStorage, peerManager, seedNodeAddresses, this);

         peerExchangeManager = new PeerExchangeManager(networkNode, peerManager, seedNodeAddresses);
@@ -175,6 +175,9 @@ public class P2PService implements SetupListener, MessageListener, ConnectionLis
         if (peerManager != null)
             peerManager.shutDown();

+        if (broadcaster != null)
+            broadcaster.shutDown();
+
         if (requestDataManager != null)
             requestDataManager.shutDown();

@@ -224,7 +227,7 @@ public class P2PService implements SetupListener, MessageListener, ConnectionLis
         Log.traceCall();

         requestDataManager.requestPreliminaryData();
-        keepAliveManager.start();
+        keepAliveManager.restart();
         p2pServiceListeners.stream().forEach(SetupListener::onTorNodeReady);
     }

@@ -60,9 +60,9 @@ public class Connection implements MessageListener {
     private static final int MAX_MSG_SIZE = 100 * 1024; // 100 kb of compressed data
     private static final int MSG_THROTTLE_PER_SEC = 10; // With MAX_MSG_SIZE of 100kb results in bandwidth of 10 mbit/sec
    private static final int MSG_THROTTLE_PER_10_SEC = 50; // With MAX_MSG_SIZE of 100kb results in bandwidth of 5 mbit/sec for 10 sec
-    private static final int SOCKET_TIMEOUT = (int) TimeUnit.SECONDS.toMillis(60);
+    //private static final int SOCKET_TIMEOUT = (int) TimeUnit.SECONDS.toMillis(60);
     //TODO
-    // private static final int SOCKET_TIMEOUT = (int) TimeUnit.SECONDS.toMillis(30);
+    private static final int SOCKET_TIMEOUT = (int) TimeUnit.SECONDS.toMillis(20);

     public static int getMaxMsgSize() {
         return MAX_MSG_SIZE;
@@ -358,6 +358,7 @@ public class Connection implements MessageListener {
             log.info("\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n" +
                     "ShutDown connection:"
                     + "\npeersNodeAddress=" + peersNodeAddress
+                    + "\ncloseConnectionReason=" + closeConnectionReason
                     + "\nuid=" + uid
                     + "\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n");

@@ -5,7 +5,9 @@ import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.SettableFuture;
 import io.bitsquare.app.Log;
 import io.bitsquare.p2p.NodeAddress;
+import io.bitsquare.p2p.network.CloseConnectionReason;
 import io.bitsquare.p2p.network.Connection;
+import io.bitsquare.p2p.network.ConnectionListener;
 import io.bitsquare.p2p.network.NetworkNode;
 import io.bitsquare.p2p.storage.messages.DataBroadcastMessage;
 import javafx.beans.property.IntegerProperty;
@@ -19,7 +21,7 @@ import org.slf4j.LoggerFactory;
 import java.util.Set;
 import java.util.concurrent.CopyOnWriteArraySet;

-public class Broadcaster {
+public class Broadcaster implements ConnectionListener, PeerManager.Listener {
     private static final Logger log = LoggerFactory.getLogger(Broadcaster.class);


@@ -28,15 +30,35 @@
     }

     private final NetworkNode networkNode;
+    private PeerManager peerManager;
     private final Set<Listener> listeners = new CopyOnWriteArraySet<>();
+    private boolean stopped = false;

     private final IntegerProperty numOfBroadcasts = new SimpleIntegerProperty(0);

-    public Broadcaster(NetworkNode networkNode) {
+
+    ///////////////////////////////////////////////////////////////////////////////////////////
+    // Constructor
+    ///////////////////////////////////////////////////////////////////////////////////////////
+
+    public Broadcaster(NetworkNode networkNode, PeerManager peerManager) {
         this.networkNode = networkNode;
+        this.peerManager = peerManager;
+        networkNode.removeConnectionListener(this);
+        peerManager.removeListener(this);
     }

+    public void shutDown() {
+        stopped = true;
+        networkNode.removeConnectionListener(this);
+        peerManager.removeListener(this);
+    }
+
+
+    ///////////////////////////////////////////////////////////////////////////////////////////
+    // API
+    ///////////////////////////////////////////////////////////////////////////////////////////
+
     public void broadcast(DataBroadcastMessage message, @Nullable NodeAddress sender) {
         Log.traceCall("Sender=" + sender + "\n\t" +
                 "Message=" + StringUtils.abbreviate(message.toString(), 100));
@@ -48,21 +70,28 @@
                 .filter(connection -> !connection.getPeersNodeAddressOptional().get().equals(sender))
                 .forEach(connection -> {
                     NodeAddress nodeAddress = connection.getPeersNodeAddressOptional().get();
-                    log.trace("Broadcast message to " +
-                            nodeAddress + ".");
+                    log.trace("Broadcast message to " + nodeAddress + ".");
                     SettableFuture<Connection> future = networkNode.sendMessage(connection, message);
                     Futures.addCallback(future, new FutureCallback<Connection>() {
                         @Override
                         public void onSuccess(Connection connection) {
-                            log.trace("Broadcast to " + nodeAddress + " succeeded.");
+                            if (!stopped) {
+                                //log.trace("Broadcast to " + nodeAddress + " succeeded.");
                                 numOfBroadcasts.set(numOfBroadcasts.get() + 1);
                                 listeners.stream().forEach(listener -> listener.onBroadcasted(message));
+                            } else {
+                                log.warn("We have stopped already. We ignore that networkNode.sendMessage.onSuccess call.");
+                            }
                         }

                         @Override
                         public void onFailure(@NotNull Throwable throwable) {
+                            if (!stopped) {
                                 log.info("Broadcast to " + nodeAddress + " failed.\n\t" +
                                         "ErrorMessage=" + throwable.getMessage());
+                            } else {
+                                log.warn("We have stopped already. We ignore that networkNode.sendMessage.onFailure call.");
+                            }
                         }
                     });
                 });
@@ -85,4 +114,43 @@
     public void removeListener(Listener listener) {
         listeners.remove(listener);
     }
+
+
+    ///////////////////////////////////////////////////////////////////////////////////////////
+    // ConnectionListener implementation
+    ///////////////////////////////////////////////////////////////////////////////////////////
+
+    @Override
+    public void onConnection(Connection connection) {
+        stopped = false;
+    }
+
+    @Override
+    public void onDisconnect(CloseConnectionReason closeConnectionReason, Connection connection) {
+    }
+
+    @Override
+    public void onError(Throwable throwable) {
+    }
+
+
+    ///////////////////////////////////////////////////////////////////////////////////////////
+    // PeerManager.Listener implementation
+    ///////////////////////////////////////////////////////////////////////////////////////////
+
+    @Override
+    public void onAllConnectionsLost() {
+        stopped = true;
+    }
+
+    @Override
+    public void onNewConnectionAfterAllConnectionsLost() {
+        stopped = false;
+    }
+
+    @Override
+    public void onAwakeFromStandby() {
+        if (!networkNode.getAllConnections().isEmpty())
+            stopped = false;
+    }
 }

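A pattern that recurs in Broadcaster above and in OpenOfferManager and PeerManager in this commit: asynchronous callbacks are guarded by a stopped flag that connection and peer listeners toggle, and late results are logged and ignored once stopped. A minimal sketch of that guard, assuming simplified names rather than the project's actual classes:

```java
// Illustrative sketch of the stop-guard used in the callbacks above; not the project's real API.
public class StopGuardSketch {
    private volatile boolean stopped = false;

    public void onAllConnectionsLost() {
        stopped = true;
    }

    public void onNewConnectionAfterAllConnectionsLost() {
        stopped = false;
    }

    // Wrap async results so late callbacks are ignored once we have stopped.
    public void onAsyncResult(Runnable handler, String context) {
        if (!stopped)
            handler.run();
        else
            System.out.println("We have stopped already. We ignore that " + context + " call.");
    }
}
```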
@ -4,10 +4,8 @@ import io.bitsquare.app.Log;
|
||||||
import io.bitsquare.common.Clock;
|
import io.bitsquare.common.Clock;
|
||||||
import io.bitsquare.common.Timer;
|
import io.bitsquare.common.Timer;
|
||||||
import io.bitsquare.common.UserThread;
|
import io.bitsquare.common.UserThread;
|
||||||
import io.bitsquare.p2p.Message;
|
|
||||||
import io.bitsquare.p2p.NodeAddress;
|
import io.bitsquare.p2p.NodeAddress;
|
||||||
import io.bitsquare.p2p.network.*;
|
import io.bitsquare.p2p.network.*;
|
||||||
import io.bitsquare.p2p.peers.getdata.messages.GetUpdatedDataRequest;
|
|
||||||
import io.bitsquare.p2p.peers.peerexchange.ReportedPeer;
|
import io.bitsquare.p2p.peers.peerexchange.ReportedPeer;
|
||||||
import io.bitsquare.storage.Storage;
|
import io.bitsquare.storage.Storage;
|
||||||
import javafx.beans.value.ChangeListener;
|
import javafx.beans.value.ChangeListener;
|
||||||
|
@ -20,15 +18,15 @@ import java.util.*;
|
||||||
import java.util.concurrent.TimeUnit;
|
import java.util.concurrent.TimeUnit;
|
||||||
import java.util.stream.Collectors;
|
import java.util.stream.Collectors;
|
||||||
|
|
||||||
import static com.google.common.base.Preconditions.checkArgument;
|
public class PeerManager implements ConnectionListener {
|
||||||
|
|
||||||
public class PeerManager implements ConnectionListener, MessageListener {
|
|
||||||
private static final Logger log = LoggerFactory.getLogger(PeerManager.class);
|
private static final Logger log = LoggerFactory.getLogger(PeerManager.class);
|
||||||
|
|
||||||
///////////////////////////////////////////////////////////////////////////////////////////
|
///////////////////////////////////////////////////////////////////////////////////////////
|
||||||
// Static
|
// Static
|
||||||
///////////////////////////////////////////////////////////////////////////////////////////
|
///////////////////////////////////////////////////////////////////////////////////////////
|
||||||
|
|
||||||
|
private static final long CHECK_MAX_CONN_DELAY_SEC = 3;
|
||||||
|
|
||||||
private static int MAX_CONNECTIONS;
|
private static int MAX_CONNECTIONS;
|
||||||
private static int MIN_CONNECTIONS;
|
private static int MIN_CONNECTIONS;
|
||||||
private static int MAX_CONNECTIONS_PEER;
|
private static int MAX_CONNECTIONS_PEER;
|
||||||
|
@ -85,6 +83,8 @@ public class PeerManager implements ConnectionListener, MessageListener {
|
||||||
private final ChangeListener<NodeAddress> connectionNodeAddressListener;
|
private final ChangeListener<NodeAddress> connectionNodeAddressListener;
|
||||||
private final Clock.Listener listener;
|
private final Clock.Listener listener;
|
||||||
private final List<Listener> listeners = new LinkedList<>();
|
private final List<Listener> listeners = new LinkedList<>();
|
||||||
|
private boolean stopped;
|
||||||
|
|
||||||
|
|
||||||
///////////////////////////////////////////////////////////////////////////////////////////
|
///////////////////////////////////////////////////////////////////////////////////////////
|
||||||
// Constructor
|
// Constructor
|
||||||
|
@ -107,10 +107,14 @@ public class PeerManager implements ConnectionListener, MessageListener {
|
||||||
printConnectedPeers();
|
printConnectedPeers();
|
||||||
if (checkMaxConnectionsTimer == null && newValue != null)
|
if (checkMaxConnectionsTimer == null && newValue != null)
|
||||||
checkMaxConnectionsTimer = UserThread.runAfter(() -> {
|
checkMaxConnectionsTimer = UserThread.runAfter(() -> {
|
||||||
|
if (!stopped) {
|
||||||
removeTooOldReportedPeers();
|
removeTooOldReportedPeers();
|
||||||
removeTooOldPersistedPeers();
|
removeTooOldPersistedPeers();
|
||||||
checkMaxConnections(MAX_CONNECTIONS);
|
checkMaxConnections(MAX_CONNECTIONS);
|
||||||
}, 3);
|
} else {
|
||||||
|
log.warn("We have stopped already. We ignore that checkMaxConnectionsTimer.run call.");
|
||||||
|
}
|
||||||
|
}, CHECK_MAX_CONN_DELAY_SEC);
|
||||||
};
|
};
|
||||||
|
|
||||||
// we check if app was idle for more then 5 sec.
|
// we check if app was idle for more then 5 sec.
|
||||||
|
@ -126,7 +130,8 @@ public class PeerManager implements ConnectionListener, MessageListener {
|
||||||
@Override
|
@Override
|
||||||
public void onMissedSecondTick(long missed) {
|
public void onMissedSecondTick(long missed) {
|
||||||
if (missed > Clock.IDLE_TOLERANCE) {
|
if (missed > Clock.IDLE_TOLERANCE) {
|
||||||
log.error("We have been idle for {} sec", missed / 1000);
|
log.warn("We have been in standby mode for {} sec", missed / 1000);
|
||||||
|
stopped = false;
|
||||||
listeners.stream().forEach(Listener::onAwakeFromStandby);
|
listeners.stream().forEach(Listener::onAwakeFromStandby);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -142,6 +147,11 @@ public class PeerManager implements ConnectionListener, MessageListener {
|
||||||
stopCheckMaxConnectionsTimer();
|
stopCheckMaxConnectionsTimer();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
///////////////////////////////////////////////////////////////////////////////////////////
|
||||||
|
// API
|
||||||
|
///////////////////////////////////////////////////////////////////////////////////////////
|
||||||
|
|
||||||
public int getMaxConnections() {
|
public int getMaxConnections() {
|
||||||
return MAX_CONNECTIONS_ABSOLUTE;
|
return MAX_CONNECTIONS_ABSOLUTE;
|
||||||
}
|
}
|
||||||
|
@ -154,6 +164,7 @@ public class PeerManager implements ConnectionListener, MessageListener {
|
||||||
listeners.remove(listener);
|
listeners.remove(listener);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
///////////////////////////////////////////////////////////////////////////////////////////
|
///////////////////////////////////////////////////////////////////////////////////////////
|
||||||
// ConnectionListener implementation
|
// ConnectionListener implementation
|
||||||
///////////////////////////////////////////////////////////////////////////////////////////
|
///////////////////////////////////////////////////////////////////////////////////////////
|
||||||
|
@ -161,18 +172,13 @@ public class PeerManager implements ConnectionListener, MessageListener {
|
||||||
@Override
|
@Override
|
||||||
public void onConnection(Connection connection) {
|
public void onConnection(Connection connection) {
|
||||||
connection.getNodeAddressProperty().addListener(connectionNodeAddressListener);
|
connection.getNodeAddressProperty().addListener(connectionNodeAddressListener);
|
||||||
Optional<NodeAddress> peersNodeAddressOptional = connection.getPeersNodeAddressOptional();
|
|
||||||
// OutboundConnection always know their peers address
|
if (isSeedNode(connection))
|
||||||
// A seed node get only InboundConnection from other seed nodes with getData or peer exchange,
|
|
||||||
// never direct messages, so we need to check for PeerType.SEED_NODE at onMessage
|
|
||||||
if (connection instanceof OutboundConnection &&
|
|
||||||
peersNodeAddressOptional.isPresent() &&
|
|
||||||
seedNodeAddresses.contains(peersNodeAddressOptional.get())) {
|
|
||||||
connection.setPeerType(Connection.PeerType.SEED_NODE);
|
connection.setPeerType(Connection.PeerType.SEED_NODE);
|
||||||
}
|
|
||||||
|
|
||||||
if (lostAllConnections) {
|
if (lostAllConnections) {
|
||||||
lostAllConnections = false;
|
lostAllConnections = false;
|
||||||
|
stopped = false;
|
||||||
listeners.stream().forEach(Listener::onNewConnectionAfterAllConnectionsLost);
|
listeners.stream().forEach(Listener::onNewConnectionAfterAllConnectionsLost);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -183,9 +189,11 @@ public class PeerManager implements ConnectionListener, MessageListener {
|
||||||
handleConnectionFault(connection);
|
handleConnectionFault(connection);
|
||||||
|
|
||||||
lostAllConnections = networkNode.getAllConnections().isEmpty();
|
lostAllConnections = networkNode.getAllConnections().isEmpty();
|
||||||
if (lostAllConnections)
|
if (lostAllConnections) {
|
||||||
|
stopped = true;
|
||||||
listeners.stream().forEach(Listener::onAllConnectionsLost);
|
listeners.stream().forEach(Listener::onAllConnectionsLost);
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void onError(Throwable throwable) {
|
public void onError(Throwable throwable) {
|
||||||
|
@ -193,25 +201,7 @@ public class PeerManager implements ConnectionListener, MessageListener {
|
||||||
|
|
||||||
|
|
||||||
///////////////////////////////////////////////////////////////////////////////////////////
|
///////////////////////////////////////////////////////////////////////////////////////////
|
||||||
// MessageListener implementation
|
// Check max connections
|
||||||
///////////////////////////////////////////////////////////////////////////////////////////
|
|
||||||
|
|
||||||
//TODO move to RequestDataManager
|
|
||||||
@Override
|
|
||||||
public void onMessage(Message message, Connection connection) {
|
|
||||||
// In case a seed node connects to another seed node we get his address at the DataRequest triggered from
|
|
||||||
// RequestDataManager.updateDataFromConnectedSeedNode
|
|
||||||
if (message instanceof GetUpdatedDataRequest) {
|
|
||||||
Log.traceCall(message.toString() + "\n\tconnection=" + connection);
|
|
||||||
|
|
||||||
if (isSeedNode(connection))
|
|
||||||
connection.setPeerType(Connection.PeerType.SEED_NODE);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
///////////////////////////////////////////////////////////////////////////////////////////
|
|
||||||
// Check seed node connections
|
|
||||||
///////////////////////////////////////////////////////////////////////////////////////////
|
///////////////////////////////////////////////////////////////////////////////////////////
|
||||||
|
|
||||||
private boolean checkMaxConnections(int limit) {
|
private boolean checkMaxConnections(int limit) {
|
||||||
|
@ -222,7 +212,6 @@ public class PeerManager implements ConnectionListener, MessageListener {
|
||||||
int size = allConnections.size();
|
int size = allConnections.size();
|
||||||
log.info("We have {} connections open. Our limit is {}", size, limit);
|
log.info("We have {} connections open. Our limit is {}", size, limit);
|
||||||
if (size > limit) {
|
if (size > limit) {
|
||||||
// Only InboundConnection, and PEER type connections
|
|
||||||
log.info("We have too many connections open.\n\t" +
|
log.info("We have too many connections open.\n\t" +
|
||||||
"Lets try first to remove the inbound connections of type PEER.");
|
"Lets try first to remove the inbound connections of type PEER.");
|
||||||
List<Connection> candidates = allConnections.stream()
|
List<Connection> candidates = allConnections.stream()
|
||||||
|
@ -235,7 +224,6 @@ public class PeerManager implements ConnectionListener, MessageListener {
|
||||||
"MAX_CONNECTIONS_PEER limit of {}", MAX_CONNECTIONS_PEER);
|
"MAX_CONNECTIONS_PEER limit of {}", MAX_CONNECTIONS_PEER);
|
||||||
if (size > MAX_CONNECTIONS_PEER) {
|
if (size > MAX_CONNECTIONS_PEER) {
|
||||||
log.info("Lets try to remove ANY connection of type PEER.");
|
log.info("Lets try to remove ANY connection of type PEER.");
|
||||||
// Only PEER type connections
|
|
||||||
candidates = allConnections.stream()
|
candidates = allConnections.stream()
|
||||||
.filter(e -> e.getPeerType() == Connection.PeerType.PEER)
|
.filter(e -> e.getPeerType() == Connection.PeerType.PEER)
|
||||||
.collect(Collectors.toList());
|
.collect(Collectors.toList());
|
||||||
|
@ -245,7 +233,6 @@ public class PeerManager implements ConnectionListener, MessageListener {
|
||||||
"MAX_CONNECTIONS_NON_DIRECT limit of {}", MAX_CONNECTIONS_NON_DIRECT);
|
"MAX_CONNECTIONS_NON_DIRECT limit of {}", MAX_CONNECTIONS_NON_DIRECT);
|
||||||
if (size > MAX_CONNECTIONS_NON_DIRECT) {
|
if (size > MAX_CONNECTIONS_NON_DIRECT) {
|
||||||
log.info("Lets try to remove any connection which is not of type DIRECT_MSG_PEER.");
|
log.info("Lets try to remove any connection which is not of type DIRECT_MSG_PEER.");
|
||||||
// All connections except DIRECT_MSG_PEER
|
|
||||||
candidates = allConnections.stream()
|
candidates = allConnections.stream()
|
.filter(e -> e.getPeerType() != Connection.PeerType.DIRECT_MSG_PEER)
.collect(Collectors.toList());

@@ -255,9 +242,7 @@ public class PeerManager implements ConnectionListener, MessageListener {
"MAX_CONNECTIONS_ABSOLUTE limit of {}", MAX_CONNECTIONS_ABSOLUTE);
if (size > MAX_CONNECTIONS_ABSOLUTE) {
log.info("Lets try to remove any connection.");
- // All connections
- candidates = allConnections.stream()
- .collect(Collectors.toList());
+ candidates = allConnections.stream().collect(Collectors.toList());
}
}
}

@@ -286,9 +271,9 @@ public class PeerManager implements ConnectionListener, MessageListener {

private void removeSuperfluousSeedNodes() {
Log.traceCall();
- Set<Connection> allConnections = networkNode.getAllConnections();
- if (allConnections.size() > MAX_CONNECTIONS_PEER) {
- List<Connection> candidates = allConnections.stream()
+ Set<Connection> connections = networkNode.getConfirmedConnections();
+ if (hasSufficientConnections()) {
+ List<Connection> candidates = connections.stream()
.filter(this::isSeedNode)
.collect(Collectors.toList());

@@ -318,7 +303,7 @@ public class PeerManager implements ConnectionListener, MessageListener {
.filter(e -> e.nodeAddress.equals(nodeAddress)).findAny();
if (reportedPeerOptional.isPresent()) {
ReportedPeer reportedPeer = reportedPeerOptional.get();
- reportedPeers.remove(reportedPeer);
+ removeReportedPeer(reportedPeer);
return reportedPeer;
} else {
return null;

@@ -338,7 +323,7 @@ public class PeerManager implements ConnectionListener, MessageListener {
}

public void addToReportedPeers(HashSet<ReportedPeer> reportedPeersToAdd, Connection connection) {
- printReportedPeers(reportedPeersToAdd);
+ printNewReportedPeers(reportedPeersToAdd);

// We check if the reported msg is not violating our rules
if (reportedPeersToAdd.size() <= (MAX_REPORTED_PEERS + PeerManager.MAX_CONNECTIONS_ABSOLUTE + 10)) {

@@ -359,6 +344,25 @@ public class PeerManager implements ConnectionListener, MessageListener {
}
}

+ private void purgeReportedPeersIfExceeds() {
+ Log.traceCall();
+ int size = reportedPeers.size();
+ int limit = MAX_REPORTED_PEERS - MAX_CONNECTIONS_ABSOLUTE;
+ if (size > limit) {
+ log.trace("We have already {} reported peers which exceeds our limit of {}." +
+ "We remove random peers from the reported peers list.", size, limit);
+ int diff = size - limit;
+ List<ReportedPeer> list = new ArrayList<>(reportedPeers);
+ // we dont use sorting by lastActivityDate to keep it more random
+ for (int i = 0; i < diff; i++) {
+ ReportedPeer toRemove = list.remove(new Random().nextInt(list.size()));
+ removeReportedPeer(toRemove);
+ }
+ } else {
+ log.trace("No need to purge reported peers.\n\tWe don't have more then {} reported peers yet.", MAX_REPORTED_PEERS);
+ }
+ }

private void printReportedPeers() {
if (!reportedPeers.isEmpty()) {
if (printReportedPeersDetails) {

@@ -372,25 +376,26 @@ public class PeerManager implements ConnectionListener, MessageListener {
}
}

- private void printReportedPeers(HashSet<ReportedPeer> reportedPeers) {
+ private void printNewReportedPeers(HashSet<ReportedPeer> reportedPeers) {
if (printReportedPeersDetails) {
- StringBuilder result = new StringBuilder("We received now reportedPeers:");
+ StringBuilder result = new StringBuilder("We received new reportedPeers:");
reportedPeers.stream().forEach(e -> result.append("\n\t").append(e));
log.info(result.toString());
}
log.info("Number of new arrived reported peers: {}", reportedPeers.size());
}
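The purge helpers added in this hunk trim the reported-peers list by removing random entries once a size limit is exceeded, deliberately not sorting by last-activity date. A minimal, self-contained sketch of the same idea follows; the PurgeDemo class, the purgeIfExceeds helper and the String-based peer list are hypothetical simplifications, not types from this codebase.

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Random;

    public class PurgeDemo {
        // Remove random entries until the list is back at the limit.
        // Random selection (instead of age-based ordering) avoids giving an
        // attacker a predictable way to push specific peers out of the list.
        static <T> void purgeIfExceeds(List<T> peers, int limit, Random rnd) {
            int diff = peers.size() - limit;
            for (int i = 0; i < diff; i++)
                peers.remove(rnd.nextInt(peers.size()));
        }

        public static void main(String[] args) {
            List<String> peers = new ArrayList<>();
            for (int i = 0; i < 1200; i++)
                peers.add("peer" + i + ".onion:9999");
            purgeIfExceeds(peers, 1000, new Random());
            System.out.println(peers.size()); // prints 1000
        }
    }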

///////////////////////////////////////////////////////////////////////////////////////////
// Persisted peers
///////////////////////////////////////////////////////////////////////////////////////////

- private boolean removePersistedPeer(ReportedPeer reportedPeer) {
- if (persistedPeers.contains(reportedPeer)) {
- persistedPeers.remove(reportedPeer);
+ private boolean removePersistedPeer(ReportedPeer persistedPeer) {
+ if (persistedPeers.contains(persistedPeer)) {
+ persistedPeers.remove(persistedPeer);

if (dbStorage != null)
- dbStorage.queueUpForSave(persistedPeers, 5000);
+ dbStorage.queueUpForSave(persistedPeers, 2000);

return true;
} else {

@@ -401,12 +406,7 @@ public class PeerManager implements ConnectionListener, MessageListener {
private boolean removePersistedPeer(NodeAddress nodeAddress) {
Optional<ReportedPeer> persistedPeerOptional = persistedPeers.stream()
.filter(e -> e.nodeAddress.equals(nodeAddress)).findAny();
- persistedPeerOptional.ifPresent(persistedPeer -> {
- persistedPeers.remove(persistedPeer);
- if (dbStorage != null)
- dbStorage.queueUpForSave(persistedPeers, 5000);
- });
- return persistedPeerOptional.isPresent();
+ return persistedPeerOptional.isPresent() && removePersistedPeer(persistedPeerOptional.get());
}

private void removeTooOldPersistedPeers() {

@@ -417,6 +417,24 @@ public class PeerManager implements ConnectionListener, MessageListener {
persistedPeersToRemove.forEach(this::removePersistedPeer);
}

+ private void purgePersistedPeersIfExceeds() {
+ Log.traceCall();
+ int size = persistedPeers.size();
+ int limit = MAX_PERSISTED_PEERS;
+ if (size > limit) {
+ log.trace("We have already {} persisted peers which exceeds our limit of {}." +
+ "We remove random peers from the persisted peers list.", size, limit);
+ int diff = size - limit;
+ List<ReportedPeer> list = new ArrayList<>(persistedPeers);
+ // we dont use sorting by lastActivityDate to avoid attack vectors and keep it more random
+ for (int i = 0; i < diff; i++) {
+ ReportedPeer toRemove = list.remove(new Random().nextInt(list.size()));
+ removePersistedPeer(toRemove);
+ }
+ } else {
+ log.trace("No need to purge persisted peers.\n\tWe don't have more then {} persisted peers yet.", MAX_PERSISTED_PEERS);
+ }
+ }

public Set<ReportedPeer> getPersistedPeers() {
return persistedPeers;

@@ -431,32 +449,6 @@ public class PeerManager implements ConnectionListener, MessageListener {
return networkNode.getNodeAddressesOfConfirmedConnections().size() >= MIN_CONNECTIONS;
}

- public void handleConnectionFault(Connection connection) {
- connection.getPeersNodeAddressOptional().ifPresent(nodeAddress -> handleConnectionFault(nodeAddress, connection));
- }
-
- public void handleConnectionFault(NodeAddress nodeAddress, @Nullable Connection connection) {
- Log.traceCall("nodeAddress=" + nodeAddress);
- ReportedPeer reportedPeer = removeReportedPeer(nodeAddress);
- if (connection != null && connection.getRuleViolation() != null) {
- removePersistedPeer(nodeAddress);
- } else {
- if (reportedPeer != null) {
- removePersistedPeer(nodeAddress);
- persistedPeers.add(reportedPeer);
- dbStorage.queueUpForSave(persistedPeers, 5000);
-
- removeTooOldPersistedPeers();
- }
- }
- }
-
- /* public Set<ReportedPeer> getConnectedAndReportedPeers() {
- Set<ReportedPeer> result = new HashSet<>(reportedPeers);
- result.addAll(getConnectedPeers());
- return result;
- }*/

public boolean isSeedNode(ReportedPeer reportedPeer) {
return seedNodeAddresses.contains(reportedPeer.nodeAddress);
}

@@ -486,6 +478,23 @@ public class PeerManager implements ConnectionListener, MessageListener {
return networkNode.getNodeAddressesOfConfirmedConnections().contains(nodeAddress);
}

+ public void handleConnectionFault(Connection connection) {
+ connection.getPeersNodeAddressOptional().ifPresent(nodeAddress -> handleConnectionFault(nodeAddress, connection));
+ }
+
+ public void handleConnectionFault(NodeAddress nodeAddress) {
+ handleConnectionFault(nodeAddress, null);
+ }
+
+ public void handleConnectionFault(NodeAddress nodeAddress, @Nullable Connection connection) {
+ Log.traceCall("nodeAddress=" + nodeAddress);
+ ReportedPeer reportedPeer = removeReportedPeer(nodeAddress);
+ if (connection != null && connection.getRuleViolation() != null)
+ removePersistedPeer(nodeAddress);
+ else
+ removeTooOldPersistedPeers();
+ }
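The reworked handleConnectionFault overloads above first drop the faulty peer from the reported list and then either evict it from the persisted list (when the connection reported a rule violation) or just run the regular age-based cleanup. A simplified, self-contained sketch of that dispatch follows; FaultDemo and its String-keyed sets are hypothetical stand-ins, not the real ReportedPeer/Connection types.

    import java.util.HashSet;
    import java.util.Set;

    public class FaultDemo {
        static final Set<String> reported = new HashSet<>();
        static final Set<String> persisted = new HashSet<>();

        // A rule violation evicts the peer from the persisted list; an ordinary
        // failure only triggers the regular age-based pruning of persisted peers.
        static void handleConnectionFault(String address, boolean ruleViolation) {
            reported.remove(address);
            if (ruleViolation)
                persisted.remove(address);
            else
                removeTooOldPersistedPeers();
        }

        static void removeTooOldPersistedPeers() {
            // age-based pruning would go here
        }

        public static void main(String[] args) {
            reported.add("abc.onion:9999");
            persisted.add("abc.onion:9999");
            handleConnectionFault("abc.onion:9999", true);
            System.out.println(persisted.isEmpty()); // prints true
        }
    }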

public void shutDownConnection(Connection connection, CloseConnectionReason closeConnectionReason) {
if (connection.getPeerType() != Connection.PeerType.DIRECT_MSG_PEER)
connection.shutDown(closeConnectionReason);

@@ -500,54 +509,17 @@ public class PeerManager implements ConnectionListener, MessageListener {
.ifPresent(connection -> connection.shutDown(closeConnectionReason));
}

+ public HashSet<ReportedPeer> getConnectedNonSeedNodeReportedPeers(NodeAddress excludedNodeAddress) {
+ return new HashSet<>(getConnectedNonSeedNodeReportedPeers().stream()
+ .filter(e -> !e.nodeAddress.equals(excludedNodeAddress))
+ .collect(Collectors.toSet()));
+ }

///////////////////////////////////////////////////////////////////////////////////////////
// Private
///////////////////////////////////////////////////////////////////////////////////////////

- private void purgeReportedPeersIfExceeds() {
- Log.traceCall();
- int size = getReportedPeers().size();
- int limit = MAX_REPORTED_PEERS - MAX_CONNECTIONS_ABSOLUTE;
- if (size > limit) {
- log.trace("We have already {} reported peers which exceeds our limit of {}." +
- "We remove random peers from the reported peers list.", size, limit);
- int diff = size - limit;
- List<ReportedPeer> list = new ArrayList<>(getReportedPeers());
- // we dont use sorting by lastActivityDate to avoid attack vectors and keep it more random
- for (int i = 0; i < diff; i++) {
- ReportedPeer toRemove = getAndRemoveRandomReportedPeer(list);
- removeReportedPeer(toRemove);
- }
- } else {
- log.trace("No need to purge reported peers.\n\tWe don't have more then {} reported peers yet.", MAX_REPORTED_PEERS);
- }
- }
-
- private void purgePersistedPeersIfExceeds() {
- Log.traceCall();
- int size = getPersistedPeers().size();
- int limit = MAX_PERSISTED_PEERS - MAX_CONNECTIONS_ABSOLUTE;
- if (size > limit) {
- log.trace("We have already {} persisted peers which exceeds our limit of {}." +
- "We remove random peers from the persisted peers list.", size, limit);
- int diff = size - limit;
- List<ReportedPeer> list = new ArrayList<>(getReportedPeers());
- // we dont use sorting by lastActivityDate to avoid attack vectors and keep it more random
- for (int i = 0; i < diff; i++) {
- ReportedPeer toRemove = getAndRemoveRandomReportedPeer(list);
- removePersistedPeer(toRemove);
- }
- } else {
- log.trace("No need to purge persisted peers.\n\tWe don't have more then {} persisted peers yet.", MAX_PERSISTED_PEERS);
- }
- }
-
- private ReportedPeer getAndRemoveRandomReportedPeer(List<ReportedPeer> list) {
- checkArgument(!list.isEmpty(), "List must not be empty");
- return list.remove(new Random().nextInt(list.size()));
- }
-
- private Set<ReportedPeer> getConnectedPeers() {
+ private Set<ReportedPeer> getConnectedReportedPeers() {
// networkNode.getConfirmedConnections includes:
// filter(connection -> connection.getPeersNodeAddressOptional().isPresent())
return networkNode.getConfirmedConnections().stream()

@@ -555,18 +527,12 @@ public class PeerManager implements ConnectionListener, MessageListener {
.collect(Collectors.toSet());
}

- private HashSet<ReportedPeer> getConnectedPeersNonSeedNodes() {
- return new HashSet<>(getConnectedPeers().stream()
+ private HashSet<ReportedPeer> getConnectedNonSeedNodeReportedPeers() {
+ return new HashSet<>(getConnectedReportedPeers().stream()
.filter(e -> !isSeedNode(e))
.collect(Collectors.toSet()));
}

- public HashSet<ReportedPeer> getConnectedPeersNonSeedNodes(NodeAddress excludedNodeAddress) {
- return new HashSet<>(getConnectedPeersNonSeedNodes().stream()
- .filter(e -> !e.nodeAddress.equals(excludedNodeAddress))
- .collect(Collectors.toSet()));
- }

private void stopCheckMaxConnectionsTimer() {
if (checkMaxConnectionsTimer != null) {
checkMaxConnectionsTimer.stop();

@@ -50,7 +50,7 @@ public class RequestDataHandler implements MessageListener {
private final PeerManager peerManager;
private final Listener listener;
private Timer timeoutTimer;
- private final long nonce = new Random().nextLong();
+ private final int nonce = new Random().nextInt();
private boolean stopped;

///////////////////////////////////////////////////////////////////////////////////////////

@@ -67,11 +67,8 @@ public class RequestDataHandler implements MessageListener {
networkNode.addMessageListener(this);
}

- public void cleanup() {
- Log.traceCall();
- stopped = true;
- networkNode.removeMessageListener(this);
- stopTimeoutTimer();
+ public void cancel() {
+ cleanup();
}

@@ -99,21 +96,29 @@ public class RequestDataHandler implements MessageListener {

@Override
public void onFailure(@NotNull Throwable throwable) {
+ if (!stopped) {
String errorMessage = "Sending getDataRequest to " + nodeAddress +
" failed. That is expected if the peer is offline.\n\t" +
"getDataRequest=" + getDataRequest + "." +
"\n\tException=" + throwable.getMessage();
log.info(errorMessage);
handleFault(errorMessage, nodeAddress, CloseConnectionReason.SEND_MSG_FAILURE);
+ } else {
+ log.warn("We have stopped already. We ignore that requestData.onFailure call.");
+ }
}
});

checkArgument(timeoutTimer == null, "requestData must not be called twice.");
timeoutTimer = UserThread.runAfter(() -> {
+ if (!stopped) {
String errorMessage = "A timeout occurred at sending getDataRequest:" + getDataRequest +
" on nodeAddress:" + nodeAddress;
log.info(errorMessage + " / RequestDataHandler=" + RequestDataHandler.this);
handleFault(errorMessage, nodeAddress, CloseConnectionReason.SEND_MSG_TIMEOUT);
+ } else {
+ log.warn("We have stopped already. We ignore that timeoutTimer.run call.");
+ }
},
10);
} else {

@@ -160,16 +165,25 @@ public class RequestDataHandler implements MessageListener {
// Private
///////////////////////////////////////////////////////////////////////////////////////////

+ private void handleFault(String errorMessage, NodeAddress nodeAddress, CloseConnectionReason closeConnectionReason) {
+ cleanup();
+ peerManager.shutDownConnection(nodeAddress, closeConnectionReason);
+ peerManager.handleConnectionFault(nodeAddress);
+ listener.onFault(errorMessage, null);
+ }
+
+ private void cleanup() {
+ Log.traceCall();
+ stopped = true;
+ networkNode.removeMessageListener(this);
+ stopTimeoutTimer();
+ }

private void stopTimeoutTimer() {
if (timeoutTimer != null) {
timeoutTimer.stop();
timeoutTimer = null;
}
}

- private void handleFault(String errorMessage, NodeAddress nodeAddress, CloseConnectionReason closeConnectionReason) {
- cleanup();
- peerManager.shutDownConnection(nodeAddress, closeConnectionReason);
- listener.onFault(errorMessage, null);
- }
}
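RequestDataHandler now separates the public cancel() from an idempotent private cleanup() and wraps every asynchronous callback in a stopped-flag check, so late failure or timeout callbacks from an already cancelled handler are ignored. The sketch below shows the same pattern in isolation; GuardedHandlerDemo and its CompletableFuture plumbing are illustrative assumptions, not the project's network API.

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.TimeUnit;

    public class GuardedHandlerDemo {
        private volatile boolean stopped;

        // Public cancellation entry point delegates to the same cleanup used on failure.
        public void cancel() {
            cleanup();
        }

        private void cleanup() {
            stopped = true;
            // listener / timer deregistration would go here
        }

        // The async completion only acts if the handler has not been stopped meanwhile.
        public void request(CompletableFuture<String> future) {
            future.whenComplete((result, throwable) -> {
                if (!stopped) {
                    if (throwable != null)
                        handleFault(throwable.getMessage());
                    else
                        System.out.println("got " + result);
                } else {
                    System.out.println("Already stopped, ignoring late callback.");
                }
            });
        }

        private void handleFault(String errorMessage) {
            cleanup();
            System.out.println("fault: " + errorMessage);
        }

        public static void main(String[] args) throws Exception {
            GuardedHandlerDemo handler = new GuardedHandlerDemo();
            CompletableFuture<String> future = new CompletableFuture<>();
            handler.request(future);
            handler.cancel();                 // caller cancels first
            future.complete("late response"); // late callback is ignored
            TimeUnit.MILLISECONDS.sleep(100);
        }
    }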

@@ -82,7 +82,7 @@ public class RequestDataManager implements MessageListener, ConnectionListener,
stopRetryTimer();
networkNode.removeMessageListener(this);
peerManager.removeListener(this);
- handlerMap.values().stream().forEach(RequestDataHandler::cleanup);
+ closeAllHandlers();
}

@@ -122,14 +122,12 @@ public class RequestDataManager implements MessageListener, ConnectionListener,
@Override
public void onConnection(Connection connection) {
Log.traceCall();
- // clean up in case we could not clean up at disconnect
- closeRequestDataHandler(connection);
}

@Override
public void onDisconnect(CloseConnectionReason closeConnectionReason, Connection connection) {
Log.traceCall();
- closeRequestDataHandler(connection);
+ closeHandler(connection);
}

@Override

@@ -144,28 +142,27 @@ public class RequestDataManager implements MessageListener, ConnectionListener,
@Override
public void onAllConnectionsLost() {
Log.traceCall();
- closeAllRequestDataHandlers();
+ closeAllHandlers();
stopRetryTimer();
stopped = true;
+ restart();
}

@Override
public void onNewConnectionAfterAllConnectionsLost() {
Log.traceCall();
- closeAllRequestDataHandlers();
+ closeAllHandlers();
stopped = false;
- retryAfterDelay();
+ restart();
}

@Override
public void onAwakeFromStandby() {
Log.traceCall();
- closeAllRequestDataHandlers();
+ closeAllHandlers();
stopped = false;

if (!networkNode.getAllConnections().isEmpty())
- retryAfterDelay();
+ restart();
}

@@ -185,15 +182,18 @@ public class RequestDataManager implements MessageListener, ConnectionListener,
new GetDataRequestHandler.Listener() {
@Override
public void onComplete() {
- log.trace("requestDataHandshake completed.\n\tConnection={}",
- connection);
+ log.trace("requestDataHandshake completed.\n\tConnection={}", connection);
}

@Override
public void onFault(String errorMessage, @Nullable Connection connection) {
+ if (!stopped) {
log.trace("GetDataRequestHandler failed.\n\tConnection={}\n\t" +
"ErrorMessage={}", connection, errorMessage);
peerManager.handleConnectionFault(connection);
+ } else {
+ log.warn("We have stopped already. We ignore that getDataRequestHandler.handle.onFault call.");
+ }
}
});
getDataRequestHandler.handle((GetDataRequest) message, connection);

@@ -239,13 +239,11 @@ public class RequestDataManager implements MessageListener, ConnectionListener,

@Override
public void onFault(String errorMessage, @Nullable Connection connection) {
- log.trace("requestDataHandshake of outbound connection failed.\n\tnodeAddress={}\n\t" +
+ log.trace("requestDataHandshake with outbound connection failed.\n\tnodeAddress={}\n\t" +
"ErrorMessage={}", nodeAddress, errorMessage);

handlerMap.remove(nodeAddress);
- peerManager.handleConnectionFault(nodeAddress, connection);
-
- if (!stopped) {
if (!remainingNodeAddresses.isEmpty()) {
log.info("There are remaining nodes available for requesting data. " +
"We will try requestDataFromPeers again.");

@@ -266,10 +264,7 @@ public class RequestDataManager implements MessageListener, ConnectionListener,
listener.onNoPeersAvailable();
}

- retryAfterDelay();
- }
- } else {
- log.warn("We have stopped already. We ignore that requestData.onFault call.");
+ restart();
}
}
});

@@ -288,10 +283,13 @@ public class RequestDataManager implements MessageListener, ConnectionListener,
// Utils
///////////////////////////////////////////////////////////////////////////////////////////

- private void retryAfterDelay() {
+ private void restart() {
+ Log.traceCall();
if (retryTimer == null) {
retryTimer = UserThread.runAfter(() -> {
log.trace("retryTimer called");
+ stopped = false;

stopRetryTimer();

// We create a new list of candidates

@@ -345,19 +343,21 @@ public class RequestDataManager implements MessageListener, ConnectionListener,
}
}

- private void closeRequestDataHandler(Connection connection) {
+ private void closeHandler(Connection connection) {
Optional<NodeAddress> peersNodeAddressOptional = connection.getPeersNodeAddressOptional();
if (peersNodeAddressOptional.isPresent()) {
NodeAddress nodeAddress = peersNodeAddressOptional.get();
if (handlerMap.containsKey(nodeAddress)) {
- handlerMap.get(nodeAddress).cleanup();
+ handlerMap.get(nodeAddress).cancel();
handlerMap.remove(nodeAddress);
}
+ } else {
+ log.trace("closeRequestDataHandler: nodeAddress not set in connection " + connection);
}
}

- private void closeAllRequestDataHandlers() {
- handlerMap.values().stream().forEach(RequestDataHandler::cleanup);
+ private void closeAllHandlers() {
+ handlerMap.values().stream().forEach(RequestDataHandler::cancel);
handlerMap.clear();
}

@@ -3,5 +3,5 @@ package io.bitsquare.p2p.peers.getdata.messages;
import io.bitsquare.p2p.Message;

public interface GetDataRequest extends Message {
- long getNonce();
+ int getNonce();
}

@@ -12,9 +12,9 @@ public final class GetDataResponse implements Message {
private final int messageVersion = Version.getP2PMessageVersion();

public final HashSet<ProtectedData> dataSet;
- public final long requestNonce;
+ public final int requestNonce;

- public GetDataResponse(HashSet<ProtectedData> dataSet, long requestNonce) {
+ public GetDataResponse(HashSet<ProtectedData> dataSet, int requestNonce) {
this.dataSet = dataSet;
this.requestNonce = requestNonce;
}

@@ -10,15 +10,15 @@ public final class GetUpdatedDataRequest implements SendersNodeAddressMessage, G

private final int messageVersion = Version.getP2PMessageVersion();
private final NodeAddress senderNodeAddress;
- private final long nonce;
+ private final int nonce;

- public GetUpdatedDataRequest(NodeAddress senderNodeAddress, long nonce) {
+ public GetUpdatedDataRequest(NodeAddress senderNodeAddress, int nonce) {
this.senderNodeAddress = senderNodeAddress;
this.nonce = nonce;
}

@Override
- public long getNonce() {
+ public int getNonce() {
return nonce;
}

@@ -8,14 +8,14 @@ public final class PreliminaryGetDataRequest implements AnonymousMessage, GetDat
private static final long serialVersionUID = Version.P2P_NETWORK_VERSION;

private final int messageVersion = Version.getP2PMessageVersion();
- private final long nonce;
+ private final int nonce;

- public PreliminaryGetDataRequest(long nonce) {
+ public PreliminaryGetDataRequest(int nonce) {
this.nonce = nonce;
}

@Override
- public long getNonce() {
+ public int getNonce() {
return nonce;
}
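The request/response messages above now carry an int nonce instead of a long; the response echoes the requester's nonce, and the requester only accepts a response whose echoed value matches the one it generated. A minimal sketch of that matching follows; the NonceDemo, Request and Response classes are hypothetical stand-ins for the real message types.

    import java.util.Random;

    public class NonceDemo {
        // The nonce only has to make an accidental or stale response unlikely to
        // match; it is not used as a cryptographic secret.
        static class Request { final int nonce; Request(int n) { nonce = n; } }
        static class Response { final int requestNonce; Response(int n) { requestNonce = n; } }

        public static void main(String[] args) {
            int nonce = new Random().nextInt();
            Request request = new Request(nonce);
            Response good = new Response(request.nonce);
            Response stale = new Response(request.nonce + 1);
            System.out.println(good.requestNonce == nonce);  // true  -> accept
            System.out.println(stale.requestNonce == nonce); // false -> ignore
        }
    }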

@@ -57,10 +57,8 @@ class KeepAliveHandler implements MessageListener {
this.listener = listener;
}

- public void cleanup() {
- stopped = true;
- if (connection != null)
- connection.removeMessageListener(this);
+ public void cancel() {
+ cleanup();
}

@@ -83,13 +81,18 @@ class KeepAliveHandler implements MessageListener {

@Override
public void onFailure(@NotNull Throwable throwable) {
+ if (!stopped) {
String errorMessage = "Sending ping to " + connection +
" failed. That is expected if the peer is offline.\n\tping=" + ping +
".\n\tException=" + throwable.getMessage();
log.info(errorMessage);
cleanup();
peerManager.shutDownConnection(connection, CloseConnectionReason.SEND_MSG_FAILURE);
+ peerManager.handleConnectionFault(connection);
listener.onFault(errorMessage);
+ } else {
+ log.warn("We have stopped already. We ignore that sendPing.onFailure call.");
+ }
}
});
} else {

@@ -121,4 +124,10 @@ class KeepAliveHandler implements MessageListener {
}
}
}

+ private void cleanup() {
+ stopped = true;
+ if (connection != null)
+ connection.removeMessageListener(this);
+ }
}

@@ -17,14 +17,13 @@ import org.slf4j.LoggerFactory;

import java.util.HashMap;
import java.util.Map;
- import java.util.Random;

public class KeepAliveManager implements MessageListener, ConnectionListener, PeerManager.Listener {
private static final Logger log = LoggerFactory.getLogger(KeepAliveManager.class);

- private static final int INTERVAL_SEC = new Random().nextInt(10) + 10;
+ //private static final int INTERVAL_SEC = new Random().nextInt(10) + 10;
//TODO
- // private static final int INTERVAL_SEC = 5;
+ private static final int INTERVAL_SEC = 5;

private final NetworkNode networkNode;
private final PeerManager peerManager;

@@ -49,13 +48,10 @@ public class KeepAliveManager implements MessageListener, ConnectionListener, Pe
public void shutDown() {
Log.traceCall();
stopped = true;

networkNode.removeMessageListener(this);
networkNode.removeConnectionListener(this);
peerManager.removeListener(this);
- closeAllMaintenanceHandlers();
+ closeAllHandlers();

stopKeepAliveTimer();
}

@@ -64,10 +60,14 @@ public class KeepAliveManager implements MessageListener, ConnectionListener, Pe
// API
///////////////////////////////////////////////////////////////////////////////////////////

- public void start() {
- stopped = false;
+ public void restart() {
if (keepAliveTimer == null)
- keepAliveTimer = UserThread.runPeriodically(this::keepAlive, INTERVAL_SEC);
+ keepAliveTimer = UserThread.runPeriodically(() -> {
+ stopped = false;
+ keepAlive();
+ }, INTERVAL_SEC);
+ else
+ log.warn("keepAliveTimer already running");
}

@@ -91,12 +91,16 @@ public class KeepAliveManager implements MessageListener, ConnectionListener, Pe

@Override
public void onFailure(@NotNull Throwable throwable) {
+ if (!stopped) {
String errorMessage = "Sending pong to " + connection +
" failed. That is expected if the peer is offline. pong=" + pong + "." +
"Exception: " + throwable.getMessage();
log.info(errorMessage);
peerManager.handleConnectionFault(connection);
peerManager.shutDownConnection(connection, CloseConnectionReason.SEND_MSG_FAILURE);
+ } else {
+ log.warn("We have stopped already. We ignore that networkNode.sendMessage.onFailure call.");
+ }
}
});
} else {

@@ -113,14 +117,12 @@ public class KeepAliveManager implements MessageListener, ConnectionListener, Pe
@Override
public void onConnection(Connection connection) {
Log.traceCall();
- // clean up in case we could not clean up at disconnect
- closeMaintenanceHandler(connection);
}

@Override
public void onDisconnect(CloseConnectionReason closeConnectionReason, Connection connection) {
Log.traceCall();
- closeMaintenanceHandler(connection);
+ closeHandler(connection);
}

@Override

@@ -135,23 +137,27 @@ public class KeepAliveManager implements MessageListener, ConnectionListener, Pe
@Override
public void onAllConnectionsLost() {
Log.traceCall();
- closeAllMaintenanceHandlers();
+ closeAllHandlers();
stopKeepAliveTimer();
+ stopped = true;
+ restart();
}

@Override
public void onNewConnectionAfterAllConnectionsLost() {
Log.traceCall();
- closeAllMaintenanceHandlers();
- start();
+ closeAllHandlers();
+ stopped = false;
+ restart();
}

@Override
public void onAwakeFromStandby() {
Log.traceCall();
- closeAllMaintenanceHandlers();
+ closeAllHandlers();
+ stopped = false;
if (!networkNode.getAllConnections().isEmpty())
- start();
+ restart();
}

@@ -204,16 +210,16 @@ public class KeepAliveManager implements MessageListener, ConnectionListener, Pe
}
}

- private void closeMaintenanceHandler(Connection connection) {
+ private void closeHandler(Connection connection) {
String uid = connection.getUid();
if (handlerMap.containsKey(uid)) {
- handlerMap.get(uid).cleanup();
+ handlerMap.get(uid).cancel();
handlerMap.remove(uid);
}
}

- private void closeAllMaintenanceHandlers() {
- handlerMap.values().stream().forEach(KeepAliveHandler::cleanup);
+ private void closeAllHandlers() {
+ handlerMap.values().stream().forEach(KeepAliveHandler::cancel);
handlerMap.clear();
}
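KeepAliveManager's new restart() clears the stopped flag from inside the periodically running task and only warns if the timer is already running. The sketch below reproduces that pattern with a plain ScheduledExecutorService as a stand-in for the project's UserThread/Timer utilities; KeepAliveDemo and its methods are illustrative assumptions only.

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.ScheduledFuture;
    import java.util.concurrent.TimeUnit;

    public class KeepAliveDemo {
        private static final int INTERVAL_SEC = 5;

        private final ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
        private ScheduledFuture<?> keepAliveTimer;
        private volatile boolean stopped;

        // The stopped flag is cleared inside the scheduled task, and a second
        // restart() while the timer is alive only logs a warning.
        public void restart() {
            if (keepAliveTimer == null)
                keepAliveTimer = executor.scheduleAtFixedRate(() -> {
                    stopped = false;
                    keepAlive();
                }, 0, INTERVAL_SEC, TimeUnit.SECONDS);
            else
                System.out.println("keepAliveTimer already running");
        }

        private void keepAlive() {
            if (!stopped)
                System.out.println("ping all confirmed connections");
        }

        public void shutDown() {
            stopped = true;
            if (keepAliveTimer != null)
                keepAliveTimer.cancel(false);
            executor.shutdown();
        }

        public static void main(String[] args) throws InterruptedException {
            KeepAliveDemo demo = new KeepAliveDemo();
            demo.restart();
            demo.restart(); // second call just warns
            Thread.sleep(100);
            demo.shutDown();
        }
    }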

@@ -68,7 +68,7 @@ class GetPeersRequestHandler {
checkArgument(connection.getPeersNodeAddressOptional().isPresent(),
"The peers address must have been already set at the moment");
GetPeersResponse getPeersResponse = new GetPeersResponse(getPeersRequest.nonce,
- peerManager.getConnectedPeersNonSeedNodes(connection.getPeersNodeAddressOptional().get()));
+ peerManager.getConnectedNonSeedNodeReportedPeers(connection.getPeersNodeAddressOptional().get()));
SettableFuture<Connection> future = networkNode.sendMessage(connection,
getPeersResponse);
Futures.addCallback(future, new FutureCallback<Connection>() {

@@ -65,15 +65,9 @@ class PeerExchangeHandler implements MessageListener {
this.listener = listener;
}

- public void cleanup() {
- stopped = true;
- if (connection != null)
- connection.removeMessageListener(this);
-
- if (timeoutTimer != null) {
- timeoutTimer.stop();
- timeoutTimer = null;
- }
+ public void cancel() {
+ Log.traceCall();
+ cleanup();
}

@@ -85,11 +79,12 @@ class PeerExchangeHandler implements MessageListener {
Log.traceCall("nodeAddress=" + nodeAddress + " / this=" + this);
if (!stopped) {
if (networkNode.getNodeAddress() != null) {
- GetPeersRequest getPeersRequest = new GetPeersRequest(networkNode.getNodeAddress(), nonce, peerManager.getConnectedPeersNonSeedNodes(nodeAddress));
+ GetPeersRequest getPeersRequest = new GetPeersRequest(networkNode.getNodeAddress(), nonce, peerManager.getConnectedNonSeedNodeReportedPeers(nodeAddress));
SettableFuture<Connection> future = networkNode.sendMessage(nodeAddress, getPeersRequest);
Futures.addCallback(future, new FutureCallback<Connection>() {
@Override
public void onSuccess(Connection connection) {
+ if (!stopped) {
if (!connection.getPeersNodeAddressOptional().isPresent()) {
connection.setPeersNodeAddress(nodeAddress);
//TODO remove setPeersNodeAddress if never needed

@@ -98,25 +93,36 @@ class PeerExchangeHandler implements MessageListener {
PeerExchangeHandler.this.connection = connection;
connection.addMessageListener(PeerExchangeHandler.this);
log.trace("Send " + getPeersRequest + " to " + nodeAddress + " succeeded.");
+ } else {
+ log.warn("We have stopped that handler already. We ignore that sendGetPeersRequest.onSuccess call.");
+ }
}

@Override
public void onFailure(@NotNull Throwable throwable) {
+ if (!stopped) {
String errorMessage = "Sending getPeersRequest to " + nodeAddress +
" failed. That is expected if the peer is offline.\n\tgetPeersRequest=" + getPeersRequest +
".\n\tException=" + throwable.getMessage();
log.info(errorMessage);
handleFault(errorMessage, CloseConnectionReason.SEND_MSG_FAILURE, nodeAddress);
+ } else {
+ log.warn("We have stopped that handler already. We ignore that sendGetPeersRequest.onFailure call.");
+ }
}
});

checkArgument(timeoutTimer == null, "requestReportedPeers must not be called twice.");
timeoutTimer = UserThread.runAfter(() -> {
+ if (!stopped) {
String errorMessage = "A timeout occurred at sending getPeersRequest:" + getPeersRequest + " for nodeAddress:" + nodeAddress;
log.info(errorMessage + " / PeerExchangeHandler=" +
PeerExchangeHandler.this);
log.info("timeoutTimer called on " + this);
handleFault(errorMessage, CloseConnectionReason.SEND_MSG_TIMEOUT, nodeAddress);
+ } else {
+ log.warn("We have stopped that handler already. We ignore that timeoutTimer.run call.");
+ }
},
TIME_OUT_SEC, TimeUnit.SECONDS);
} else {

@@ -162,6 +168,7 @@ class PeerExchangeHandler implements MessageListener {
///////////////////////////////////////////////////////////////////////////////////////////

private void handleFault(String errorMessage, CloseConnectionReason sendMsgFailure, NodeAddress nodeAddress) {
+ Log.traceCall();
// TODO retry
cleanup();
if (connection == null)

@@ -169,6 +176,20 @@ class PeerExchangeHandler implements MessageListener {
else
peerManager.shutDownConnection(connection, sendMsgFailure);

+ peerManager.handleConnectionFault(nodeAddress, connection);
listener.onFault(errorMessage, connection);
}

+ private void cleanup() {
+ Log.traceCall();
+ stopped = true;
+ if (connection != null)
+ connection.removeMessageListener(this);
+
+ if (timeoutTimer != null) {
+ timeoutTimer.stop();
+ timeoutTimer = null;
+ }
+ }
}

@@ -59,7 +59,7 @@ public class PeerExchangeManager implements MessageListener, ConnectionListener,

stopPeriodicTimer();
stopRetryTimer();
- closeAllPeerExchangeHandlers();
+ closeAllHandlers();
}

@@ -85,14 +85,12 @@ public class PeerExchangeManager implements MessageListener, ConnectionListener,
@Override
public void onConnection(Connection connection) {
Log.traceCall();
- // clean up in case we could not clean up at disconnect
- closePeerExchangeHandler(connection);
}

@Override
public void onDisconnect(CloseConnectionReason closeConnectionReason, Connection connection) {
Log.traceCall();
- closePeerExchangeHandler(connection);
+ closeHandler(connection);

if (retryTimer == null) {
retryTimer = UserThread.runAfter(() -> {

@@ -115,24 +113,26 @@ public class PeerExchangeManager implements MessageListener, ConnectionListener,
@Override
public void onAllConnectionsLost() {
Log.traceCall();
- closeAllPeerExchangeHandlers();
+ closeAllHandlers();
stopPeriodicTimer();
stopRetryTimer();
+ stopped = true;
+ restart();
}

@Override
public void onNewConnectionAfterAllConnectionsLost() {
Log.traceCall();
- closeAllPeerExchangeHandlers();
+ closeAllHandlers();
+ stopped = false;
restart();
}

@Override
public void onAwakeFromStandby() {
Log.traceCall();
- closeAllPeerExchangeHandlers();
+ closeAllHandlers();
+ stopped = false;
if (!networkNode.getAllConnections().isEmpty())
restart();
}

@@ -197,8 +197,6 @@ public class PeerExchangeManager implements MessageListener, ConnectionListener,
"nodeAddress={}", errorMessage, nodeAddress);

handlerMap.remove(nodeAddress);
- peerManager.handleConnectionFault(nodeAddress, connection);
- if (!stopped) {
if (!remainingNodeAddresses.isEmpty()) {
if (!peerManager.hasSufficientConnections()) {
log.info("There are remaining nodes available for requesting peers. " +

@@ -216,11 +214,15 @@ public class PeerExchangeManager implements MessageListener, ConnectionListener,
"We will try again after a pause.");
if (retryTimer == null)
retryTimer = UserThread.runAfter(() -> {
- log.trace("ConnectToMorePeersTimer called from requestReportedPeers code path");
+ if (!stopped) {
+ log.trace("retryTimer called from requestReportedPeers code path");
stopRetryTimer();
requestWithAvailablePeers();
- }, RETRY_DELAY_SEC);
+ } else {
+ stopRetryTimer();
+ log.warn("We have stopped already. We ignore that retryTimer.run call.");
}
+ }, RETRY_DELAY_SEC);
}
}
});

@@ -266,9 +268,14 @@ public class PeerExchangeManager implements MessageListener, ConnectionListener,
log.info("No more peers are available for requestReportedPeers. We will try again after a pause.");
if (retryTimer == null)
retryTimer = UserThread.runAfter(() -> {
- log.trace("ConnectToMorePeersTimer called from requestWithAvailablePeers code path");
+ if (!stopped) {
+ log.trace("retryTimer called from requestWithAvailablePeers code path");
stopRetryTimer();
requestWithAvailablePeers();
+ } else {
+ stopRetryTimer();
+ log.warn("We have stopped already. We ignore that retryTimer.run call.");
+ }
}, RETRY_DELAY_SEC);
}
} else {

@@ -289,6 +296,8 @@ public class PeerExchangeManager implements MessageListener, ConnectionListener,
if (periodicTimer == null)
periodicTimer = UserThread.runPeriodically(this::requestWithAvailablePeers,
REQUEST_PERIODICALLY_INTERVAL_MINUTES, TimeUnit.MINUTES);
+ else
+ log.warn("periodicTimer already started");
}

private void restart() {

@@ -296,10 +305,13 @@ public class PeerExchangeManager implements MessageListener, ConnectionListener,

if (retryTimer == null) {
retryTimer = UserThread.runAfter(() -> {
- log.trace("ConnectToMorePeersTimer called from onNewConnectionAfterAllConnectionsLost");
+ stopped = false;
+ log.trace("retryTimer called from restart");
stopRetryTimer();
requestWithAvailablePeers();
}, RETRY_DELAY_AFTER_ALL_CON_LOST_SEC);
+ } else {
+ log.warn("retryTimer already started");
}
}

@@ -338,19 +350,23 @@ public class PeerExchangeManager implements MessageListener, ConnectionListener,
}
}

- private void closePeerExchangeHandler(Connection connection) {
+ private void closeHandler(Connection connection) {
+ Log.traceCall();
Optional<NodeAddress> peersNodeAddressOptional = connection.getPeersNodeAddressOptional();
if (peersNodeAddressOptional.isPresent()) {
NodeAddress nodeAddress = peersNodeAddressOptional.get();
if (handlerMap.containsKey(nodeAddress)) {
- handlerMap.get(nodeAddress).cleanup();
+ handlerMap.get(nodeAddress).cancel();
handlerMap.remove(nodeAddress);
}
+ } else {
+ log.warn("closeHandler: nodeAddress not set in connection " + connection);
}
}

- private void closeAllPeerExchangeHandlers() {
- handlerMap.values().stream().forEach(PeerExchangeHandler::cleanup);
+ private void closeAllHandlers() {
+ Log.traceCall();
+ handlerMap.values().stream().forEach(PeerExchangeHandler::cancel);
handlerMap.clear();
}
}
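The retry timers in this class now re-check the stopped flag before re-requesting and shut themselves down otherwise. The sketch below shows the same guarded one-shot retry in isolation, using ScheduledExecutorService in place of the project's UserThread timer; RetryDemo and its method names are illustrative assumptions only.

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.ScheduledFuture;
    import java.util.concurrent.TimeUnit;

    public class RetryDemo {
        private static final long RETRY_DELAY_SEC = 10;

        private final ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
        private ScheduledFuture<?> retryTimer;
        private volatile boolean stopped;

        // The delayed task re-checks stopped; in both branches the one-shot timer
        // reference is cleared before anything else happens.
        void scheduleRetry() {
            if (retryTimer == null)
                retryTimer = executor.schedule(() -> {
                    if (!stopped) {
                        stopRetryTimer();
                        requestWithAvailablePeers();
                    } else {
                        stopRetryTimer();
                        System.out.println("We have stopped already. We ignore that retryTimer.run call.");
                    }
                }, RETRY_DELAY_SEC, TimeUnit.SECONDS);
        }

        private void stopRetryTimer() {
            if (retryTimer != null) {
                retryTimer.cancel(false);
                retryTimer = null;
            }
        }

        private void requestWithAvailablePeers() {
            System.out.println("request reported peers from a remaining candidate");
        }

        public static void main(String[] args) {
            RetryDemo demo = new RetryDemo();
            demo.scheduleRetry();
            demo.executor.shutdownNow(); // demo only; discards the pending retry
        }
    }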
|
|
@@ -1,15 +1,14 @@
 package io.bitsquare.p2p.storage;
 
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.util.concurrent.MoreExecutors;
 import io.bitsquare.app.Log;
 import io.bitsquare.app.Version;
+import io.bitsquare.common.Timer;
 import io.bitsquare.common.UserThread;
 import io.bitsquare.common.crypto.CryptoException;
 import io.bitsquare.common.crypto.Hash;
 import io.bitsquare.common.crypto.Sig;
 import io.bitsquare.common.persistance.Persistable;
-import io.bitsquare.common.util.Utilities;
 import io.bitsquare.common.wire.Payload;
 import io.bitsquare.p2p.Message;
 import io.bitsquare.p2p.NodeAddress;
@@ -30,7 +29,6 @@ import java.security.PublicKey;
 import java.util.*;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.CopyOnWriteArraySet;
-import java.util.concurrent.ScheduledThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 
 // Run in UserThread
@@ -39,15 +37,14 @@ public class P2PDataStorage implements MessageListener, ConnectionListener {
 
     @VisibleForTesting
     //public static int CHECK_TTL_INTERVAL_MILLIS = (int) TimeUnit.SECONDS.toMillis(30);
-    public static int CHECK_TTL_INTERVAL_MILLIS = (int) TimeUnit.HOURS.toMillis(30);
-    // public static int CHECK_TTL_INTERVAL_MILLIS = (int) TimeUnit.SECONDS.toMillis(5);//TODO
+    public static int CHECK_TTL_INTERVAL_SEC = 5;//TODO
 
     private final Broadcaster broadcaster;
     private final Map<ByteArray, ProtectedData> map = new ConcurrentHashMap<>();
     private final CopyOnWriteArraySet<HashMapChangedListener> hashMapChangedListeners = new CopyOnWriteArraySet<>();
+    private Timer removeExpiredEntriesTimer;
    private HashMap<ByteArray, MapValue> sequenceNumberMap = new HashMap<>();
    private final Storage<HashMap> storage;
-    private final ScheduledThreadPoolExecutor removeExpiredEntriesExecutor;
 
     ///////////////////////////////////////////////////////////////////////////////////////////
     // Constructor
@@ -60,21 +57,25 @@ public class P2PDataStorage implements MessageListener, ConnectionListener {
         networkNode.addConnectionListener(this);
 
         storage = new Storage<>(storageDir);
-        removeExpiredEntriesExecutor = Utilities.getScheduledThreadPoolExecutor("removeExpiredEntries", 1, 10, 5);
 
         HashMap<ByteArray, MapValue> persisted = storage.initAndGetPersisted("SequenceNumberMap");
         if (persisted != null)
             sequenceNumberMap = getPurgedSequenceNumberMap(persisted);
     }
 
+    public void shutDown() {
+        if (removeExpiredEntriesTimer != null)
+            removeExpiredEntriesTimer.stop();
+    }
+
     public void onBootstrapComplete() {
-        removeExpiredEntriesExecutor.scheduleAtFixedRate(() -> UserThread.execute(() -> {
+        removeExpiredEntriesTimer = UserThread.runPeriodically(() -> {
             log.trace("removeExpiredEntries");
             // The moment when an object becomes expired will not be synchronous in the network and we could
             // get add messages after the object has expired. To avoid repeated additions of already expired
             // object when we get it sent from new peers, we don’t remove the sequence number from the map.
             // That way an ADD message for an already expired data will fail because the sequence number
-            // is equal and not larger.
+            // is equal and not larger as expected.
             Map<ByteArray, ProtectedData> temp = new HashMap<>(map);
             Set<ProtectedData> toRemoveSet = new HashSet<>();
             temp.entrySet().stream()
@@ -83,7 +84,7 @@ public class P2PDataStorage implements MessageListener, ConnectionListener {
                         ByteArray hashOfPayload = entry.getKey();
                         ProtectedData protectedData = map.get(hashOfPayload);
                         toRemoveSet.add(protectedData);
-                        log.error("remove protectedData:\n\t" + protectedData);
+                        log.warn("We found an expired data entry. We remove the protectedData:\n\t" + protectedData);
                         map.remove(hashOfPayload);
                     });
 
@@ -93,8 +94,7 @@ public class P2PDataStorage implements MessageListener, ConnectionListener {
 
             if (sequenceNumberMap.size() > 1000)
                 sequenceNumberMap = getPurgedSequenceNumberMap(sequenceNumberMap);
-        }), CHECK_TTL_INTERVAL_MILLIS, CHECK_TTL_INTERVAL_MILLIS, TimeUnit.MILLISECONDS);
+        }, CHECK_TTL_INTERVAL_SEC);
     }
 
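Note: the hunks above swap the ScheduledThreadPoolExecutor for a UserThread timer and reword the expiry comments, but the sweep logic itself stays the same: expired entries are dropped from the data map while their sequence numbers are kept, so a late ADD that re-sends the expired payload with the same sequence number is still rejected. A minimal, self-contained sketch of that idea with plain JDK types follows (the map layout, field names and the strictly-greater rule are illustrative simplifications, not the project's actual P2PDataStorage API):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Simplified model of the periodic expiry sweep: entries expire, sequence numbers do not.
public class ExpirySweepSketch {
    static final class Entry {
        final long createdAtMillis = System.currentTimeMillis();
        final long ttlMillis;
        final int sequenceNumber;

        Entry(long ttlMillis, int sequenceNumber) {
            this.ttlMillis = ttlMillis;
            this.sequenceNumber = sequenceNumber;
        }

        boolean isExpired() {
            return System.currentTimeMillis() - createdAtMillis > ttlMillis;
        }
    }

    private final Map<String, Entry> dataMap = new ConcurrentHashMap<>();
    // Kept across expiry on purpose: a re-broadcast of an expired entry arrives with an
    // equal (not larger) sequence number and is therefore rejected by add().
    private final Map<String, Integer> sequenceNumberMap = new ConcurrentHashMap<>();

    void startSweep(ScheduledExecutorService executor, long intervalSec) {
        // Only the data map is purged; sequenceNumberMap is left untouched.
        executor.scheduleAtFixedRate(
                () -> dataMap.entrySet().removeIf(e -> e.getValue().isExpired()),
                intervalSec, intervalSec, TimeUnit.SECONDS);
    }

    boolean add(String hashOfPayload, Entry entry) {
        Integer stored = sequenceNumberMap.get(hashOfPayload);
        if (stored != null && entry.sequenceNumber <= stored)
            return false; // equal or smaller sequence number: stale or already expired data
        sequenceNumberMap.put(hashOfPayload, entry.sequenceNumber);
        dataMap.put(hashOfPayload, entry);
        return true;
    }

    public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
        ExpirySweepSketch storage = new ExpirySweepSketch();
        storage.startSweep(executor, 1);

        storage.add("offerHash", new Entry(500, 1));
        Thread.sleep(2000);                          // let the entry expire and be swept
        System.out.println(storage.dataMap.size());  // 0 - the data itself is gone
        System.out.println(storage.add("offerHash",
                new Entry(500, 1)));                 // false - its sequence number survived
        executor.shutdown();
    }
}
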
@@ -166,11 +166,6 @@ public class P2PDataStorage implements MessageListener, ConnectionListener {
     // API
     ///////////////////////////////////////////////////////////////////////////////////////////
 
-    public void shutDown() {
-        Log.traceCall();
-        MoreExecutors.shutdownAndAwaitTermination(removeExpiredEntriesExecutor, 500, TimeUnit.MILLISECONDS);
-    }
-
     public boolean add(ProtectedData protectedData, @Nullable NodeAddress sender) {
         return add(protectedData, sender, false);
     }
@@ -190,13 +185,9 @@ public class P2PDataStorage implements MessageListener, ConnectionListener {
         if (result) {
             map.put(hashOfPayload, protectedData);
 
-            // Republished data have a larger sequence number. We set the rePublish flag to enable broadcasting
-            // even we had the data with the old seq nr. already
-            if (sequenceNumberMap.containsKey(hashOfPayload) &&
-                    protectedData.sequenceNumber > sequenceNumberMap.get(hashOfPayload).sequenceNr)
-
             sequenceNumberMap.put(hashOfPayload, new MapValue(protectedData.sequenceNumber, System.currentTimeMillis()));
-            storage.queueUpForSave(sequenceNumberMap, 5000);
+            storage.queueUpForSave(sequenceNumberMap, 100);
+            log.error("sequenceNumberMap queueUpForSave protectedData.sequenceNumber " + protectedData.sequenceNumber);
 
             StringBuilder sb = new StringBuilder("\n\n------------------------------------------------------------\n");
             sb.append("Data set after doAdd (truncated)");
@@ -230,7 +221,7 @@ public class P2PDataStorage implements MessageListener, ConnectionListener {
 
         if (storedData.expirablePayload instanceof StoragePayload) {
             if (sequenceNumberMap.containsKey(hashOfPayload) && sequenceNumberMap.get(hashOfPayload).sequenceNr == sequenceNumber) {
-                log.warn("We got that message with that seq nr already from another peer. We ignore that message.");
+                log.trace("We got that message with that seq nr already from another peer. We ignore that message.");
                 return true;
             } else {
                 PublicKey ownerPubKey = ((StoragePayload) storedData.expirablePayload).getOwnerPubKey();
@@ -239,11 +230,14 @@ public class P2PDataStorage implements MessageListener, ConnectionListener {
                         checkIfStoredDataPubKeyMatchesNewDataPubKey(ownerPubKey, hashOfPayload);
 
                 if (result) {
-                    log.error("refreshDate called for storedData:\n\t" + StringUtils.abbreviate(storedData.toString(), 100));
+                    log.info("refreshDate called for storedData:\n\t" + StringUtils.abbreviate(storedData.toString(), 100));
                     storedData.refreshDate();
+                    storedData.updateSequenceNumber(sequenceNumber);
+                    storedData.updateSignature(signature);
 
                     sequenceNumberMap.put(hashOfPayload, new MapValue(sequenceNumber, System.currentTimeMillis()));
-                    storage.queueUpForSave(sequenceNumberMap, 5000);
+                    storage.queueUpForSave(sequenceNumberMap, 100);
+                    log.error("sequenceNumberMap queueUpForSave sequenceNumber " + sequenceNumber);
 
                     StringBuilder sb = new StringBuilder("\n\n------------------------------------------------------------\n");
                     sb.append("Data set after refreshTTL (truncated)");
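Note: the refreshTTL hunk above is the heart of the fix named in the commit message. Previously only the entry's date was refreshed, so the stored ProtectedData kept the sequence number and signature of an earlier publish cycle; with updateSequenceNumber and updateSignature the stored entry now carries the values from the accepted refresh message. A small sketch of that invariant (illustrative names only, not the project's ProtectedData/RefreshTTLMessage types):

// Sketch: date, sequence number and signature of a stored entry must move together at a refresh,
// otherwise later checks still compare against the values of an older refresh cycle.
final class StoredEntrySketch {
    private long lastRefreshMillis = System.currentTimeMillis();
    private int sequenceNumber;
    private byte[] signature;      // made over hash(payload, sequenceNumber)

    StoredEntrySketch(int sequenceNumber, byte[] signature) {
        this.sequenceNumber = sequenceNumber;
        this.signature = signature;
    }

    // Equivalent of refreshDate() + updateSequenceNumber() + updateSignature() in one step.
    boolean refresh(int newSequenceNumber, byte[] newSignature) {
        if (newSequenceNumber <= sequenceNumber)
            return false;          // stale or repeated refresh: keep the current state
        sequenceNumber = newSequenceNumber;
        signature = newSignature;
        lastRefreshMillis = System.currentTimeMillis();
        return true;
    }

    boolean isExpired(long ttlMillis) {
        return System.currentTimeMillis() - lastRefreshMillis > ttlMillis;
    }
}
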
@@ -286,7 +280,7 @@ public class P2PDataStorage implements MessageListener, ConnectionListener {
             broadcast(new RemoveDataMessage(protectedData), sender);
 
             sequenceNumberMap.put(hashOfPayload, new MapValue(protectedData.sequenceNumber, System.currentTimeMillis()));
-            storage.queueUpForSave(sequenceNumberMap, 5000);
+            storage.queueUpForSave(sequenceNumberMap, 100);
         } else {
             log.debug("remove failed");
         }
@@ -311,7 +305,7 @@ public class P2PDataStorage implements MessageListener, ConnectionListener {
             broadcast(new RemoveMailboxDataMessage(protectedMailboxData), sender);
 
             sequenceNumberMap.put(hashOfData, new MapValue(protectedMailboxData.sequenceNumber, System.currentTimeMillis()));
-            storage.queueUpForSave(sequenceNumberMap, 5000);
+            storage.queueUpForSave(sequenceNumberMap, 100);
         } else {
             log.debug("removeMailboxData failed");
         }
@@ -332,6 +326,8 @@ public class P2PDataStorage implements MessageListener, ConnectionListener {
         else
             sequenceNumber = 0;
 
+        log.error("getProtectedData sequenceNumber " + sequenceNumber);
+
         byte[] hashOfDataAndSeqNr = Hash.getHash(new DataAndSeqNrPair(payload, sequenceNumber));
         byte[] signature = Sig.sign(ownerStoragePubKey.getPrivate(), hashOfDataAndSeqNr);
         return new ProtectedData(payload, payload.getTTL(), ownerStoragePubKey.getPublic(), sequenceNumber, signature);
@@ -346,6 +342,8 @@ public class P2PDataStorage implements MessageListener, ConnectionListener {
         else
             sequenceNumber = 0;
 
+        log.error("getRefreshTTLMessage sequenceNumber " + sequenceNumber);
+
         byte[] hashOfDataAndSeqNr = Hash.getHash(new DataAndSeqNrPair(payload, sequenceNumber));
         byte[] signature = Sig.sign(ownerStoragePubKey.getPrivate(), hashOfDataAndSeqNr);
         return new RefreshTTLMessage(hashOfDataAndSeqNr, signature, hashOfPayload.bytes, sequenceNumber);
@@ -393,7 +391,7 @@ public class P2PDataStorage implements MessageListener, ConnectionListener {
         if (sequenceNumberMap.containsKey(hashOfData)) {
             Integer storedSequenceNumber = sequenceNumberMap.get(hashOfData).sequenceNr;
             if (newSequenceNumber < storedSequenceNumber) {
-                log.warn("Sequence number is invalid. newSequenceNumber="
+                log.warn("Sequence number is invalid. sequenceNumber = "
                         + newSequenceNumber + " / storedSequenceNumber=" + storedSequenceNumber);
                 return false;
             } else {
@@ -19,8 +19,8 @@ public class ProtectedData implements Payload {
     transient public long ttl;
 
     public final PublicKey ownerPubKey;
-    public final int sequenceNumber;
-    public final byte[] signature;
+    public int sequenceNumber;
+    public byte[] signature;
     @VisibleForTesting
     transient public Date date;
 
@@ -49,6 +49,14 @@ public class ProtectedData implements Payload {
         date = new Date();
     }
 
+    public void updateSequenceNumber(int sequenceNumber) {
+        this.sequenceNumber = sequenceNumber;
+    }
+
+    public void updateSignature(byte[] signature) {
+        this.signature = signature;
+    }
+
     public boolean isExpired() {
         return (new Date().getTime() - date.getTime()) > ttl;
     }
@@ -64,4 +72,5 @@ public class ProtectedData implements Payload {
                 ", signature.hashCode()=" + Arrays.toString(signature).hashCode() +
                 '}';
     }
+
 }
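Note: sequenceNumber and signature lose their final modifier above so that the new updateSequenceNumber/updateSignature setters can be applied to the entry that is already stored. The reason a fresh signature is needed at all is visible in getProtectedData and getRefreshTTLMessage earlier in the diff: the owner signs a hash over the payload together with the sequence number, so a signature made for sequence number n no longer verifies once the entry claims n + 1. A hedged, stand-alone illustration with plain JCA follows (RSA and SHA-256 are chosen here purely for the example; the project's Sig and Hash utilities are not reproduced):

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.security.KeyPair;
import java.security.KeyPairGenerator;
import java.security.MessageDigest;
import java.security.PrivateKey;
import java.security.PublicKey;
import java.security.Signature;

// Sketch: the signature covers the payload bytes AND the sequence number, so every bump of the
// sequence number (e.g. when an offer is refreshed) requires signing again.
public class SeqNrSignatureSketch {

    static byte[] hashOfDataAndSeqNr(byte[] payload, int sequenceNumber) throws Exception {
        MessageDigest digest = MessageDigest.getInstance("SHA-256");
        digest.update(payload);
        digest.update(ByteBuffer.allocate(Integer.BYTES).putInt(sequenceNumber).array());
        return digest.digest();
    }

    static byte[] sign(PrivateKey key, byte[] data) throws Exception {
        Signature sig = Signature.getInstance("SHA256withRSA");
        sig.initSign(key);
        sig.update(data);
        return sig.sign();
    }

    static boolean verify(PublicKey key, byte[] data, byte[] signature) throws Exception {
        Signature sig = Signature.getInstance("SHA256withRSA");
        sig.initVerify(key);
        sig.update(data);
        return sig.verify(signature);
    }

    public static void main(String[] args) throws Exception {
        KeyPair ownerKeyPair = KeyPairGenerator.getInstance("RSA").generateKeyPair();
        byte[] payload = "offer payload".getBytes(StandardCharsets.UTF_8);

        byte[] signatureForSeqNr1 = sign(ownerKeyPair.getPrivate(), hashOfDataAndSeqNr(payload, 1));

        // Verifies against the sequence number it was made for ...
        System.out.println(verify(ownerKeyPair.getPublic(), hashOfDataAndSeqNr(payload, 1), signatureForSeqNr1)); // true
        // ... but not once the sequence number has been bumped, hence the new setters above.
        System.out.println(verify(ownerKeyPair.getPublic(), hashOfDataAndSeqNr(payload, 2), signatureForSeqNr1)); // false
    }
}
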
@@ -56,7 +56,7 @@ public class ProtectedDataStorageTest {
         dir2.mkdir();
 
         UserThread.setExecutor(Executors.newSingleThreadExecutor());
-        P2PDataStorage.CHECK_TTL_INTERVAL_MILLIS = 500;
+        P2PDataStorage.CHECK_TTL_INTERVAL_SEC = 500;
 
         keyRing1 = new KeyRing(new KeyStorage(dir1));
 
@@ -112,7 +112,7 @@ public class ProtectedDataStorageTest {
 
     // @Test
     public void testTTL() throws InterruptedException, NoSuchAlgorithmException, CertificateException, KeyStoreException, IOException, CryptoException, SignatureException, InvalidKeyException, NoSuchProviderException {
-        mockData.ttl = (int) (P2PDataStorage.CHECK_TTL_INTERVAL_MILLIS * 1.5);
+        mockData.ttl = (int) (P2PDataStorage.CHECK_TTL_INTERVAL_SEC * 1.5);
         ProtectedData data = dataStorage1.getProtectedData(mockData, storageSignatureKeyPair1);
         log.debug("data.date " + data.date);
         log.debug("data.date " + data.date.getTime());
@@ -120,11 +120,11 @@ public class ProtectedDataStorageTest {
         log.debug("test 1");
         Assert.assertEquals(1, dataStorage1.getMap().size());
 
-        Thread.sleep(P2PDataStorage.CHECK_TTL_INTERVAL_MILLIS);
+        Thread.sleep(P2PDataStorage.CHECK_TTL_INTERVAL_SEC);
         log.debug("test 2");
         Assert.assertEquals(1, dataStorage1.getMap().size());
 
-        Thread.sleep(P2PDataStorage.CHECK_TTL_INTERVAL_MILLIS * 2);
+        Thread.sleep(P2PDataStorage.CHECK_TTL_INTERVAL_SEC * 2);
         log.debug("test 3 removed");
         Assert.assertEquals(0, dataStorage1.getMap().size());
     }
@@ -162,31 +162,31 @@ public class ProtectedDataStorageTest {
     */
     @Test
     public void testRefreshTTL() throws InterruptedException, NoSuchAlgorithmException, CertificateException, KeyStoreException, IOException, CryptoException, SignatureException, InvalidKeyException, NoSuchProviderException {
-        mockData.ttl = (int) (P2PDataStorage.CHECK_TTL_INTERVAL_MILLIS * 1.5);
+        mockData.ttl = (int) (P2PDataStorage.CHECK_TTL_INTERVAL_SEC * 1.5);
         ProtectedData data = dataStorage1.getProtectedData(mockData, storageSignatureKeyPair1);
         Assert.assertTrue(dataStorage1.add(data, null));
         Assert.assertEquals(1, dataStorage1.getMap().size());
-        Thread.sleep(P2PDataStorage.CHECK_TTL_INTERVAL_MILLIS);
+        Thread.sleep(P2PDataStorage.CHECK_TTL_INTERVAL_SEC);
         log.debug("test 1");
         Assert.assertEquals(1, dataStorage1.getMap().size());
 
         RefreshTTLMessage refreshTTLMessage = dataStorage1.getRefreshTTLMessage(mockData, storageSignatureKeyPair1);
         Assert.assertTrue(dataStorage1.refreshTTL(refreshTTLMessage, null));
-        Thread.sleep(P2PDataStorage.CHECK_TTL_INTERVAL_MILLIS);
+        Thread.sleep(P2PDataStorage.CHECK_TTL_INTERVAL_SEC);
         log.debug("test 2");
         Assert.assertEquals(1, dataStorage1.getMap().size());
 
         refreshTTLMessage = dataStorage1.getRefreshTTLMessage(mockData, storageSignatureKeyPair1);
         Assert.assertTrue(dataStorage1.refreshTTL(refreshTTLMessage, null));
-        Thread.sleep(P2PDataStorage.CHECK_TTL_INTERVAL_MILLIS);
+        Thread.sleep(P2PDataStorage.CHECK_TTL_INTERVAL_SEC);
         log.debug("test 3");
         Assert.assertEquals(1, dataStorage1.getMap().size());
 
-        Thread.sleep(P2PDataStorage.CHECK_TTL_INTERVAL_MILLIS);
+        Thread.sleep(P2PDataStorage.CHECK_TTL_INTERVAL_SEC);
         log.debug("test 4");
         Assert.assertEquals(1, dataStorage1.getMap().size());
 
-        Thread.sleep(P2PDataStorage.CHECK_TTL_INTERVAL_MILLIS * 2);
+        Thread.sleep(P2PDataStorage.CHECK_TTL_INTERVAL_SEC * 2);
         log.debug("test 5 removed");
         Assert.assertEquals(0, dataStorage1.getMap().size());
     }