From 356cb6412fbfda050dd37d8a680bb6b13351b52f Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Tue, 21 Jan 2025 10:46:59 +0100 Subject: [PATCH 001/123] Optimise structure overhead --- LXMF/LXMPeer.py | 30 +++++++++++++++++------------- LXMF/LXMRouter.py | 14 +++++++++++++- 2 files changed, 30 insertions(+), 14 deletions(-) diff --git a/LXMF/LXMPeer.py b/LXMF/LXMPeer.py index a88f6da..2b10987 100644 --- a/LXMF/LXMPeer.py +++ b/LXMF/LXMPeer.py @@ -63,12 +63,13 @@ class LXMPeer: for transient_id in dictionary["handled_ids"]: if transient_id in router.propagation_entries: - peer.handled_messages[transient_id] = router.propagation_entries[transient_id] + peer.handled_messages.append(transient_id) for transient_id in dictionary["unhandled_ids"]: if transient_id in router.propagation_entries: - peer.unhandled_messages[transient_id] = router.propagation_entries[transient_id] + peer.unhandled_messages.append(transient_id) + del dictionary return peer def to_bytes(self): @@ -108,8 +109,8 @@ class LXMPeer: self.link = None self.state = LXMPeer.IDLE - self.unhandled_messages = {} - self.handled_messages = {} + self.unhandled_messages = [] + self.handled_messages = [] self.last_offer = [] self.router = router @@ -118,6 +119,7 @@ class LXMPeer: if self.identity != None: self.destination = RNS.Destination(self.identity, RNS.Destination.OUT, RNS.Destination.SINGLE, APP_NAME, "propagation") else: + self.destination = None RNS.log(f"Could not recall identity for LXMF propagation peer {RNS.prettyhexrep(self.destination_hash)}, will retry identity resolution on next sync", RNS.LOG_WARNING) def sync(self): @@ -171,7 +173,7 @@ class LXMPeer: for transient_id in purged_ids: RNS.log("Dropping unhandled message "+RNS.prettyhexrep(transient_id)+" for peer "+RNS.prettyhexrep(self.destination_hash)+" since it no longer exists in the message store.", RNS.LOG_DEBUG) - self.unhandled_messages.pop(transient_id) + self.unhandled_messages.remove(transient_id) unhandled_entries.sort(key=lambda e: e[1], reverse=False) per_message_overhead = 16 # Really only 2 bytes, but set a bit higher for now @@ -189,7 +191,7 @@ class LXMPeer: RNS.log("Sending sync request to peer "+str(self.destination), RNS.LOG_DEBUG) self.last_offer = unhandled_ids - self.link.request(LXMPeer.OFFER_REQUEST_PATH, self.last_offer, response_callback=self.offer_response, failed_callback=self.request_failed) + self.link.request(LXMPeer.OFFER_REQUEST_PATH, unhandled_ids, response_callback=self.offer_response, failed_callback=self.request_failed) self.state = LXMPeer.REQUEST_SENT else: @@ -226,13 +228,14 @@ class LXMPeer: # Peer already has all advertised messages for transient_id in self.last_offer: if transient_id in self.unhandled_messages: - self.handled_messages[transient_id] = self.unhandled_messages.pop(transient_id) + self.handled_messages.append(transient_id) + self.unhandled_messages.remove(transient_id) elif response == True: # Peer wants all advertised messages for transient_id in self.last_offer: - wanted_messages.append(self.unhandled_messages[transient_id]) + wanted_messages.append(self.router.propagation_entries[transient_id]) wanted_message_ids.append(transient_id) else: @@ -242,10 +245,11 @@ class LXMPeer: # already received it from another peer. 
if not transient_id in response: if transient_id in self.unhandled_messages: - self.handled_messages[transient_id] = self.unhandled_messages.pop(transient_id) + self.handled_messages.append(transient_id) + self.unhandled_messages.remove(transient_id) for transient_id in response: - wanted_messages.append(self.unhandled_messages[transient_id]) + wanted_messages.append(self.router.propagation_entries[transient_id]) wanted_message_ids.append(transient_id) if len(wanted_messages) > 0: @@ -288,8 +292,8 @@ class LXMPeer: def resource_concluded(self, resource): if resource.status == RNS.Resource.COMPLETE: for transient_id in resource.transferred_messages: - message = self.unhandled_messages.pop(transient_id) - self.handled_messages[transient_id] = message + self.handled_messages.append(transient_id) + self.unhandled_messages.remove(transient_id) if self.link != None: self.link.teardown() @@ -330,7 +334,7 @@ class LXMPeer: def handle_message(self, transient_id): if not transient_id in self.handled_messages and not transient_id in self.unhandled_messages: - self.unhandled_messages[transient_id] = self.router.propagation_entries[transient_id] + self.unhandled_messages.append(transient_id) def __str__(self): if self.destination_hash: diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 79678c6..a19f401 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -1,5 +1,6 @@ import os import time +import math import random import base64 import atexit @@ -427,6 +428,8 @@ class LXMRouter: os.makedirs(self.messagepath) self.propagation_entries = {} + + st = time.time(); RNS.log("Indexing messagestore...", RNS.LOG_NOTICE) for filename in os.listdir(self.messagepath): components = filename.split("_") if len(components) == 2: @@ -452,9 +455,13 @@ class LXMRouter: except Exception as e: RNS.log("Could not read LXM from message store. The contained exception was: "+str(e), RNS.LOG_ERROR) + et = time.time(); RNS.log(f"Indexed {len(self.propagation_entries)} messages in {RNS.prettytime(et-st)}, {math.floor(len(self.propagation_entries)/(et-st))} msgs/s", RNS.LOG_NOTICE) + st = time.time(); RNS.log("Loading propagation node peers...", RNS.LOG_NOTICE) + if os.path.isfile(self.storagepath+"/peers"): peers_file = open(self.storagepath+"/peers", "rb") peers_data = peers_file.read() + peers_file.close() if len(peers_data) > 0: serialised_peers = msgpack.unpackb(peers_data) @@ -468,8 +475,13 @@ class LXMRouter: lim_str = ", "+RNS.prettysize(peer.propagation_transfer_limit*1000)+" transfer limit" RNS.log("Loaded peer "+RNS.prettyhexrep(peer.destination_hash)+" with "+str(len(peer.unhandled_messages))+" unhandled messages"+lim_str, RNS.LOG_DEBUG) else: + del peer RNS.log("Peer "+RNS.prettyhexrep(peer.destination_hash)+" could not be loaded, because its identity could not be recalled. 
Dropping peer.", RNS.LOG_DEBUG) + del serialised_peers + del peers_data + + RNS.log(f"Loaded {len(self.peers)} peers in {RNS.prettytime(time.time()-st)}", RNS.LOG_NOTICE) self.propagation_node = True self.propagation_destination.set_link_established_callback(self.propagation_link_established) @@ -1676,7 +1688,7 @@ class LXMRouter: if remote_hash != None and remote_hash in self.peers: transient_id = RNS.Identity.full_hash(lxmf_data) peer = self.peers[remote_hash] - peer.handled_messages[transient_id] = [transient_id, remote_timebase, lxmf_data] + peer.handled_messages.append(transient_id) self.lxmf_propagation(lxmf_data) else: From 7701f326d99b20bfed3d64c3a80809e02755a06f Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Tue, 21 Jan 2025 16:33:39 +0100 Subject: [PATCH 002/123] Memory optimisations --- LXMF/LXMPeer.py | 120 +++++++++++++++++++++++++++++++------ LXMF/LXMRouter.py | 149 +++++++++++++++++++++++++++++++++------------- 2 files changed, 209 insertions(+), 60 deletions(-) diff --git a/LXMF/LXMPeer.py b/LXMF/LXMPeer.py index 2b10987..f4c522c 100644 --- a/LXMF/LXMPeer.py +++ b/LXMF/LXMPeer.py @@ -38,11 +38,16 @@ class LXMPeer: @staticmethod def from_bytes(peer_bytes, router): dictionary = msgpack.unpackb(peer_bytes) + peer_destination_hash = dictionary["destination_hash"] + peer_peering_timebase = dictionary["peering_timebase"] + peer_alive = dictionary["alive"] + peer_last_heard = dictionary["last_heard"] + + peer = LXMPeer(router, peer_destination_hash) + peer.peering_timebase = peer_peering_timebase + peer.alive = peer_alive + peer.last_heard = peer_last_heard - peer = LXMPeer(router, dictionary["destination_hash"]) - peer.peering_timebase = dictionary["peering_timebase"] - peer.alive = dictionary["alive"] - peer.last_heard = dictionary["last_heard"] if "link_establishment_rate" in dictionary: peer.link_establishment_rate = dictionary["link_establishment_rate"] else: @@ -61,13 +66,22 @@ class LXMPeer: else: peer.propagation_transfer_limit = None + hm_count = 0 for transient_id in dictionary["handled_ids"]: if transient_id in router.propagation_entries: - peer.handled_messages.append(transient_id) + peer.add_handled_message(transient_id) + hm_count += 1 + um_count = 0 for transient_id in dictionary["unhandled_ids"]: if transient_id in router.propagation_entries: - peer.unhandled_messages.append(transient_id) + peer.add_unhandled_message(transient_id) + um_count += 1 + + peer._hm_count = hm_count + peer._um_count = um_count + peer._hm_counts_synced = True + peer._um_counts_synced = True del dictionary return peer @@ -93,7 +107,10 @@ class LXMPeer: dictionary["handled_ids"] = handled_ids dictionary["unhandled_ids"] = unhandled_ids - return msgpack.packb(dictionary) + peer_bytes = msgpack.packb(dictionary) + del dictionary + + return peer_bytes def __init__(self, router, destination_hash): self.alive = False @@ -106,11 +123,14 @@ class LXMPeer: self.sync_transfer_rate = 0 self.propagation_transfer_limit = None + self._hm_count = 0 + self._um_count = 0 + self._hm_counts_synced = False + self._um_counts_synced = False + self.link = None self.state = LXMPeer.IDLE - self.unhandled_messages = [] - self.handled_messages = [] self.last_offer = [] self.router = router @@ -173,7 +193,7 @@ class LXMPeer: for transient_id in purged_ids: RNS.log("Dropping unhandled message "+RNS.prettyhexrep(transient_id)+" for peer "+RNS.prettyhexrep(self.destination_hash)+" since it no longer exists in the message store.", RNS.LOG_DEBUG) - self.unhandled_messages.remove(transient_id) + 
self.remove_unhandled_message(transient_id) unhandled_entries.sort(key=lambda e: e[1], reverse=False) per_message_overhead = 16 # Really only 2 bytes, but set a bit higher for now @@ -228,8 +248,8 @@ class LXMPeer: # Peer already has all advertised messages for transient_id in self.last_offer: if transient_id in self.unhandled_messages: - self.handled_messages.append(transient_id) - self.unhandled_messages.remove(transient_id) + self.add_handled_message(transient_id) + self.remove_unhandled_message(transient_id) elif response == True: @@ -244,9 +264,8 @@ class LXMPeer: # If the peer did not want the message, it has # already received it from another peer. if not transient_id in response: - if transient_id in self.unhandled_messages: - self.handled_messages.append(transient_id) - self.unhandled_messages.remove(transient_id) + self.add_handled_message(transient_id) + self.remove_unhandled_message(transient_id) for transient_id in response: wanted_messages.append(self.router.propagation_entries[transient_id]) @@ -292,8 +311,8 @@ class LXMPeer: def resource_concluded(self, resource): if resource.status == RNS.Resource.COMPLETE: for transient_id in resource.transferred_messages: - self.handled_messages.append(transient_id) - self.unhandled_messages.remove(transient_id) + self.add_handled_message(transient_id) + self.remove_unhandled_message(transient_id) if self.link != None: self.link.teardown() @@ -332,9 +351,72 @@ class LXMPeer: self.link = None self.state = LXMPeer.IDLE - def handle_message(self, transient_id): + def new_propagation_message(self, transient_id): if not transient_id in self.handled_messages and not transient_id in self.unhandled_messages: - self.unhandled_messages.append(transient_id) + self.add_unhandled_message(transient_id) + + @property + def handled_messages(self): + pes = self.router.propagation_entries.copy() + hm = list(filter(lambda tid: self.destination_hash in self.router.propagation_entries[tid][4], pes)) + self._hm_count = len(hm); del pes + return hm + + @property + def unhandled_messages(self): + pes = self.router.propagation_entries.copy() + um = list(filter(lambda tid: self.destination_hash in self.router.propagation_entries[tid][5], pes)) + self._um_count = len(um); del pes + return um + + @property + def handled_message_count(self): + if not self._hm_counts_synced: + self._update_counts() + + return self._hm_count + + @property + def unhandled_message_count(self): + if not self._um_counts_synced: + self._update_counts() + + return self._um_count + + def _update_counts(self): + if not self._hm_counts_synced: + RNS.log("UPDATE HM COUNTS") + hm = self.handled_messages; del hm + self._hm_counts_synced = True + + if not self._um_counts_synced: + RNS.log("UPDATE UM COUNTS") + um = self.unhandled_messages; del um + self._um_counts_synced = True + + def add_handled_message(self, transient_id): + if transient_id in self.router.propagation_entries: + if not self.destination_hash in self.router.propagation_entries[transient_id][4]: + self.router.propagation_entries[transient_id][4].append(self.destination_hash) + self._hm_counts_synced = False + + def add_unhandled_message(self, transient_id): + if transient_id in self.router.propagation_entries: + if not self.destination_hash in self.router.propagation_entries[transient_id][5]: + self.router.propagation_entries[transient_id][5].append(self.destination_hash) + self._um_count += 1 + + def remove_handled_message(self, transient_id): + if transient_id in self.router.propagation_entries: + if self.destination_hash in 
self.router.propagation_entries[transient_id][4]: + self.router.propagation_entries[transient_id][4].remove(self.destination_hash) + self._hm_counts_synced = False + + def remove_unhandled_message(self, transient_id): + if transient_id in self.router.propagation_entries: + if self.destination_hash in self.router.propagation_entries[transient_id][5]: + self.router.propagation_entries[transient_id][5].remove(self.destination_hash) + self._um_counts_synced = False def __str__(self): if self.destination_hash: diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index a19f401..9163824 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -1,9 +1,11 @@ import os +import sys import time import math import random import base64 import atexit +import signal import threading import RNS @@ -94,6 +96,9 @@ class LXMRouter: self.outbound_propagation_node = None self.outbound_propagation_link = None + if delivery_limit == None: + delivery_limit = LXMRouter.DELIVERY_LIMIT + self.message_storage_limit = None self.information_storage_limit = None self.propagation_per_transfer_limit = propagation_limit @@ -117,6 +122,7 @@ class LXMRouter: self.cost_file_lock = threading.Lock() self.ticket_file_lock = threading.Lock() self.stamp_gen_lock = threading.Lock() + self.exit_handler_running = False if identity == None: identity = RNS.Identity() @@ -221,6 +227,8 @@ class LXMRouter: RNS.log("Could not load outbound stamp costs from storage. The contained exception was: "+str(e), RNS.LOG_ERROR) atexit.register(self.exit_handler) + signal.signal(signal.SIGINT, self.sigint_handler) + signal.signal(signal.SIGTERM, self.sigterm_handler) job_thread = threading.Thread(target=self.jobloop) job_thread.setDaemon(True) @@ -446,17 +454,19 @@ class LXMRouter: file.close() self.propagation_entries[transient_id] = [ - destination_hash, - filepath, - received, - msg_size, + destination_hash, # 0: Destination hash + filepath, # 1: Storage location + received, # 2: Receive timestamp + msg_size, # 3: Message size + [], # 4: Handled peers + [], # 5: Unhandled peers ] except Exception as e: RNS.log("Could not read LXM from message store. 
The contained exception was: "+str(e), RNS.LOG_ERROR) et = time.time(); RNS.log(f"Indexed {len(self.propagation_entries)} messages in {RNS.prettytime(et-st)}, {math.floor(len(self.propagation_entries)/(et-st))} msgs/s", RNS.LOG_NOTICE) - st = time.time(); RNS.log("Loading propagation node peers...", RNS.LOG_NOTICE) + st = time.time(); RNS.log("Rebuilding peer synchronisation states...", RNS.LOG_NOTICE) if os.path.isfile(self.storagepath+"/peers"): peers_file = open(self.storagepath+"/peers", "rb") @@ -465,23 +475,25 @@ class LXMRouter: if len(peers_data) > 0: serialised_peers = msgpack.unpackb(peers_data) + del peers_data - for serialised_peer in serialised_peers: + while len(serialised_peers) > 0: + serialised_peer = serialised_peers.pop() peer = LXMPeer.from_bytes(serialised_peer, self) + del serialised_peer if peer.identity != None: self.peers[peer.destination_hash] = peer lim_str = ", no transfer limit" if peer.propagation_transfer_limit != None: lim_str = ", "+RNS.prettysize(peer.propagation_transfer_limit*1000)+" transfer limit" - RNS.log("Loaded peer "+RNS.prettyhexrep(peer.destination_hash)+" with "+str(len(peer.unhandled_messages))+" unhandled messages"+lim_str, RNS.LOG_DEBUG) + RNS.log("Rebuilt peer "+RNS.prettyhexrep(peer.destination_hash)+" with "+str(peer.unhandled_message_count)+" unhandled messages"+lim_str, RNS.LOG_DEBUG) else: - del peer RNS.log("Peer "+RNS.prettyhexrep(peer.destination_hash)+" could not be loaded, because its identity could not be recalled. Dropping peer.", RNS.LOG_DEBUG) + del peer del serialised_peers - del peers_data - RNS.log(f"Loaded {len(self.peers)} peers in {RNS.prettytime(time.time()-st)}", RNS.LOG_NOTICE) + RNS.log(f"Rebuilt synchronisation state for {len(self.peers)} peers in {RNS.prettytime(time.time()-st)}", RNS.LOG_NOTICE) self.propagation_node = True self.propagation_destination.set_link_established_callback(self.propagation_link_established) @@ -602,36 +614,37 @@ class LXMRouter: JOB_STORE_INTERVAL = 120 JOB_PEERSYNC_INTERVAL = 12 def jobs(self): - self.processing_count += 1 + if not self.exit_handler_running: + self.processing_count += 1 - if self.processing_count % LXMRouter.JOB_OUTBOUND_INTERVAL == 0: - self.process_outbound() + if self.processing_count % LXMRouter.JOB_OUTBOUND_INTERVAL == 0: + self.process_outbound() - if self.processing_count % LXMRouter.JOB_STAMPS_INTERVAL == 0: - threading.Thread(target=self.process_deferred_stamps, daemon=True).start() + if self.processing_count % LXMRouter.JOB_STAMPS_INTERVAL == 0: + threading.Thread(target=self.process_deferred_stamps, daemon=True).start() - if self.processing_count % LXMRouter.JOB_LINKS_INTERVAL == 0: - self.clean_links() + if self.processing_count % LXMRouter.JOB_LINKS_INTERVAL == 0: + self.clean_links() - if self.processing_count % LXMRouter.JOB_TRANSIENT_INTERVAL == 0: - self.clean_transient_id_caches() + if self.processing_count % LXMRouter.JOB_TRANSIENT_INTERVAL == 0: + self.clean_transient_id_caches() - if self.processing_count % LXMRouter.JOB_STORE_INTERVAL == 0: - self.clean_message_store() + if self.processing_count % LXMRouter.JOB_STORE_INTERVAL == 0: + self.clean_message_store() - if self.processing_count % LXMRouter.JOB_PEERSYNC_INTERVAL == 0: - self.sync_peers() + if self.processing_count % LXMRouter.JOB_PEERSYNC_INTERVAL == 0: + self.sync_peers() def jobloop(self): while (True): # TODO: Improve this to scheduling, so manual # triggers can delay next run - try: self.jobs() except Exception as e: RNS.log("An error ocurred while running LXMF Router jobs.", 
RNS.LOG_ERROR) RNS.log("The contained exception was: "+str(e), RNS.LOG_ERROR) + RNS.trace_exception(e) time.sleep(LXMRouter.PROCESSING_INTERVAL) def clean_links(self): @@ -888,22 +901,24 @@ class LXMRouter: def save_locally_delivered_transient_ids(self): try: - if not os.path.isdir(self.storagepath): - os.makedirs(self.storagepath) + if len(self.locally_delivered_transient_ids) > 0: + if not os.path.isdir(self.storagepath): + os.makedirs(self.storagepath) - with open(self.storagepath+"/local_deliveries", "wb") as locally_delivered_file: - locally_delivered_file.write(msgpack.packb(self.locally_delivered_transient_ids)) + with open(self.storagepath+"/local_deliveries", "wb") as locally_delivered_file: + locally_delivered_file.write(msgpack.packb(self.locally_delivered_transient_ids)) except Exception as e: RNS.log("Could not save locally delivered message ID cache to storage. The contained exception was: "+str(e), RNS.LOG_ERROR) def save_locally_processed_transient_ids(self): try: - if not os.path.isdir(self.storagepath): - os.makedirs(self.storagepath) + if len(self.locally_processed_transient_ids) > 0: + if not os.path.isdir(self.storagepath): + os.makedirs(self.storagepath) - with open(self.storagepath+"/locally_processed", "wb") as locally_processed_file: - locally_processed_file.write(msgpack.packb(self.locally_processed_transient_ids)) + with open(self.storagepath+"/locally_processed", "wb") as locally_processed_file: + locally_processed_file.write(msgpack.packb(self.locally_processed_transient_ids)) except Exception as e: RNS.log("Could not save locally processed transient ID cache to storage. The contained exception was: "+str(e), RNS.LOG_ERROR) @@ -1001,10 +1016,43 @@ class LXMRouter: RNS.log(f"An error occurred while reloading available tickets from storage: {e}", RNS.LOG_ERROR) def exit_handler(self): + if self.exit_handler_running: + return + + self.exit_handler_running = True + + RNS.log("Tearing down delivery destinations...", RNS.LOG_NOTICE) + for destination_hash in self.delivery_destinations: + delivery_destination = self.delivery_destinations[destination_hash] + delivery_destination.set_packet_callback(None) + delivery_destination.set_link_established_callback(None) + for link in delivery_destination.links: + try: + if link.status == RNS.Link.ACTIVE: + link.teardown() + except Exception as e: + RNS.log("Error while tearing down propagation link: {e}", RNS.LOG_ERROR) + + if self.propagation_node: + RNS.log("Tearing down propagation node destination...", RNS.LOG_NOTICE) + self.propagation_destination.set_link_established_callback(None) + self.propagation_destination.set_packet_callback(None) + self.propagation_destination.deregister_request_handler(LXMPeer.OFFER_REQUEST_PATH) + self.propagation_destination.deregister_request_handler(LXMPeer.MESSAGE_GET_PATH) + for link in self.active_propagation_links: + try: + if link.status == RNS.Link.ACTIVE: + link.teardown() + except Exception as e: + RNS.log("Error while tearing down propagation link: {e}", RNS.LOG_ERROR) + + RNS.log("Persisting LXMF state data to storage...", RNS.LOG_NOTICE) if self.propagation_node: try: + st = time.time(); RNS.log("Saving peer synchronisation states to storage...", RNS.LOG_NOTICE) serialised_peers = [] - for peer_id in self.peers: + peer_dict = self.peers.copy() + for peer_id in peer_dict: peer = self.peers[peer_id] serialised_peers.append(peer.to_bytes()) @@ -1012,7 +1060,7 @@ class LXMRouter: peers_file.write(msgpack.packb(serialised_peers)) peers_file.close() - RNS.log("Saved 
"+str(len(serialised_peers))+" peers to storage", RNS.LOG_DEBUG) + RNS.log(f"Saved {len(serialised_peers)} peers to storage in {RNS.prettytime(time.time()-st)}", RNS.LOG_NOTICE) except Exception as e: RNS.log("Could not save propagation node peers to storage. The contained exception was: "+str(e), RNS.LOG_ERROR) @@ -1020,6 +1068,20 @@ class LXMRouter: self.save_locally_delivered_transient_ids() self.save_locally_processed_transient_ids() + def sigint_handler(self, signal, frame): + if not self.exit_handler_running: + RNS.log("Received SIGINT, shutting down now!", RNS.LOG_WARNING) + sys.exit(0) + else: + RNS.log("Received SIGINT, but exit handler is running, keeping process alive until storage persist is complete", RNS.LOG_WARNING) + + def sigterm_handler(self, signal, frame): + if not self.exit_handler_running: + RNS.log("Received SIGTERM, shutting down now!", RNS.LOG_WARNING) + sys.exit(0) + else: + RNS.log("Received SIGTERM, but exit handler is running, keeping process alive until storage persist is complete", RNS.LOG_WARNING) + def __str__(self): return "" @@ -1685,19 +1747,23 @@ class LXMRouter: messages = data[1] for lxmf_data in messages: + peer = None + transient_id = RNS.Identity.full_hash(lxmf_data) if remote_hash != None and remote_hash in self.peers: - transient_id = RNS.Identity.full_hash(lxmf_data) peer = self.peers[remote_hash] - peer.handled_messages.append(transient_id) - self.lxmf_propagation(lxmf_data) + self.lxmf_propagation(lxmf_data, from_peer=peer) + if peer != None: + peer.add_handled_message(transient_id) + else: RNS.log("Invalid data structure received at propagation destination, ignoring", RNS.LOG_DEBUG) except Exception as e: RNS.log("Error while unpacking received propagation resource", RNS.LOG_DEBUG) + RNS.trace_exception(e) - def lxmf_propagation(self, lxmf_data, signal_local_delivery=None, signal_duplicate=None, is_paper_message=False): + def lxmf_propagation(self, lxmf_data, signal_local_delivery=None, signal_duplicate=None, is_paper_message=False, from_peer=None): no_stamp_enforcement = False if is_paper_message: no_stamp_enforcement = True @@ -1708,7 +1774,6 @@ class LXMRouter: if not transient_id in self.propagation_entries and not transient_id in self.locally_processed_transient_ids: received = time.time() - propagation_entry = [transient_id, received, lxmf_data] destination_hash = lxmf_data[:LXMessage.DESTINATION_LENGTH] self.locally_processed_transient_ids[transient_id] = received @@ -1732,12 +1797,13 @@ class LXMRouter: msg_file.write(lxmf_data) msg_file.close() - self.propagation_entries[transient_id] = [destination_hash, file_path, time.time(), len(lxmf_data)] + self.propagation_entries[transient_id] = [destination_hash, file_path, time.time(), len(lxmf_data), [], []] RNS.log("Received propagated LXMF message "+RNS.prettyhexrep(transient_id)+", adding to peer distribution queues...", RNS.LOG_DEBUG) for peer_id in self.peers: peer = self.peers[peer_id] - peer.handle_message(transient_id) + if peer != from_peer: + peer.new_propagation_message(transient_id) else: # TODO: Add message to sneakernet queues when implemented @@ -1757,6 +1823,7 @@ class LXMRouter: except Exception as e: RNS.log("Could not assemble propagated LXMF message from received data", RNS.LOG_DEBUG) RNS.log("The contained exception was: "+str(e), RNS.LOG_DEBUG) + RNS.trace_exception(e) return False def ingest_lxm_uri(self, uri, signal_local_delivery=None, signal_duplicate=None): From 44d1d992f8a9be4b81c5f6b302f6f48b1e46e161 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Tue, 21 Jan 
2025 16:34:00 +0100 Subject: [PATCH 003/123] Updated version --- LXMF/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LXMF/_version.py b/LXMF/_version.py index 906d362..43c4ab0 100644 --- a/LXMF/_version.py +++ b/LXMF/_version.py @@ -1 +1 @@ -__version__ = "0.6.0" +__version__ = "0.6.1" From bfed126a7c17fd90551204afd0bbab3fac1441f9 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Tue, 21 Jan 2025 16:44:24 +0100 Subject: [PATCH 004/123] Memory optimisations --- LXMF/LXMPeer.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/LXMF/LXMPeer.py b/LXMF/LXMPeer.py index f4c522c..d133027 100644 --- a/LXMF/LXMPeer.py +++ b/LXMF/LXMPeer.py @@ -358,14 +358,14 @@ class LXMPeer: @property def handled_messages(self): pes = self.router.propagation_entries.copy() - hm = list(filter(lambda tid: self.destination_hash in self.router.propagation_entries[tid][4], pes)) + hm = list(filter(lambda tid: self.destination_hash in pes[tid][4], pes)) self._hm_count = len(hm); del pes return hm @property def unhandled_messages(self): pes = self.router.propagation_entries.copy() - um = list(filter(lambda tid: self.destination_hash in self.router.propagation_entries[tid][5], pes)) + um = list(filter(lambda tid: self.destination_hash in pes[tid][5], pes)) self._um_count = len(um); del pes return um From 1c9c74410790188db976dbecdff3b994d33ac5d9 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Tue, 21 Jan 2025 16:51:25 +0100 Subject: [PATCH 005/123] Memory optimisations --- LXMF/LXMPeer.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/LXMF/LXMPeer.py b/LXMF/LXMPeer.py index d133027..add54da 100644 --- a/LXMF/LXMPeer.py +++ b/LXMF/LXMPeer.py @@ -360,6 +360,7 @@ class LXMPeer: pes = self.router.propagation_entries.copy() hm = list(filter(lambda tid: self.destination_hash in pes[tid][4], pes)) self._hm_count = len(hm); del pes + self._hm_counts_synced = True return hm @property @@ -367,6 +368,7 @@ class LXMPeer: pes = self.router.propagation_entries.copy() um = list(filter(lambda tid: self.destination_hash in pes[tid][5], pes)) self._um_count = len(um); del pes + self._um_counts_synced = True return um @property @@ -387,12 +389,10 @@ class LXMPeer: if not self._hm_counts_synced: RNS.log("UPDATE HM COUNTS") hm = self.handled_messages; del hm - self._hm_counts_synced = True if not self._um_counts_synced: RNS.log("UPDATE UM COUNTS") um = self.unhandled_messages; del um - self._um_counts_synced = True def add_handled_message(self, transient_id): if transient_id in self.router.propagation_entries: From 1430b1ce90b989e9627d07841b5634e6f3a1f8e1 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Tue, 21 Jan 2025 20:20:39 +0100 Subject: [PATCH 006/123] Enqueue and batch process distribution queue mappings --- LXMF/LXMPeer.py | 40 ++++++++++++++++++++++++++++++----- LXMF/LXMRouter.py | 53 +++++++++++++++++++++++++++++++++++++++-------- 2 files changed, 79 insertions(+), 14 deletions(-) diff --git a/LXMF/LXMPeer.py b/LXMF/LXMPeer.py index add54da..74a40c7 100644 --- a/LXMF/LXMPeer.py +++ b/LXMF/LXMPeer.py @@ -4,6 +4,7 @@ import time import RNS import RNS.vendor.umsgpack as msgpack +from collections import deque from .LXMF import APP_NAME class LXMPeer: @@ -122,6 +123,8 @@ class LXMPeer: self.link_establishment_rate = 0 self.sync_transfer_rate = 0 self.propagation_transfer_limit = None + self.handled_messages_queue = deque() + self.unhandled_messages_queue = deque() self._hm_count = 0 self._um_count = 0 @@ -351,9 +354,38 @@ class LXMPeer: self.link = None self.state = 
LXMPeer.IDLE - def new_propagation_message(self, transient_id): - if not transient_id in self.handled_messages and not transient_id in self.unhandled_messages: - self.add_unhandled_message(transient_id) + def queued_items(self): + return len(self.handled_messages_queue) > 0 or len(self.unhandled_messages_queue) > 0 + + def queue_unhandled_message(self, transient_id): + self.unhandled_messages_queue.append(transient_id) + + def queue_handled_message(self, transient_id): + self.handled_messages_queue.append(transient_id) + + def process_queues(self): + if len(self.unhandled_messages_queue) > 0 or len(self.handled_messages_queue) > 0: + # TODO: Remove debug + # st = time.time(); lu = len(self.unhandled_messages_queue); lh = len(self.handled_messages_queue) + + handled_messages = self.handled_messages + unhandled_messages = self.unhandled_messages + + while len(self.handled_messages_queue) > 0: + transient_id = self.handled_messages_queue.pop() + if not transient_id in handled_messages: + self.add_handled_message(transient_id) + if transient_id in unhandled_messages: + self.remove_unhandled_message(transient_id) + + while len(self.unhandled_messages_queue) > 0: + transient_id = self.unhandled_messages_queue.pop() + if not transient_id in handled_messages and not transient_id in unhandled_messages: + self.add_unhandled_message(transient_id) + + del handled_messages, unhandled_messages + # TODO: Remove debug + # RNS.log(f"{self} processed {lh}/{lu} in {RNS.prettytime(time.time()-st)}") @property def handled_messages(self): @@ -387,11 +419,9 @@ class LXMPeer: def _update_counts(self): if not self._hm_counts_synced: - RNS.log("UPDATE HM COUNTS") hm = self.handled_messages; del hm if not self._um_counts_synced: - RNS.log("UPDATE UM COUNTS") um = self.unhandled_messages; del um def add_handled_message(self, transient_id): diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 9163824..1e62914 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -8,6 +8,8 @@ import atexit import signal import threading +from collections import deque + import RNS import RNS.vendor.umsgpack as msgpack @@ -143,6 +145,8 @@ class LXMRouter: self.peers = {} self.propagation_entries = {} + self.peer_distribution_queue = deque() + RNS.Transport.register_announce_handler(LXMFDeliveryAnnounceHandler(self)) RNS.Transport.register_announce_handler(LXMFPropagationAnnounceHandler(self)) @@ -613,6 +617,7 @@ class LXMRouter: JOB_TRANSIENT_INTERVAL = 60 JOB_STORE_INTERVAL = 120 JOB_PEERSYNC_INTERVAL = 12 + JOB_PEERINGEST_INTERVAL= JOB_PEERSYNC_INTERVAL def jobs(self): if not self.exit_handler_running: self.processing_count += 1 @@ -632,6 +637,9 @@ class LXMRouter: if self.processing_count % LXMRouter.JOB_STORE_INTERVAL == 0: self.clean_message_store() + if self.processing_count % LXMRouter.JOB_PEERINGEST_INTERVAL == 0: + self.flush_queues() + if self.processing_count % LXMRouter.JOB_PEERSYNC_INTERVAL == 0: self.sync_peers() @@ -647,6 +655,17 @@ class LXMRouter: RNS.trace_exception(e) time.sleep(LXMRouter.PROCESSING_INTERVAL) + def flush_queues(self): + self.flush_peer_distribution_queue() + RNS.log("Calculating peer distribution queue mappings...", RNS.LOG_DEBUG); st = time.time() + for peer_id in self.peers.copy(): + if peer_id in self.peers: + peer = self.peers[peer_id] + if peer.queued_items(): + peer.process_queues() + + RNS.log(f"Distribution queue mapping completed in {RNS.prettytime(time.time()-st)}", RNS.LOG_DEBUG) + def clean_links(self): closed_links = [] for link_hash in self.direct_links: @@ -1047,6 +1066,7 @@ 
class LXMRouter: RNS.log("Error while tearing down propagation link: {e}", RNS.LOG_ERROR) RNS.log("Persisting LXMF state data to storage...", RNS.LOG_NOTICE) + self.flush_queues() if self.propagation_node: try: st = time.time(); RNS.log("Saving peer synchronisation states to storage...", RNS.LOG_NOTICE) @@ -1608,8 +1628,9 @@ class LXMRouter: culled_peers = [] waiting_peers = [] unresponsive_peers = [] - for peer_id in self.peers: - peer = self.peers[peer_id] + peers = self.peers.copy() + for peer_id in peers: + peer = peers[peer_id] if time.time() > peer.last_heard + LXMPeer.MAX_UNREACHABLE: culled_peers.append(peer_id) else: @@ -1754,7 +1775,7 @@ class LXMRouter: self.lxmf_propagation(lxmf_data, from_peer=peer) if peer != None: - peer.add_handled_message(transient_id) + peer.queue_handled_message(transient_id) else: RNS.log("Invalid data structure received at propagation destination, ignoring", RNS.LOG_DEBUG) @@ -1763,6 +1784,24 @@ class LXMRouter: RNS.log("Error while unpacking received propagation resource", RNS.LOG_DEBUG) RNS.trace_exception(e) + def enqueue_peer_distribution(self, transient_id, from_peer): + self.peer_distribution_queue.append([transient_id, from_peer]) + + def flush_peer_distribution_queue(self): + if len(self.peer_distribution_queue) > 0: + entries = [] + while len(self.peer_distribution_queue) > 0: + entries.append(self.peer_distribution_queue.pop()) + + for peer_id in self.peers.copy(): + if peer_id in self.peers: + peer = self.peers[peer_id] + for entry in entries: + transient_id = entry[0] + from_peer = entry[1] + if peer != from_peer: + peer.queue_unhandled_message(transient_id) + def lxmf_propagation(self, lxmf_data, signal_local_delivery=None, signal_duplicate=None, is_paper_message=False, from_peer=None): no_stamp_enforcement = False if is_paper_message: @@ -1797,13 +1836,9 @@ class LXMRouter: msg_file.write(lxmf_data) msg_file.close() - self.propagation_entries[transient_id] = [destination_hash, file_path, time.time(), len(lxmf_data), [], []] - RNS.log("Received propagated LXMF message "+RNS.prettyhexrep(transient_id)+", adding to peer distribution queues...", RNS.LOG_DEBUG) - for peer_id in self.peers: - peer = self.peers[peer_id] - if peer != from_peer: - peer.new_propagation_message(transient_id) + self.propagation_entries[transient_id] = [destination_hash, file_path, time.time(), len(lxmf_data), [], []] + self.enqueue_peer_distribution(transient_id, from_peer) else: # TODO: Add message to sneakernet queues when implemented From c2a08ef35588ccd512a7ea7c9898c83e5fd2864e Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Tue, 21 Jan 2025 20:44:11 +0100 Subject: [PATCH 007/123] Enqueue and batch process distribution queue mappings --- LXMF/LXMRouter.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 1e62914..ee1dca8 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -656,15 +656,16 @@ class LXMRouter: time.sleep(LXMRouter.PROCESSING_INTERVAL) def flush_queues(self): - self.flush_peer_distribution_queue() - RNS.log("Calculating peer distribution queue mappings...", RNS.LOG_DEBUG); st = time.time() - for peer_id in self.peers.copy(): - if peer_id in self.peers: - peer = self.peers[peer_id] - if peer.queued_items(): - peer.process_queues() + if len(self.peers) > 0: + self.flush_peer_distribution_queue() + RNS.log("Calculating peer distribution queue mappings...", RNS.LOG_DEBUG); st = time.time() + for peer_id in self.peers.copy(): + if peer_id in self.peers: + peer = 
self.peers[peer_id] + if peer.queued_items(): + peer.process_queues() - RNS.log(f"Distribution queue mapping completed in {RNS.prettytime(time.time()-st)}", RNS.LOG_DEBUG) + RNS.log(f"Distribution queue mapping completed in {RNS.prettytime(time.time()-st)}", RNS.LOG_DEBUG) def clean_links(self): closed_links = [] From e69da2ed2a29b33af0acda059aa9a624b475a6e7 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Wed, 22 Jan 2025 01:37:09 +0100 Subject: [PATCH 008/123] Added static peers and peering limit --- LXMF/Handlers.py | 14 ++++-- LXMF/LXMPeer.py | 59 ++++++++++++++++++++--- LXMF/LXMRouter.py | 107 +++++++++++++++++++++++++++++++++++------ LXMF/Utilities/lxmd.py | 29 +++++++++-- 4 files changed, 179 insertions(+), 30 deletions(-) diff --git a/LXMF/Handlers.py b/LXMF/Handlers.py index 7420ea5..22c6cd3 100644 --- a/LXMF/Handlers.py +++ b/LXMF/Handlers.py @@ -51,12 +51,16 @@ class LXMFPropagationAnnounceHandler: except: propagation_transfer_limit = None - if data[0] == True: - if RNS.Transport.hops_to(destination_hash) <= self.lxmrouter.autopeer_maxdepth: - self.lxmrouter.peer(destination_hash, node_timebase, propagation_transfer_limit) + if destination_hash in self.lxmrouter.static_peers: + self.lxmrouter.peer(destination_hash, node_timebase, propagation_transfer_limit) - elif data[0] == False: - self.lxmrouter.unpeer(destination_hash, node_timebase) + else: + if data[0] == True: + if RNS.Transport.hops_to(destination_hash) <= self.lxmrouter.autopeer_maxdepth: + self.lxmrouter.peer(destination_hash, node_timebase, propagation_transfer_limit) + + elif data[0] == False: + self.lxmrouter.unpeer(destination_hash, node_timebase) except Exception as e: RNS.log("Error while evaluating propagation node announce, ignoring announce.", RNS.LOG_DEBUG) diff --git a/LXMF/LXMPeer.py b/LXMF/LXMPeer.py index 74a40c7..ec0cfe2 100644 --- a/LXMF/LXMPeer.py +++ b/LXMF/LXMPeer.py @@ -66,6 +66,31 @@ class LXMPeer: peer.propagation_transfer_limit = None else: peer.propagation_transfer_limit = None + + if "offered" in dictionary: + peer.offered = dictionary["offered"] + else: + peer.offered = 0 + + if "outgoing" in dictionary: + peer.outgoing = dictionary["outgoing"] + else: + peer.outgoing = 0 + + if "incoming" in dictionary: + peer.incoming = dictionary["incoming"] + else: + peer.incoming = 0 + + if "rx_bytes" in dictionary: + peer.rx_bytes = dictionary["rx_bytes"] + else: + peer.rx_bytes = 0 + + if "tx_bytes" in dictionary: + peer.tx_bytes = dictionary["tx_bytes"] + else: + peer.tx_bytes = 0 hm_count = 0 for transient_id in dictionary["handled_ids"]: @@ -96,6 +121,11 @@ class LXMPeer: dictionary["link_establishment_rate"] = self.link_establishment_rate dictionary["sync_transfer_rate"] = self.sync_transfer_rate dictionary["propagation_transfer_limit"] = self.propagation_transfer_limit + dictionary["offered"] = self.offered + dictionary["outgoing"] = self.outgoing + dictionary["incoming"] = self.incoming + dictionary["rx_bytes"] = self.rx_bytes + dictionary["tx_bytes"] = self.tx_bytes handled_ids = [] for transient_id in self.handled_messages: @@ -126,6 +156,12 @@ class LXMPeer: self.handled_messages_queue = deque() self.unhandled_messages_queue = deque() + self.offered = 0 # Messages offered to this peer + self.outgoing = 0 # Messages transferred to this peer + self.incoming = 0 # Messages received from this peer + self.rx_bytes = 0 # Bytes received from this peer + self.tx_bytes = 0 # Bytes sent to this peer + self._hm_count = 0 self._um_count = 0 self._hm_counts_synced = False @@ -212,7 +248,7 @@ class LXMPeer: 
cumulative_size += (lxm_size+per_message_overhead) unhandled_ids.append(transient_id) - RNS.log("Sending sync request to peer "+str(self.destination), RNS.LOG_DEBUG) + RNS.log(f"Offering {len(unhandled_ids)} messages to peer {RNS.prettyhexrep(self.destination.hash)}", RNS.LOG_VERBOSE) self.last_offer = unhandled_ids self.link.request(LXMPeer.OFFER_REQUEST_PATH, unhandled_ids, response_callback=self.offer_response, failed_callback=self.request_failed) self.state = LXMPeer.REQUEST_SENT @@ -242,10 +278,16 @@ class LXMPeer: if response == LXMPeer.ERROR_NO_IDENTITY: if self.link != None: - RNS.log("Remote peer indicated that no identification was received, retrying...", RNS.LOG_DEBUG) + RNS.log("Remote peer indicated that no identification was received, retrying...", RNS.LOG_VERBOSE) self.link.identify() self.state = LXMPeer.LINK_READY self.sync() + return + + elif response == LXMPeer.ERROR_NO_ACCESS: + RNS.log("Remote indicated that access was denied, breaking peering", RNS.LOG_VERBOSE) + self.router.unpeer(self.destination_hash) + return elif response == False: # Peer already has all advertised messages @@ -275,10 +317,9 @@ class LXMPeer: wanted_message_ids.append(transient_id) if len(wanted_messages) > 0: - RNS.log("Peer wanted "+str(len(wanted_messages))+" of the available messages", RNS.LOG_DEBUG) + RNS.log("Peer wanted "+str(len(wanted_messages))+" of the available messages", RNS.LOG_VERBOSE) lxm_list = [] - for message_entry in wanted_messages: file_path = message_entry[1] if os.path.isfile(file_path): @@ -294,7 +335,8 @@ class LXMPeer: self.state = LXMPeer.RESOURCE_TRANSFERRING else: - RNS.log("Peer "+RNS.prettyhexrep(self.destination_hash)+" did not request any of the available messages, sync completed", RNS.LOG_DEBUG) + RNS.log("Peer "+RNS.prettyhexrep(self.destination_hash)+" did not request any of the available messages, sync completed", RNS.LOG_VERBOSE) + self.offered += len(self.last_offer) if self.link != None: self.link.teardown() @@ -328,12 +370,15 @@ class LXMPeer: self.sync_transfer_rate = (resource.get_transfer_size()*8)/(time.time()-resource.sync_transfer_started) rate_str = f" at {RNS.prettyspeed(self.sync_transfer_rate)}" - RNS.log("Sync to peer "+RNS.prettyhexrep(self.destination_hash)+" completed"+rate_str, RNS.LOG_DEBUG) + RNS.log(f"Syncing {len(resource.transferred_messages)} messages to peer {RNS.prettyhexrep(self.destination_hash)} completed{rate_str}", RNS.LOG_VERBOSE) self.alive = True self.last_heard = time.time() + self.offered += len(self.last_offer) + self.outgoing += len(resource.transferred_messages) + self.tx_bytes += resource.get_data_size() else: - RNS.log("Resource transfer for LXMF peer sync failed to "+str(self.destination), RNS.LOG_DEBUG) + RNS.log("Resource transfer for LXMF peer sync failed to "+str(self.destination), RNS.LOG_VERBOSE) if self.link != None: self.link.teardown() diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index ee1dca8..bfe863d 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -37,6 +37,7 @@ class LXMRouter: NODE_ANNOUNCE_DELAY = 20 + MAX_PEERS = 50 AUTOPEER = True AUTOPEER_MAXDEPTH = 4 FASTEST_N_RANDOM_POOL = 2 @@ -67,7 +68,10 @@ class LXMRouter: ### Developer-facing API ############################## ####################################################### - def __init__(self, identity = None, storagepath = None, autopeer = AUTOPEER, autopeer_maxdepth = None, propagation_limit = PROPAGATION_LIMIT, delivery_limit = DELIVERY_LIMIT, enforce_ratchets = False, enforce_stamps = False): + def __init__(self, identity=None, 
storagepath=None, autopeer=AUTOPEER, autopeer_maxdepth=None, + propagation_limit=PROPAGATION_LIMIT, delivery_limit=DELIVERY_LIMIT, enforce_ratchets=False, + enforce_stamps=False, static_peers = [], max_peers=None, from_static_only=False): + random.seed(os.urandom(10)) self.pending_inbound = [] @@ -142,6 +146,27 @@ class LXMRouter: else: self.autopeer_maxdepth = LXMRouter.AUTOPEER_MAXDEPTH + if max_peers == None: + self.max_peers = LXMRouter.MAX_PEERS + else: + if type(max_peers) == int and max_peers >= 0: + self.max_peers = max_peers + else: + raise ValueError(f"Invalid value for max_peers: {max_peers}") + + self.from_static_only = from_static_only + if type(static_peers) != list: + raise ValueError(f"Invalid type supplied for static peer list: {type(static_peers)}") + else: + for static_peer in static_peers: + if type(static_peer) != bytes: + raise ValueError(f"Invalid static peer destination hash: {static_peer}") + else: + if len(static_peer) != RNS.Reticulum.TRUNCATED_HASHLENGTH//8: + raise ValueError(f"Invalid static peer destination hash: {static_peer}") + + self.static_peers = static_peers + self.peers = {} self.propagation_entries = {} @@ -245,8 +270,9 @@ class LXMRouter: def announce_propagation_node(self): def delayed_announce(): time.sleep(LXMRouter.NODE_ANNOUNCE_DELAY) + node_state = self.propagation_node and not self.from_static_only announce_data = [ - self.propagation_node, # Boolean flag signalling propagation node state + node_state, # Boolean flag signalling propagation node state int(time.time()), # Current node timebase self.propagation_per_transfer_limit, # Per-transfer limit for message propagation in kilobytes ] @@ -485,6 +511,11 @@ class LXMRouter: serialised_peer = serialised_peers.pop() peer = LXMPeer.from_bytes(serialised_peer, self) del serialised_peer + if peer.destination_hash in self.static_peers and peer.last_heard == 0: + # TODO: Allow path request responses through announce handler + # momentarily here, so peering config can be updated even if + # the static peer is not available to directly send an announce. + RNS.Transport.request_path(peer.destination_hash) if peer.identity != None: self.peers[peer.destination_hash] = peer lim_str = ", no transfer limit" @@ -497,6 +528,17 @@ class LXMRouter: del serialised_peers + if len(self.static_peers) > 0: + for static_peer in self.static_peers: + if not static_peer in self.peers: + RNS.log(f"Activating static peering with {RNS.prettyhexrep(static_peer)}", RNS.LOG_NOTICE) + self.peers[static_peer] = LXMPeer(self, static_peer) + if self.peers[static_peer].last_heard == 0: + # TODO: Allow path request responses through announce handler + # momentarily here, so peering config can be updated even if + # the static peer is not available to directly send an announce. 
+ RNS.Transport.request_path(static_peer) + RNS.log(f"Rebuilt synchronisation state for {len(self.peers)} peers in {RNS.prettytime(time.time()-st)}", RNS.LOG_NOTICE) self.propagation_node = True @@ -643,6 +685,11 @@ class LXMRouter: if self.processing_count % LXMRouter.JOB_PEERSYNC_INTERVAL == 0: self.sync_peers() + # def syncstats(self): + # for peer_id in self.peers: + # p = self.peers[peer_id] + # RNS.log(f"{RNS.prettyhexrep(peer_id)} O={p.offered} S={p.outgoing} I={p.incoming} TX={RNS.prettysize(p.tx_bytes)} RX={RNS.prettysize(p.rx_bytes)}") + def jobloop(self): while (True): # TODO: Improve this to scheduling, so manual @@ -1070,7 +1117,7 @@ class LXMRouter: self.flush_queues() if self.propagation_node: try: - st = time.time(); RNS.log("Saving peer synchronisation states to storage...", RNS.LOG_NOTICE) + st = time.time(); RNS.log(f"Saving {len(self.peers)} peer synchronisation states to storage...", RNS.LOG_NOTICE) serialised_peers = [] peer_dict = self.peers.copy() for peer_id in peer_dict: @@ -1081,7 +1128,7 @@ class LXMRouter: peers_file.write(msgpack.packb(serialised_peers)) peers_file.close() - RNS.log(f"Saved {len(serialised_peers)} peers to storage in {RNS.prettytime(time.time()-st)}", RNS.LOG_NOTICE) + RNS.log(f"Saved {len(serialised_peers)} peers to storage in {RNS.prettyshorttime(time.time()-st)}", RNS.LOG_NOTICE) except Exception as e: RNS.log("Could not save propagation node peers to storage. The contained exception was: "+str(e), RNS.LOG_ERROR) @@ -1605,14 +1652,18 @@ class LXMRouter: peer.peering_timebase = timestamp peer.last_heard = time.time() peer.propagation_transfer_limit = propagation_transfer_limit + RNS.log(f"Peering config updated for {RNS.prettyhexrep(destination_hash)}", RNS.LOG_VERBOSE) else: - peer = LXMPeer(self, destination_hash) - peer.alive = True - peer.last_heard = time.time() - peer.propagation_transfer_limit = propagation_transfer_limit - self.peers[destination_hash] = peer - RNS.log("Peered with "+str(peer.destination)) + if len(self.peers) < self.max_peers: + peer = LXMPeer(self, destination_hash) + peer.alive = True + peer.last_heard = time.time() + peer.propagation_transfer_limit = propagation_transfer_limit + self.peers[destination_hash] = peer + RNS.log(f"Peered with {RNS.prettyhexrep(destination_hash)}", RNS.LOG_NOTICE) + else: + RNS.log(f"Max peers reached, not peering with {RNS.prettyhexrep(destination_hash)}", RNS.LOG_DEBUG) def unpeer(self, destination_hash, timestamp = None): if timestamp == None: @@ -1633,7 +1684,8 @@ class LXMRouter: for peer_id in peers: peer = peers[peer_id] if time.time() > peer.last_heard + LXMPeer.MAX_UNREACHABLE: - culled_peers.append(peer_id) + if not peer_id in self.static_peers: + culled_peers.append(peer_id) else: if peer.state == LXMPeer.IDLE and len(peer.unhandled_messages) > 0: if peer.alive: @@ -1693,10 +1745,23 @@ class LXMRouter: self.active_propagation_links.append(link) def propagation_resource_advertised(self, resource): + if self.from_static_only: + remote_identity = resource.link.get_remote_identity() + if remote_identity == None: + RNS.log(f"Rejecting propagation resource from unidentified peer", RNS.LOG_DEBUG) + return False + else: + remote_destination = RNS.Destination(remote_identity, RNS.Destination.OUT, RNS.Destination.SINGLE, APP_NAME, "propagation") + remote_hash = remote_destination.hash + remote_str = RNS.prettyhexrep(remote_hash) + if not remote_hash in self.static_peers: + RNS.log(f"Rejecting propagation resource from {remote_str} not in static peers list", RNS.LOG_DEBUG) + return 
False + size = resource.get_data_size() limit = self.propagation_per_transfer_limit*1000 if limit != None and size > limit: - RNS.log("Rejecting "+RNS.prettysize(size)+" incoming LXMF propagation resource, since it exceeds the limit of "+RNS.prettysize(limit), RNS.LOG_DEBUG) + RNS.log(f"Rejecting {RNS.prettysize(size)} incoming propagation resource, since it exceeds the limit of {RNS.prettysize(limit)}", RNS.LOG_DEBUG) return False else: return True @@ -1723,6 +1788,14 @@ class LXMRouter: if remote_identity == None: return LXMPeer.ERROR_NO_IDENTITY else: + if self.from_static_only: + remote_destination = RNS.Destination(remote_identity, RNS.Destination.OUT, RNS.Destination.SINGLE, APP_NAME, "propagation") + remote_hash = remote_destination.hash + remote_str = RNS.prettyhexrep(remote_hash) + if not remote_hash in self.static_peers: + RNS.log(f"Rejecting propagation request from {remote_str} not in static peers list", RNS.LOG_DEBUG) + return LXMPeer.ERROR_NO_ACCESS + try: transient_ids = data wanted_ids = [] @@ -1745,7 +1818,6 @@ class LXMRouter: return None def propagation_resource_concluded(self, resource): - RNS.log("Transfer concluded for incoming propagation resource "+str(resource), RNS.LOG_DEBUG) if resource.status == RNS.Resource.COMPLETE: # TODO: The peer this was received from should # have the transient id added to its list of @@ -1757,22 +1829,29 @@ class LXMRouter: # This is a series of propagation messages from a peer or originator remote_timebase = data[0] remote_hash = None + remote_str = "unknown peer" remote_identity = resource.link.get_remote_identity() if remote_identity != None: remote_destination = RNS.Destination(remote_identity, RNS.Destination.OUT, RNS.Destination.SINGLE, APP_NAME, "propagation") remote_hash = remote_destination.hash + remote_str = RNS.prettyhexrep(remote_hash) if not remote_hash in self.peers: if self.autopeer and RNS.Transport.hops_to(remote_hash) <= self.autopeer_maxdepth: self.peer(remote_hash, remote_timebase) + else: + remote_str = f"peer {remote_str}" messages = data[1] + RNS.log(f"Received {len(messages)} messages from {remote_str}", RNS.LOG_VERBOSE) for lxmf_data in messages: peer = None transient_id = RNS.Identity.full_hash(lxmf_data) if remote_hash != None and remote_hash in self.peers: peer = self.peers[remote_hash] + peer.incoming += 1 + peer.rx_bytes += len(lxmf_data) self.lxmf_propagation(lxmf_data, from_peer=peer) if peer != None: @@ -1837,7 +1916,7 @@ class LXMRouter: msg_file.write(lxmf_data) msg_file.close() - RNS.log("Received propagated LXMF message "+RNS.prettyhexrep(transient_id)+", adding to peer distribution queues...", RNS.LOG_DEBUG) + RNS.log("Received propagated LXMF message "+RNS.prettyhexrep(transient_id)+", adding to peer distribution queues...", RNS.LOG_EXTREME) self.propagation_entries[transient_id] = [destination_hash, file_path, time.time(), len(lxmf_data), [], []] self.enqueue_peer_distribution(transient_id, from_peer) diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index 38e71b1..0c87a73 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -140,6 +140,24 @@ def apply_config(): else: active_configuration["prioritised_lxmf_destinations"] = [] + if "propagation" in lxmd_config and "static_peers" in lxmd_config["propagation"]: + static_peers = lxmd_config["propagation"].as_list("static_peers") + active_configuration["static_peers"] = [] + for static_peer in static_peers: + active_configuration["static_peers"].append(bytes.fromhex(static_peer)) + else: + active_configuration["static_peers"] 
= [] + + if "propagation" in lxmd_config and "max_peers" in lxmd_config["propagation"]: + active_configuration["max_peers"] = lxmd_config["propagation"].as_int("max_peers") + else: + active_configuration["max_peers"] = None + + if "propagation" in lxmd_config and "from_static_only" in lxmd_config["propagation"]: + active_configuration["from_static_only"] = lxmd_config["propagation"].as_bool("from_static_only") + else: + active_configuration["from_static_only"] = False + # Load various settings if "logging" in lxmd_config and "loglevel" in lxmd_config["logging"]: targetloglevel = lxmd_config["logging"].as_int("loglevel") @@ -305,7 +323,10 @@ def program_setup(configdir = None, rnsconfigdir = None, run_pn = False, on_inbo autopeer_maxdepth = active_configuration["autopeer_maxdepth"], propagation_limit = active_configuration["propagation_transfer_max_accepted_size"], delivery_limit = active_configuration["delivery_transfer_max_accepted_size"], - ) + max_peers = active_configuration["max_peers"], + static_peers = active_configuration["static_peers"], + from_static_only = active_configuration["from_static_only"]) + message_router.register_delivery_callback(lxmf_delivery) for destination_hash in active_configuration["ignored_lxmf_destinations"]: @@ -362,13 +383,13 @@ def jobs(): try: if "peer_announce_interval" in active_configuration and active_configuration["peer_announce_interval"] != None: if time.time() > last_peer_announce + active_configuration["peer_announce_interval"]: - RNS.log("Sending announce for LXMF delivery destination", RNS.LOG_EXTREME) + RNS.log("Sending announce for LXMF delivery destination", RNS.LOG_VERBOSE) message_router.announce(lxmf_destination.hash) last_peer_announce = time.time() if "node_announce_interval" in active_configuration and active_configuration["node_announce_interval"] != None: if time.time() > last_node_announce + active_configuration["node_announce_interval"]: - RNS.log("Sending announce for LXMF Propagation Node", RNS.LOG_EXTREME) + RNS.log("Sending announce for LXMF Propagation Node", RNS.LOG_VERBOSE) message_router.announce_propagation_node() last_node_announce = time.time() @@ -381,7 +402,7 @@ def deferred_start_jobs(): global active_configuration, last_peer_announce, last_node_announce global message_router, lxmf_destination time.sleep(DEFFERED_JOBS_DELAY) - RNS.log("Running deferred start jobs") + RNS.log("Running deferred start jobs", RNS.LOG_DEBUG) if active_configuration["peer_announce_at_start"]: RNS.log("Sending announce for LXMF delivery destination", RNS.LOG_EXTREME) message_router.announce(lxmf_destination.hash) From 68257a441ff1029054378185b09f4b61020e9d3e Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Wed, 22 Jan 2025 09:44:03 +0100 Subject: [PATCH 009/123] Set transfer limit on reverse auto-peer --- LXMF/LXMRouter.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index bfe863d..5465356 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -1839,7 +1839,12 @@ class LXMRouter: if not remote_hash in self.peers: if self.autopeer and RNS.Transport.hops_to(remote_hash) <= self.autopeer_maxdepth: - self.peer(remote_hash, remote_timebase) + # TODO: Query cache for an announce and get propagation + # transfer limit from that. For now, initialise it to a + # sane default value, and wait for an announce to arrive + # that will update the peering config to the actual limit. 
+ propagation_transfer_limit = LXMRouter.PROPAGATION_LIMIT//4 + self.peer(remote_hash, remote_timebase, propagation_transfer_limit) else: remote_str = f"peer {remote_str}" From 61b1ecce276631a4ec2c1165c33b5195e46e946d Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Wed, 22 Jan 2025 10:10:57 +0100 Subject: [PATCH 010/123] Updated readme --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index faced95..ed7e4f0 100644 --- a/README.md +++ b/README.md @@ -12,6 +12,7 @@ User-facing clients built on LXMF include: Community-provided tools and utilities for LXMF include: +- [LXMFy](https://lxmfy.quad4.io/) - [LXMF-Bot](https://github.com/randogoth/lxmf-bot) - [LXMF Messageboard](https://github.com/chengtripp/lxmf_messageboard) - [LXMEvent](https://github.com/faragher/LXMEvent) From 2c71cea7a0d2fc0a3ab5bbd26883befb5a0dd9fc Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 23 Jan 2025 14:13:08 +0100 Subject: [PATCH 011/123] Added local node stats request handler --- LXMF/LXMRouter.py | 134 ++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 131 insertions(+), 3 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 5465356..22ef3ac 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -64,6 +64,8 @@ class LXMRouter: PR_ALL_MESSAGES = 0x00 + STATS_GET_PATH = "/pn/get/stats" + ### Developer-facing API ############################## ####################################################### @@ -92,6 +94,7 @@ class LXMRouter: self.processing_count = 0 self.propagation_node = False + self.propagation_node_start_time = None if storagepath == None: raise ValueError("LXMF cannot be initialised without a storage path") @@ -135,6 +138,11 @@ class LXMRouter: self.identity = identity self.propagation_destination = RNS.Destination(self.identity, RNS.Destination.IN, RNS.Destination.SINGLE, APP_NAME, "propagation") + self.control_destination = None + self.client_propagation_messages_received = 0 + self.client_propagation_messages_served = 0 + self.unpeered_propagation_incoming = 0 + self.unpeered_propagation_rx_bytes = 0 if autopeer != None: self.autopeer = autopeer @@ -541,13 +549,35 @@ class LXMRouter: RNS.log(f"Rebuilt synchronisation state for {len(self.peers)} peers in {RNS.prettytime(time.time()-st)}", RNS.LOG_NOTICE) + try: + if os.path.isfile(self.storagepath+"/node_stats"): + node_stats_file = open(self.storagepath+"/node_stats", "rb") + data = node_stats_file.read() + node_stats_file.close() + node_stats = msgpack.unpackb(data) + + if not type(node_stats) == dict: + RNS.log("Invalid data format for loaded local node stats, node stats will be reset", RNS.LOG_ERROR) + else: + self.client_propagation_messages_received = node_stats["client_propagation_messages_received"] + self.client_propagation_messages_served = node_stats["client_propagation_messages_served"] + self.unpeered_propagation_incoming = node_stats["unpeered_propagation_incoming"] + self.unpeered_propagation_rx_bytes = node_stats["unpeered_propagation_rx_bytes"] + + except Exception as e: + RNS.log("Could not load local node stats. 
The contained exception was: "+str(e), RNS.LOG_ERROR) + self.propagation_node = True + self.propagation_node_start_time = time.time() self.propagation_destination.set_link_established_callback(self.propagation_link_established) self.propagation_destination.set_packet_callback(self.propagation_packet) self.propagation_destination.register_request_handler(LXMPeer.OFFER_REQUEST_PATH, self.offer_request, allow = RNS.Destination.ALLOW_ALL) self.propagation_destination.register_request_handler(LXMPeer.MESSAGE_GET_PATH, self.message_get_request, allow = RNS.Destination.ALLOW_ALL) + self.control_destination = RNS.Destination(self.identity, RNS.Destination.IN, RNS.Destination.SINGLE, APP_NAME, "propagation", "control") + self.control_destination.register_request_handler(LXMRouter.STATS_GET_PATH, self.stats_get_request, allow = RNS.Destination.ALLOW_LIST, allowed_list=[self.identity.hash]) + if self.message_storage_limit != None: limit_str = ", limit is "+RNS.prettysize(self.message_storage_limit) else: @@ -650,6 +680,76 @@ class LXMRouter: return False + ### Propagation Node Control ########################## + ####################################################### + + def compile_stats(self): + if not self.propagation_node: + return None + else: + peer_stats = {} + for peer_id in self.peers.copy(): + peer = self.peers[peer_id] + peer_stats[peer_id] = { + "type": "static" if peer_id in self.static_peers else "discovered", + "state": peer.state, + "alive": peer.alive, + "last_heard": int(peer.last_heard), + "next_sync_attempt": peer.next_sync_attempt, + "last_sync_attempt": peer.last_sync_attempt, + "sync_backoff": peer.sync_backoff, + "peering_timebase": peer.peering_timebase, + "ler": int(peer.link_establishment_rate), + "str": int(peer.sync_transfer_rate), + "transfer_limit": peer.propagation_transfer_limit, + "network_distance": RNS.Transport.hops_to(peer_id), + "rx_bytes": peer.rx_bytes, + "tx_bytes": peer.tx_bytes, + "messages": { + "offered": peer.offered, + "outgoing": peer.outgoing, + "incoming": peer.incoming, + }, + } + + node_stats = { + "identity_hash": self.identity.hash, + "destination_hash": self.propagation_destination.hash, + "uptime": time.time()-self.propagation_node_start_time, + "delivery_limit": self.delivery_per_transfer_limit, + "propagation_limit": self.propagation_per_transfer_limit, + "autopeer_maxdepth": self.autopeer_maxdepth, + "from_static_only": self.from_static_only, + "messagestore": { + "count": len(self.propagation_entries), + "bytes": self.message_storage_size(), + "limit": self.message_storage_limit, + }, + "clients" : { + "client_propagation_messages_received": self.client_propagation_messages_received, + "client_propagation_messages_served": self.client_propagation_messages_served, + }, + "unpeered_propagation_incoming": self.unpeered_propagation_incoming, + "unpeered_propagation_rx_bytes": self.unpeered_propagation_rx_bytes, + "static_peers": len(self.static_peers), + "discovered_peers": len(self.peers)-len(self.static_peers), + "total_peers": len(self.peers), + "max_peers": self.max_peers, + "peers": peer_stats, + } + + return node_stats + + def stats_get_request(self, path, data, request_id, remote_identity, requested_at): + RNS.log("Stats request", RNS.LOG_DEBUG) # TODO: Remove debug + if remote_identity == None: + return LXMPeer.ERROR_NO_IDENTITY + elif remote_identity.hash != self.identity.hash: + return LXMPeer.ERROR_NO_ACCESS + else: + return self.compile_stats() + + ### Utility & Maintenance ############################# 
####################################################### @@ -970,7 +1070,7 @@ class LXMRouter: try: if len(self.locally_delivered_transient_ids) > 0: if not os.path.isdir(self.storagepath): - os.makedirs(self.storagepath) + os.makedirs(self.storagepath) with open(self.storagepath+"/local_deliveries", "wb") as locally_delivered_file: locally_delivered_file.write(msgpack.packb(self.locally_delivered_transient_ids)) @@ -982,7 +1082,7 @@ class LXMRouter: try: if len(self.locally_processed_transient_ids) > 0: if not os.path.isdir(self.storagepath): - os.makedirs(self.storagepath) + os.makedirs(self.storagepath) with open(self.storagepath+"/locally_processed", "wb") as locally_processed_file: locally_processed_file.write(msgpack.packb(self.locally_processed_transient_ids)) @@ -990,6 +1090,24 @@ class LXMRouter: except Exception as e: RNS.log("Could not save locally processed transient ID cache to storage. The contained exception was: "+str(e), RNS.LOG_ERROR) + def save_node_stats(self): + try: + if not os.path.isdir(self.storagepath): + os.makedirs(self.storagepath) + + with open(self.storagepath+"/node_stats", "wb") as stats_file: + node_stats = { + "client_propagation_messages_received": self.client_propagation_messages_received, + "client_propagation_messages_served": self.client_propagation_messages_served, + "unpeered_propagation_incoming": self.unpeered_propagation_incoming, + "unpeered_propagation_rx_bytes": self.unpeered_propagation_rx_bytes, + } + stats_file.write(msgpack.packb(node_stats)) + + except Exception as e: + RNS.log("Could not save local node stats to storage. The contained exception was: "+str(e), RNS.LOG_ERROR) + + def clean_outbound_stamp_costs(self): try: expired = [] @@ -1106,6 +1224,7 @@ class LXMRouter: self.propagation_destination.set_packet_callback(None) self.propagation_destination.deregister_request_handler(LXMPeer.OFFER_REQUEST_PATH) self.propagation_destination.deregister_request_handler(LXMPeer.MESSAGE_GET_PATH) + self.propagation_destination.deregister_request_handler(LXMRouter.STATS_GET_PATH) for link in self.active_propagation_links: try: if link.status == RNS.Link.ACTIVE: @@ -1135,6 +1254,7 @@ class LXMRouter: self.save_locally_delivered_transient_ids() self.save_locally_processed_transient_ids() + self.save_node_stats() def sigint_handler(self, signal, frame): if not self.exit_handler_running: @@ -1263,6 +1383,7 @@ class LXMRouter: except Exception as e: RNS.log("Error while processing message download request from "+RNS.prettyhexrep(remote_destination.hash)+". 
The contained exception was: "+str(e), RNS.LOG_ERROR) + self.client_propagation_messages_served += len(response_messages) return response_messages @@ -1777,6 +1898,7 @@ class LXMRouter: messages = data[1] for lxmf_data in messages: self.lxmf_propagation(lxmf_data) + self.client_propagation_messages_received += 1 packet.prove() @@ -1849,7 +1971,7 @@ class LXMRouter: remote_str = f"peer {remote_str}" messages = data[1] - RNS.log(f"Received {len(messages)} messages from {remote_str}", RNS.LOG_VERBOSE) + RNS.log(f"Received {len(messages)} message{"" if len(messages) == 1 else "s"} from {remote_str}", RNS.LOG_VERBOSE) for lxmf_data in messages: peer = None transient_id = RNS.Identity.full_hash(lxmf_data) @@ -1857,6 +1979,12 @@ class LXMRouter: peer = self.peers[remote_hash] peer.incoming += 1 peer.rx_bytes += len(lxmf_data) + else: + if remote_identity != None: + self.unpeered_propagation_incoming += 1 + self.unpeered_propagation_rx_bytes += len(lxmf_data) + else: + self.client_propagation_messages_received += 1 self.lxmf_propagation(lxmf_data, from_peer=peer) if peer != None: From f683e038910e45cf9be83b4dc01465ce8c8877ff Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 23 Jan 2025 14:15:12 +0100 Subject: [PATCH 012/123] Added lxmd status getter --- LXMF/Utilities/lxmd.py | 96 ++++++++++++++++++++++++++++++++++++++---- 1 file changed, 87 insertions(+), 9 deletions(-) diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index 0c87a73..1bc1d12 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -35,6 +35,7 @@ import time import os from LXMF._version import __version__ +from LXMF import APP_NAME from RNS.vendor.configobj import ConfigObj @@ -415,6 +416,75 @@ def deferred_start_jobs(): last_node_announce = time.time() threading.Thread(target=jobs, daemon=True).start() +def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = 0, timeout=5): + global configpath, identitypath, storagedir, lxmdir + global lxmd_config, active_configuration, targetloglevel + targetlogdest = RNS.LOG_STDOUT + + if configdir == None: + if os.path.isdir("/etc/lxmd") and os.path.isfile("/etc/lxmd/config"): + configdir = "/etc/lxmd" + elif os.path.isdir(RNS.Reticulum.userdir+"/.config/lxmd") and os.path.isfile(Reticulum.userdir+"/.config/lxmd/config"): + configdir = RNS.Reticulum.userdir+"/.config/lxmd" + else: + configdir = RNS.Reticulum.userdir+"/.lxmd" + + configpath = configdir+"/config" + identitypath = configdir+"/identity" + identity = None + + if not os.path.isdir(configdir): + RNS.log("Specified configuration directory does not exist, exiting now", RNS.LOG_ERROR) + exit(201) + if not os.path.isfile(identitypath): + RNS.log("Identity file not found in specified configuration directory, exiting now", RNS.LOG_ERROR) + exit(202) + else: + identity = RNS.Identity.from_file(identitypath) + if identity == None: + RNS.log("Could not load the Primary Identity from "+identitypath, RNS.LOG_ERROR) + exit(4) + + if targetloglevel == None: + targetloglevel = 3 + if verbosity != 0 or quietness != 0: + targetloglevel = targetloglevel+verbosity-quietness + + reticulum = RNS.Reticulum(configdir=rnsconfigdir, loglevel=targetloglevel, logdest=targetlogdest) + control_destination = RNS.Destination(identity, RNS.Destination.OUT, RNS.Destination.SINGLE, APP_NAME, "propagation", "control") + + timeout = time.time()+timeout + def check_timeout(): + if time.time() > timeout: + RNS.log("Getting lxmd statistics timed out, exiting now", RNS.LOG_ERROR) + exit(200) + else: + time.sleep(0.1) + + if 
not RNS.Transport.has_path(control_destination.hash): + RNS.Transport.request_path(control_destination.hash) + while not RNS.Transport.has_path(control_destination.hash): + check_timeout() + + link = RNS.Link(control_destination) + while not link.status == RNS.Link.ACTIVE: + check_timeout() + + link.identify(identity) + request_receipt = link.request(LXMF.LXMRouter.STATS_GET_PATH, data=None, response_callback=None, failed_callback=None) + while not request_receipt.get_status() == RNS.RequestReceipt.READY: + check_timeout() + + response = request_receipt.get_response() + if response == LXMF.LXMPeer.LXMPeer.ERROR_NO_IDENTITY: + RNS.log("Remote received no identity") + exit(203) + if response == LXMF.LXMPeer.LXMPeer.ERROR_NO_ACCESS: + RNS.log("Access denied") + exit(204) + else: + # TODO: Output stats + def main(): try: parser = argparse.ArgumentParser(description="Lightweight Extensible Messaging Daemon") @@ -425,6 +495,8 @@ def main(): parser.add_argument("-v", "--verbose", action="count", default=0) parser.add_argument("-q", "--quiet", action="count", default=0) parser.add_argument("-s", "--service", action="store_true", default=False, help="lxmd is running as a service and should log to file") + parser.add_argument("--status", action="store_true", default=False, help="display node status") + parser.add_argument("--timeout", action="store", default=5, help="timeout in seconds for query operations", type=float) parser.add_argument("--exampleconfig", action="store_true", default=False, help="print verbose configuration example to stdout and exit") parser.add_argument("--version", action="version", version="lxmd {version}".format(version=__version__)) @@ -434,15 +506,21 @@ def main(): print(__default_lxmd_config__) exit() - program_setup( - configdir = args.config, - rnsconfigdir=args.rnsconfig, - run_pn=args.propagation_node, - on_inbound=args.on_inbound, - verbosity=args.verbose, - quietness=args.quiet, - service=args.service - ) + if args.status: + get_status(configdir = args.config, + rnsconfigdir=args.rnsconfig, + verbosity=args.verbose, + quietness=args.quiet, + timeout=args.timeout) + exit() + + program_setup(configdir = args.config, + rnsconfigdir=args.rnsconfig, + run_pn=args.propagation_node, + on_inbound=args.on_inbound, + verbosity=args.verbose, + quietness=args.quiet, + service=args.service) except KeyboardInterrupt: print("") From 460645cea2abc0a72b8f5d6444184286c4c676e8 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 23 Jan 2025 14:15:31 +0100 Subject: [PATCH 013/123] Added lxmd status getter --- LXMF/Utilities/lxmd.py | 1 + 1 file changed, 1 insertion(+) diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index 1bc1d12..d8b24d3 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -484,6 +484,7 @@ def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = exit(204) else: # TODO: Output stats + pass def main(): try: From e3be7e0cfdb529dece6e51165b67f697c70724b3 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 23 Jan 2025 16:27:01 +0100 Subject: [PATCH 014/123] Persist last sync attempt --- LXMF/LXMPeer.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/LXMF/LXMPeer.py b/LXMF/LXMPeer.py index ec0cfe2..61602c3 100644 --- a/LXMF/LXMPeer.py +++ b/LXMF/LXMPeer.py @@ -91,6 +91,11 @@ class LXMPeer: peer.tx_bytes = dictionary["tx_bytes"] else: peer.tx_bytes = 0 + + if "last_sync_attempt" in dictionary: + peer.last_sync_attempt = dictionary["last_sync_attempt"] + else: + peer.last_sync_attempt = 0 hm_count = 0 for 
transient_id in dictionary["handled_ids"]: @@ -121,6 +126,7 @@ class LXMPeer: dictionary["link_establishment_rate"] = self.link_establishment_rate dictionary["sync_transfer_rate"] = self.sync_transfer_rate dictionary["propagation_transfer_limit"] = self.propagation_transfer_limit + dictionary["last_sync_attempt"] = self.last_sync_attempt dictionary["offered"] = self.offered dictionary["outgoing"] = self.outgoing dictionary["incoming"] = self.incoming From a198e96064fa47af3f8e1dc8db225fbb39f77f80 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 23 Jan 2025 16:27:23 +0100 Subject: [PATCH 015/123] Include unhandled message count in stats --- LXMF/LXMRouter.py | 1 + 1 file changed, 1 insertion(+) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 22ef3ac..8e824e4 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -709,6 +709,7 @@ class LXMRouter: "offered": peer.offered, "outgoing": peer.outgoing, "incoming": peer.incoming, + "unhandled": peer.unhandled_message_count }, } From 35dd70c59e09a7fa093f7e24b60065317e7507c9 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 23 Jan 2025 16:27:48 +0100 Subject: [PATCH 016/123] Format status and peers output --- LXMF/Utilities/lxmd.py | 140 +++++++++++++++++++++++++++++++++-------- 1 file changed, 115 insertions(+), 25 deletions(-) diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index d8b24d3..dd070fc 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -416,34 +416,45 @@ def deferred_start_jobs(): last_node_announce = time.time() threading.Thread(target=jobs, daemon=True).start() -def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = 0, timeout=5): +def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = 0, timeout=5, show_status=False, show_peers=False, identity_path=None): global configpath, identitypath, storagedir, lxmdir global lxmd_config, active_configuration, targetloglevel targetlogdest = RNS.LOG_STDOUT - if configdir == None: - if os.path.isdir("/etc/lxmd") and os.path.isfile("/etc/lxmd/config"): - configdir = "/etc/lxmd" - elif os.path.isdir(RNS.Reticulum.userdir+"/.config/lxmd") and os.path.isfile(Reticulum.userdir+"/.config/lxmd/config"): - configdir = RNS.Reticulum.userdir+"/.config/lxmd" + if identity_path == None: + if configdir == None: + if os.path.isdir("/etc/lxmd") and os.path.isfile("/etc/lxmd/config"): + configdir = "/etc/lxmd" + elif os.path.isdir(RNS.Reticulum.userdir+"/.config/lxmd") and os.path.isfile(Reticulum.userdir+"/.config/lxmd/config"): + configdir = RNS.Reticulum.userdir+"/.config/lxmd" + else: + configdir = RNS.Reticulum.userdir+"/.lxmd" + + configpath = configdir+"/config" + identitypath = configdir+"/identity" + identity = None + + if not os.path.isdir(configdir): + RNS.log("Specified configuration directory does not exist, exiting now", RNS.LOG_ERROR) + exit(201) + if not os.path.isfile(identitypath): + RNS.log("Identity file not found in specified configuration directory, exiting now", RNS.LOG_ERROR) + exit(202) else: - configdir = RNS.Reticulum.userdir+"/.lxmd" + identity = RNS.Identity.from_file(identitypath) + if identity == None: + RNS.log("Could not load the Primary Identity from "+identitypath, RNS.LOG_ERROR) + exit(4) - configpath = configdir+"/config" - identitypath = configdir+"/identity" - identity = None - - if not os.path.isdir(configdir): - RNS.log("Specified configuration directory does not exist, exiting now", RNS.LOG_ERROR) - exit(201) - if not os.path.isfile(identitypath): - RNS.log("Identity file 
not found in specified configuration directory, exiting now", RNS.LOG_ERROR) - exit(202) else: - identity = RNS.Identity.from_file(identitypath) - if identity == None: - RNS.log("Could not load the Primary Identity from "+identitypath, RNS.LOG_ERROR) - exit(4) + if not os.path.isfile(identity_path): + RNS.log("Identity file not found in specified configuration directory, exiting now", RNS.LOG_ERROR) + exit(202) + else: + identity = RNS.Identity.from_file(identity_path) + if identity == None: + RNS.log("Could not load the Primary Identity from "+identity_path, RNS.LOG_ERROR) + exit(4) if targetloglevel == None: targetloglevel = 3 @@ -483,8 +494,82 @@ def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = RNS.log("Access denied") exit(204) else: - # TODO: Output stats - pass + s = response + ms_util = f"{round((s["messagestore"]["bytes"]/s["messagestore"]["limit"])*100, 2)}%" + if s["from_static_only"]: + who_str = "static peers only" + else: + who_str = "all nodes" + + available_peers = 0 + unreachable_peers = 0 + peered_incoming = 0 + peered_outgoing = 0 + peered_rx_bytes = 0 + peered_tx_bytes = 0 + for peer_id in s["peers"]: + p = s["peers"][peer_id] + pm = p["messages"] + peered_incoming += pm["incoming"] + peered_outgoing += pm["outgoing"] + peered_rx_bytes += p["rx_bytes"] + peered_tx_bytes += p["tx_bytes"] + if p["alive"]: + available_peers += 1 + else: + unreachable_peers += 1 + + total_incoming = peered_incoming+s["unpeered_propagation_incoming"]+s["clients"]["client_propagation_messages_received"] + total_rx_bytes = peered_rx_bytes+s["unpeered_propagation_rx_bytes"] + df = round(peered_outgoing/total_incoming, 2) + + print(f"\nLXMF Propagation Node running on {RNS.prettyhexrep(s["destination_hash"])}, uptime is {RNS.prettytime(s["uptime"])}") + + if show_status: + print(f"Messagestore contains {s["messagestore"]["count"]} messages, {RNS.prettysize(s["messagestore"]["bytes"])} of {RNS.prettysize(s["messagestore"]["limit"])} ({ms_util} utilised)") + print(f"Accepting propagated messages from {who_str}, {RNS.prettysize(s["propagation_limit"]*1000)} per-transfer limit") + print(f"") + print(f"Peers : {s["total_peers"]} total (peer limit is {s["max_peers"]})") + print(f" {s["discovered_peers"]} discovered, {s["static_peers"]} static") + print(f" {available_peers} available, {unreachable_peers} unreachable") + print(f"") + print(f"Traffic : {s["unpeered_propagation_incoming"]} messages received from unpeered nodes ({RNS.prettysize(s["unpeered_propagation_rx_bytes"])})") + print(f" {peered_incoming} messages received from peered nodes ({RNS.prettysize(peered_rx_bytes)})") + print(f" {total_incoming} messages received from peered nodes ({RNS.prettysize(total_rx_bytes)})") + print(f" {peered_outgoing} messages transferred to peered nodes ({RNS.prettysize(peered_tx_bytes)})") + print(f" {s["clients"]["client_propagation_messages_received"]} messages received from clients") + print(f" {s["clients"]["client_propagation_messages_served"]} messages served to clients") + print(f" Distribution factor is {df}") + print(f"") + + if show_peers: + for peer_id in s["peers"]: + ind = " " + p = s["peers"][peer_id] + if p["type"] == "static": + t = "Static peer " + elif p["type"] == "discovered": + t = "Discovered peer " + else: + t = "Unknown peer " + a = "Available" if p["alive"] == True else "Unreachable" + h = max(time.time()-p["last_heard"], 0) + hops = p["network_distance"] + hs = f"{hops} hop away" if hops == 1 else f"{hops} hops away" + pm = p["messages"] + if 
p["last_sync_attempt"] != 0: + ls = f"last synced {RNS.prettytime(max(time.time()-p["last_sync_attempt"], 0))} ago" + else: + ls = "never synced" + + print(f"{ind}{t}{RNS.prettyhexrep(peer_id)}") + print(f"{ind*2}Status : {a}, {hs}, last heard {RNS.prettytime(h)} ago") + print(f"{ind*2}Speeds : {RNS.prettyspeed(p["str"])} STR, {RNS.prettyspeed(p["ler"])} LER, {RNS.prettysize(p["transfer_limit"]*1000)} transfer limit") + print(f"{ind*2}Messages : {pm["offered"]} offered, {pm["outgoing"]} outgoing, {pm["incoming"]} incoming") + print(f"{ind*2}Traffic : {RNS.prettysize(p["rx_bytes"])} received, {RNS.prettysize(p["tx_bytes"])} sent") + print(f"{ind*2}Sync state : {pm["unhandled"]} unhandled message{"" if pm["unhandled"] == 1 else "s"}, {ls}") + print("") + def main(): try: @@ -497,7 +582,9 @@ def main(): parser.add_argument("-q", "--quiet", action="count", default=0) parser.add_argument("-s", "--service", action="store_true", default=False, help="lxmd is running as a service and should log to file") parser.add_argument("--status", action="store_true", default=False, help="display node status") + parser.add_argument("--peers", action="store_true", default=False, help="display peered nodes") parser.add_argument("--timeout", action="store", default=5, help="timeout in seconds for query operations", type=float) + parser.add_argument("--identity", action="store", default=None, help="path to identity used for query request", type=str) parser.add_argument("--exampleconfig", action="store_true", default=False, help="print verbose configuration example to stdout and exit") parser.add_argument("--version", action="version", version="lxmd {version}".format(version=__version__)) @@ -507,12 +594,15 @@ def main(): print(__default_lxmd_config__) exit() - if args.status: + if args.status or args.peers: get_status(configdir = args.config, rnsconfigdir=args.rnsconfig, verbosity=args.verbose, quietness=args.quiet, - timeout=args.timeout) + timeout=args.timeout, + show_status=args.status, + show_peers=args.peers, + identity_path=args.identity) exit() program_setup(configdir = args.config, From a87458d25f794d84c68f0c4212fedc91bcd7e7fb Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 23 Jan 2025 16:28:11 +0100 Subject: [PATCH 017/123] Updated version --- LXMF/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LXMF/_version.py b/LXMF/_version.py index 43c4ab0..22049ab 100644 --- a/LXMF/_version.py +++ b/LXMF/_version.py @@ -1 +1 @@ -__version__ = "0.6.1" +__version__ = "0.6.2" From fe59b265c50ce87ca33e183b2b154b3eaaf163c0 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 23 Jan 2025 16:54:12 +0100 Subject: [PATCH 018/123] Fixed fstrings not working on Python < 3.12 --- LXMF/LXMRouter.py | 3 ++- LXMF/Utilities/lxmd.py | 40 ++++++++++++++++++++++++++-------------- 2 files changed, 28 insertions(+), 15 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 8e824e4..0358428 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -1972,7 +1972,8 @@ class LXMRouter: remote_str = f"peer {remote_str}" messages = data[1] - RNS.log(f"Received {len(messages)} message{"" if len(messages) == 1 else "s"} from {remote_str}", RNS.LOG_VERBOSE) + ms = "" if len(messages) == 1 else "s" + RNS.log(f"Received {len(messages)} message{ms} from {remote_str}", RNS.LOG_VERBOSE) for lxmf_data in messages: peer = None transient_id = RNS.Identity.full_hash(lxmf_data) diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index dd070fc..54e0b54 100644 --- a/LXMF/Utilities/lxmd.py +++ 
b/LXMF/Utilities/lxmd.py @@ -495,7 +495,8 @@ def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = exit(204) else: s = response - ms_util = f"{round((s["messagestore"]["bytes"]/s["messagestore"]["limit"])*100, 2)}%" + mutil = round((s["messagestore"]["bytes"]/s["messagestore"]["limit"])*100, 2) + ms_util = f"{mutil}%" if s["from_static_only"]: who_str = "static peers only" else: @@ -523,22 +524,28 @@ def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = total_rx_bytes = peered_rx_bytes+s["unpeered_propagation_rx_bytes"] df = round(peered_outgoing/total_incoming, 2) - print(f"\nLXMF Propagation Node running on {RNS.prettyhexrep(s["destination_hash"])}, uptime is {RNS.prettytime(s["uptime"])}") + dhs = RNS.prettyhexrep(s["destination_hash"]); uts = RNS.prettytime(s["uptime"]) + print(f"\nLXMF Propagation Node running on {dhs}, uptime is {uts}") if show_status: - print(f"Messagestore contains {s["messagestore"]["count"]} messages, {RNS.prettysize(s["messagestore"]["bytes"])} of {RNS.prettysize(s["messagestore"]["limit"])} ({ms_util} utilised)") - print(f"Accepting propagated messages from {who_str}, {RNS.prettysize(s["propagation_limit"]*1000)} per-transfer limit") + msb = RNS.prettysize(s["messagestore"]["bytes"]); msl = RNS.prettysize(s["messagestore"]["limit"]) + ptl = RNS.prettysize(s["propagation_limit"]*1000); uprx = RNS.prettysize(s["unpeered_propagation_rx_bytes"]) + mscnt = s["messagestore"]["count"]; stp = s["total_peers"]; smp = s["max_peers"]; sdp = s["discovered_peers"] + ssp = s["static_peers"]; cprr = s["clients"]["client_propagation_messages_received"] + cprs = s["clients"]["client_propagation_messages_served"]; upi = s["unpeered_propagation_incoming"] + print(f"Messagestore contains {mscnt} messages, {msb} ({ms_util} utilised of {msl})") + print(f"Accepting propagated messages from {who_str}, {ptl} per-transfer limit") print(f"") - print(f"Peers : {s["total_peers"]} total (peer limit is {s["max_peers"]})") - print(f" {s["discovered_peers"]} discovered, {s["static_peers"]} static") + print(f"Peers : {stp} total (peer limit is {smp})") + print(f" {sdp} discovered, {ssp} static") print(f" {available_peers} available, {unreachable_peers} unreachable") print(f"") - print(f"Traffic : {s["unpeered_propagation_incoming"]} messages received from unpeered nodes ({RNS.prettysize(s["unpeered_propagation_rx_bytes"])})") + print(f"Traffic : {upi} messages received from unpeered nodes ({uprx})") print(f" {peered_incoming} messages received from peered nodes ({RNS.prettysize(peered_rx_bytes)})") print(f" {total_incoming} messages received from peered nodes ({RNS.prettysize(total_rx_bytes)})") print(f" {peered_outgoing} messages transferred to peered nodes ({RNS.prettysize(peered_tx_bytes)})") - print(f" {s["clients"]["client_propagation_messages_received"]} messages received from clients") - print(f" {s["clients"]["client_propagation_messages_served"]} messages served to clients") + print(f" {cprr} messages received from clients") + print(f" {cprs} messages served to clients") print(f" Distribution factor is {df}") print(f"") @@ -558,16 +565,21 @@ def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = hs = f"{hops} hop away" if hops == 1 else f"{hops} hops away" pm = p["messages"] if p["last_sync_attempt"] != 0: - ls = f"last synced {RNS.prettytime(max(time.time()-p["last_sync_attempt"], 0))} ago" + lsa = p["last_sync_attempt"] + ls = f"last synced {RNS.prettytime(max(time.time()-lsa, 0))} ago" else: ls = "never 
synced" + sstr = RNS.prettyspeed(p["str"]); sler = RNS.prettyspeed(p["ler"]); stl = RNS.prettysize(p["transfer_limit"]*1000) + srxb = RNS.prettysize(p["rx_bytes"]); stxb = RNS.prettysize(p["tx_bytes"]); pmo = pm["offered"]; pmout = pm["outgoing"] + pmi = pm["incoming"]; pmuh = pm["unhandled"] print(f"{ind}{t}{RNS.prettyhexrep(peer_id)}") print(f"{ind*2}Status : {a}, {hs}, last heard {RNS.prettytime(h)} ago") - print(f"{ind*2}Speeds : {RNS.prettyspeed(p["str"])} STR, {RNS.prettyspeed(p["ler"])} LER, {RNS.prettysize(p["transfer_limit"]*1000)} transfer limit") - print(f"{ind*2}Messages : {pm["offered"]} offered, {pm["outgoing"]} outgoing, {pm["incoming"]} incoming") - print(f"{ind*2}Traffic : {RNS.prettysize(p["rx_bytes"])} received, {RNS.prettysize(p["tx_bytes"])} sent") - print(f"{ind*2}Sync state : {pm["unhandled"]} unhandled message{"" if pm["unhandled"] == 1 else "s"}, {ls}") + print(f"{ind*2}Speeds : {sstr} STR, {sler} LER, {stl} transfer limit") + print(f"{ind*2}Messages : {pmo} offered, {pmout} outgoing, {pmi} incoming") + print(f"{ind*2}Traffic : {srxb} received, {stxb} sent") + ms = "" if pm["unhandled"] == 1 else "s" + print(f"{ind*2}Sync state : {pmuh} unhandled message{ms}, {ls}") print("") From 70186cf8d9fc780eba6ce39494964c31b2519a57 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 23 Jan 2025 17:07:20 +0100 Subject: [PATCH 019/123] Fixed typo --- LXMF/Utilities/lxmd.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index 54e0b54..bb29661 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -542,7 +542,7 @@ def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = print(f"") print(f"Traffic : {upi} messages received from unpeered nodes ({uprx})") print(f" {peered_incoming} messages received from peered nodes ({RNS.prettysize(peered_rx_bytes)})") - print(f" {total_incoming} messages received from peered nodes ({RNS.prettysize(total_rx_bytes)})") + print(f" {total_incoming} messages received from all nodes ({RNS.prettysize(total_rx_bytes)})") print(f" {peered_outgoing} messages transferred to peered nodes ({RNS.prettysize(peered_tx_bytes)})") print(f" {cprr} messages received from clients") print(f" {cprs} messages served to clients") From a3e3868f9258ed1f0b930e85a8993234440d448d Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 23 Jan 2025 17:09:40 +0100 Subject: [PATCH 020/123] Changed formatting --- LXMF/Utilities/lxmd.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index bb29661..415ecb6 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -542,7 +542,7 @@ def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = print(f"") print(f"Traffic : {upi} messages received from unpeered nodes ({uprx})") print(f" {peered_incoming} messages received from peered nodes ({RNS.prettysize(peered_rx_bytes)})") - print(f" {total_incoming} messages received from all nodes ({RNS.prettysize(total_rx_bytes)})") + print(f" {total_incoming} messages received in total ({RNS.prettysize(total_rx_bytes)})") print(f" {peered_outgoing} messages transferred to peered nodes ({RNS.prettysize(peered_tx_bytes)})") print(f" {cprr} messages received from clients") print(f" {cprs} messages served to clients") From fb4bf9b0b9307cb872e97619a685e8adf44a467e Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 23 Jan 2025 17:36:30 +0100 Subject: [PATCH 021/123] Cleanup --- LXMF/LXMRouter.py | 1 - 1 
file changed, 1 deletion(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 0358428..bd63e17 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -742,7 +742,6 @@ class LXMRouter: return node_stats def stats_get_request(self, path, data, request_id, remote_identity, requested_at): - RNS.log("Stats request", RNS.LOG_DEBUG) # TODO: Remove debug if remote_identity == None: return LXMPeer.ERROR_NO_IDENTITY elif remote_identity.hash != self.identity.hash: From cdea838a6c38f0b9a4f7d983b48361565bbc835f Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 23 Jan 2025 17:43:24 +0100 Subject: [PATCH 022/123] Updated status output --- LXMF/Utilities/lxmd.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index 415ecb6..b52d5ae 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -544,8 +544,8 @@ def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = print(f" {peered_incoming} messages received from peered nodes ({RNS.prettysize(peered_rx_bytes)})") print(f" {total_incoming} messages received in total ({RNS.prettysize(total_rx_bytes)})") print(f" {peered_outgoing} messages transferred to peered nodes ({RNS.prettysize(peered_tx_bytes)})") - print(f" {cprr} messages received from clients") - print(f" {cprs} messages served to clients") + print(f" {cprr} propagation messages received diretly from clients") + print(f" {cprs} propagation messages served to clients") print(f" Distribution factor is {df}") print(f"") From aa57b16cf562d8f9409e877f959d9751f8c5b300 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Fri, 24 Jan 2025 00:09:36 +0100 Subject: [PATCH 023/123] Fixed #23 --- LXMF/LXMRouter.py | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index bd63e17..b0a4cc8 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -777,13 +777,16 @@ class LXMRouter: self.clean_transient_id_caches() if self.processing_count % LXMRouter.JOB_STORE_INTERVAL == 0: - self.clean_message_store() + if self.propagation_node == True: + self.clean_message_store() if self.processing_count % LXMRouter.JOB_PEERINGEST_INTERVAL == 0: - self.flush_queues() + if self.propagation_node == True: + self.flush_queues() if self.processing_count % LXMRouter.JOB_PEERSYNC_INTERVAL == 0: - self.sync_peers() + if self.propagation_node == True: + self.sync_peers() # def syncstats(self): # for peer_id in self.peers: @@ -986,12 +989,12 @@ class LXMRouter: lxm_size = self.propagation_entries[transient_id][3] return lxm_size - def clean_message_store(self): + RNS.log("Cleaning message store", RNS.LOG_VERBOSE) # Check and remove expired messages now = time.time() removed_entries = {} - for transient_id in self.propagation_entries: + for transient_id in self.propagation_entries.copy(): entry = self.propagation_entries[transient_id] filepath = entry[1] components = filepath.split("_") @@ -999,7 +1002,7 @@ class LXMRouter: if len(components) == 2 and float(components[1]) > 0 and len(os.path.split(components[0])[1]) == (RNS.Identity.HASHLENGTH//8)*2: timestamp = float(components[1]) if now > timestamp+LXMRouter.MESSAGE_EXPIRY: - RNS.log("Purging message "+RNS.prettyhexrep(transient_id)+" due to expiry", RNS.LOG_DEBUG) + RNS.log("Purging message "+RNS.prettyhexrep(transient_id)+" due to expiry", RNS.LOG_EXTREME) removed_entries[transient_id] = filepath else: RNS.log("Purging message "+RNS.prettyhexrep(transient_id)+" due to invalid file path", 
RNS.LOG_WARNING) @@ -1017,7 +1020,7 @@ class LXMRouter: RNS.log("Could not remove "+RNS.prettyhexrep(transient_id)+" from message store. The contained exception was: "+str(e), RNS.LOG_ERROR) if removed_count > 0: - RNS.log("Cleaned "+str(removed_count)+" entries from the message store", RNS.LOG_DEBUG) + RNS.log("Cleaned "+str(removed_count)+" entries from the message store", RNS.LOG_VERBOSE) # Check size of message store and cull if needed try: @@ -1029,7 +1032,7 @@ class LXMRouter: bytes_cleaned = 0 weighted_entries = [] - for transient_id in self.propagation_entries: + for transient_id in self.propagation_entries.copy(): weighted_entries.append([ self.propagation_entries[transient_id], self.get_weight(transient_id), From a8cc5f41cf92a7e35b80bc2f6b55292ad4cf170d Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Fri, 24 Jan 2025 00:21:37 +0100 Subject: [PATCH 024/123] Fixed typo --- LXMF/Utilities/lxmd.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index b52d5ae..2f21108 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -540,11 +540,11 @@ def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = print(f" {sdp} discovered, {ssp} static") print(f" {available_peers} available, {unreachable_peers} unreachable") print(f"") - print(f"Traffic : {upi} messages received from unpeered nodes ({uprx})") + print(f"Traffic : {total_incoming} messages received in total ({RNS.prettysize(total_rx_bytes)})") print(f" {peered_incoming} messages received from peered nodes ({RNS.prettysize(peered_rx_bytes)})") - print(f" {total_incoming} messages received in total ({RNS.prettysize(total_rx_bytes)})") + print(f" {upi} messages received from unpeered nodes ({uprx})") print(f" {peered_outgoing} messages transferred to peered nodes ({RNS.prettysize(peered_tx_bytes)})") - print(f" {cprr} propagation messages received diretly from clients") + print(f" {cprr} propagation messages received directly from clients") print(f" {cprs} propagation messages served to clients") print(f" Distribution factor is {df}") print(f"") From 6d2eb4f97375dc2c637dd531d94a11738d2cb7ed Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Fri, 24 Jan 2025 00:26:47 +0100 Subject: [PATCH 025/123] Updated default config --- LXMF/Utilities/lxmd.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index 2f21108..7f54835 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -127,7 +127,7 @@ def apply_config(): if active_configuration["message_storage_limit"] < 0.005: active_configuration["message_storage_limit"] = 0.005 else: - active_configuration["message_storage_limit"] = 2000 + active_configuration["message_storage_limit"] = 500 if "propagation" in lxmd_config and "propagation_transfer_max_accepted_size" in lxmd_config["propagation"]: active_configuration["propagation_transfer_max_accepted_size"] = lxmd_config["propagation"].as_float("propagation_transfer_max_accepted_size") @@ -679,9 +679,9 @@ propagation_transfer_max_accepted_size = 256 # LXMF prioritises keeping messages that are # new and small. Large and old messages will # be removed first. This setting is optional -# and defaults to 2 gigabytes. +# and defaults to 500 megabytes. 
-# message_storage_limit = 2000 +# message_storage_limit = 500 # You can tell the LXMF message router to # prioritise storage for one or more From 962d9c90d1c468e95589b15ccaf2384a379dea35 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Fri, 24 Jan 2025 13:50:56 +0100 Subject: [PATCH 026/123] Added wanted inbound peers to PN announce data --- LXMF/Handlers.py | 9 +++++++-- LXMF/LXMRouter.py | 10 ++++++++-- 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/LXMF/Handlers.py b/LXMF/Handlers.py index 22c6cd3..ea8960d 100644 --- a/LXMF/Handlers.py +++ b/LXMF/Handlers.py @@ -45,6 +45,11 @@ class LXMFPropagationAnnounceHandler: if pn_announce_data_is_valid(data): node_timebase = data[1] propagation_transfer_limit = None + if len(data) >= 4: + try: + wanted_inbound_peers = int(data[3]) + except: + wanted_inbound_peers = None if len(data) >= 3: try: propagation_transfer_limit = float(data[2]) @@ -52,12 +57,12 @@ class LXMFPropagationAnnounceHandler: propagation_transfer_limit = None if destination_hash in self.lxmrouter.static_peers: - self.lxmrouter.peer(destination_hash, node_timebase, propagation_transfer_limit) + self.lxmrouter.peer(destination_hash, node_timebase, propagation_transfer_limit, wanted_inbound_peers) else: if data[0] == True: if RNS.Transport.hops_to(destination_hash) <= self.lxmrouter.autopeer_maxdepth: - self.lxmrouter.peer(destination_hash, node_timebase, propagation_transfer_limit) + self.lxmrouter.peer(destination_hash, node_timebase, propagation_transfer_limit, wanted_inbound_peers) elif data[0] == False: self.lxmrouter.unpeer(destination_hash, node_timebase) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index b0a4cc8..9eccedc 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -283,6 +283,7 @@ class LXMRouter: node_state, # Boolean flag signalling propagation node state int(time.time()), # Current node timebase self.propagation_per_transfer_limit, # Per-transfer limit for message propagation in kilobytes + self.get_wanted_inbound_peers(), # How many more inbound peers this node wants ] data = msgpack.packb(announce_data) @@ -888,6 +889,10 @@ class LXMRouter: self.save_outbound_stamp_costs() threading.Thread(target=self.save_outbound_stamp_costs, daemon=True).start() + def get_wanted_inbound_peers(self): + # TODO: Implement + return None + def get_announce_app_data(self, destination_hash): if destination_hash in self.delivery_destinations: delivery_destination = self.delivery_destinations[destination_hash] @@ -1766,7 +1771,7 @@ class LXMRouter: ### Peer Sync & Propagation ########################### ####################################################### - def peer(self, destination_hash, timestamp, propagation_transfer_limit): + def peer(self, destination_hash, timestamp, propagation_transfer_limit, wanted_inbound_peers = None): if destination_hash in self.peers: peer = self.peers[destination_hash] if timestamp > peer.peering_timebase: @@ -1969,7 +1974,8 @@ class LXMRouter: # sane default value, and wait for an announce to arrive # that will update the peering config to the actual limit. 
propagation_transfer_limit = LXMRouter.PROPAGATION_LIMIT//4 - self.peer(remote_hash, remote_timebase, propagation_transfer_limit) + wanted_inbound_peers = None + self.peer(remote_hash, remote_timebase, propagation_transfer_limit, wanted_inbound_peers) else: remote_str = f"peer {remote_str}" From cec903a4dcc878f14f8cd8be6a9abc54868cbea6 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Fri, 24 Jan 2025 14:05:12 +0100 Subject: [PATCH 027/123] Added status query API function --- LXMF/Handlers.py | 1 + LXMF/LXMPeer.py | 1 + LXMF/Utilities/lxmd.py | 60 ++++++++++++++++++++++++------------------ 3 files changed, 37 insertions(+), 25 deletions(-) diff --git a/LXMF/Handlers.py b/LXMF/Handlers.py index ea8960d..26a5df6 100644 --- a/LXMF/Handlers.py +++ b/LXMF/Handlers.py @@ -45,6 +45,7 @@ class LXMFPropagationAnnounceHandler: if pn_announce_data_is_valid(data): node_timebase = data[1] propagation_transfer_limit = None + wanted_inbound_peers = None if len(data) >= 4: try: wanted_inbound_peers = int(data[3]) diff --git a/LXMF/LXMPeer.py b/LXMF/LXMPeer.py index 61602c3..5036528 100644 --- a/LXMF/LXMPeer.py +++ b/LXMF/LXMPeer.py @@ -20,6 +20,7 @@ class LXMPeer: ERROR_NO_IDENTITY = 0xf0 ERROR_NO_ACCESS = 0xf1 + ERROR_TIMEOUT = 0xfe # Maximum amount of time a peer can # be unreachable before it is removed diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index 7f54835..a06d60c 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -416,6 +416,36 @@ def deferred_start_jobs(): last_node_announce = time.time() threading.Thread(target=jobs, daemon=True).start() +def query_status(identity, timeout=5, exit_on_fail=False): + control_destination = RNS.Destination(identity, RNS.Destination.OUT, RNS.Destination.SINGLE, APP_NAME, "propagation", "control") + + timeout = time.time()+timeout + def check_timeout(): + if time.time() > timeout: + RNS.log("Getting lxmd statistics timed out, exiting now", RNS.LOG_ERROR) + if exit_on_fail: + exit(200) + else: + return LXMF.LXMPeer.LXMPeer.ERROR_TIMEOUT + else: + time.sleep(0.1) + + if not RNS.Transport.has_path(control_destination.hash): + RNS.Transport.request_path(control_destination.hash) + while not RNS.Transport.has_path(control_destination.hash): + check_timeout() + + link = RNS.Link(control_destination) + while not link.status == RNS.Link.ACTIVE: + check_timeout() + + link.identify(identity) + request_receipt = link.request(LXMF.LXMRouter.STATS_GET_PATH, data=None, response_callback=None, failed_callback=None) + while not request_receipt.get_status() == RNS.RequestReceipt.READY: + check_timeout() + + return request_receipt.get_response() + def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = 0, timeout=5, show_status=False, show_peers=False, identity_path=None): global configpath, identitypath, storagedir, lxmdir global lxmd_config, active_configuration, targetloglevel @@ -462,31 +492,8 @@ def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = targetloglevel = targetloglevel+verbosity-quietness reticulum = RNS.Reticulum(configdir=rnsconfigdir, loglevel=targetloglevel, logdest=targetlogdest) - control_destination = RNS.Destination(identity, RNS.Destination.OUT, RNS.Destination.SINGLE, APP_NAME, "propagation", "control") + response = query_status(identity, timeout=timeout, exit_on_fail=True) - timeout = time.time()+timeout - def check_timeout(): - if time.time() > timeout: - RNS.log("Getting lxmd statistics timed out, exiting now", RNS.LOG_ERROR) - exit(200) - else: - time.sleep(0.1) - - if not 
RNS.Transport.has_path(control_destination.hash): - RNS.Transport.request_path(control_destination.hash) - while not RNS.Transport.has_path(control_destination.hash): - check_timeout() - - link = RNS.Link(control_destination) - while not link.status == RNS.Link.ACTIVE: - check_timeout() - - link.identify(identity) - request_receipt = link.request(LXMF.LXMRouter.STATS_GET_PATH, data=None, response_callback=None, failed_callback=None) - while not request_receipt.get_status() == RNS.RequestReceipt.READY: - check_timeout() - - response = request_receipt.get_response() if response == LXMF.LXMPeer.LXMPeer.ERROR_NO_IDENTITY: RNS.log("Remote received no identity") exit(203) @@ -550,6 +557,9 @@ def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = print(f"") if show_peers: + if not show_status: + print("") + for peer_id in s["peers"]: ind = " " p = s["peers"][peer_id] @@ -562,7 +572,7 @@ def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = a = "Available" if p["alive"] == True else "Unreachable" h = max(time.time()-p["last_heard"], 0) hops = p["network_distance"] - hs = f"{hops} hop away" if hops == 1 else f"{hops} hops away" + hs = "hops unknown" if hops == RNS.Transport.PATHFINDER_M else f"{hops} hop away" if hops == 1 else f"{hops} hops away" pm = p["messages"] if p["last_sync_attempt"] != 0: lsa = p["last_sync_attempt"] From 26a10cce8f8a572553084c69603ca6605f2672fd Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sun, 26 Jan 2025 01:13:11 +0100 Subject: [PATCH 028/123] Status query return code --- LXMF/Utilities/lxmd.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index a06d60c..73d0eb0 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -422,8 +422,8 @@ def query_status(identity, timeout=5, exit_on_fail=False): timeout = time.time()+timeout def check_timeout(): if time.time() > timeout: - RNS.log("Getting lxmd statistics timed out, exiting now", RNS.LOG_ERROR) if exit_on_fail: + RNS.log("Getting lxmd statistics timed out, exiting now", RNS.LOG_ERROR) exit(200) else: return LXMF.LXMPeer.LXMPeer.ERROR_TIMEOUT @@ -433,16 +433,22 @@ def query_status(identity, timeout=5, exit_on_fail=False): if not RNS.Transport.has_path(control_destination.hash): RNS.Transport.request_path(control_destination.hash) while not RNS.Transport.has_path(control_destination.hash): - check_timeout() + tc = check_timeout() + if tc: + return tc link = RNS.Link(control_destination) while not link.status == RNS.Link.ACTIVE: - check_timeout() + tc = check_timeout() + if tc: + return tc link.identify(identity) request_receipt = link.request(LXMF.LXMRouter.STATS_GET_PATH, data=None, response_callback=None, failed_callback=None) while not request_receipt.get_status() == RNS.RequestReceipt.READY: - check_timeout() + tc = check_timeout() + if tc: + return tc return request_receipt.get_response() From e0163e100a5541ed9abf4c57bb38960739ca23ea Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Mon, 27 Jan 2025 10:26:11 +0100 Subject: [PATCH 029/123] Updated issue template --- .github/ISSUE_TEMPLATE/🐛-bug-report.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/🐛-bug-report.md b/.github/ISSUE_TEMPLATE/🐛-bug-report.md index 77ad6c2..65b492e 100644 --- a/.github/ISSUE_TEMPLATE/🐛-bug-report.md +++ b/.github/ISSUE_TEMPLATE/🐛-bug-report.md @@ -12,7 +12,7 @@ Before creating a bug report on this issue tracker, you **must** read the [Contr - The issue 
tracker is used by developers of this project. **Do not use it to ask general questions, or for support requests**. - Ideas and feature requests can be made on the [Discussions](https://github.com/markqvist/Reticulum/discussions). **Only** feature requests accepted by maintainers and developers are tracked and included on the issue tracker. **Do not post feature requests here**. -- After reading the [Contribution Guidelines](https://github.com/markqvist/Reticulum/blob/master/Contributing.md), delete this section from your bug report. +- After reading the [Contribution Guidelines](https://github.com/markqvist/Reticulum/blob/master/Contributing.md), **delete this section only** (*"Read the Contribution Guidelines"*) from your bug report, **and fill in all the other sections**. **Describe the Bug** A clear and concise description of what the bug is. From 886ac69a8284e8ca3c3c0a4e2106f7160e8c7f62 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Mon, 27 Jan 2025 12:04:05 +0100 Subject: [PATCH 030/123] Tear down control link after use --- LXMF/Utilities/lxmd.py | 1 + 1 file changed, 1 insertion(+) diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index 73d0eb0..f9a2ef6 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -450,6 +450,7 @@ def query_status(identity, timeout=5, exit_on_fail=False): if tc: return tc + link.teardown() return request_receipt.get_response() def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = 0, timeout=5, show_status=False, show_peers=False, identity_path=None): From e0e901291e20515d12abe105fef30010db7fb1f1 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Mon, 27 Jan 2025 12:04:16 +0100 Subject: [PATCH 031/123] Updated logging --- LXMF/LXMessage.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LXMF/LXMessage.py b/LXMF/LXMessage.py index 2342708..515ab11 100644 --- a/LXMF/LXMessage.py +++ b/LXMF/LXMessage.py @@ -380,7 +380,7 @@ class LXMessage: if self.desired_method == LXMessage.OPPORTUNISTIC: if self.__destination.type == RNS.Destination.SINGLE: if content_size > LXMessage.ENCRYPTED_PACKET_MAX_CONTENT: - RNS.log(f"Opportunistic delivery was requested for {self}, but content exceeds packet size limit. Falling back to link-based delivery.", RNS.LOG_DEBUG) + RNS.log(f"Opportunistic delivery was requested for {self}, but content of length {content_size} exceeds packet size limit. 
Falling back to link-based delivery.", RNS.LOG_DEBUG) self.desired_method = LXMessage.DIRECT # Set delivery parameters according to delivery method From f1d060a92ef9ea9b0a0f3402ff46fc9d91fddd5c Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Wed, 29 Jan 2025 01:26:36 +0100 Subject: [PATCH 032/123] Added peer rotation --- LXMF/LXMPeer.py | 4 +++ LXMF/LXMRouter.py | 68 +++++++++++++++++++++++++++++++++++++++++++---- 2 files changed, 67 insertions(+), 5 deletions(-) diff --git a/LXMF/LXMPeer.py b/LXMF/LXMPeer.py index 5036528..e2f951a 100644 --- a/LXMF/LXMPeer.py +++ b/LXMF/LXMPeer.py @@ -469,6 +469,10 @@ class LXMPeer: return self._um_count + @property + def acceptance_rate(self): + return 0 if self.offered == 0 else (self.outgoing/self.offered) + def _update_counts(self): if not self._hm_counts_synced: hm = self.handled_messages; del hm diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 9eccedc..4bbd24c 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -41,6 +41,7 @@ class LXMRouter: AUTOPEER = True AUTOPEER_MAXDEPTH = 4 FASTEST_N_RANDOM_POOL = 2 + ROTATION_HEADROOM_PCT = 10 PROPAGATION_LIMIT = 256 DELIVERY_LIMIT = 1000 @@ -122,6 +123,7 @@ class LXMRouter: self.propagation_transfer_progress = 0.0 self.propagation_transfer_last_result = None self.propagation_transfer_max_messages = None + self.prioritise_rotating_unreachable_peers = False self.active_propagation_links = [] self.locally_delivered_transient_ids = {} self.locally_processed_transient_ids = {} @@ -783,17 +785,13 @@ class LXMRouter: if self.processing_count % LXMRouter.JOB_PEERINGEST_INTERVAL == 0: if self.propagation_node == True: + self.rotate_peers() self.flush_queues() if self.processing_count % LXMRouter.JOB_PEERSYNC_INTERVAL == 0: if self.propagation_node == True: self.sync_peers() - # def syncstats(self): - # for peer_id in self.peers: - # p = self.peers[peer_id] - # RNS.log(f"{RNS.prettyhexrep(peer_id)} O={p.offered} S={p.outgoing} I={p.incoming} TX={RNS.prettysize(p.tx_bytes)} RX={RNS.prettysize(p.rx_bytes)}") - def jobloop(self): while (True): # TODO: Improve this to scheduling, so manual @@ -1805,6 +1803,66 @@ class LXMRouter: self.peers.pop(destination_hash) RNS.log("Broke peering with "+str(peer.destination)) + def rotate_peers(self): + try: + rotation_headroom = max(1, math.floor(self.max_peers*(LXMRouter.ROTATION_HEADROOM_PCT/100.0))) + required_drops = len(self.peers) - (self.max_peers - rotation_headroom) + if required_drops > 0 and len(self.peers) - required_drops > 1: + peers = self.peers.copy() + untested_peers = [] + for peer_id in self.peers: + peer = self.peers[peer_id] + if peer.last_sync_attempt == 0: + untested_peers.append(peer) + + if len(untested_peers) >= rotation_headroom: + RNS.log("Newly added peer threshold reached, postponing peer rotation", RNS.LOG_DEBUG) + return + + culled_peers = [] + waiting_peers = [] + unresponsive_peers = [] + for peer_id in peers: + peer = peers[peer_id] + if not peer_id in self.static_peers and peer.state == LXMPeer.IDLE: + if peer.alive: + if peer.offered == 0: + # Don't consider for unpeering until at + # least one message has been offered + pass + else: + waiting_peers.append(peer) + else: + unresponsive_peers.append(peer) + + drop_pool = [] + if len(unresponsive_peers) > 0: + drop_pool.extend(unresponsive_peers) + if not self.prioritise_rotating_unreachable_peers: + drop_pool.extend(waiting_peers) + + else: + drop_pool.extend(waiting_peers) + + if len(drop_pool) > 0: + drop_count = min(required_drops, len(drop_pool)) + low_acceptance_rate_peers = 
sorted( + drop_pool, + key=lambda p: ( 0 if p.offered == 0 else (p.outgoing/p.offered) ), + reverse=False + )[0:drop_count] + + ms = "" if len(low_acceptance_rate_peers) == 1 else "s" + RNS.log(f"Dropping {len(low_acceptance_rate_peers)} lowest acceptance rate peer{ms} to increase peering headroom", RNS.LOG_DEBUG) + for peer in low_acceptance_rate_peers: + ar = 0 if peer.offered == 0 else round((peer.outgoing/peer.offered)*100, 2) + RNS.log(f"Acceptance rate for {RNS.prettyhexrep(peer.destination_hash)} was: {ar}% ({peer.outgoing} / {peer.offered})", RNS.LOG_DEBUG) + self.unpeer(peer.destination_hash) + + except Exception as e: + RNS.log(f"An error occurred during peer rotation: {e}", RNS.LOG_ERROR) + RNS.trace_exception(e) + def sync_peers(self): culled_peers = [] waiting_peers = [] From 40fc75f5593aee19563ed9403170c7b1c938e7fd Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Wed, 29 Jan 2025 14:24:09 +0100 Subject: [PATCH 033/123] Refined peer rotation algorithm --- LXMF/LXMRouter.py | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 4bbd24c..1d8f50e 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -763,6 +763,7 @@ class LXMRouter: JOB_STORE_INTERVAL = 120 JOB_PEERSYNC_INTERVAL = 12 JOB_PEERINGEST_INTERVAL= JOB_PEERSYNC_INTERVAL + JOB_ROTATE_INTERVAL = 675 def jobs(self): if not self.exit_handler_running: self.processing_count += 1 @@ -785,9 +786,12 @@ class LXMRouter: if self.processing_count % LXMRouter.JOB_PEERINGEST_INTERVAL == 0: if self.propagation_node == True: - self.rotate_peers() self.flush_queues() + if self.processing_count % LXMRouter.JOB_ROTATE_INTERVAL == 0: + if self.propagation_node == True: + self.rotate_peers() + if self.processing_count % LXMRouter.JOB_PEERSYNC_INTERVAL == 0: if self.propagation_node == True: self.sync_peers() @@ -1819,6 +1823,17 @@ class LXMRouter: RNS.log("Newly added peer threshold reached, postponing peer rotation", RNS.LOG_DEBUG) return + fully_synced_peers = {} + for peer_id in peers: + peer = peers[peer_id] + if peer.unhandled_message_count == 0: + fully_synced_peers[peer_id] = peer + + if len(fully_synced_peers) > 0: + peers = fully_synced_peers + ms = "" if len(fully_synced_peers) == 1 else "s" + RNS.log(f"Found {len(fully_synced_peers)} fully synced peer{ms}, using as peer rotation pool basis", RNS.LOG_DEBUG) + culled_peers = [] waiting_peers = [] unresponsive_peers = [] @@ -1856,7 +1871,8 @@ class LXMRouter: RNS.log(f"Dropping {len(low_acceptance_rate_peers)} lowest acceptance rate peer{ms} to increase peering headroom", RNS.LOG_DEBUG) for peer in low_acceptance_rate_peers: ar = 0 if peer.offered == 0 else round((peer.outgoing/peer.offered)*100, 2) - RNS.log(f"Acceptance rate for {RNS.prettyhexrep(peer.destination_hash)} was: {ar}% ({peer.outgoing} / {peer.offered})", RNS.LOG_DEBUG) + reachable_str = "reachable" if peer.alive else "unreachable" + RNS.log(f"Acceptance rate for {reachable_str} peer {RNS.prettyhexrep(peer.destination_hash)} was: {ar}% ({peer.outgoing}/{peer.offered}, {peer.unhandled_message_count} unhandled messages)", RNS.LOG_DEBUG) self.unpeer(peer.destination_hash) except Exception as e: From 40d0b9a5deca5fb054946dfcf37d2442bdac4469 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Wed, 29 Jan 2025 21:21:51 +0100 Subject: [PATCH 034/123] Added acceptance rate threshold to peer rotation --- LXMF/LXMRouter.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 
1d8f50e..df340d2 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -42,6 +42,7 @@ class LXMRouter: AUTOPEER_MAXDEPTH = 4 FASTEST_N_RANDOM_POOL = 2 ROTATION_HEADROOM_PCT = 10 + ROTATION_AR_MAX = 0.5 PROPAGATION_LIMIT = 256 DELIVERY_LIMIT = 1000 @@ -1867,13 +1868,16 @@ class LXMRouter: reverse=False )[0:drop_count] - ms = "" if len(low_acceptance_rate_peers) == 1 else "s" - RNS.log(f"Dropping {len(low_acceptance_rate_peers)} lowest acceptance rate peer{ms} to increase peering headroom", RNS.LOG_DEBUG) + dropped_peers = 0 for peer in low_acceptance_rate_peers: ar = 0 if peer.offered == 0 else round((peer.outgoing/peer.offered)*100, 2) - reachable_str = "reachable" if peer.alive else "unreachable" - RNS.log(f"Acceptance rate for {reachable_str} peer {RNS.prettyhexrep(peer.destination_hash)} was: {ar}% ({peer.outgoing}/{peer.offered}, {peer.unhandled_message_count} unhandled messages)", RNS.LOG_DEBUG) - self.unpeer(peer.destination_hash) + if ar < LXMRouter.ROTATION_AR_MAX*100: + reachable_str = "reachable" if peer.alive else "unreachable" + RNS.log(f"Acceptance rate for {reachable_str} peer {RNS.prettyhexrep(peer.destination_hash)} was: {ar}% ({peer.outgoing}/{peer.offered}, {peer.unhandled_message_count} unhandled messages)", RNS.LOG_DEBUG) + self.unpeer(peer.destination_hash) + + ms = "" if dropped_peers == 1 else "s" + RNS.log(f"Dropped {dropped_peers} low acceptance rate peer{ms} to increase peering headroom", RNS.LOG_DEBUG) except Exception as e: RNS.log(f"An error occurred during peer rotation: {e}", RNS.LOG_ERROR) From b7b67536400e768658dcc9cf63406ccf9baba468 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 30 Jan 2025 00:37:50 +0100 Subject: [PATCH 035/123] Fixed potential division by zero. Fixes #25. --- LXMF/LXMRouter.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index df340d2..7bb44a5 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -507,8 +507,10 @@ class LXMRouter: except Exception as e: RNS.log("Could not read LXM from message store. 
The contained exception was: "+str(e), RNS.LOG_ERROR) - et = time.time(); RNS.log(f"Indexed {len(self.propagation_entries)} messages in {RNS.prettytime(et-st)}, {math.floor(len(self.propagation_entries)/(et-st))} msgs/s", RNS.LOG_NOTICE) - st = time.time(); RNS.log("Rebuilding peer synchronisation states...", RNS.LOG_NOTICE) + et = time.time(); mps = 0 if et-st == 0 else math.floor(len(self.propagation_entries)/(et-st)) + RNS.log(f"Indexed {len(self.propagation_entries)} messages in {RNS.prettytime(et-st)}, {mps} msgs/s", RNS.LOG_NOTICE) + RNS.log("Rebuilding peer synchronisation states...", RNS.LOG_NOTICE) + st = time.time(); if os.path.isfile(self.storagepath+"/peers"): peers_file = open(self.storagepath+"/peers", "rb") @@ -1875,6 +1877,7 @@ class LXMRouter: reachable_str = "reachable" if peer.alive else "unreachable" RNS.log(f"Acceptance rate for {reachable_str} peer {RNS.prettyhexrep(peer.destination_hash)} was: {ar}% ({peer.outgoing}/{peer.offered}, {peer.unhandled_message_count} unhandled messages)", RNS.LOG_DEBUG) self.unpeer(peer.destination_hash) + dropped_peers += 1 ms = "" if dropped_peers == 1 else "s" RNS.log(f"Dropped {dropped_peers} low acceptance rate peer{ms} to increase peering headroom", RNS.LOG_DEBUG) From 9eca747757933d283922923c3b598d68a32f7902 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 30 Jan 2025 10:46:31 +0100 Subject: [PATCH 036/123] Updated peer rotation timing to align with distribution queue mapping --- LXMF/LXMRouter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 7bb44a5..a364a12 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -766,7 +766,7 @@ class LXMRouter: JOB_STORE_INTERVAL = 120 JOB_PEERSYNC_INTERVAL = 12 JOB_PEERINGEST_INTERVAL= JOB_PEERSYNC_INTERVAL - JOB_ROTATE_INTERVAL = 675 + JOB_ROTATE_INTERVAL = 56*JOB_PEERINGEST_INTERVAL def jobs(self): if not self.exit_handler_running: self.processing_count += 1 From f42ccfc4e93b9a85ed32a6ebc3b5f3ed21a24b49 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 30 Jan 2025 11:23:18 +0100 Subject: [PATCH 037/123] Automatically clean messages exceeding propagation transfer limit for peer from unhandled message queues --- LXMF/LXMPeer.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/LXMF/LXMPeer.py b/LXMF/LXMPeer.py index e2f951a..a777b86 100644 --- a/LXMF/LXMPeer.py +++ b/LXMF/LXMPeer.py @@ -250,7 +250,10 @@ class LXMPeer: lxm_size = unhandled_entry[2] next_size = cumulative_size + (lxm_size+per_message_overhead) if self.propagation_transfer_limit != None and next_size > (self.propagation_transfer_limit*1000): - pass + if lxm_size+per_message_overhead > self.propagation_transfer_limit: + RNS.log(f"Message {RNS.prettyhexrep(transient_id)} exceeds transfer limit for {self}, considering handled", RNS.LOG_DEBUG) + self.remove_unhandled_message(transient_id) + self.add_handled_message(transient_id) else: cumulative_size += (lxm_size+per_message_overhead) unhandled_ids.append(transient_id) From b94a712bb626e83755fb54694a2e6a30690957f8 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 30 Jan 2025 11:30:45 +0100 Subject: [PATCH 038/123] Automatically clean messages exceeding propagation transfer limit for peer from unhandled message queues --- LXMF/LXMPeer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LXMF/LXMPeer.py b/LXMF/LXMPeer.py index a777b86..a2b6b18 100644 --- a/LXMF/LXMPeer.py +++ b/LXMF/LXMPeer.py @@ -250,7 +250,7 @@ class LXMPeer: lxm_size = unhandled_entry[2] next_size = 
cumulative_size + (lxm_size+per_message_overhead) if self.propagation_transfer_limit != None and next_size > (self.propagation_transfer_limit*1000): - if lxm_size+per_message_overhead > self.propagation_transfer_limit: + if lxm_size+per_message_overhead > (self.propagation_transfer_limit*1000): RNS.log(f"Message {RNS.prettyhexrep(transient_id)} exceeds transfer limit for {self}, considering handled", RNS.LOG_DEBUG) self.remove_unhandled_message(transient_id) self.add_handled_message(transient_id) From 7b4780cfb7537ccc114c9a0d99da7dc6928eb113 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 30 Jan 2025 11:36:11 +0100 Subject: [PATCH 039/123] Automatically clean messages exceeding propagation transfer limit for peer from unhandled message queues --- LXMF/LXMPeer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LXMF/LXMPeer.py b/LXMF/LXMPeer.py index a2b6b18..c1294bd 100644 --- a/LXMF/LXMPeer.py +++ b/LXMF/LXMPeer.py @@ -251,9 +251,9 @@ class LXMPeer: next_size = cumulative_size + (lxm_size+per_message_overhead) if self.propagation_transfer_limit != None and next_size > (self.propagation_transfer_limit*1000): if lxm_size+per_message_overhead > (self.propagation_transfer_limit*1000): - RNS.log(f"Message {RNS.prettyhexrep(transient_id)} exceeds transfer limit for {self}, considering handled", RNS.LOG_DEBUG) self.remove_unhandled_message(transient_id) self.add_handled_message(transient_id) + RNS.log(f"Message {RNS.prettyhexrep(transient_id)} exceeds transfer limit for {self}, considering handled", RNS.LOG_DEBUG) else: cumulative_size += (lxm_size+per_message_overhead) unhandled_ids.append(transient_id) From c0a8f3be498514611ccb7e44925bf78afd5d71ac Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 30 Jan 2025 15:04:21 +0100 Subject: [PATCH 040/123] Cleanup --- LXMF/Handlers.py | 10 ++++++---- LXMF/LXMRouter.py | 5 +++-- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/LXMF/Handlers.py b/LXMF/Handlers.py index 26a5df6..9876f4c 100644 --- a/LXMF/Handlers.py +++ b/LXMF/Handlers.py @@ -47,10 +47,12 @@ class LXMFPropagationAnnounceHandler: propagation_transfer_limit = None wanted_inbound_peers = None if len(data) >= 4: - try: - wanted_inbound_peers = int(data[3]) - except: - wanted_inbound_peers = None + # TODO: Rethink, probably not necessary anymore + # try: + # wanted_inbound_peers = int(data[3]) + # except: + # wanted_inbound_peers = None + if len(data) >= 3: try: propagation_transfer_limit = float(data[2]) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index a364a12..070dc71 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -286,7 +286,7 @@ class LXMRouter: node_state, # Boolean flag signalling propagation node state int(time.time()), # Current node timebase self.propagation_per_transfer_limit, # Per-transfer limit for message propagation in kilobytes - self.get_wanted_inbound_peers(), # How many more inbound peers this node wants + None, # How many more inbound peers this node wants ] data = msgpack.packb(announce_data) @@ -895,7 +895,8 @@ class LXMRouter: threading.Thread(target=self.save_outbound_stamp_costs, daemon=True).start() def get_wanted_inbound_peers(self): - # TODO: Implement + # TODO: Implement/rethink. + # Probably not necessary anymore. 
return None def get_announce_app_data(self, destination_hash): From a6cf585109a354554fb223394db3405ed0b9510c Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 30 Jan 2025 15:11:26 +0100 Subject: [PATCH 041/123] Cleanup --- LXMF/Handlers.py | 1 + 1 file changed, 1 insertion(+) diff --git a/LXMF/Handlers.py b/LXMF/Handlers.py index 9876f4c..0705074 100644 --- a/LXMF/Handlers.py +++ b/LXMF/Handlers.py @@ -52,6 +52,7 @@ class LXMFPropagationAnnounceHandler: # wanted_inbound_peers = int(data[3]) # except: # wanted_inbound_peers = None + pass if len(data) >= 3: try: From d5540b927fd96d171a096e85efac64a3de921d37 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Fri, 31 Jan 2025 13:38:56 +0100 Subject: [PATCH 042/123] Added allow_duplicate option to message ingest API --- LXMF/LXMRouter.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 070dc71..d631841 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -1618,7 +1618,7 @@ class LXMRouter: ### Message Routing & Delivery ######################## ####################################################### - def lxmf_delivery(self, lxmf_data, destination_type = None, phy_stats = None, ratchet_id = None, method = None, no_stamp_enforcement=False): + def lxmf_delivery(self, lxmf_data, destination_type = None, phy_stats = None, ratchet_id = None, method = None, no_stamp_enforcement=False, allow_duplicate=False): try: message = LXMessage.unpack_from_bytes(lxmf_data) if ratchet_id and not message.ratchet_id: @@ -1685,7 +1685,7 @@ class LXMRouter: RNS.log(str(self)+" ignored message from "+RNS.prettyhexrep(message.source_hash), RNS.LOG_DEBUG) return False - if self.has_message(message.hash): + if not allow_duplicate and self.has_message(message.hash): RNS.log(str(self)+" ignored already received message from "+RNS.prettyhexrep(message.source_hash), RNS.LOG_DEBUG) return False else: @@ -2107,7 +2107,7 @@ class LXMRouter: if peer != from_peer: peer.queue_unhandled_message(transient_id) - def lxmf_propagation(self, lxmf_data, signal_local_delivery=None, signal_duplicate=None, is_paper_message=False, from_peer=None): + def lxmf_propagation(self, lxmf_data, signal_local_delivery=None, signal_duplicate=None, allow_duplicate=False, is_paper_message=False, from_peer=None): no_stamp_enforcement = False if is_paper_message: no_stamp_enforcement = True @@ -2116,9 +2116,10 @@ class LXMRouter: if len(lxmf_data) >= LXMessage.LXMF_OVERHEAD: transient_id = RNS.Identity.full_hash(lxmf_data) - if not transient_id in self.propagation_entries and not transient_id in self.locally_processed_transient_ids: + if (not transient_id in self.propagation_entries and not transient_id in self.locally_processed_transient_ids) or allow_duplicate == True: received = time.time() destination_hash = lxmf_data[:LXMessage.DESTINATION_LENGTH] + RNS.log("GOT MESSAGE FOR "+RNS.prettyhexrep(destination_hash)) self.locally_processed_transient_ids[transient_id] = received @@ -2128,7 +2129,7 @@ class LXMRouter: decrypted_lxmf_data = delivery_destination.decrypt(encrypted_lxmf_data) if decrypted_lxmf_data != None: delivery_data = lxmf_data[:LXMessage.DESTINATION_LENGTH]+decrypted_lxmf_data - self.lxmf_delivery(delivery_data, delivery_destination.type, ratchet_id=delivery_destination.latest_ratchet_id, method=LXMessage.PROPAGATED, no_stamp_enforcement=no_stamp_enforcement) + self.lxmf_delivery(delivery_data, delivery_destination.type, ratchet_id=delivery_destination.latest_ratchet_id, method=LXMessage.PROPAGATED, 
no_stamp_enforcement=no_stamp_enforcement, allow_duplicate=allow_duplicate) self.locally_delivered_transient_ids[transient_id] = time.time() if signal_local_delivery != None: @@ -2166,7 +2167,7 @@ class LXMRouter: RNS.trace_exception(e) return False - def ingest_lxm_uri(self, uri, signal_local_delivery=None, signal_duplicate=None): + def ingest_lxm_uri(self, uri, signal_local_delivery=None, signal_duplicate=None, allow_duplicate=False): try: if not uri.lower().startswith(LXMessage.URI_SCHEMA+"://"): RNS.log("Cannot ingest LXM, invalid URI provided.", RNS.LOG_ERROR) @@ -2176,7 +2177,7 @@ class LXMRouter: lxmf_data = base64.urlsafe_b64decode(uri.replace(LXMessage.URI_SCHEMA+"://", "").replace("/", "")+"==") transient_id = RNS.Identity.full_hash(lxmf_data) - router_propagation_result = self.lxmf_propagation(lxmf_data, signal_local_delivery=signal_local_delivery, signal_duplicate=signal_duplicate, is_paper_message=True) + router_propagation_result = self.lxmf_propagation(lxmf_data, signal_local_delivery=signal_local_delivery, signal_duplicate=signal_duplicate, allow_duplicate=allow_duplicate, is_paper_message=True) if router_propagation_result != False: RNS.log("LXM with transient ID "+RNS.prettyhexrep(transient_id)+" was ingested.", RNS.LOG_DEBUG) return router_propagation_result From 1ef46650738f4ddc67579080d2eab60c9affcfa8 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Tue, 18 Feb 2025 20:05:19 +0100 Subject: [PATCH 043/123] Cleanup --- LXMF/LXMRouter.py | 1 - requirements.txt | 5 ++--- setup.py | 4 ++-- 3 files changed, 4 insertions(+), 6 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index d631841..d502ee6 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -2119,7 +2119,6 @@ class LXMRouter: if (not transient_id in self.propagation_entries and not transient_id in self.locally_processed_transient_ids) or allow_duplicate == True: received = time.time() destination_hash = lxmf_data[:LXMessage.DESTINATION_LENGTH] - RNS.log("GOT MESSAGE FOR "+RNS.prettyhexrep(destination_hash)) self.locally_processed_transient_ids[transient_id] = received diff --git a/requirements.txt b/requirements.txt index 6b7926a..2f4f642 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,3 +1,2 @@ -qrcode==7.4.2 -rns==0.7.8 -setuptools==70.0.0 +qrcode>=7.4.2 +rns>=0.9.1 diff --git a/setup.py b/setup.py index cabf20a..e01e9eb 100644 --- a/setup.py +++ b/setup.py @@ -25,6 +25,6 @@ setuptools.setup( 'lxmd=LXMF.Utilities.lxmd:main', ] }, - install_requires=['rns>=0.9.1'], - python_requires='>=3.7', + install_requires=["rns>=0.9.2"], + python_requires=">=3.7", ) From 570d2c68467e0614609df4dc89d295793e2a4878 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Fri, 7 Mar 2025 11:05:50 +0100 Subject: [PATCH 044/123] Added configuration options to default config file --- LXMF/Utilities/lxmd.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index f9a2ef6..48885b2 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -710,6 +710,25 @@ propagation_transfer_max_accepted_size = 256 # prioritise_destinations = 41d20c727598a3fbbdf9106133a3a0ed, d924b81822ca24e68e2effea99bcb8cf +# You can configure the maximum number of other +# propagation nodes that this node will peer +# with automatically. The default is 50. + +# max_peers = 25 + +# You can configure a list of static propagation +# node peers, that this node will always be +# peered with, by specifying a list of +# destination hashes. 
+ +# static_peers = e17f833c4ddf8890dd3a79a6fea8161d, 5a2d0029b6e5ec87020abaea0d746da4 + +# You can configure the propagation node to +# only accept incoming propagation messages +# from configured static peers. + +# from_static_only = True + # By default, any destination is allowed to # connect and download messages, but you can # optionally restrict this. If you enable From 336792c07a47ca60dcf2df1295b8065f0b458a22 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 13 Mar 2025 19:45:15 +0100 Subject: [PATCH 045/123] Updated dependencies --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index e01e9eb..19737d6 100644 --- a/setup.py +++ b/setup.py @@ -25,6 +25,6 @@ setuptools.setup( 'lxmd=LXMF.Utilities.lxmd:main', ] }, - install_requires=["rns>=0.9.2"], + install_requires=["rns>=0.9.3"], python_requires=">=3.7", ) From 326c0eed8f767e298eb3f09340a41314c254ddda Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 13 Mar 2025 19:46:11 +0100 Subject: [PATCH 046/123] Updated version --- LXMF/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LXMF/_version.py b/LXMF/_version.py index 22049ab..63af887 100644 --- a/LXMF/_version.py +++ b/LXMF/_version.py @@ -1 +1 @@ -__version__ = "0.6.2" +__version__ = "0.6.3" From e6021b8fed0ffcffcc6a7b9bbdb7d0d1a282ba8f Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Tue, 15 Apr 2025 20:21:16 +0200 Subject: [PATCH 047/123] Updated license --- LICENSE | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/LICENSE b/LICENSE index a25bd7a..eda392f 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ -MIT License +Reticulum License -Copyright (c) 2020 Mark Qvist / unsigned.io +Copyright (c) 2025 Mark Qvist / unsigned.io Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -9,8 +9,16 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. +- The Software shall not be used in any kind of system which includes amongst + its functions the ability to purposefully do harm to human beings. + +- The Software shall not be used, directly or indirectly, in the creation of + an artificial intelligence, machine learning or language model training + dataset, including but not limited to any use that contributes to the + training or development of such a model or algorithm. + +- The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, From 1bdcf6ad534c3d34e4966d23bb04a129fd5ebc4f Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Tue, 15 Apr 2025 20:21:54 +0200 Subject: [PATCH 048/123] Updated license --- LICENSE | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LICENSE b/LICENSE index eda392f..f5fb92d 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ Reticulum License -Copyright (c) 2025 Mark Qvist / unsigned.io +Copyright (c) 2020-2025 Mark Qvist Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal From 005d71707c1f99a721e78196e2ecfad1df1c11fa Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 17 Apr 2025 13:31:00 +0200 Subject: [PATCH 049/123] Cleanup --- LXMF/Handlers.py | 14 +++++++------- LXMF/LXMRouter.py | 3 +-- 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/LXMF/Handlers.py b/LXMF/Handlers.py index 0705074..5671170 100644 --- a/LXMF/Handlers.py +++ b/LXMF/Handlers.py @@ -1,4 +1,5 @@ import time +import threading import RNS import RNS.vendor.umsgpack as msgpack @@ -17,10 +18,11 @@ class LXMFDeliveryAnnounceHandler: if lxmessage.method == LXMessage.DIRECT or lxmessage.method == LXMessage.OPPORTUNISTIC: lxmessage.next_delivery_attempt = time.time() - while self.lxmrouter.processing_outbound: - time.sleep(0.1) + def outbound_trigger(): + while self.lxmrouter.processing_outbound: time.sleep(0.1) + self.lxmrouter.process_outbound() - self.lxmrouter.process_outbound() + threading.Thread(target=outbound_trigger, daemon=True).start() try: stamp_cost = stamp_cost_from_app_data(app_data) @@ -55,10 +57,8 @@ class LXMFPropagationAnnounceHandler: pass if len(data) >= 3: - try: - propagation_transfer_limit = float(data[2]) - except: - propagation_transfer_limit = None + try: propagation_transfer_limit = float(data[2]) + except: propagation_transfer_limit = None if destination_hash in self.lxmrouter.static_peers: self.lxmrouter.peer(destination_hash, node_timebase, propagation_transfer_limit, wanted_inbound_peers) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index d502ee6..8da1476 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -2301,8 +2301,7 @@ class LXMRouter: else: RNS.log("Outbound processing for "+str(lxmessage)+" to "+RNS.prettyhexrep(lxmessage.get_destination().hash), RNS.LOG_DEBUG) - if lxmessage.progress == None or lxmessage.progress < 0.01: - lxmessage.progress = 0.01 + if lxmessage.progress == None or lxmessage.progress < 0.01: lxmessage.progress = 0.01 # Outbound handling for opportunistic messages if lxmessage.method == LXMessage.OPPORTUNISTIC: From 37e99910ec8fb8f1b6df05567aa5ba2009ed9edc Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Mon, 12 May 2025 11:58:24 +0200 Subject: [PATCH 050/123] Updated version and RNS dependency version --- LXMF/_version.py | 2 +- setup.py | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/LXMF/_version.py b/LXMF/_version.py index 63af887..49e0fc1 100644 --- a/LXMF/_version.py +++ b/LXMF/_version.py @@ -1 +1 @@ -__version__ = "0.6.3" +__version__ = "0.7.0" diff --git a/setup.py b/setup.py index 19737d6..724705f 100644 --- a/setup.py +++ b/setup.py @@ -15,9 +15,10 @@ setuptools.setup( long_description_content_type="text/markdown", url="https://github.com/markqvist/lxmf", packages=["LXMF", "LXMF.Utilities"], + license="Reticulum License", + license_files = ("LICENSE"), 
     classifiers=[
         "Programming Language :: Python :: 3",
-        "License :: OSI Approved :: MIT License",
         "Operating System :: OS Independent",
     ],
     entry_points= {
         'lxmd=LXMF.Utilities.lxmd:main',
         ]
     },
-    install_requires=["rns>=0.9.3"],
+    install_requires=["rns>=0.9.5"],
     python_requires=">=3.7",
 )

From 499fe4cc5381ffc641cbc04b7d27cf931cae3bb5 Mon Sep 17 00:00:00 2001
From: Mark Qvist
Date: Thu, 15 May 2025 20:27:19 +0200
Subject: [PATCH 051/123] Use no_data_for instead of inactive_for for cleaning links

---
 LXMF/LXMRouter.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py
index 8da1476..5b7a5c2 100644
--- a/LXMF/LXMRouter.py
+++ b/LXMF/LXMRouter.py
@@ -827,7 +827,7 @@ class LXMRouter:
         closed_links = []
         for link_hash in self.direct_links:
             link = self.direct_links[link_hash]
-            inactive_time = link.inactive_for()
+            inactive_time = link.no_data_for()

             if inactive_time > LXMRouter.LINK_MAX_INACTIVITY:
                 link.teardown()

From a9622e3a332fd36ffca2b045e42d22c07779266c Mon Sep 17 00:00:00 2001
From: Mark Qvist
Date: Thu, 15 May 2025 20:30:12 +0200
Subject: [PATCH 052/123] Updated version

---
 LXMF/_version.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/LXMF/_version.py b/LXMF/_version.py
index 49e0fc1..a5f830a 100644
--- a/LXMF/_version.py
+++ b/LXMF/_version.py
@@ -1 +1 @@
-__version__ = "0.7.0"
+__version__ = "0.7.1"

From c2207d1eb79db474a618b2ffd40d6be3ffc2a00a Mon Sep 17 00:00:00 2001
From: Mark Qvist
Date: Sat, 17 May 2025 10:27:21 +0200
Subject: [PATCH 053/123] Added funding

---
 FUNDING.yml | 3 +++
 1 file changed, 3 insertions(+)
 create mode 100644 FUNDING.yml

diff --git a/FUNDING.yml b/FUNDING.yml
new file mode 100644
index 0000000..d125d55
--- /dev/null
+++ b/FUNDING.yml
@@ -0,0 +1,3 @@
+liberapay: Reticulum
+ko_fi: markqvist
+custom: "https://unsigned.io/donate"

From 787cd069dc98ebab80afdef726a841c45f38566f Mon Sep 17 00:00:00 2001
From: Mark Qvist
Date: Mon, 26 May 2025 20:57:46 +0200
Subject: [PATCH 054/123] Fixed division by zero. Closes #30.
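
The status output derives a delivery factor by dividing outgoing peered messages by the node's total incoming message count, so a node that had not yet received any messages hit a ZeroDivisionError. A minimal sketch of the guarded computation, mirroring the change below (the function and parameter names are illustrative, not lxmd's actual locals):

    def delivery_factor(peered_outgoing, total_incoming):
        # A node with no incoming messages yet has nothing to divide by
        if total_incoming == 0: return 0
        return round(peered_outgoing/total_incoming, 2)
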
--- LXMF/Utilities/lxmd.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index 48885b2..a4ccaf5 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -529,14 +529,14 @@ def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = peered_outgoing += pm["outgoing"] peered_rx_bytes += p["rx_bytes"] peered_tx_bytes += p["tx_bytes"] - if p["alive"]: - available_peers += 1 - else: - unreachable_peers += 1 + + if p["alive"]: available_peers += 1 + else: unreachable_peers += 1 total_incoming = peered_incoming+s["unpeered_propagation_incoming"]+s["clients"]["client_propagation_messages_received"] total_rx_bytes = peered_rx_bytes+s["unpeered_propagation_rx_bytes"] - df = round(peered_outgoing/total_incoming, 2) + if total_incoming != 0: df = round(peered_outgoing/total_incoming, 2) + else: df = 0 dhs = RNS.prettyhexrep(s["destination_hash"]); uts = RNS.prettytime(s["uptime"]) print(f"\nLXMF Propagation Node running on {dhs}, uptime is {uts}") From 416ccf294f37265708f7a703dcdf2daa751fca67 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sun, 13 Jul 2025 13:24:46 +0200 Subject: [PATCH 055/123] Retry path request on unexpectedly closed link --- LXMF/LXMRouter.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 5b7a5c2..fe16b05 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -2368,7 +2368,8 @@ class LXMRouter: RNS.log("Waiting for proof for "+str(lxmessage)+" sent as link packet", RNS.LOG_DEBUG) elif direct_link.status == RNS.Link.CLOSED: if direct_link.activated_at != None: - RNS.log("The link to "+RNS.prettyhexrep(lxmessage.get_destination().hash)+" was closed", RNS.LOG_DEBUG) + RNS.log("The link to "+RNS.prettyhexrep(lxmessage.get_destination().hash)+" was closed unexpectedly, retrying path request...", RNS.LOG_DEBUG) + RNS.Transport.request_path(lxmessage.get_destination().hash) else: if not hasattr(lxmessage, "path_request_retried"): RNS.log("The link to "+RNS.prettyhexrep(lxmessage.get_destination().hash)+" was never activated, retrying path request...", RNS.LOG_DEBUG) From 5b9f12159326de0f4aee6045f6bd4fa8f9199b2b Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sun, 13 Jul 2025 13:24:52 +0200 Subject: [PATCH 056/123] Updated version and RNS dependency version --- LXMF/_version.py | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/LXMF/_version.py b/LXMF/_version.py index a5f830a..777f190 100644 --- a/LXMF/_version.py +++ b/LXMF/_version.py @@ -1 +1 @@ -__version__ = "0.7.1" +__version__ = "0.8.0" diff --git a/setup.py b/setup.py index 724705f..568ade3 100644 --- a/setup.py +++ b/setup.py @@ -26,6 +26,6 @@ setuptools.setup( 'lxmd=LXMF.Utilities.lxmd:main', ] }, - install_requires=["rns>=0.9.5"], + install_requires=["rns>=1.0.0"], python_requires=">=3.7", ) From 85d8f4f58361c8d523a34626513c1e83d3f3b74a Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sun, 13 Jul 2025 13:42:01 +0200 Subject: [PATCH 057/123] Updated requirements --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 2f4f642..f0f3fc8 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,2 +1,2 @@ qrcode>=7.4.2 -rns>=0.9.1 +rns>=1.0.0 From 61b75526d281a54bbc95a24b0f23b92c1905d951 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Wed, 29 Oct 2025 23:24:45 +0100 Subject: [PATCH 058/123] Added separate propagation node per-message and sync transfer 
limits. Added persistent PN sync strategy. Added concurrent PN peer sync. --- LXMF/Handlers.py | 35 +++++++++----- LXMF/LXMPeer.py | 104 +++++++++++++++++++++++++++++------------ LXMF/LXMRouter.py | 52 +++++++++++++-------- LXMF/Utilities/lxmd.py | 28 +++++++++-- LXMF/_version.py | 2 +- 5 files changed, 153 insertions(+), 68 deletions(-) diff --git a/LXMF/Handlers.py b/LXMF/Handlers.py index 5671170..e1dba8e 100644 --- a/LXMF/Handlers.py +++ b/LXMF/Handlers.py @@ -41,19 +41,21 @@ class LXMFPropagationAnnounceHandler: def received_announce(self, destination_hash, announced_identity, app_data): try: if type(app_data) == bytes: - if self.lxmrouter.propagation_node and self.lxmrouter.autopeer: + if self.lxmrouter.propagation_node: data = msgpack.unpackb(app_data) - if pn_announce_data_is_valid(data): node_timebase = data[1] propagation_transfer_limit = None + propagation_sync_limit = None wanted_inbound_peers = None + if len(data) >= 5: + try: propagation_sync_limit = int(data[4]) + except Exception as e: propagation_sync_limit = None + if len(data) >= 4: # TODO: Rethink, probably not necessary anymore - # try: - # wanted_inbound_peers = int(data[3]) - # except: - # wanted_inbound_peers = None + # try: wanted_inbound_peers = int(data[3]) + # except: wanted_inbound_peers = None pass if len(data) >= 3: @@ -61,15 +63,24 @@ class LXMFPropagationAnnounceHandler: except: propagation_transfer_limit = None if destination_hash in self.lxmrouter.static_peers: - self.lxmrouter.peer(destination_hash, node_timebase, propagation_transfer_limit, wanted_inbound_peers) + self.lxmrouter.peer(destination_hash=destination_hash, + timestamp=node_timebase, + propagation_transfer_limit=propagation_transfer_limit, + propagation_sync_limit=propagation_sync_limit, + wanted_inbound_peers=wanted_inbound_peers) else: - if data[0] == True: - if RNS.Transport.hops_to(destination_hash) <= self.lxmrouter.autopeer_maxdepth: - self.lxmrouter.peer(destination_hash, node_timebase, propagation_transfer_limit, wanted_inbound_peers) + if self.lxmrouter.autopeer: + if data[0] == True: + if RNS.Transport.hops_to(destination_hash) <= self.lxmrouter.autopeer_maxdepth: + self.lxmrouter.peer(destination_hash=destination_hash, + timestamp=node_timebase, + propagation_transfer_limit=propagation_transfer_limit, + propagation_sync_limit=propagation_sync_limit, + wanted_inbound_peers=wanted_inbound_peers) - elif data[0] == False: - self.lxmrouter.unpeer(destination_hash, node_timebase) + elif data[0] == False: + self.lxmrouter.unpeer(destination_hash, node_timebase) except Exception as e: RNS.log("Error while evaluating propagation node announce, ignoring announce.", RNS.LOG_DEBUG) diff --git a/LXMF/LXMPeer.py b/LXMF/LXMPeer.py index c1294bd..41ea69a 100644 --- a/LXMF/LXMPeer.py +++ b/LXMF/LXMPeer.py @@ -11,16 +11,20 @@ class LXMPeer: OFFER_REQUEST_PATH = "/offer" MESSAGE_GET_PATH = "/get" - IDLE = 0x00 - LINK_ESTABLISHING = 0x01 - LINK_READY = 0x02 - REQUEST_SENT = 0x03 - RESPONSE_RECEIVED = 0x04 + IDLE = 0x00 + LINK_ESTABLISHING = 0x01 + LINK_READY = 0x02 + REQUEST_SENT = 0x03 + RESPONSE_RECEIVED = 0x04 RESOURCE_TRANSFERRING = 0x05 - ERROR_NO_IDENTITY = 0xf0 - ERROR_NO_ACCESS = 0xf1 - ERROR_TIMEOUT = 0xfe + ERROR_NO_IDENTITY = 0xf0 + ERROR_NO_ACCESS = 0xf1 + ERROR_TIMEOUT = 0xfe + + STRATEGY_LAZY = 0x01 + STRATEGY_PERSISTENT = 0x02 + DEFAULT_SYNC_STRATEGY = STRATEGY_PERSISTENT # Maximum amount of time a peer can # be unreachable before it is removed @@ -67,6 +71,16 @@ class LXMPeer: peer.propagation_transfer_limit = None else: 
peer.propagation_transfer_limit = None + + if "propagation_sync_limit" in dictionary: + try: peer.propagation_sync_limit = int(dictionary["propagation_sync_limit"]) + except: peer.propagation_sync_limit = peer.propagation_transfer_limit + else: peer.propagation_sync_limit = peer.propagation_transfer_limit + + if "sync_strategy" in dictionary: + try: peer.sync_strategy = int(dictionary["sync_strategy"]) + except: peer.sync_strategy = LXMPeer.DEFAULT_SYNC_STRATEGY + else: peer.sync_strategy = LXMPeer.DEFAULT_SYNC_STRATEGY if "offered" in dictionary: peer.offered = dictionary["offered"] @@ -127,6 +141,8 @@ class LXMPeer: dictionary["link_establishment_rate"] = self.link_establishment_rate dictionary["sync_transfer_rate"] = self.sync_transfer_rate dictionary["propagation_transfer_limit"] = self.propagation_transfer_limit + dictionary["propagation_sync_limit"] = self.propagation_sync_limit + dictionary["sync_strategy"] = self.sync_strategy dictionary["last_sync_attempt"] = self.last_sync_attempt dictionary["offered"] = self.offered dictionary["outgoing"] = self.outgoing @@ -150,9 +166,11 @@ class LXMPeer: return peer_bytes - def __init__(self, router, destination_hash): + def __init__(self, router, destination_hash, sync_strategy=DEFAULT_SYNC_STRATEGY): self.alive = False self.last_heard = 0 + self.sync_strategy = sync_strategy + self.next_sync_attempt = 0 self.last_sync_attempt = 0 self.sync_backoff = 0 @@ -160,6 +178,8 @@ class LXMPeer: self.link_establishment_rate = 0 self.sync_transfer_rate = 0 self.propagation_transfer_limit = None + self.propagation_sync_limit = None + self.currently_transferring_messages = None self.handled_messages_queue = deque() self.unhandled_messages_queue = deque() @@ -209,6 +229,10 @@ class LXMPeer: if self.destination != None: if len(self.unhandled_messages) > 0: + if self.currently_transferring_messages != None: + RNS.log(f"Sync requested for {self}, but current message transfer index was not clear. 
Aborting.", RNS.LOG_ERROR) + return + if self.state == LXMPeer.IDLE: RNS.log("Establishing link for sync to peer "+RNS.prettyhexrep(self.destination_hash)+"...", RNS.LOG_DEBUG) self.sync_backoff += LXMPeer.SYNC_BACKOFF_STEP @@ -244,21 +268,26 @@ class LXMPeer: unhandled_entries.sort(key=lambda e: e[1], reverse=False) per_message_overhead = 16 # Really only 2 bytes, but set a bit higher for now cumulative_size = 24 # Initialised to highest reasonable binary structure overhead + RNS.log(f"Syncing to peer with per-message limit {RNS.prettysize(self.propagation_transfer_limit*1000)} and sync limit {RNS.prettysize(self.propagation_sync_limit*1000)}") # TODO: Remove debug for unhandled_entry in unhandled_entries: transient_id = unhandled_entry[0] weight = unhandled_entry[1] lxm_size = unhandled_entry[2] - next_size = cumulative_size + (lxm_size+per_message_overhead) - if self.propagation_transfer_limit != None and next_size > (self.propagation_transfer_limit*1000): - if lxm_size+per_message_overhead > (self.propagation_transfer_limit*1000): - self.remove_unhandled_message(transient_id) - self.add_handled_message(transient_id) - RNS.log(f"Message {RNS.prettyhexrep(transient_id)} exceeds transfer limit for {self}, considering handled", RNS.LOG_DEBUG) - else: - cumulative_size += (lxm_size+per_message_overhead) - unhandled_ids.append(transient_id) + lxm_transfer_size = lxm_size+per_message_overhead + next_size = cumulative_size + lxm_transfer_size - RNS.log(f"Offering {len(unhandled_ids)} messages to peer {RNS.prettyhexrep(self.destination.hash)}", RNS.LOG_VERBOSE) + if self.propagation_transfer_limit != None and lxm_transfer_size > (self.propagation_transfer_limit*1000): + self.remove_unhandled_message(transient_id) + self.add_handled_message(transient_id) + continue + + if self.propagation_sync_limit != None and next_size >= (self.propagation_sync_limit*1000): + continue + + cumulative_size += lxm_transfer_size + unhandled_ids.append(transient_id) + + RNS.log(f"Offering {len(unhandled_ids)} messages to peer {RNS.prettyhexrep(self.destination.hash)} ({RNS.prettysize(len(msgpack.packb(unhandled_ids)))})", RNS.LOG_VERBOSE) self.last_offer = unhandled_ids self.link.request(LXMPeer.OFFER_REQUEST_PATH, unhandled_ids, response_callback=self.offer_response, failed_callback=self.request_failed) self.state = LXMPeer.REQUEST_SENT @@ -327,7 +356,7 @@ class LXMPeer: wanted_message_ids.append(transient_id) if len(wanted_messages) > 0: - RNS.log("Peer wanted "+str(len(wanted_messages))+" of the available messages", RNS.LOG_VERBOSE) + RNS.log(f"Peer {RNS.prettyhexrep(self.destination_hash)} wanted {str(len(wanted_messages))} of the available messages", RNS.LOG_VERBOSE) lxm_list = [] for message_entry in wanted_messages: @@ -339,13 +368,14 @@ class LXMPeer: lxm_list.append(lxmf_data) data = msgpack.packb([time.time(), lxm_list]) + RNS.log(f"Total transfer size for this sync is {RNS.prettysize(len(data))}", RNS.LOG_VERBOSE) resource = RNS.Resource(data, self.link, callback = self.resource_concluded) - resource.transferred_messages = wanted_message_ids - resource.sync_transfer_started = time.time() + self.currently_transferring_messages = wanted_message_ids + self.current_sync_transfer_started = time.time() self.state = LXMPeer.RESOURCE_TRANSFERRING else: - RNS.log("Peer "+RNS.prettyhexrep(self.destination_hash)+" did not request any of the available messages, sync completed", RNS.LOG_VERBOSE) + RNS.log(f"Peer {RNS.prettyhexrep(self.destination_hash)} did not request any of the available messages, sync completed", 
RNS.LOG_VERBOSE) self.offered += len(self.last_offer) if self.link != None: self.link.teardown() @@ -365,7 +395,13 @@ class LXMPeer: def resource_concluded(self, resource): if resource.status == RNS.Resource.COMPLETE: - for transient_id in resource.transferred_messages: + if self.currently_transferring_messages == None: + RNS.log(f"Sync transfer completed on {self}, but transferred message index was unavailable. Aborting.", RNS.LOG_ERROR) + if self.link != None: self.link.teardown() + self.link = None + self.state = LXMPeer.IDLE + + for transient_id in self.currently_transferring_messages: self.add_handled_message(transient_id) self.remove_unhandled_message(transient_id) @@ -376,24 +412,30 @@ class LXMPeer: self.state = LXMPeer.IDLE rate_str = "" - if hasattr(resource, "sync_transfer_started") and resource.sync_transfer_started: - self.sync_transfer_rate = (resource.get_transfer_size()*8)/(time.time()-resource.sync_transfer_started) + if self.current_sync_transfer_started != None: + self.sync_transfer_rate = (resource.get_transfer_size()*8)/(time.time()-self.current_sync_transfer_started) rate_str = f" at {RNS.prettyspeed(self.sync_transfer_rate)}" - RNS.log(f"Syncing {len(resource.transferred_messages)} messages to peer {RNS.prettyhexrep(self.destination_hash)} completed{rate_str}", RNS.LOG_VERBOSE) + RNS.log(f"Syncing {len(self.currently_transferring_messages)} messages to peer {RNS.prettyhexrep(self.destination_hash)} completed{rate_str}", RNS.LOG_VERBOSE) self.alive = True self.last_heard = time.time() self.offered += len(self.last_offer) - self.outgoing += len(resource.transferred_messages) + self.outgoing += len(self.currently_transferring_messages) self.tx_bytes += resource.get_data_size() + + self.currently_transferring_messages = None + self.current_sync_transfer_started = None + + if self.sync_strategy == self.STRATEGY_PERSISTENT: + if self.unhandled_message_count > 0: self.sync() else: RNS.log("Resource transfer for LXMF peer sync failed to "+str(self.destination), RNS.LOG_VERBOSE) - if self.link != None: - self.link.teardown() - + if self.link != None: self.link.teardown() self.link = None self.state = LXMPeer.IDLE + self.currently_transferring_messages = None + self.current_sync_transfer_started = None def link_established(self, link): self.link.identify(self.router.identity) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index fe16b05..0ad75b7 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -45,6 +45,7 @@ class LXMRouter: ROTATION_AR_MAX = 0.5 PROPAGATION_LIMIT = 256 + SYNC_LIMIT = PROPAGATION_LIMIT*40 DELIVERY_LIMIT = 1000 PR_PATH_TIMEOUT = 10 @@ -73,8 +74,9 @@ class LXMRouter: ####################################################### def __init__(self, identity=None, storagepath=None, autopeer=AUTOPEER, autopeer_maxdepth=None, - propagation_limit=PROPAGATION_LIMIT, delivery_limit=DELIVERY_LIMIT, enforce_ratchets=False, - enforce_stamps=False, static_peers = [], max_peers=None, from_static_only=False): + propagation_limit=PROPAGATION_LIMIT, delivery_limit=DELIVERY_LIMIT, sync_limit=SYNC_LIMIT, + enforce_ratchets=False, enforce_stamps=False, static_peers = [], max_peers=None, + from_static_only=False, sync_strategy=LXMPeer.STRATEGY_PERSISTENT): random.seed(os.urandom(10)) @@ -91,9 +93,10 @@ class LXMRouter: self.auth_required = False self.retain_synced_on_node = False - self.processing_outbound = False - self.processing_inbound = False - self.processing_count = 0 + self.default_sync_strategy = sync_strategy + self.processing_outbound = False + 
self.processing_inbound = False + self.processing_count = 0 self.propagation_node = False self.propagation_node_start_time = None @@ -107,17 +110,20 @@ class LXMRouter: self.outbound_propagation_node = None self.outbound_propagation_link = None - if delivery_limit == None: - delivery_limit = LXMRouter.DELIVERY_LIMIT + if delivery_limit == None: delivery_limit = LXMRouter.DELIVERY_LIMIT self.message_storage_limit = None self.information_storage_limit = None self.propagation_per_transfer_limit = propagation_limit + self.propagation_per_sync_limit = sync_limit self.delivery_per_transfer_limit = delivery_limit self.enforce_ratchets = enforce_ratchets self._enforce_stamps = enforce_stamps self.pending_deferred_stamps = {} + if sync_limit == None or self.propagation_per_sync_limit < self.propagation_per_transfer_limit: + self.propagation_per_sync_limit = self.propagation_per_transfer_limit + self.wants_download_on_path_available_from = None self.wants_download_on_path_available_to = None self.propagation_transfer_state = LXMRouter.PR_IDLE @@ -287,6 +293,7 @@ class LXMRouter: int(time.time()), # Current node timebase self.propagation_per_transfer_limit, # Per-transfer limit for message propagation in kilobytes None, # How many more inbound peers this node wants + self.propagation_per_sync_limit, # Limit for incoming propagation node syncs ] data = msgpack.packb(announce_data) @@ -546,7 +553,7 @@ class LXMRouter: for static_peer in self.static_peers: if not static_peer in self.peers: RNS.log(f"Activating static peering with {RNS.prettyhexrep(static_peer)}", RNS.LOG_NOTICE) - self.peers[static_peer] = LXMPeer(self, static_peer) + self.peers[static_peer] = LXMPeer(self, static_peer, sync_strategy=self.default_sync_strategy) if self.peers[static_peer].last_heard == 0: # TODO: Allow path request responses through announce handler # momentarily here, so peering config can be updated even if @@ -708,6 +715,7 @@ class LXMRouter: "ler": int(peer.link_establishment_rate), "str": int(peer.sync_transfer_rate), "transfer_limit": peer.propagation_transfer_limit, + "sync_limit": peer.propagation_sync_limit, "network_distance": RNS.Transport.hops_to(peer_id), "rx_bytes": peer.rx_bytes, "tx_bytes": peer.tx_bytes, @@ -725,6 +733,7 @@ class LXMRouter: "uptime": time.time()-self.propagation_node_start_time, "delivery_limit": self.delivery_per_transfer_limit, "propagation_limit": self.propagation_per_transfer_limit, + "sync_limit": self.propagation_per_sync_limit, "autopeer_maxdepth": self.autopeer_maxdepth, "from_static_only": self.from_static_only, "messagestore": { @@ -1777,7 +1786,7 @@ class LXMRouter: ### Peer Sync & Propagation ########################### ####################################################### - def peer(self, destination_hash, timestamp, propagation_transfer_limit, wanted_inbound_peers = None): + def peer(self, destination_hash, timestamp, propagation_transfer_limit, propagation_sync_limit, wanted_inbound_peers = None): if destination_hash in self.peers: peer = self.peers[destination_hash] if timestamp > peer.peering_timebase: @@ -1787,16 +1796,23 @@ class LXMRouter: peer.peering_timebase = timestamp peer.last_heard = time.time() peer.propagation_transfer_limit = propagation_transfer_limit + if propagation_sync_limit != None: peer.propagation_sync_limit = propagation_sync_limit + else: peer.propagation_sync_limit = propagation_transfer_limit + RNS.log(f"Peering config updated for {RNS.prettyhexrep(destination_hash)}", RNS.LOG_VERBOSE) else: if len(self.peers) < self.max_peers: - peer = 
LXMPeer(self, destination_hash) + peer = LXMPeer(self, destination_hash, sync_strategy=self.default_sync_strategy) peer.alive = True peer.last_heard = time.time() peer.propagation_transfer_limit = propagation_transfer_limit + if propagation_sync_limit != None: peer.propagation_sync_limit = propagation_sync_limit + else: peer.propagation_sync_limit = propagation_transfer_limit + self.peers[destination_hash] = peer RNS.log(f"Peered with {RNS.prettyhexrep(destination_hash)}", RNS.LOG_NOTICE) + else: RNS.log(f"Max peers reached, not peering with {RNS.prettyhexrep(destination_hash)}", RNS.LOG_DEBUG) @@ -1895,18 +1911,14 @@ class LXMRouter: for peer_id in peers: peer = peers[peer_id] if time.time() > peer.last_heard + LXMPeer.MAX_UNREACHABLE: - if not peer_id in self.static_peers: - culled_peers.append(peer_id) + if not peer_id in self.static_peers: culled_peers.append(peer_id) + else: if peer.state == LXMPeer.IDLE and len(peer.unhandled_messages) > 0: - if peer.alive: - waiting_peers.append(peer) + if peer.alive: waiting_peers.append(peer) else: - if hasattr(peer, "next_sync_attempt") and time.time() > peer.next_sync_attempt: - unresponsive_peers.append(peer) - else: - pass - # RNS.log("Not adding peer "+str(peer)+" since it is in sync backoff", RNS.LOG_DEBUG) + if hasattr(peer, "next_sync_attempt") and time.time() > peer.next_sync_attempt: unresponsive_peers.append(peer) + else: pass # RNS.log("Not adding peer "+str(peer)+" since it is in sync backoff", RNS.LOG_DEBUG) peer_pool = [] if len(waiting_peers) > 0: @@ -1970,7 +1982,7 @@ class LXMRouter: return False size = resource.get_data_size() - limit = self.propagation_per_transfer_limit*1000 + limit = self.propagation_per_sync_limit*1000 if limit != None and size > limit: RNS.log(f"Rejecting {RNS.prettysize(size)} incoming propagation resource, since it exceeds the limit of {RNS.prettysize(limit)}", RNS.LOG_DEBUG) return False diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index a4ccaf5..e49bd7a 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -135,6 +135,20 @@ def apply_config(): active_configuration["propagation_transfer_max_accepted_size"] = 0.38 else: active_configuration["propagation_transfer_max_accepted_size"] = 256 + + if "propagation" in lxmd_config and "propagation_message_max_accepted_size" in lxmd_config["propagation"]: + active_configuration["propagation_transfer_max_accepted_size"] = lxmd_config["propagation"].as_float("propagation_message_max_accepted_size") + if active_configuration["propagation_transfer_max_accepted_size"] < 0.38: + active_configuration["propagation_transfer_max_accepted_size"] = 0.38 + else: + active_configuration["propagation_transfer_max_accepted_size"] = 256 + + if "propagation" in lxmd_config and "propagation_sync_max_accepted_size" in lxmd_config["propagation"]: + active_configuration["propagation_sync_max_accepted_size"] = lxmd_config["propagation"].as_float("propagation_sync_max_accepted_size") + if active_configuration["propagation_sync_max_accepted_size"] < 0.38: + active_configuration["propagation_sync_max_accepted_size"] = 0.38 + else: + active_configuration["propagation_sync_max_accepted_size"] = 256*40 if "propagation" in lxmd_config and "prioritise_destinations" in lxmd_config["propagation"]: active_configuration["prioritised_lxmf_destinations"] = lxmd_config["propagation"].as_list("prioritise_destinations") @@ -323,6 +337,7 @@ def program_setup(configdir = None, rnsconfigdir = None, run_pn = False, on_inbo autopeer = active_configuration["autopeer"], 
autopeer_maxdepth = active_configuration["autopeer_maxdepth"], propagation_limit = active_configuration["propagation_transfer_max_accepted_size"], + sync_limit = active_configuration["propagation_sync_max_accepted_size"], delivery_limit = active_configuration["delivery_transfer_max_accepted_size"], max_peers = active_configuration["max_peers"], static_peers = active_configuration["static_peers"], @@ -676,9 +691,14 @@ autopeer = yes autopeer_maxdepth = 4 # The maximum accepted transfer size per in- -# coming propagation transfer, in kilobytes. -# This also sets the upper limit for the size -# of single messages accepted onto this node. +# coming propagation message, in kilobytes. +# This sets the upper limit for the size of +# single messages accepted onto this node. + +propagation_message_max_accepted_size = 256 + +# The maximum accepted transfer size per in- +# coming propagation node sync. # # If a node wants to propagate a larger number # of messages to this node, than what can fit @@ -686,7 +706,7 @@ autopeer_maxdepth = 4 # the smallest messages first, and try again # with any remaining messages at a later point. -propagation_transfer_max_accepted_size = 256 +propagation_sync_max_accepted_size = 256 # The maximum amount of storage to use for # the LXMF Propagation Node message store, diff --git a/LXMF/_version.py b/LXMF/_version.py index 777f190..8088f75 100644 --- a/LXMF/_version.py +++ b/LXMF/_version.py @@ -1 +1 @@ -__version__ = "0.8.0" +__version__ = "0.8.1" From 98347d3ad9d4d30979fa52564632a73ee8262872 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Wed, 29 Oct 2025 23:25:15 +0100 Subject: [PATCH 059/123] Increased PN peer sync frequency --- LXMF/LXMRouter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 0ad75b7..392a76f 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -773,7 +773,7 @@ class LXMRouter: JOB_LINKS_INTERVAL = 1 JOB_TRANSIENT_INTERVAL = 60 JOB_STORE_INTERVAL = 120 - JOB_PEERSYNC_INTERVAL = 12 + JOB_PEERSYNC_INTERVAL = 6 JOB_PEERINGEST_INTERVAL= JOB_PEERSYNC_INTERVAL JOB_ROTATE_INTERVAL = 56*JOB_PEERINGEST_INTERVAL def jobs(self): From 16dfbc22cd0441937d0e2e2ceebf1f2cb6dd281b Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 30 Oct 2025 12:38:49 +0100 Subject: [PATCH 060/123] Propagation stamp validation via pool dispatch --- LXMF/LXMessage.py | 11 +-------- LXMF/LXStamper.py | 58 +++++++++++++++++++++++++++++++++++++++++++---- 2 files changed, 55 insertions(+), 14 deletions(-) diff --git a/LXMF/LXMessage.py b/LXMF/LXMessage.py index 515ab11..4739f30 100644 --- a/LXMF/LXMessage.py +++ b/LXMF/LXMessage.py @@ -268,15 +268,6 @@ class LXMessage: def register_failed_callback(self, callback): self.failed_callback = callback - @staticmethod - def stamp_valid(stamp, target_cost, workblock): - target = 0b1 << 256-target_cost - result = RNS.Identity.full_hash(workblock+stamp) - if int.from_bytes(result, byteorder="big") > target: - return False - else: - return True - def validate_stamp(self, target_cost, tickets=None): if tickets != None: for ticket in tickets: @@ -293,7 +284,7 @@ class LXMessage: return False else: workblock = LXStamper.stamp_workblock(self.message_id) - if LXMessage.stamp_valid(self.stamp, target_cost, workblock): + if LXStamper.stamp_valid(self.stamp, target_cost, workblock): RNS.log(f"Stamp on {self} validated", RNS.LOG_DEBUG) # TODO: Remove at some point self.stamp_value = LXStamper.stamp_value(workblock, self.stamp) return True diff --git a/LXMF/LXStamper.py b/LXMF/LXStamper.py 
index bcfa95b..a9ca7d6 100644
--- a/LXMF/LXStamper.py
+++ b/LXMF/LXStamper.py
@@ -3,15 +3,18 @@ import RNS.vendor.umsgpack as msgpack

 import os
 import time
+import math
 import multiprocessing

-WORKBLOCK_EXPAND_ROUNDS = 3000
+WORKBLOCK_EXPAND_ROUNDS = 3000
+WORKBLOCK_EXPAND_ROUNDS_PN = 1000
+STAMP_SIZE = RNS.Identity.HASHLENGTH
+PN_VALIDATION_POOL_MIN_SIZE = 256

 active_jobs = {}

-def stamp_workblock(message_id):
+def stamp_workblock(message_id, expand_rounds=WORKBLOCK_EXPAND_ROUNDS):
     wb_st = time.time()
-    expand_rounds = WORKBLOCK_EXPAND_ROUNDS
     workblock = b""
     for n in range(expand_rounds):
         workblock += RNS.Cryptography.hkdf(
@@ -21,7 +24,7 @@ def stamp_workblock(message_id):
             context=None,
         )
     wb_time = time.time() - wb_st
-    RNS.log(f"Stamp workblock size {RNS.prettysize(len(workblock))}, generated in {round(wb_time*1000,2)}ms", RNS.LOG_DEBUG)
+    # RNS.log(f"Stamp workblock size {RNS.prettysize(len(workblock))}, generated in {round(wb_time*1000,2)}ms", RNS.LOG_DEBUG)

     return workblock

@@ -36,6 +39,53 @@ def stamp_value(workblock, stamp):

     return value

+def stamp_valid(stamp, target_cost, workblock):
+    target = 0b1 << 256-target_cost
+    result = RNS.Identity.full_hash(workblock+stamp)
+    if int.from_bytes(result, byteorder="big") > target: return False
+    else: return True
+
+def validate_pn_stamp(transient_id, stamp):
+    target_cost = 8
+    workblock = stamp_workblock(transient_id, expand_rounds=WORKBLOCK_EXPAND_ROUNDS_PN)
+    if stamp_valid(stamp, target_cost, workblock):
+        RNS.log(f"Stamp on {RNS.prettyhexrep(transient_id)} validated", RNS.LOG_DEBUG)
+        value = stamp_value(workblock, stamp)
+        return True
+
+    return False
+
+def validate_pn_stamps_job_simple(transient_stamps):
+    for entry in transient_stamps:
+        # Get transient ID and stamp for validation
+        transient_id = entry[0]
+        stamp = entry[1]
+
+        # Store validation result back into list
+        entry[2] = validate_pn_stamp(transient_id, stamp)
+
+    return transient_stamps
+
+def _validate_single_pn_stamp_entry(entry):
+    transient_id = entry[0]
+    stamp = entry[1]
+    entry[2] = validate_pn_stamp(transient_id, stamp)
+    return entry
+
+def validate_pn_stamps_job_multip(transient_stamps):
+    cores = multiprocessing.cpu_count()
+    pool_count = min(cores, math.ceil(len(transient_stamps) / PN_VALIDATION_POOL_MIN_SIZE))
+
+    RNS.log(f"Validating {len(transient_stamps)} stamps using {pool_count} processes...")
+    with multiprocessing.Pool(pool_count) as p: validated_entries = p.map(_validate_single_pn_stamp_entry, transient_stamps)
+
+    return validated_entries
+
+def validate_pn_stamps(transient_stamps):
+    non_mp_platform = RNS.vendor.platformutils.is_android()
+    if len(transient_stamps) <= PN_VALIDATION_POOL_MIN_SIZE or non_mp_platform: return validate_pn_stamps_job_simple(transient_stamps)
+    else: return validate_pn_stamps_job_multip(transient_stamps)
+
 def generate_stamp(message_id, stamp_cost):
     RNS.log(f"Generating stamp with cost {stamp_cost} for {RNS.prettyhexrep(message_id)}...", RNS.LOG_DEBUG)
     workblock = stamp_workblock(message_id)

From e17263d25a7c4022f0c57f1eefb54b989c139ec5 Mon Sep 17 00:00:00 2001
From: Mark Qvist
Date: Thu, 30 Oct 2025 12:41:18 +0100
Subject: [PATCH 061/123] Cleanup

---
 LXMF/LXMRouter.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py
index 392a76f..30abaa6 100644
--- a/LXMF/LXMRouter.py
+++ b/LXMF/LXMRouter.py
@@ -517,7 +517,7 @@ class LXMRouter:
         et = time.time(); mps = 0 if et-st == 0 else math.floor(len(self.propagation_entries)/(et-st))
         RNS.log(f"Indexed 
{len(self.propagation_entries)} messages in {RNS.prettytime(et-st)}, {mps} msgs/s", RNS.LOG_NOTICE) RNS.log("Rebuilding peer synchronisation states...", RNS.LOG_NOTICE) - st = time.time(); + st = time.time() if os.path.isfile(self.storagepath+"/peers"): peers_file = open(self.storagepath+"/peers", "rb") From 6cf785227101928a9f073be8a38ea6c86caf6be5 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 30 Oct 2025 13:14:59 +0100 Subject: [PATCH 062/123] Cleanup --- LXMF/LXMF.py | 40 +++++++++++++--------------------------- 1 file changed, 13 insertions(+), 27 deletions(-) diff --git a/LXMF/LXMF.py b/LXMF/LXMF.py index db0edb7..93e283f 100644 --- a/LXMF/LXMF.py +++ b/LXMF/LXMF.py @@ -99,21 +99,17 @@ RENDERER_BBCODE = 0x03 import RNS import RNS.vendor.umsgpack as msgpack def display_name_from_app_data(app_data=None): - if app_data == None: - return None - elif len(app_data) == 0: - return None + if app_data == None: return None + elif len(app_data) == 0: return None else: # Version 0.5.0+ announce format if (app_data[0] >= 0x90 and app_data[0] <= 0x9f) or app_data[0] == 0xdc: peer_data = msgpack.unpackb(app_data) if type(peer_data) == list: - if len(peer_data) < 1: - return None + if len(peer_data) < 1: return None else: dn = peer_data[0] - if dn == None: - return None + if dn == None: return None else: try: decoded = dn.decode("utf-8") @@ -127,36 +123,26 @@ def display_name_from_app_data(app_data=None): return app_data.decode("utf-8") def stamp_cost_from_app_data(app_data=None): - if app_data == None or app_data == b"": - return None + if app_data == None or app_data == b"": return None else: # Version 0.5.0+ announce format if (app_data[0] >= 0x90 and app_data[0] <= 0x9f) or app_data[0] == 0xdc: peer_data = msgpack.unpackb(app_data) if type(peer_data) == list: - if len(peer_data) < 2: - return None - else: - return peer_data[1] + if len(peer_data) < 2: return None + else: return peer_data[1] # Original announce format - else: - return None + else: return None def pn_announce_data_is_valid(data): try: - if type(data) == bytes: - data = msgpack.unpackb(data) - - if len(data) < 3: - raise ValueError("Invalid announce data: Insufficient peer data") + if type(data) == bytes: data = msgpack.unpackb(data) + if len(data) < 3: raise ValueError("Invalid announce data: Insufficient peer data") else: - if data[0] != True and data[0] != False: - raise ValueError("Invalid announce data: Indeterminate propagation node status") - try: - int(data[1]) - except: - raise ValueError("Invalid announce data: Could not decode peer timebase") + if data[0] != True and data[0] != False: raise ValueError("Invalid announce data: Indeterminate propagation node status") + try: int(data[1]) + except: raise ValueError("Invalid announce data: Could not decode peer timebase") except Exception as e: RNS.log(f"Could not validate propagation node announce data: {e}", RNS.LOG_DEBUG) From b572723a5e293df59cb6e6608d96571a547088ba Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 30 Oct 2025 13:49:26 +0100 Subject: [PATCH 063/123] Cleanup --- LXMF/LXMPeer.py | 66 ++++++++++++++++--------------------------------- 1 file changed, 21 insertions(+), 45 deletions(-) diff --git a/LXMF/LXMPeer.py b/LXMF/LXMPeer.py index 41ea69a..3a9fc00 100644 --- a/LXMF/LXMPeer.py +++ b/LXMF/LXMPeer.py @@ -54,63 +54,39 @@ class LXMPeer: peer.alive = peer_alive peer.last_heard = peer_last_heard - if "link_establishment_rate" in dictionary: - peer.link_establishment_rate = dictionary["link_establishment_rate"] - else: - peer.link_establishment_rate = 0 + 
if "link_establishment_rate" in dictionary: peer.link_establishment_rate = dictionary["link_establishment_rate"] + else: peer.link_establishment_rate = 0 - if "sync_transfer_rate" in dictionary: - peer.sync_transfer_rate = dictionary["sync_transfer_rate"] - else: - peer.sync_transfer_rate = 0 + if "sync_transfer_rate" in dictionary: peer.sync_transfer_rate = dictionary["sync_transfer_rate"] + else: peer.sync_transfer_rate = 0 if "propagation_transfer_limit" in dictionary: - try: - peer.propagation_transfer_limit = float(dictionary["propagation_transfer_limit"]) - except Exception as e: - peer.propagation_transfer_limit = None - else: - peer.propagation_transfer_limit = None + try: peer.propagation_transfer_limit = float(dictionary["propagation_transfer_limit"]) + except Exception as e: peer.propagation_transfer_limit = None + else: peer.propagation_transfer_limit = None if "propagation_sync_limit" in dictionary: - try: peer.propagation_sync_limit = int(dictionary["propagation_sync_limit"]) + try: peer.propagation_sync_limit = int(dictionary["propagation_sync_limit"]) except: peer.propagation_sync_limit = peer.propagation_transfer_limit - else: peer.propagation_sync_limit = peer.propagation_transfer_limit + else: peer.propagation_sync_limit = peer.propagation_transfer_limit if "sync_strategy" in dictionary: try: peer.sync_strategy = int(dictionary["sync_strategy"]) except: peer.sync_strategy = LXMPeer.DEFAULT_SYNC_STRATEGY else: peer.sync_strategy = LXMPeer.DEFAULT_SYNC_STRATEGY - if "offered" in dictionary: - peer.offered = dictionary["offered"] - else: - peer.offered = 0 - - if "outgoing" in dictionary: - peer.outgoing = dictionary["outgoing"] - else: - peer.outgoing = 0 - - if "incoming" in dictionary: - peer.incoming = dictionary["incoming"] - else: - peer.incoming = 0 - - if "rx_bytes" in dictionary: - peer.rx_bytes = dictionary["rx_bytes"] - else: - peer.rx_bytes = 0 - - if "tx_bytes" in dictionary: - peer.tx_bytes = dictionary["tx_bytes"] - else: - peer.tx_bytes = 0 - - if "last_sync_attempt" in dictionary: - peer.last_sync_attempt = dictionary["last_sync_attempt"] - else: - peer.last_sync_attempt = 0 + if "offered" in dictionary: peer.offered = dictionary["offered"] + else: peer.offered = 0 + if "outgoing" in dictionary: peer.outgoing = dictionary["outgoing"] + else: peer.outgoing = 0 + if "incoming" in dictionary: peer.incoming = dictionary["incoming"] + else: peer.incoming = 0 + if "rx_bytes" in dictionary: peer.rx_bytes = dictionary["rx_bytes"] + else: peer.rx_bytes = 0 + if "tx_bytes" in dictionary: peer.tx_bytes = dictionary["tx_bytes"] + else: peer.tx_bytes = 0 + if "last_sync_attempt" in dictionary: peer.last_sync_attempt = dictionary["last_sync_attempt"] + else: peer.last_sync_attempt = 0 hm_count = 0 for transient_id in dictionary["handled_ids"]: From 9beeafb0c87c22bf2f5647fdbf49cab65865c3d9 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 30 Oct 2025 14:08:39 +0100 Subject: [PATCH 064/123] Propagation node stamp cost handling --- LXMF/LXMPeer.py | 27 +++++++++++---- LXMF/LXMRouter.py | 57 +++++++++++++++---------------- LXMF/Utilities/lxmd.py | 76 ++++++++++++++++++++++++++++++------------ 3 files changed, 102 insertions(+), 58 deletions(-) diff --git a/LXMF/LXMPeer.py b/LXMF/LXMPeer.py index 3a9fc00..9f2519d 100644 --- a/LXMF/LXMPeer.py +++ b/LXMF/LXMPeer.py @@ -70,6 +70,16 @@ class LXMPeer: except: peer.propagation_sync_limit = peer.propagation_transfer_limit else: peer.propagation_sync_limit = peer.propagation_transfer_limit + if "propagation_stamp_cost" in 
dictionary: + try: peer.propagation_stamp_cost = int(dictionary["propagation_stamp_cost"]) + except: peer.propagation_stamp_cost = None + else: peer.propagation_stamp_cost = None + + if "propagation_stamp_cost_flexibility" in dictionary: + try: peer.propagation_stamp_cost_flexibility = int(dictionary["propagation_stamp_cost_flexibility"]) + except: peer.propagation_stamp_cost_flexibility = None + else: peer.propagation_stamp_cost_flexibility = None + if "sync_strategy" in dictionary: try: peer.sync_strategy = int(dictionary["sync_strategy"]) except: peer.sync_strategy = LXMPeer.DEFAULT_SYNC_STRATEGY @@ -118,9 +128,11 @@ class LXMPeer: dictionary["sync_transfer_rate"] = self.sync_transfer_rate dictionary["propagation_transfer_limit"] = self.propagation_transfer_limit dictionary["propagation_sync_limit"] = self.propagation_sync_limit + dictionary["propagation_stamp_cost"] = self.propagation_stamp_cost + dictionary["propagation_stamp_cost_flexibility"] = self.propagation_stamp_cost_flexibility dictionary["sync_strategy"] = self.sync_strategy dictionary["last_sync_attempt"] = self.last_sync_attempt - dictionary["offered"] = self.offered + dictionary["offered"] = self.offered dictionary["outgoing"] = self.outgoing dictionary["incoming"] = self.incoming dictionary["rx_bytes"] = self.rx_bytes @@ -153,11 +165,14 @@ class LXMPeer: self.peering_timebase = 0 self.link_establishment_rate = 0 self.sync_transfer_rate = 0 - self.propagation_transfer_limit = None - self.propagation_sync_limit = None - self.currently_transferring_messages = None - self.handled_messages_queue = deque() - self.unhandled_messages_queue = deque() + + self.propagation_transfer_limit = None + self.propagation_sync_limit = None + self.propagation_stamp_cost = None + self.propagation_stamp_cost_flexibility = None + self.currently_transferring_messages = None + self.handled_messages_queue = deque() + self.unhandled_messages_queue = deque() self.offered = 0 # Messages offered to this peer self.outgoing = 0 # Messages transferred to this peer diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 30abaa6..5c02cf0 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -44,6 +44,9 @@ class LXMRouter: ROTATION_HEADROOM_PCT = 10 ROTATION_AR_MAX = 0.5 + PROPAGATION_COST = 12 + PROPAGATION_COST_MIN = 10 + PROPAGATION_COST_FLEX = 3 PROPAGATION_LIMIT = 256 SYNC_LIMIT = PROPAGATION_LIMIT*40 DELIVERY_LIMIT = 1000 @@ -76,7 +79,8 @@ class LXMRouter: def __init__(self, identity=None, storagepath=None, autopeer=AUTOPEER, autopeer_maxdepth=None, propagation_limit=PROPAGATION_LIMIT, delivery_limit=DELIVERY_LIMIT, sync_limit=SYNC_LIMIT, enforce_ratchets=False, enforce_stamps=False, static_peers = [], max_peers=None, - from_static_only=False, sync_strategy=LXMPeer.STRATEGY_PERSISTENT): + from_static_only=False, sync_strategy=LXMPeer.STRATEGY_PERSISTENT, + propagation_cost=PROPAGATION_COST, propagation_cost_flexibility=PROPAGATION_COST_FLEX): random.seed(os.urandom(10)) @@ -101,8 +105,7 @@ class LXMRouter: self.propagation_node = False self.propagation_node_start_time = None - if storagepath == None: - raise ValueError("LXMF cannot be initialised without a storage path") + if storagepath == None: raise ValueError("LXMF cannot be initialised without a storage path") else: self.storagepath = storagepath+"/lxmf" self.ratchetpath = self.storagepath+"/ratchets" @@ -117,6 +120,8 @@ class LXMRouter: self.propagation_per_transfer_limit = propagation_limit self.propagation_per_sync_limit = sync_limit self.delivery_per_transfer_limit = delivery_limit + 
self.propagation_stamp_cost = propagation_cost + self.propagation_stamp_cost_flexibility = propagation_cost_flexibility self.enforce_ratchets = enforce_ratchets self._enforce_stamps = enforce_stamps self.pending_deferred_stamps = {} @@ -153,34 +158,24 @@ class LXMRouter: self.unpeered_propagation_incoming = 0 self.unpeered_propagation_rx_bytes = 0 - if autopeer != None: - self.autopeer = autopeer - else: - self.autopeer = LXMRouter.AUTOPEER + if autopeer != None: self.autopeer = autopeer + else: self.autopeer = LXMRouter.AUTOPEER - if autopeer_maxdepth != None: - self.autopeer_maxdepth = autopeer_maxdepth - else: - self.autopeer_maxdepth = LXMRouter.AUTOPEER_MAXDEPTH + if autopeer_maxdepth != None: self.autopeer_maxdepth = autopeer_maxdepth + else: self.autopeer_maxdepth = LXMRouter.AUTOPEER_MAXDEPTH - if max_peers == None: - self.max_peers = LXMRouter.MAX_PEERS + if max_peers == None: self.max_peers = LXMRouter.MAX_PEERS else: - if type(max_peers) == int and max_peers >= 0: - self.max_peers = max_peers - else: - raise ValueError(f"Invalid value for max_peers: {max_peers}") + if type(max_peers) == int and max_peers >= 0: self.max_peers = max_peers + else: raise ValueError(f"Invalid value for max_peers: {max_peers}") self.from_static_only = from_static_only - if type(static_peers) != list: - raise ValueError(f"Invalid type supplied for static peer list: {type(static_peers)}") + if type(static_peers) != list: raise ValueError(f"Invalid type supplied for static peer list: {type(static_peers)}") else: for static_peer in static_peers: - if type(static_peer) != bytes: - raise ValueError(f"Invalid static peer destination hash: {static_peer}") + if type(static_peer) != bytes: raise ValueError(f"Invalid static peer destination hash: {static_peer}") else: - if len(static_peer) != RNS.Reticulum.TRUNCATED_HASHLENGTH//8: - raise ValueError(f"Invalid static peer destination hash: {static_peer}") + if len(static_peer) != RNS.Reticulum.TRUNCATED_HASHLENGTH//8: raise ValueError(f"Invalid static peer destination hash: {static_peer}") self.static_peers = static_peers @@ -288,11 +283,12 @@ class LXMRouter: def delayed_announce(): time.sleep(LXMRouter.NODE_ANNOUNCE_DELAY) node_state = self.propagation_node and not self.from_static_only + stamp_cost = [self.propagation_stamp_cost, self.propagation_stamp_cost_flexibility] announce_data = [ node_state, # Boolean flag signalling propagation node state int(time.time()), # Current node timebase self.propagation_per_transfer_limit, # Per-transfer limit for message propagation in kilobytes - None, # How many more inbound peers this node wants + stamp_cost, # Propagation stamp cost for this node self.propagation_per_sync_limit, # Limit for incoming propagation node syncs ] @@ -716,6 +712,8 @@ class LXMRouter: "str": int(peer.sync_transfer_rate), "transfer_limit": peer.propagation_transfer_limit, "sync_limit": peer.propagation_sync_limit, + "target_stamp_cost": peer.propagation_stamp_cost, + "stamp_cost_flexibility": peer.propagation_stamp_cost_flexibility, "network_distance": RNS.Transport.hops_to(peer_id), "rx_bytes": peer.rx_bytes, "tx_bytes": peer.tx_bytes, @@ -734,6 +732,8 @@ class LXMRouter: "delivery_limit": self.delivery_per_transfer_limit, "propagation_limit": self.propagation_per_transfer_limit, "sync_limit": self.propagation_per_sync_limit, + "target_stamp_cost": self.propagation_stamp_cost, + "stamp_cost_flexibility": self.propagation_stamp_cost_flexibility, "autopeer_maxdepth": self.autopeer_maxdepth, "from_static_only": self.from_static_only, 
"messagestore": { @@ -757,12 +757,9 @@ class LXMRouter: return node_stats def stats_get_request(self, path, data, request_id, remote_identity, requested_at): - if remote_identity == None: - return LXMPeer.ERROR_NO_IDENTITY - elif remote_identity.hash != self.identity.hash: - return LXMPeer.ERROR_NO_ACCESS - else: - return self.compile_stats() + if remote_identity == None: return LXMPeer.ERROR_NO_IDENTITY + elif remote_identity.hash != self.identity.hash: return LXMPeer.ERROR_NO_ACCESS + else: return self.compile_stats() ### Utility & Maintenance ############################# diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index e49bd7a..03d1282 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -150,6 +150,20 @@ def apply_config(): else: active_configuration["propagation_sync_max_accepted_size"] = 256*40 + if "propagation" in lxmd_config and "propagation_stamp_cost_target" in lxmd_config["propagation"]: + active_configuration["propagation_stamp_cost_target"] = lxmd_config["propagation"].as_int("propagation_stamp_cost_target") + if active_configuration["propagation_stamp_cost_target"] < LXMF.LXMRouter.PROPAGATION_COST_MIN: + active_configuration["propagation_stamp_cost_target"] = LXMF.LXMRouter.PROPAGATION_COST_MIN + else: + active_configuration["propagation_stamp_cost_target"] = LXMF.LXMRouter.PROPAGATION_COST + + if "propagation" in lxmd_config and "propagation_stamp_cost_flexibility" in lxmd_config["propagation"]: + active_configuration["propagation_stamp_cost_flexibility"] = lxmd_config["propagation"].as_int("propagation_stamp_cost_flexibility") + if active_configuration["propagation_stamp_cost_flexibility"] < 0: + active_configuration["propagation_stamp_cost_flexibility"] = 0 + else: + active_configuration["propagation_stamp_cost_flexibility"] = LXMF.LXMRouter.PROPAGATION_COST_FLEX + if "propagation" in lxmd_config and "prioritise_destinations" in lxmd_config["propagation"]: active_configuration["prioritised_lxmf_destinations"] = lxmd_config["propagation"].as_list("prioritise_destinations") else: @@ -337,6 +351,8 @@ def program_setup(configdir = None, rnsconfigdir = None, run_pn = False, on_inbo autopeer = active_configuration["autopeer"], autopeer_maxdepth = active_configuration["autopeer_maxdepth"], propagation_limit = active_configuration["propagation_transfer_max_accepted_size"], + propagation_cost = active_configuration["propagation_stamp_cost_target"], + propagation_cost_flexibility = active_configuration["propagation_stamp_cost_flexibility"], sync_limit = active_configuration["propagation_sync_max_accepted_size"], delivery_limit = active_configuration["delivery_transfer_max_accepted_size"], max_peers = active_configuration["max_peers"], @@ -557,13 +573,15 @@ def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = print(f"\nLXMF Propagation Node running on {dhs}, uptime is {uts}") if show_status: - msb = RNS.prettysize(s["messagestore"]["bytes"]); msl = RNS.prettysize(s["messagestore"]["limit"]) - ptl = RNS.prettysize(s["propagation_limit"]*1000); uprx = RNS.prettysize(s["unpeered_propagation_rx_bytes"]) + msb = RNS.prettysize(s["messagestore"]["bytes"]); msl = RNS.prettysize(s["messagestore"]["limit"]) + ptl = RNS.prettysize(s["propagation_limit"]*1000); uprx = RNS.prettysize(s["unpeered_propagation_rx_bytes"]) mscnt = s["messagestore"]["count"]; stp = s["total_peers"]; smp = s["max_peers"]; sdp = s["discovered_peers"] - ssp = s["static_peers"]; cprr = s["clients"]["client_propagation_messages_received"] - cprs = 
s["clients"]["client_propagation_messages_served"]; upi = s["unpeered_propagation_incoming"] + ssp = s["static_peers"]; cprr = s["clients"]["client_propagation_messages_received"] + cprs = s["clients"]["client_propagation_messages_served"]; upi = s["unpeered_propagation_incoming"] + psc = s["target_stamp_cost"]; scf = s["stamp_cost_flexibility"] print(f"Messagestore contains {mscnt} messages, {msb} ({ms_util} utilised of {msl})") print(f"Accepting propagated messages from {who_str}, {ptl} per-transfer limit") + print(f"Required propagation stamp cost is {psc}, flexibility is {scf}") print(f"") print(f"Peers : {stp} total (peer limit is {smp})") print(f" {sdp} discovered, {ssp} static") @@ -690,24 +708,6 @@ autopeer = yes autopeer_maxdepth = 4 -# The maximum accepted transfer size per in- -# coming propagation message, in kilobytes. -# This sets the upper limit for the size of -# single messages accepted onto this node. - -propagation_message_max_accepted_size = 256 - -# The maximum accepted transfer size per in- -# coming propagation node sync. -# -# If a node wants to propagate a larger number -# of messages to this node, than what can fit -# within this limit, it will prioritise sending -# the smallest messages first, and try again -# with any remaining messages at a later point. - -propagation_sync_max_accepted_size = 256 - # The maximum amount of storage to use for # the LXMF Propagation Node message store, # specified in megabytes. When this limit @@ -720,6 +720,38 @@ propagation_sync_max_accepted_size = 256 # message_storage_limit = 500 +# The maximum accepted transfer size per in- +# coming propagation message, in kilobytes. +# This sets the upper limit for the size of +# single messages accepted onto this node. + +# propagation_message_max_accepted_size = 256 + +# The maximum accepted transfer size per in- +# coming propagation node sync. +# +# If a node wants to propagate a larger number +# of messages to this node, than what can fit +# within this limit, it will prioritise sending +# the smallest messages first, and try again +# with any remaining messages at a later point. + +# propagation_sync_max_accepted_size = 10240 + +# You can configure the target stamp cost +# required to deliver messages via this node. + +# propagation_stamp_cost_target = 12 + +# If set higher than 0, the stamp cost flexi- +# bility option will make this node accept +# messages with a lower stamp cost than the +# target from other propagation nodes (but +# not from peers directly). This allows the +# network to gradually adjust stamp cost. + +# propagation_stamp_cost_flexibility = 3 + # You can tell the LXMF message router to # prioritise storage for one or more # destinations. 
If the message store reaches From 8f54d40abf150a4efe3c3480f0982aa6abd291e7 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 30 Oct 2025 14:09:54 +0100 Subject: [PATCH 065/123] Updated version --- LXMF/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LXMF/_version.py b/LXMF/_version.py index 8088f75..3e2f46a 100644 --- a/LXMF/_version.py +++ b/LXMF/_version.py @@ -1 +1 @@ -__version__ = "0.8.1" +__version__ = "0.9.0" From 3f91e44a6d6ffec26e7c74e5c434b854e60b1cee Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 30 Oct 2025 14:10:14 +0100 Subject: [PATCH 066/123] Updated RNS version --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 568ade3..16d8d3c 100644 --- a/setup.py +++ b/setup.py @@ -26,6 +26,6 @@ setuptools.setup( 'lxmd=LXMF.Utilities.lxmd:main', ] }, - install_requires=["rns>=1.0.0"], + install_requires=["rns>=1.0.1"], python_requires=">=3.7", ) From 0f2d3b06c2bb02d0a096c896b21f6f52100b3eb8 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 30 Oct 2025 15:18:09 +0100 Subject: [PATCH 067/123] Also receive PN path response announces, but only update peering data on live announces --- LXMF/Handlers.py | 85 ++++++++++++++++++++++++------------------------ 1 file changed, 43 insertions(+), 42 deletions(-) diff --git a/LXMF/Handlers.py b/LXMF/Handlers.py index e1dba8e..793ba16 100644 --- a/LXMF/Handlers.py +++ b/LXMF/Handlers.py @@ -35,53 +35,54 @@ class LXMFDeliveryAnnounceHandler: class LXMFPropagationAnnounceHandler: def __init__(self, lxmrouter): self.aspect_filter = APP_NAME+".propagation" - self.receive_path_responses = False + self.receive_path_responses = True self.lxmrouter = lxmrouter - def received_announce(self, destination_hash, announced_identity, app_data): - try: - if type(app_data) == bytes: - if self.lxmrouter.propagation_node: - data = msgpack.unpackb(app_data) - if pn_announce_data_is_valid(data): - node_timebase = data[1] - propagation_transfer_limit = None - propagation_sync_limit = None - wanted_inbound_peers = None - if len(data) >= 5: - try: propagation_sync_limit = int(data[4]) - except Exception as e: propagation_sync_limit = None + def received_announce(self, destination_hash, announced_identity, app_data, announce_packet_hash, is_path_response): + if not is_path_response: + try: + if type(app_data) == bytes: + if self.lxmrouter.propagation_node: + data = msgpack.unpackb(app_data) + if pn_announce_data_is_valid(data): + node_timebase = data[1] + propagation_transfer_limit = None + propagation_sync_limit = None + wanted_inbound_peers = None + if len(data) >= 5: + try: propagation_sync_limit = int(data[4]) + except Exception as e: propagation_sync_limit = None - if len(data) >= 4: - # TODO: Rethink, probably not necessary anymore - # try: wanted_inbound_peers = int(data[3]) - # except: wanted_inbound_peers = None - pass + if len(data) >= 4: + # TODO: Rethink, probably not necessary anymore + # try: wanted_inbound_peers = int(data[3]) + # except: wanted_inbound_peers = None + pass - if len(data) >= 3: - try: propagation_transfer_limit = float(data[2]) - except: propagation_transfer_limit = None + if len(data) >= 3: + try: propagation_transfer_limit = float(data[2]) + except: propagation_transfer_limit = None - if destination_hash in self.lxmrouter.static_peers: - self.lxmrouter.peer(destination_hash=destination_hash, - timestamp=node_timebase, - propagation_transfer_limit=propagation_transfer_limit, - propagation_sync_limit=propagation_sync_limit, - 
wanted_inbound_peers=wanted_inbound_peers) + if destination_hash in self.lxmrouter.static_peers: + self.lxmrouter.peer(destination_hash=destination_hash, + timestamp=node_timebase, + propagation_transfer_limit=propagation_transfer_limit, + propagation_sync_limit=propagation_sync_limit, + wanted_inbound_peers=wanted_inbound_peers) - else: - if self.lxmrouter.autopeer: - if data[0] == True: - if RNS.Transport.hops_to(destination_hash) <= self.lxmrouter.autopeer_maxdepth: - self.lxmrouter.peer(destination_hash=destination_hash, - timestamp=node_timebase, - propagation_transfer_limit=propagation_transfer_limit, - propagation_sync_limit=propagation_sync_limit, - wanted_inbound_peers=wanted_inbound_peers) + else: + if self.lxmrouter.autopeer: + if data[0] == True: + if RNS.Transport.hops_to(destination_hash) <= self.lxmrouter.autopeer_maxdepth: + self.lxmrouter.peer(destination_hash=destination_hash, + timestamp=node_timebase, + propagation_transfer_limit=propagation_transfer_limit, + propagation_sync_limit=propagation_sync_limit, + wanted_inbound_peers=wanted_inbound_peers) - elif data[0] == False: - self.lxmrouter.unpeer(destination_hash, node_timebase) + elif data[0] == False: + self.lxmrouter.unpeer(destination_hash, node_timebase) - except Exception as e: - RNS.log("Error while evaluating propagation node announce, ignoring announce.", RNS.LOG_DEBUG) - RNS.log("The contained exception was: "+str(e), RNS.LOG_DEBUG) + except Exception as e: + RNS.log("Error while evaluating propagation node announce, ignoring announce.", RNS.LOG_DEBUG) + RNS.log("The contained exception was: "+str(e), RNS.LOG_DEBUG) From bd6fe9f9d1e2095fd77eedad1cc406418198dc1e Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 30 Oct 2025 15:39:00 +0100 Subject: [PATCH 068/123] Handle propagation node stamp cost peering signalling --- LXMF/Handlers.py | 33 +++++++++++++-------------------- LXMF/LXMF.py | 11 ++++++++++- LXMF/LXMRouter.py | 8 ++++++-- 3 files changed, 29 insertions(+), 23 deletions(-) diff --git a/LXMF/Handlers.py b/LXMF/Handlers.py index 793ba16..41b3200 100644 --- a/LXMF/Handlers.py +++ b/LXMF/Handlers.py @@ -45,42 +45,35 @@ class LXMFPropagationAnnounceHandler: if self.lxmrouter.propagation_node: data = msgpack.unpackb(app_data) if pn_announce_data_is_valid(data): - node_timebase = data[1] - propagation_transfer_limit = None - propagation_sync_limit = None - wanted_inbound_peers = None - if len(data) >= 5: - try: propagation_sync_limit = int(data[4]) - except Exception as e: propagation_sync_limit = None - - if len(data) >= 4: - # TODO: Rethink, probably not necessary anymore - # try: wanted_inbound_peers = int(data[3]) - # except: wanted_inbound_peers = None - pass - - if len(data) >= 3: - try: propagation_transfer_limit = float(data[2]) - except: propagation_transfer_limit = None - + pn_active = data[0] + node_timebase = int(data[1]) + propagation_transfer_limit = int(data[2]) + propagation_sync_limit = int(data[3]) + propagation_stamp_cost = int(data[4][0]) + propagation_stamp_cost_flexibility = int(data[4][1]) + if destination_hash in self.lxmrouter.static_peers: self.lxmrouter.peer(destination_hash=destination_hash, timestamp=node_timebase, propagation_transfer_limit=propagation_transfer_limit, propagation_sync_limit=propagation_sync_limit, + propagation_stamp_cost=propagation_stamp_cost, + propagation_stamp_cost_flexibility=propagation_stamp_cost_flexibility, wanted_inbound_peers=wanted_inbound_peers) else: if self.lxmrouter.autopeer: - if data[0] == True: + if pn_active == True: if 
RNS.Transport.hops_to(destination_hash) <= self.lxmrouter.autopeer_maxdepth: self.lxmrouter.peer(destination_hash=destination_hash, timestamp=node_timebase, propagation_transfer_limit=propagation_transfer_limit, propagation_sync_limit=propagation_sync_limit, + propagation_stamp_cost=propagation_stamp_cost, + propagation_stamp_cost_flexibility=propagation_stamp_cost_flexibility, wanted_inbound_peers=wanted_inbound_peers) - elif data[0] == False: + elif pn_active == False: self.lxmrouter.unpeer(destination_hash, node_timebase) except Exception as e: diff --git a/LXMF/LXMF.py b/LXMF/LXMF.py index 93e283f..8d1aaf7 100644 --- a/LXMF/LXMF.py +++ b/LXMF/LXMF.py @@ -138,11 +138,20 @@ def stamp_cost_from_app_data(app_data=None): def pn_announce_data_is_valid(data): try: if type(data) == bytes: data = msgpack.unpackb(data) - if len(data) < 3: raise ValueError("Invalid announce data: Insufficient peer data") + if len(data) < 5: raise ValueError("Invalid announce data: Insufficient peer data") else: if data[0] != True and data[0] != False: raise ValueError("Invalid announce data: Indeterminate propagation node status") try: int(data[1]) except: raise ValueError("Invalid announce data: Could not decode peer timebase") + try: int(data[2]) + except: raise ValueError("Invalid announce data: Could not decode peer propagation transfer limit") + try: int(data[3]) + except: raise ValueError("Invalid announce data: Could not decode peer propagation sync limit") + if type(data[4]) != list: raise ValueError("Invalid announce data: Could not decode peer stamp costs") + try: int(data[4][0]) + except: raise ValueError("Invalid announce data: Could not decode peer target stamp cost") + try: int(data[4][1]) + except: raise ValueError("Invalid announce data: Could not decode peer stamp cost flexibility") except Exception as e: RNS.log(f"Could not validate propagation node announce data: {e}", RNS.LOG_DEBUG) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 5c02cf0..eef9fee 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -288,8 +288,8 @@ class LXMRouter: node_state, # Boolean flag signalling propagation node state int(time.time()), # Current node timebase self.propagation_per_transfer_limit, # Per-transfer limit for message propagation in kilobytes - stamp_cost, # Propagation stamp cost for this node self.propagation_per_sync_limit, # Limit for incoming propagation node syncs + stamp_cost, # Propagation stamp cost for this node ] data = msgpack.packb(announce_data) @@ -1783,7 +1783,7 @@ class LXMRouter: ### Peer Sync & Propagation ########################### ####################################################### - def peer(self, destination_hash, timestamp, propagation_transfer_limit, propagation_sync_limit, wanted_inbound_peers = None): + def peer(self, destination_hash, timestamp, propagation_transfer_limit, propagation_sync_limit, propagation_stamp_cost, propagation_stamp_cost_flexibility): if destination_hash in self.peers: peer = self.peers[destination_hash] if timestamp > peer.peering_timebase: @@ -1792,6 +1792,8 @@ class LXMRouter: peer.next_sync_attempt = 0 peer.peering_timebase = timestamp peer.last_heard = time.time() + peer.propagation_stamp_cost = propagation_stamp_cost + peer.propagation_stamp_cost_flexibility = propagation_stamp_cost_flexibility peer.propagation_transfer_limit = propagation_transfer_limit if propagation_sync_limit != None: peer.propagation_sync_limit = propagation_sync_limit else: peer.propagation_sync_limit = propagation_transfer_limit @@ -1803,6 +1805,8 @@ class 
LXMRouter: peer = LXMPeer(self, destination_hash, sync_strategy=self.default_sync_strategy) peer.alive = True peer.last_heard = time.time() + peer.propagation_stamp_cost = propagation_stamp_cost + peer.propagation_stamp_cost_flexibility = propagation_stamp_cost_flexibility peer.propagation_transfer_limit = propagation_transfer_limit if propagation_sync_limit != None: peer.propagation_sync_limit = propagation_sync_limit else: peer.propagation_sync_limit = propagation_transfer_limit From aca5bf9c14d9b0aac3156ba4138abea9790d6347 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 30 Oct 2025 16:06:53 +0100 Subject: [PATCH 069/123] Cleanup --- LXMF/Handlers.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/LXMF/Handlers.py b/LXMF/Handlers.py index 41b3200..b5f2659 100644 --- a/LXMF/Handlers.py +++ b/LXMF/Handlers.py @@ -58,8 +58,7 @@ class LXMFPropagationAnnounceHandler: propagation_transfer_limit=propagation_transfer_limit, propagation_sync_limit=propagation_sync_limit, propagation_stamp_cost=propagation_stamp_cost, - propagation_stamp_cost_flexibility=propagation_stamp_cost_flexibility, - wanted_inbound_peers=wanted_inbound_peers) + propagation_stamp_cost_flexibility=propagation_stamp_cost_flexibility) else: if self.lxmrouter.autopeer: @@ -70,8 +69,7 @@ class LXMFPropagationAnnounceHandler: propagation_transfer_limit=propagation_transfer_limit, propagation_sync_limit=propagation_sync_limit, propagation_stamp_cost=propagation_stamp_cost, - propagation_stamp_cost_flexibility=propagation_stamp_cost_flexibility, - wanted_inbound_peers=wanted_inbound_peers) + propagation_stamp_cost_flexibility=propagation_stamp_cost_flexibility) elif pn_active == False: self.lxmrouter.unpeer(destination_hash, node_timebase) From d8b25e092f0ba8693964fa75a6a500ba220b3257 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 30 Oct 2025 16:43:26 +0100 Subject: [PATCH 070/123] Added metadate structure to propagation node announces --- LXMF/Handlers.py | 23 +++++++++++++---------- LXMF/LXMF.py | 26 +++++++++++++++++++------- LXMF/LXMRouter.py | 6 ++++-- LXMF/LXStamper.py | 2 +- 4 files changed, 37 insertions(+), 20 deletions(-) diff --git a/LXMF/Handlers.py b/LXMF/Handlers.py index b5f2659..40c416d 100644 --- a/LXMF/Handlers.py +++ b/LXMF/Handlers.py @@ -45,12 +45,13 @@ class LXMFPropagationAnnounceHandler: if self.lxmrouter.propagation_node: data = msgpack.unpackb(app_data) if pn_announce_data_is_valid(data): - pn_active = data[0] - node_timebase = int(data[1]) - propagation_transfer_limit = int(data[2]) - propagation_sync_limit = int(data[3]) - propagation_stamp_cost = int(data[4][0]) - propagation_stamp_cost_flexibility = int(data[4][1]) + metadata = data[0] + propagation_enabled = data[1] + node_timebase = int(data[2]) + propagation_transfer_limit = int(data[3]) + propagation_sync_limit = int(data[4]) + propagation_stamp_cost = int(data[5][0]) + propagation_stamp_cost_flexibility = int(data[5][1]) if destination_hash in self.lxmrouter.static_peers: self.lxmrouter.peer(destination_hash=destination_hash, @@ -58,20 +59,22 @@ class LXMFPropagationAnnounceHandler: propagation_transfer_limit=propagation_transfer_limit, propagation_sync_limit=propagation_sync_limit, propagation_stamp_cost=propagation_stamp_cost, - propagation_stamp_cost_flexibility=propagation_stamp_cost_flexibility) + propagation_stamp_cost_flexibility=propagation_stamp_cost_flexibility, + metadata=metadata) else: if self.lxmrouter.autopeer: - if pn_active == True: + if propagation_enabled == True: if 
RNS.Transport.hops_to(destination_hash) <= self.lxmrouter.autopeer_maxdepth: self.lxmrouter.peer(destination_hash=destination_hash, timestamp=node_timebase, propagation_transfer_limit=propagation_transfer_limit, propagation_sync_limit=propagation_sync_limit, propagation_stamp_cost=propagation_stamp_cost, - propagation_stamp_cost_flexibility=propagation_stamp_cost_flexibility) + propagation_stamp_cost_flexibility=propagation_stamp_cost_flexibility, + metadata=metadata) - elif pn_active == False: + elif propagation_enabled == False: self.lxmrouter.unpeer(destination_hash, node_timebase) except Exception as e: diff --git a/LXMF/LXMF.py b/LXMF/LXMF.py index 8d1aaf7..8342d33 100644 --- a/LXMF/LXMF.py +++ b/LXMF/LXMF.py @@ -91,6 +91,17 @@ RENDERER_MICRON = 0x01 RENDERER_MARKDOWN = 0x02 RENDERER_BBCODE = 0x03 +# Optional propagation node metadata fields. These +# fields may be highly unstable in allocation and +# availability until the version 1.0.0 release, so use +# at your own risk until then, and expect changes! +PN_META_VERSION = 0x00 +PN_META_NAME = 0x01 +PN_META_SYNC_STRATUM = 0x02 +PN_META_SYNC_THROTTLE = 0x03 +PN_META_AUTH_BAND = 0x04 +PN_META_UTIL_PRESSURE = 0x05 + ########################################################## # The following helper functions makes it easier to # # handle and operate on LXMF data in client programs # @@ -138,19 +149,20 @@ def stamp_cost_from_app_data(app_data=None): def pn_announce_data_is_valid(data): try: if type(data) == bytes: data = msgpack.unpackb(data) - if len(data) < 5: raise ValueError("Invalid announce data: Insufficient peer data") + if len(data) < 6: raise ValueError("Invalid announce data: Insufficient peer data") else: - if data[0] != True and data[0] != False: raise ValueError("Invalid announce data: Indeterminate propagation node status") - try: int(data[1]) - except: raise ValueError("Invalid announce data: Could not decode peer timebase") + if type(data[0]) != dict: raise ValueError("Invalid announce data: Could not decode peer metadata") + if data[1] != True and data[1] != False: raise ValueError("Invalid announce data: Indeterminate propagation node status") try: int(data[2]) - except: raise ValueError("Invalid announce data: Could not decode peer propagation transfer limit") + except: raise ValueError("Invalid announce data: Could not decode peer timebase") try: int(data[3]) + except: raise ValueError("Invalid announce data: Could not decode peer propagation transfer limit") + try: int(data[4]) except: raise ValueError("Invalid announce data: Could not decode peer propagation sync limit") if type(data[4]) != list: raise ValueError("Invalid announce data: Could not decode peer stamp costs") - try: int(data[4][0]) + try: int(data[5][0]) except: raise ValueError("Invalid announce data: Could not decode peer target stamp cost") - try: int(data[4][1]) + try: int(data[5][1]) except: raise ValueError("Invalid announce data: Could not decode peer stamp cost flexibility") except Exception as e: diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index eef9fee..4f31920 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -282,9 +282,11 @@ class LXMRouter: def announce_propagation_node(self): def delayed_announce(): time.sleep(LXMRouter.NODE_ANNOUNCE_DELAY) - node_state = self.propagation_node and not self.from_static_only - stamp_cost = [self.propagation_stamp_cost, self.propagation_stamp_cost_flexibility] + node_state = self.propagation_node and not self.from_static_only + stamp_cost = [self.propagation_stamp_cost, 
self.propagation_stamp_cost_flexibility] + metadata = {} announce_data = [ + metadata, # Node metadata node_state, # Boolean flag signalling propagation node state int(time.time()), # Current node timebase self.propagation_per_transfer_limit, # Per-transfer limit for message propagation in kilobytes diff --git a/LXMF/LXStamper.py b/LXMF/LXStamper.py index a9ca7d6..2db0598 100644 --- a/LXMF/LXStamper.py +++ b/LXMF/LXStamper.py @@ -76,7 +76,7 @@ def validate_pn_stamps_job_multip(transient_stamps): cores = multiprocessing.cpu_count() pool_count = min(cores, math.ceil(len(transient_stamps) / PN_VALIDATION_POOL_MIN_SIZE)) - RNS.log(f"Validating {len(transient_stamps)} stamps using {pool_count} processes...") + RNS.log(f"Validating {len(transient_stamps)} stamps using {pool_count} processes...", RNS.LOG_VERBOSE) with multiprocessing.Pool(pool_count) as p: validated_entries = p.map(_validate_single_pn_stamp_entry, transient_stamps) return validated_entries From c28d3b1432bb2783ca31207fcb9be72345f5998c Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 30 Oct 2025 16:44:15 +0100 Subject: [PATCH 071/123] Added metadate structure to propagation node announces --- LXMF/LXMF.py | 1 + 1 file changed, 1 insertion(+) diff --git a/LXMF/LXMF.py b/LXMF/LXMF.py index 8342d33..a57e31a 100644 --- a/LXMF/LXMF.py +++ b/LXMF/LXMF.py @@ -101,6 +101,7 @@ PN_META_SYNC_STRATUM = 0x02 PN_META_SYNC_THROTTLE = 0x03 PN_META_AUTH_BAND = 0x04 PN_META_UTIL_PRESSURE = 0x05 +PN_META_CUSTOM = 0xFF ########################################################## # The following helper functions makes it easier to # From 81a6d503a3d897550d0ae78d05e11a87e71f99d7 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 30 Oct 2025 16:55:44 +0100 Subject: [PATCH 072/123] Automatic legacy PN de-peering --- LXMF/Handlers.py | 6 +++--- LXMF/LXMF.py | 8 ++++---- LXMF/LXMRouter.py | 15 +++++++-------- 3 files changed, 14 insertions(+), 15 deletions(-) diff --git a/LXMF/Handlers.py b/LXMF/Handlers.py index 40c416d..5daedd1 100644 --- a/LXMF/Handlers.py +++ b/LXMF/Handlers.py @@ -45,13 +45,13 @@ class LXMFPropagationAnnounceHandler: if self.lxmrouter.propagation_node: data = msgpack.unpackb(app_data) if pn_announce_data_is_valid(data): - metadata = data[0] - propagation_enabled = data[1] - node_timebase = int(data[2]) + node_timebase = int(data[1]) + propagation_enabled = data[2] propagation_transfer_limit = int(data[3]) propagation_sync_limit = int(data[4]) propagation_stamp_cost = int(data[5][0]) propagation_stamp_cost_flexibility = int(data[5][1]) + metadata = data[6] if destination_hash in self.lxmrouter.static_peers: self.lxmrouter.peer(destination_hash=destination_hash, diff --git a/LXMF/LXMF.py b/LXMF/LXMF.py index a57e31a..6369d69 100644 --- a/LXMF/LXMF.py +++ b/LXMF/LXMF.py @@ -150,12 +150,11 @@ def stamp_cost_from_app_data(app_data=None): def pn_announce_data_is_valid(data): try: if type(data) == bytes: data = msgpack.unpackb(data) - if len(data) < 6: raise ValueError("Invalid announce data: Insufficient peer data") + if len(data) < 7: raise ValueError("Invalid announce data: Insufficient peer data") else: - if type(data[0]) != dict: raise ValueError("Invalid announce data: Could not decode peer metadata") - if data[1] != True and data[1] != False: raise ValueError("Invalid announce data: Indeterminate propagation node status") - try: int(data[2]) + try: int(data[1]) except: raise ValueError("Invalid announce data: Could not decode peer timebase") + if data[2] != True and data[2] != False: raise ValueError("Invalid announce data: 
Indeterminate propagation node status") try: int(data[3]) except: raise ValueError("Invalid announce data: Could not decode peer propagation transfer limit") try: int(data[4]) @@ -165,6 +164,7 @@ def pn_announce_data_is_valid(data): except: raise ValueError("Invalid announce data: Could not decode peer target stamp cost") try: int(data[5][1]) except: raise ValueError("Invalid announce data: Could not decode peer stamp cost flexibility") + if type(data[6]) != dict: raise ValueError("Invalid announce data: Could not decode peer metadata") except Exception as e: RNS.log(f"Could not validate propagation node announce data: {e}", RNS.LOG_DEBUG) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 4f31920..816d949 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -285,14 +285,13 @@ class LXMRouter: node_state = self.propagation_node and not self.from_static_only stamp_cost = [self.propagation_stamp_cost, self.propagation_stamp_cost_flexibility] metadata = {} - announce_data = [ - metadata, # Node metadata - node_state, # Boolean flag signalling propagation node state - int(time.time()), # Current node timebase - self.propagation_per_transfer_limit, # Per-transfer limit for message propagation in kilobytes - self.propagation_per_sync_limit, # Limit for incoming propagation node syncs - stamp_cost, # Propagation stamp cost for this node - ] + announce_data = [ False, # Legacy LXMF PN support + int(time.time()), # Current node timebase + node_state, # Boolean flag signalling propagation node state + self.propagation_per_transfer_limit, # Per-transfer limit for message propagation in kilobytes + self.propagation_per_sync_limit, # Limit for incoming propagation node syncs + stamp_cost, # Propagation stamp cost for this node + metadata ] # Node metadata data = msgpack.packb(announce_data) self.propagation_destination.announce(app_data=data) From 6446db4f1111a93d779879669d99a568efbf014c Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 30 Oct 2025 17:03:05 +0100 Subject: [PATCH 073/123] Break peering when peer moves outside auto-peering range --- LXMF/Handlers.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/LXMF/Handlers.py b/LXMF/Handlers.py index 5daedd1..c51231c 100644 --- a/LXMF/Handlers.py +++ b/LXMF/Handlers.py @@ -74,6 +74,11 @@ class LXMFPropagationAnnounceHandler: propagation_stamp_cost_flexibility=propagation_stamp_cost_flexibility, metadata=metadata) + else: + if destination_hash in self.lxmrouter.peers: + RNS.log(f"Peer {self.lxmrouter.peers[destination_hash]} moved outside auto-peering range, breaking peering...") + self.lxmrouter.unpeer(destination_hash, node_timebase) + elif propagation_enabled == False: self.lxmrouter.unpeer(destination_hash, node_timebase) From a62ffa12b1c3af3c3b69b999fc9b3f6ed25b4768 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 30 Oct 2025 19:45:40 +0100 Subject: [PATCH 074/123] Cleanup --- LXMF/Handlers.py | 14 ++++---- LXMF/LXMPeer.py | 94 ++++++++++++++++++++---------------------------- 2 files changed, 45 insertions(+), 63 deletions(-) diff --git a/LXMF/Handlers.py b/LXMF/Handlers.py index c51231c..fc980c3 100644 --- a/LXMF/Handlers.py +++ b/LXMF/Handlers.py @@ -8,13 +8,13 @@ from .LXMessage import LXMessage class LXMFDeliveryAnnounceHandler: def __init__(self, lxmrouter): - self.aspect_filter = APP_NAME+".delivery" + self.aspect_filter = APP_NAME+".delivery" self.receive_path_responses = True - self.lxmrouter = lxmrouter + self.lxmrouter = lxmrouter def received_announce(self, destination_hash, announced_identity, app_data): for 
lxmessage in self.lxmrouter.pending_outbound: - if destination_hash == lxmessage.destination_hash: + if destination_hash == lxmessage.destination_hash: if lxmessage.method == LXMessage.DIRECT or lxmessage.method == LXMessage.OPPORTUNISTIC: lxmessage.next_delivery_attempt = time.time() @@ -34,9 +34,9 @@ class LXMFDeliveryAnnounceHandler: class LXMFPropagationAnnounceHandler: def __init__(self, lxmrouter): - self.aspect_filter = APP_NAME+".propagation" + self.aspect_filter = APP_NAME+".propagation" self.receive_path_responses = True - self.lxmrouter = lxmrouter + self.lxmrouter = lxmrouter def received_announce(self, destination_hash, announced_identity, app_data, announce_packet_hash, is_path_response): if not is_path_response: @@ -51,7 +51,7 @@ class LXMFPropagationAnnounceHandler: propagation_sync_limit = int(data[4]) propagation_stamp_cost = int(data[5][0]) propagation_stamp_cost_flexibility = int(data[5][1]) - metadata = data[6] + metadata = data[6] if destination_hash in self.lxmrouter.static_peers: self.lxmrouter.peer(destination_hash=destination_hash, @@ -84,4 +84,4 @@ class LXMFPropagationAnnounceHandler: except Exception as e: RNS.log("Error while evaluating propagation node announce, ignoring announce.", RNS.LOG_DEBUG) - RNS.log("The contained exception was: "+str(e), RNS.LOG_DEBUG) + RNS.log(f"The contained exception was: {str(e)}", RNS.LOG_DEBUG) diff --git a/LXMF/LXMPeer.py b/LXMF/LXMPeer.py index 9f2519d..e86ae84 100644 --- a/LXMF/LXMPeer.py +++ b/LXMF/LXMPeer.py @@ -174,11 +174,11 @@ class LXMPeer: self.handled_messages_queue = deque() self.unhandled_messages_queue = deque() - self.offered = 0 # Messages offered to this peer - self.outgoing = 0 # Messages transferred to this peer - self.incoming = 0 # Messages received from this peer - self.rx_bytes = 0 # Bytes received from this peer - self.tx_bytes = 0 # Bytes sent to this peer + self.offered = 0 # Messages offered to this peer + self.outgoing = 0 # Messages transferred to this peer + self.incoming = 0 # Messages received from this peer + self.rx_bytes = 0 # Bytes received from this peer + self.tx_bytes = 0 # Bytes sent to this peer self._hm_count = 0 self._um_count = 0 @@ -243,29 +243,29 @@ class LXMPeer: purged_ids = [] for transient_id in self.unhandled_messages: if transient_id in self.router.propagation_entries: - unhandled_entry = [ - transient_id, - self.router.get_weight(transient_id), - self.router.get_size(transient_id), - ] + unhandled_entry = [ transient_id, + self.router.get_weight(transient_id), + self.router.get_size(transient_id) ] + unhandled_entries.append(unhandled_entry) - else: - purged_ids.append(transient_id) + + else: purged_ids.append(transient_id) for transient_id in purged_ids: RNS.log("Dropping unhandled message "+RNS.prettyhexrep(transient_id)+" for peer "+RNS.prettyhexrep(self.destination_hash)+" since it no longer exists in the message store.", RNS.LOG_DEBUG) self.remove_unhandled_message(transient_id) unhandled_entries.sort(key=lambda e: e[1], reverse=False) - per_message_overhead = 16 # Really only 2 bytes, but set a bit higher for now - cumulative_size = 24 # Initialised to highest reasonable binary structure overhead + per_message_overhead = 16 # Really only 2 bytes, but set a bit higher for now + cumulative_size = 24 # Initialised to highest reasonable binary structure overhead RNS.log(f"Syncing to peer with per-message limit {RNS.prettysize(self.propagation_transfer_limit*1000)} and sync limit {RNS.prettysize(self.propagation_sync_limit*1000)}") # TODO: Remove debug + for unhandled_entry 
in unhandled_entries: - transient_id = unhandled_entry[0] - weight = unhandled_entry[1] - lxm_size = unhandled_entry[2] + transient_id = unhandled_entry[0] + weight = unhandled_entry[1] + lxm_size = unhandled_entry[2] lxm_transfer_size = lxm_size+per_message_overhead - next_size = cumulative_size + lxm_transfer_size + next_size = cumulative_size + lxm_transfer_size if self.propagation_transfer_limit != None and lxm_transfer_size > (self.propagation_transfer_limit*1000): self.remove_unhandled_message(transient_id) @@ -288,14 +288,11 @@ class LXMPeer: else: RNS.log("Postponing sync with peer "+RNS.prettyhexrep(self.destination_hash)+" for "+RNS.prettytime(self.next_sync_attempt-time.time())+" due to previous failures", RNS.LOG_DEBUG) - if self.last_sync_attempt > self.last_heard: - self.alive = False + if self.last_sync_attempt > self.last_heard: self.alive = False def request_failed(self, request_receipt): - RNS.log("Sync request to peer "+str(self.destination)+" failed", RNS.LOG_DEBUG) - if self.link != None: - self.link.teardown() - + RNS.log(f"Sync request to peer {self.destination} failed", RNS.LOG_DEBUG) + if self.link != None: self.link.teardown() self.state = LXMPeer.IDLE def offer_response(self, request_receipt): @@ -389,18 +386,16 @@ class LXMPeer: if self.currently_transferring_messages == None: RNS.log(f"Sync transfer completed on {self}, but transferred message index was unavailable. Aborting.", RNS.LOG_ERROR) if self.link != None: self.link.teardown() - self.link = None + self.link = None self.state = LXMPeer.IDLE for transient_id in self.currently_transferring_messages: self.add_handled_message(transient_id) self.remove_unhandled_message(transient_id) - if self.link != None: - self.link.teardown() - - self.link = None - self.state = LXMPeer.IDLE + if self.link != None: self.link.teardown() + self.link = None + self.state = LXMPeer.IDLE rate_str = "" if self.current_sync_transfer_started != None: @@ -408,14 +403,14 @@ class LXMPeer: rate_str = f" at {RNS.prettyspeed(self.sync_transfer_rate)}" RNS.log(f"Syncing {len(self.currently_transferring_messages)} messages to peer {RNS.prettyhexrep(self.destination_hash)} completed{rate_str}", RNS.LOG_VERBOSE) - self.alive = True + self.alive = True self.last_heard = time.time() self.offered += len(self.last_offer) self.outgoing += len(self.currently_transferring_messages) self.tx_bytes += resource.get_data_size() self.currently_transferring_messages = None - self.current_sync_transfer_started = None + self.current_sync_transfer_started = None if self.sync_strategy == self.STRATEGY_PERSISTENT: if self.unhandled_message_count > 0: self.sync() @@ -423,10 +418,10 @@ class LXMPeer: else: RNS.log("Resource transfer for LXMF peer sync failed to "+str(self.destination), RNS.LOG_VERBOSE) if self.link != None: self.link.teardown() - self.link = None - self.state = LXMPeer.IDLE + self.link = None + self.state = LXMPeer.IDLE self.currently_transferring_messages = None - self.current_sync_transfer_started = None + self.current_sync_transfer_started = None def link_established(self, link): self.link.identify(self.router.identity) @@ -439,7 +434,7 @@ class LXMPeer: self.sync() def link_closed(self, link): - self.link = None + self.link = None self.state = LXMPeer.IDLE def queued_items(self): @@ -452,19 +447,14 @@ class LXMPeer: self.handled_messages_queue.append(transient_id) def process_queues(self): - if len(self.unhandled_messages_queue) > 0 or len(self.handled_messages_queue) > 0: - # TODO: Remove debug - # st = time.time(); lu = 
len(self.unhandled_messages_queue); lh = len(self.handled_messages_queue) - + if len(self.unhandled_messages_queue) > 0 or len(self.handled_messages_queue) > 0: handled_messages = self.handled_messages unhandled_messages = self.unhandled_messages while len(self.handled_messages_queue) > 0: transient_id = self.handled_messages_queue.pop() - if not transient_id in handled_messages: - self.add_handled_message(transient_id) - if transient_id in unhandled_messages: - self.remove_unhandled_message(transient_id) + if not transient_id in handled_messages: self.add_handled_message(transient_id) + if transient_id in unhandled_messages: self.remove_unhandled_message(transient_id) while len(self.unhandled_messages_queue) > 0: transient_id = self.unhandled_messages_queue.pop() @@ -472,8 +462,6 @@ class LXMPeer: self.add_unhandled_message(transient_id) del handled_messages, unhandled_messages - # TODO: Remove debug - # RNS.log(f"{self} processed {lh}/{lu} in {RNS.prettytime(time.time()-st)}") @property def handled_messages(self): @@ -493,16 +481,12 @@ class LXMPeer: @property def handled_message_count(self): - if not self._hm_counts_synced: - self._update_counts() - + if not self._hm_counts_synced: self._update_counts() return self._hm_count @property def unhandled_message_count(self): - if not self._um_counts_synced: - self._update_counts() - + if not self._um_counts_synced: self._update_counts() return self._um_count @property @@ -541,7 +525,5 @@ class LXMPeer: self._um_counts_synced = False def __str__(self): - if self.destination_hash: - return RNS.prettyhexrep(self.destination_hash) - else: - return "" \ No newline at end of file + if self.destination_hash: return RNS.prettyhexrep(self.destination_hash) + else: return "" \ No newline at end of file From c84aea745a682e1f834ee7a6f6a5d8023792fa6d Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 30 Oct 2025 21:19:38 +0100 Subject: [PATCH 075/123] Improved auto-peering on inbound PN sync. Added support for persisting and loading transient message stamp status. Implemented getting transient message stamp value. 
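The stamp persistence added in this commit is carried by the message store filename, which per the diffs below becomes "<transient_id_hex>_<received_timestamp>" with an optional "_<stamp_value>" suffix appended when a positive stamp value is known. A short sketch of reading that convention back, using a hypothetical helper name:

def parse_store_filename(filename):
    # Hypothetical helper, not part of LXMF: mirrors the parsing done while
    # indexing the message store in this commit.
    components = filename.split("_")
    transient_id = bytes.fromhex(components[0])   # transient ID hash, hex encoded
    received = float(components[1])               # reception timestamp
    stamp_value = int(components[2]) if len(components) >= 3 else None
    return transient_id, received, stamp_value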
--- LXMF/LXMF.py | 3 +- LXMF/LXMPeer.py | 2 +- LXMF/LXMRouter.py | 143 ++++++++++++++++++++++++++-------------------- LXMF/LXStamper.py | 10 ++-- 4 files changed, 89 insertions(+), 69 deletions(-) diff --git a/LXMF/LXMF.py b/LXMF/LXMF.py index 6369d69..b608ceb 100644 --- a/LXMF/LXMF.py +++ b/LXMF/LXMF.py @@ -149,7 +149,8 @@ def stamp_cost_from_app_data(app_data=None): def pn_announce_data_is_valid(data): try: - if type(data) == bytes: data = msgpack.unpackb(data) + if type(data) != bytes: return False + else: data = msgpack.unpackb(data) if len(data) < 7: raise ValueError("Invalid announce data: Insufficient peer data") else: try: int(data[1]) diff --git a/LXMF/LXMPeer.py b/LXMF/LXMPeer.py index e86ae84..e767313 100644 --- a/LXMF/LXMPeer.py +++ b/LXMF/LXMPeer.py @@ -259,7 +259,7 @@ class LXMPeer: per_message_overhead = 16 # Really only 2 bytes, but set a bit higher for now cumulative_size = 24 # Initialised to highest reasonable binary structure overhead RNS.log(f"Syncing to peer with per-message limit {RNS.prettysize(self.propagation_transfer_limit*1000)} and sync limit {RNS.prettysize(self.propagation_sync_limit*1000)}") # TODO: Remove debug - + for unhandled_entry in unhandled_entries: transient_id = unhandled_entry[0] weight = unhandled_entry[1] diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 816d949..ace090a 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -15,6 +15,7 @@ import RNS.vendor.umsgpack as msgpack from .LXMF import APP_NAME from .LXMF import FIELD_TICKET +from .LXMF import pn_announce_data_is_valid from .LXMPeer import LXMPeer from .LXMessage import LXMessage @@ -285,13 +286,13 @@ class LXMRouter: node_state = self.propagation_node and not self.from_static_only stamp_cost = [self.propagation_stamp_cost, self.propagation_stamp_cost_flexibility] metadata = {} - announce_data = [ False, # Legacy LXMF PN support - int(time.time()), # Current node timebase - node_state, # Boolean flag signalling propagation node state - self.propagation_per_transfer_limit, # Per-transfer limit for message propagation in kilobytes - self.propagation_per_sync_limit, # Limit for incoming propagation node syncs - stamp_cost, # Propagation stamp cost for this node - metadata ] # Node metadata + announce_data = [ False, # 0: Legacy LXMF PN support + int(time.time()), # 1: Current node timebase + node_state, # 2: Boolean flag signalling propagation node state + self.propagation_per_transfer_limit, # 3: Per-transfer limit for message propagation in kilobytes + self.propagation_per_sync_limit, # 4: Limit for incoming propagation node syncs + stamp_cost, # 5: Propagation stamp cost for this node + metadata ] # 6: Node metadata data = msgpack.packb(announce_data) self.propagation_destination.announce(app_data=data) @@ -486,19 +487,21 @@ class LXMRouter: st = time.time(); RNS.log("Indexing messagestore...", RNS.LOG_NOTICE) for filename in os.listdir(self.messagepath): components = filename.split("_") - if len(components) == 2: + if len(components) >= 2: if float(components[1]) > 0: if len(components[0]) == RNS.Identity.HASHLENGTH//8*2: try: - transient_id = bytes.fromhex(components[0]) - received = float(components[1]) - - filepath = self.messagepath+"/"+filename - msg_size = os.path.getsize(filepath) - file = open(filepath, "rb") - destination_hash = file.read(LXMessage.DESTINATION_LENGTH) + transient_id = bytes.fromhex(components[0]) + received = float(components[1]) + filepath = self.messagepath+"/"+filename + msg_size = os.path.getsize(filepath) + file = open(filepath, "rb") + 
destination_hash = file.read(LXMessage.DESTINATION_LENGTH) file.close() + if len(components) >= 3: stamp_value = int(components[2]) + else: stamp_value = None + self.propagation_entries[transient_id] = [ destination_hash, # 0: Destination hash filepath, # 1: Storage location @@ -506,6 +509,7 @@ class LXMRouter: msg_size, # 3: Message size [], # 4: Handled peers [], # 5: Unhandled peers + stamp_value, # 6: Stamp value ] except Exception as e: @@ -923,22 +927,26 @@ class LXMRouter: return msgpack.packb(peer_data) - def get_weight(self, transient_id): - dst_hash = self.propagation_entries[transient_id][0] - lxm_rcvd = self.propagation_entries[transient_id][2] + def get_size(self, transient_id): lxm_size = self.propagation_entries[transient_id][3] + return lxm_size - now = time.time() + def get_weight(self, transient_id): + dst_hash = self.propagation_entries[transient_id][0] + lxm_rcvd = self.propagation_entries[transient_id][2] + lxm_size = self.propagation_entries[transient_id][3] + + now = time.time() age_weight = max(1, (now - lxm_rcvd)/60/60/24/4) - if dst_hash in self.prioritised_list: - priority_weight = 0.1 - else: - priority_weight = 1.0 + if dst_hash in self.prioritised_list: priority_weight = 0.1 + else: priority_weight = 1.0 - weight = priority_weight * age_weight * lxm_size + return priority_weight * age_weight * lxm_size - return weight + def get_stamp_value(self, transient_id): + if not transient_id in self.propagation_entries: return None + else: return self.propagation_entries[transient_id][6] def generate_ticket(self, destination_hash, expiry=LXMessage.TICKET_EXPIRY): now = time.time() @@ -1003,10 +1011,6 @@ class LXMRouter: else: return available_tickets - def get_size(self, transient_id): - lxm_size = self.propagation_entries[transient_id][3] - return lxm_size - def clean_message_store(self): RNS.log("Cleaning message store", RNS.LOG_VERBOSE) # Check and remove expired messages @@ -1993,18 +1997,22 @@ class LXMRouter: def propagation_packet(self, data, packet): try: - if packet.destination_type != RNS.Destination.LINK: - pass + if packet.destination_type != RNS.Destination.LINK: return else: - data = msgpack.unpackb(data) + data = msgpack.unpackb(data) remote_timebase = data[0] + messages = data[1] + + ####################################### + # TODO: Check propagation stamps here # + ####################################### + stamps_valid = False - messages = data[1] for lxmf_data in messages: self.lxmf_propagation(lxmf_data) self.client_propagation_messages_received += 1 - packet.prove() + if stamps_valid: packet.prove() except Exception as e: RNS.log("Exception occurred while parsing incoming LXMF propagation data.", RNS.LOG_ERROR) @@ -2053,34 +2061,49 @@ class LXMRouter: if type(data) == list and len(data) == 2 and type(data[0] == float) and type(data[1]) == list: # This is a series of propagation messages from a peer or originator - remote_timebase = data[0] - remote_hash = None - remote_str = "unknown peer" remote_identity = resource.link.get_remote_identity() + remote_timebase = data[0] + messages = data[1] + remote_hash = None + remote_str = "unknown peer" if remote_identity != None: remote_destination = RNS.Destination(remote_identity, RNS.Destination.OUT, RNS.Destination.SINGLE, APP_NAME, "propagation") - remote_hash = remote_destination.hash - remote_str = RNS.prettyhexrep(remote_hash) + remote_hash = remote_destination.hash + remote_app_data = RNS.Identity.recall_app_data(remote_hash) + remote_str = RNS.prettyhexrep(remote_hash) - if not remote_hash in 
self.peers: - if self.autopeer and RNS.Transport.hops_to(remote_hash) <= self.autopeer_maxdepth: - # TODO: Query cache for an announce and get propagation - # transfer limit from that. For now, initialise it to a - # sane default value, and wait for an announce to arrive - # that will update the peering config to the actual limit. - propagation_transfer_limit = LXMRouter.PROPAGATION_LIMIT//4 - wanted_inbound_peers = None - self.peer(remote_hash, remote_timebase, propagation_transfer_limit, wanted_inbound_peers) + if remote_hash in self.peers: remote_str = f"peer {remote_str}" else: - remote_str = f"peer {remote_str}" + if pn_announce_data_is_valid(remote_app_data): + # 1: Current node timebase + # 2: Boolean flag signalling propagation node state + # 3: Per-transfer limit for message propagation in kilobytes + # 4: Limit for incoming propagation node syncs + # 5: Propagation stamp cost for this node + # 6: Node metadata + if remote_app_data[2] and self.autopeer and RNS.Transport.hops_to(remote_hash) <= self.autopeer_maxdepth: + remote_timebase = remote_app_data[1] + remote_transfer_limit = remote_app_data[3] + remote_sync_limit = remote_app_data[4] + remote_stamp_cost = remote_app_data[5][0] + remote_stamp_flex = remote_app_data[5][1] + remote_metadata = remote_app_data[6] + + RNS.log(f"Auto-peering with {remote_str} discovered via incoming sync", RNS.LOG_DEBUG) # TODO: Remove debug + self.peer(remote_hash, remote_timebase, remote_transfer_limit, remote_sync_limit, remote_stamp_cost, remote_stamp_flex, remote_metadata) - messages = data[1] ms = "" if len(messages) == 1 else "s" RNS.log(f"Received {len(messages)} message{ms} from {remote_str}", RNS.LOG_VERBOSE) + + ####################################### + # TODO: Check propagation stamps here # + ####################################### + for lxmf_data in messages: peer = None transient_id = RNS.Identity.full_hash(lxmf_data) + if remote_hash != None and remote_hash in self.peers: peer = self.peers[remote_hash] peer.incoming += 1 @@ -2093,8 +2116,7 @@ class LXMRouter: self.client_propagation_messages_received += 1 self.lxmf_propagation(lxmf_data, from_peer=peer) - if peer != None: - peer.queue_handled_message(transient_id) + if peer != None: peer.queue_handled_message(transient_id) else: RNS.log("Invalid data structure received at propagation destination, ignoring", RNS.LOG_DEBUG) @@ -2121,10 +2143,9 @@ class LXMRouter: if peer != from_peer: peer.queue_unhandled_message(transient_id) - def lxmf_propagation(self, lxmf_data, signal_local_delivery=None, signal_duplicate=None, allow_duplicate=False, is_paper_message=False, from_peer=None): - no_stamp_enforcement = False - if is_paper_message: - no_stamp_enforcement = True + def lxmf_propagation(self, lxmf_data, signal_local_delivery=None, signal_duplicate=None, allow_duplicate=False, is_paper_message=False, from_peer=None, stamp_value=None): + if is_paper_message: no_stamp_enforcement = True + else: no_stamp_enforcement = False try: if len(lxmf_data) >= LXMessage.LXMF_OVERHEAD: @@ -2150,13 +2171,13 @@ class LXMRouter: else: if self.propagation_node: - file_path = self.messagepath+"/"+RNS.hexrep(transient_id, delimit=False)+"_"+str(received) - msg_file = open(file_path, "wb") - msg_file.write(lxmf_data) - msg_file.close() + value_component = f"_{stamp_value}" if stamp_value and stamp_value > 0 else "" + file_path = f"{self.messagepath}/{RNS.hexrep(transient_id, delimit=False)}_{received}{value_component}" + msg_file = open(file_path, "wb") + msg_file.write(lxmf_data); msg_file.close() 
RNS.log("Received propagated LXMF message "+RNS.prettyhexrep(transient_id)+", adding to peer distribution queues...", RNS.LOG_EXTREME) - self.propagation_entries[transient_id] = [destination_hash, file_path, time.time(), len(lxmf_data), [], []] + self.propagation_entries[transient_id] = [destination_hash, file_path, time.time(), len(lxmf_data), [], [], stamp_value] self.enqueue_peer_distribution(transient_id, from_peer) else: diff --git a/LXMF/LXStamper.py b/LXMF/LXStamper.py index 2db0598..ecf75a3 100644 --- a/LXMF/LXStamper.py +++ b/LXMF/LXStamper.py @@ -17,12 +17,10 @@ def stamp_workblock(message_id, expand_rounds=WORKBLOCK_EXPAND_ROUNDS): wb_st = time.time() workblock = b"" for n in range(expand_rounds): - workblock += RNS.Cryptography.hkdf( - length=256, - derive_from=message_id, - salt=RNS.Identity.full_hash(message_id+msgpack.packb(n)), - context=None, - ) + workblock += RNS.Cryptography.hkdf(length=256, + derive_from=message_id, + salt=RNS.Identity.full_hash(message_id+msgpack.packb(n)), + context=None) wb_time = time.time() - wb_st # RNS.log(f"Stamp workblock size {RNS.prettysize(len(workblock))}, generated in {round(wb_time*1000,2)}ms", RNS.LOG_DEBUG) From 60bf99d15108330ae443bb56417984b9c471a432 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Fri, 31 Oct 2025 00:25:46 +0100 Subject: [PATCH 076/123] Cleanup --- LXMF/LXMRouter.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index ace090a..fd6130d 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -905,11 +905,6 @@ class LXMRouter: self.save_outbound_stamp_costs() threading.Thread(target=self.save_outbound_stamp_costs, daemon=True).start() - def get_wanted_inbound_peers(self): - # TODO: Implement/rethink. - # Probably not necessary anymore. 
- return None - def get_announce_app_data(self, destination_hash): if destination_hash in self.delivery_destinations: delivery_destination = self.delivery_destinations[destination_hash] From ebc8bb33c25445f94305e400e674e3fab81a0104 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Fri, 31 Oct 2025 00:28:06 +0100 Subject: [PATCH 077/123] Cleanup --- LXMF/LXMRouter.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index fd6130d..7e4627a 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -901,8 +901,7 @@ class LXMRouter: RNS.log(f"Updating outbound stamp cost for {RNS.prettyhexrep(destination_hash)} to {stamp_cost}", RNS.LOG_DEBUG) self.outbound_stamp_costs[destination_hash] = [time.time(), stamp_cost] - def job(): - self.save_outbound_stamp_costs() + def job(): self.save_outbound_stamp_costs() threading.Thread(target=self.save_outbound_stamp_costs, daemon=True).start() def get_announce_app_data(self, destination_hash): From 9c646aead7f983f181c6aad5cde0359950aecb5d Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Fri, 31 Oct 2025 02:19:24 +0100 Subject: [PATCH 078/123] Stamp validation on incoming propagation node transfers --- LXMF/LXMRouter.py | 21 ++++++++----- LXMF/LXStamper.py | 79 ++++++++++++++++++++--------------------------- 2 files changed, 48 insertions(+), 52 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 7e4627a..d4108fb 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -2000,10 +2000,13 @@ class LXMRouter: ####################################### # TODO: Check propagation stamps here # ####################################### - stamps_valid = False + target_cost = max(0, self.propagation_stamp_cost-self.propagation_stamp_cost_flexibility) + validated_messages = LXStamper.validate_pn_stamps(messages, target_cost) - for lxmf_data in messages: - self.lxmf_propagation(lxmf_data) + for validated_entry in validated_messages: + lxmf_data = validated_entry[1] + stamp_value = validated_entry[2] + self.lxmf_propagation(lxmf_data, stamp_value=stamp_value) self.client_propagation_messages_received += 1 if stamps_valid: packet.prove() @@ -2093,10 +2096,14 @@ class LXMRouter: ####################################### # TODO: Check propagation stamps here # ####################################### + target_cost = max(0, self.propagation_stamp_cost-self.propagation_stamp_cost_flexibility) + validated_messages = LXStamper.validate_pn_stamps(messages, target_cost) - for lxmf_data in messages: - peer = None - transient_id = RNS.Identity.full_hash(lxmf_data) + for validated_entry in validated_messages: + transient_id = validated_entry[0] + lxmf_data = validated_entry[1] + stamp_value = validated_entry[2] + peer = None if remote_hash != None and remote_hash in self.peers: peer = self.peers[remote_hash] @@ -2109,7 +2116,7 @@ class LXMRouter: else: self.client_propagation_messages_received += 1 - self.lxmf_propagation(lxmf_data, from_peer=peer) + self.lxmf_propagation(lxmf_data, from_peer=peer, stamp_value=stamp_value) if peer != None: peer.queue_handled_message(transient_id) else: diff --git a/LXMF/LXStamper.py b/LXMF/LXStamper.py index ecf75a3..9d85329 100644 --- a/LXMF/LXStamper.py +++ b/LXMF/LXStamper.py @@ -4,6 +4,7 @@ import RNS.vendor.umsgpack as msgpack import os import time import math +import itertools import multiprocessing WORKBLOCK_EXPAND_ROUNDS = 3000 @@ -43,46 +44,42 @@ def stamp_valid(stamp, target_cost, workblock): if int.from_bytes(result, byteorder="big") > target: return False else: return True 
-def validate_pn_stamp(transient_id, stamp): - target_cost = 8 - workblock = stamp_workblock(transient_id, expand_rounds=WORKBLOCK_EXPAND_ROUNDS_PN) - if stamp_valid(stamp, target_cost, workblock): - RNS.log(f"Stamp on {RNS.prettyhexrep(transient_id)} validated", RNS.LOG_DEBUG) - value = stamp_value(workblock, stamp) - return True - - return False - -def validate_pn_stamps_job_simple(transient_stamps): - for entry in transient_stamps: - # Get transient ID and stamp for validation - transient_id = transient_stamps[0] - stamp = transient_stamps[1] +def validate_pn_stamp(transient_data, target_cost): + from .LXMessage import LXMessage + if len(transient_data) <= LXMessage.LXMF_OVERHEAD+STAMP_SIZE: return False, None, None + else: + lxm_data = transient_data[:-STAMP_SIZE] + stamp = transient_data[-STAMP_SIZE:] + transient_id = RNS.Identity.full_hash(lxm_data) + workblock = stamp_workblock(transient_id, expand_rounds=WORKBLOCK_EXPAND_ROUNDS_PN) - # Store validation result back into list - transient_stamps[2] = validate_pn_stamp(transient_id, stamp) + if not stamp_valid(stamp, target_cost, workblock): return False, None, None + else: + value = stamp_value(workblock, stamp) + return True, transient_id, value - return transient_stamps +def validate_pn_stamps_job_simple(transient_list, target_cost): + validated_messages = [] + for transient_data in transient_list: + stamp_valid, transient_id, value = validate_pn_stamp(transient_data, target_cost) + if stamp_valid: validated_messages.append([transient_id, transient_data, value]) -def _validate_single_pn_stamp_entry(entry): - transient_id = entry[0] - stamp = entry[1] - entry[2] = validate_pn_stamp(transient_id, stamp) - return entry + return validated_messages -def validate_pn_stamps_job_multip(transient_stamps): +def validate_pn_stamps_job_multip(transient_list, target_cost): cores = multiprocessing.cpu_count() - pool_count = min(cores, math.ceil(len(transient_stamps) / PN_VALIDATION_POOL_MIN_SIZE)) + pool_count = min(cores, math.ceil(len(transient_list) / PN_VALIDATION_POOL_MIN_SIZE)) - RNS.log(f"Validating {len(transient_stamps)} stamps using {pool_count} processes...", RNS.LOG_VERBOSE) - with multiprocessing.Pool(pool_count) as p: validated_entries = p.map(_validate_single_pn_stamp_entry, transient_stamps) + RNS.log(f"Validating {len(transient_list)} stamps using {pool_count} processes...", RNS.LOG_VERBOSE) + with multiprocessing.Pool(pool_count) as p: + validated_entries = p.starmap(validate_pn_stamp, zip(transient_list, itertools.repeat(target_cost))) - return validated_entries + return [e for e in validated_entries if e[0] == True] -def validate_pn_stamps(transient_stamps): +def validate_pn_stamps(transient_list, target_cost): non_mp_platform = RNS.vendor.platformutils.is_android() - if len(transient_stamps) <= PN_VALIDATION_POOL_MIN_SIZE or non_mp_platform: validate_pn_stamps_job_simple(transient_stamps) - else: validate_pn_stamps_job_multip(transient_stamps) + if len(transient_list) <= PN_VALIDATION_POOL_MIN_SIZE or non_mp_platform: return validate_pn_stamps_job_simple(transient_list, target_cost) + else: return validate_pn_stamps_job_multip(transient_list, target_cost) def generate_stamp(message_id, stamp_cost): RNS.log(f"Generating stamp with cost {stamp_cost} for {RNS.prettyhexrep(message_id)}...", RNS.LOG_DEBUG) @@ -93,19 +90,13 @@ def generate_stamp(message_id, stamp_cost): rounds = 0 value = 0 - if RNS.vendor.platformutils.is_windows() or RNS.vendor.platformutils.is_darwin(): - stamp, rounds = job_simple(stamp_cost, workblock, 
message_id) - - elif RNS.vendor.platformutils.is_android(): - stamp, rounds = job_android(stamp_cost, workblock, message_id) - - else: - stamp, rounds = job_linux(stamp_cost, workblock, message_id) + if RNS.vendor.platformutils.is_windows() or RNS.vendor.platformutils.is_darwin(): stamp, rounds = job_simple(stamp_cost, workblock, message_id) + elif RNS.vendor.platformutils.is_android(): stamp, rounds = job_android(stamp_cost, workblock, message_id) + else: stamp, rounds = job_linux(stamp_cost, workblock, message_id) duration = time.time() - start_time speed = rounds/duration - if stamp != None: - value = stamp_value(workblock, stamp) + if stamp != None: value = stamp_value(workblock, stamp) RNS.log(f"Stamp with value {value} generated in {RNS.prettytime(duration)}, {rounds} rounds, {int(speed)} rounds per second", RNS.LOG_DEBUG) @@ -161,10 +152,8 @@ def job_simple(stamp_cost, workblock, message_id): def sv(s, c, w): target = 0b1<<256-c; m = w+s result = RNS.Identity.full_hash(m) - if int.from_bytes(result, byteorder="big") > target: - return False - else: - return True + if int.from_bytes(result, byteorder="big") > target: return False + else: return True while not sv(pstamp, stamp_cost, workblock) and not active_jobs[message_id]: pstamp = os.urandom(256//8); rounds += 1 From 434267784d65682b24782eed73f00666468ed02e Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Fri, 31 Oct 2025 13:53:59 +0100 Subject: [PATCH 079/123] Implemented propagation node peering key generation and peering cost signalling --- LXMF/Handlers.py | 3 + LXMF/LXMF.py | 16 ++--- LXMF/LXMPeer.py | 133 ++++++++++++++++++++++++++++++++++------- LXMF/LXMRouter.py | 126 ++++++++++++++++++++++---------------- LXMF/LXStamper.py | 31 ++++++---- LXMF/Utilities/lxmd.py | 44 +++++++++++++- 6 files changed, 257 insertions(+), 96 deletions(-) diff --git a/LXMF/Handlers.py b/LXMF/Handlers.py index fc980c3..f55cc76 100644 --- a/LXMF/Handlers.py +++ b/LXMF/Handlers.py @@ -51,6 +51,7 @@ class LXMFPropagationAnnounceHandler: propagation_sync_limit = int(data[4]) propagation_stamp_cost = int(data[5][0]) propagation_stamp_cost_flexibility = int(data[5][1]) + peering_cost = int(data[5][2]) metadata = data[6] if destination_hash in self.lxmrouter.static_peers: @@ -60,6 +61,7 @@ class LXMFPropagationAnnounceHandler: propagation_sync_limit=propagation_sync_limit, propagation_stamp_cost=propagation_stamp_cost, propagation_stamp_cost_flexibility=propagation_stamp_cost_flexibility, + peering_cost=peering_cost, metadata=metadata) else: @@ -72,6 +74,7 @@ class LXMFPropagationAnnounceHandler: propagation_sync_limit=propagation_sync_limit, propagation_stamp_cost=propagation_stamp_cost, propagation_stamp_cost_flexibility=propagation_stamp_cost_flexibility, + peering_cost=peering_cost, metadata=metadata) else: diff --git a/LXMF/LXMF.py b/LXMF/LXMF.py index b608ceb..3a20b0e 100644 --- a/LXMF/LXMF.py +++ b/LXMF/LXMF.py @@ -154,18 +154,20 @@ def pn_announce_data_is_valid(data): if len(data) < 7: raise ValueError("Invalid announce data: Insufficient peer data") else: try: int(data[1]) - except: raise ValueError("Invalid announce data: Could not decode peer timebase") + except: raise ValueError("Invalid announce data: Could not decode timebase") if data[2] != True and data[2] != False: raise ValueError("Invalid announce data: Indeterminate propagation node status") try: int(data[3]) - except: raise ValueError("Invalid announce data: Could not decode peer propagation transfer limit") + except: raise ValueError("Invalid announce data: Could not decode 
propagation transfer limit") try: int(data[4]) - except: raise ValueError("Invalid announce data: Could not decode peer propagation sync limit") - if type(data[4]) != list: raise ValueError("Invalid announce data: Could not decode peer stamp costs") + except: raise ValueError("Invalid announce data: Could not decode propagation sync limit") + if type(data[4]) != list: raise ValueError("Invalid announce data: Could not decode stamp costs") try: int(data[5][0]) - except: raise ValueError("Invalid announce data: Could not decode peer target stamp cost") + except: raise ValueError("Invalid announce data: Could not decode target stamp cost") try: int(data[5][1]) - except: raise ValueError("Invalid announce data: Could not decode peer stamp cost flexibility") - if type(data[6]) != dict: raise ValueError("Invalid announce data: Could not decode peer metadata") + except: raise ValueError("Invalid announce data: Could not decode stamp cost flexibility") + try: int(data[5][2]) + except: raise ValueError("Invalid announce data: Could not decode peering cost") + if type(data[6]) != dict: raise ValueError("Invalid announce data: Could not decode metadata") except Exception as e: RNS.log(f"Could not validate propagation node announce data: {e}", RNS.LOG_DEBUG) diff --git a/LXMF/LXMPeer.py b/LXMF/LXMPeer.py index e767313..0fe1e74 100644 --- a/LXMF/LXMPeer.py +++ b/LXMF/LXMPeer.py @@ -1,8 +1,10 @@ import os import time +import threading import RNS import RNS.vendor.umsgpack as msgpack +import LXMF.LXStamper as LXStamper from collections import deque from .LXMF import APP_NAME @@ -20,6 +22,7 @@ class LXMPeer: ERROR_NO_IDENTITY = 0xf0 ERROR_NO_ACCESS = 0xf1 + ERROR_THROTTLED = 0xf2 ERROR_TIMEOUT = 0xfe STRATEGY_LAZY = 0x01 @@ -80,6 +83,11 @@ class LXMPeer: except: peer.propagation_stamp_cost_flexibility = None else: peer.propagation_stamp_cost_flexibility = None + if "peering_cost" in dictionary: + try: peer.peering_cost = int(dictionary["peering_cost"]) + except: peer.peering_cost = None + else: peer.peering_cost = None + if "sync_strategy" in dictionary: try: peer.sync_strategy = int(dictionary["sync_strategy"]) except: peer.sync_strategy = LXMPeer.DEFAULT_SYNC_STRATEGY @@ -97,6 +105,8 @@ class LXMPeer: else: peer.tx_bytes = 0 if "last_sync_attempt" in dictionary: peer.last_sync_attempt = dictionary["last_sync_attempt"] else: peer.last_sync_attempt = 0 + if "peering_key" in dictionary: peer.peering_key = dictionary["peering_key"] + else: peer.peering_key = None hm_count = 0 for transient_id in dictionary["handled_ids"]: @@ -123,6 +133,8 @@ class LXMPeer: dictionary["peering_timebase"] = self.peering_timebase dictionary["alive"] = self.alive dictionary["last_heard"] = self.last_heard + dictionary["sync_strategy"] = self.sync_strategy + dictionary["peering_key"] = self.peering_key dictionary["destination_hash"] = self.destination_hash dictionary["link_establishment_rate"] = self.link_establishment_rate dictionary["sync_transfer_rate"] = self.sync_transfer_rate @@ -130,7 +142,7 @@ class LXMPeer: dictionary["propagation_sync_limit"] = self.propagation_sync_limit dictionary["propagation_stamp_cost"] = self.propagation_stamp_cost dictionary["propagation_stamp_cost_flexibility"] = self.propagation_stamp_cost_flexibility - dictionary["sync_strategy"] = self.sync_strategy + dictionary["peering_cost"] = self.peering_cost dictionary["last_sync_attempt"] = self.last_sync_attempt dictionary["offered"] = self.offered dictionary["outgoing"] = self.outgoing @@ -155,16 +167,18 @@ class LXMPeer: return peer_bytes def 
__init__(self, router, destination_hash, sync_strategy=DEFAULT_SYNC_STRATEGY): - self.alive = False - self.last_heard = 0 + self.alive = False + self.last_heard = 0 self.sync_strategy = sync_strategy + self.peering_key = None + self.peering_cost = None - self.next_sync_attempt = 0 - self.last_sync_attempt = 0 - self.sync_backoff = 0 - self.peering_timebase = 0 + self.next_sync_attempt = 0 + self.last_sync_attempt = 0 + self.sync_backoff = 0 + self.peering_timebase = 0 self.link_establishment_rate = 0 - self.sync_transfer_rate = 0 + self.sync_transfer_rate = 0 self.propagation_transfer_limit = None self.propagation_sync_limit = None @@ -185,6 +199,8 @@ class LXMPeer: self._hm_counts_synced = False self._um_counts_synced = False + self._peering_key_lock = threading.Lock() + self.link = None self.state = LXMPeer.IDLE @@ -199,11 +215,74 @@ class LXMPeer: self.destination = None RNS.log(f"Could not recall identity for LXMF propagation peer {RNS.prettyhexrep(self.destination_hash)}, will retry identity resolution on next sync", RNS.LOG_WARNING) + def peering_key_ready(self): + if not self.peering_cost: return False + if type(self.peering_key) == list and len(self.peering_key) == 2: + value = self.peering_key[1] + if value >= self.peering_cost: return True + else: + RNS.log(f"Peering key value mismatch for {self}. Current value is {value}, but peer requires {self.peering_cost}. Scheduling regeneration...", RNS.LOG_WARNING) + self.peering_key = None + + return False + + def peering_key_value(self): + if type(self.peering_key) == list and len(self.peering_key) == 2: return self.peering_key[1] + else: return None + + def generate_peering_key(self): + if self.peering_cost == None: return False + with self._peering_key_lock: + if self.peering_key != None: return True + else: + RNS.log(f"Generating peering key for {self}", RNS.LOG_NOTICE) + if self.router.identity == None: + RNS.log(f"Could not update peering key for {self} since the local LXMF router identity is not configured", RNS.LOG_ERROR) + return False + + if self.identity == None: + self.identity = RNS.Identity.recall(destination_hash) + if self.identity == None: + RNS.log(f"Could not update peering key for {self} since its identity could not be recalled", RNS.LOG_ERROR) + return False + + key_material = self.identity.hash+self.router.identity.hash + peering_key, value = LXStamper.generate_stamp(key_material, self.peering_cost, expand_rounds=LXStamper.WORKBLOCK_EXPAND_ROUNDS_PEERING) + if value >= self.peering_cost: + self.peering_key = [peering_key, value] + RNS.log(f"Peering key successfully generated for {self}", RNS.LOG_NOTICE) + return True + + return False + def sync(self): RNS.log("Initiating LXMF Propagation Node sync with peer "+RNS.prettyhexrep(self.destination_hash), RNS.LOG_DEBUG) self.last_sync_attempt = time.time() - if time.time() > self.next_sync_attempt: + sync_time_reached = time.time() > self.next_sync_attempt + stamp_costs_known = self.propagation_stamp_cost != None and self.propagation_stamp_cost_flexibility != None and self.peering_cost != None + peering_key_ready = self.peering_key_ready() + sync_checks = sync_time_reached and stamp_costs_known and peering_key_ready + + if not sync_checks: + try: + if not sync_time_reached: + postpone_reason = " due to previous failures" + if self.last_sync_attempt > self.last_heard: self.alive = False + elif not stamp_costs_known: + postpone_reason = " since its required stamp costs are not yet known" + elif not peering_key_ready: + postpone_reason = " since a peering key has not been 
generated yet" + def job(): self.generate_peering_key() + threading.Thread(target=job, daemon=True).start() + + delay = self.next_sync_attempt-time.time() + postpone_delay = " for {RNS.prettytime({delay})}" if delay > 0 else "" + RNS.log(f"Postponing sync with peer {RNS.prettyhexrep(self.destination_hash)}{postpone_delay}{postpone_reason}", RNS.LOG_DEBUG) + except Exception as e: + RNS.trace_exception(e) + + else: if not RNS.Transport.has_path(self.destination_hash): RNS.log("No path to peer "+RNS.prettyhexrep(self.destination_hash)+" exists, requesting...", RNS.LOG_DEBUG) RNS.Transport.request_path(self.destination_hash) @@ -219,6 +298,10 @@ class LXMPeer: self.destination = RNS.Destination(self.identity, RNS.Destination.OUT, RNS.Destination.SINGLE, APP_NAME, "propagation") if self.destination != None: + if len(self.unhandled_messages) == 0: + RNS.log(f"Sync requested for {self}, but no unhandled messages exist for peer. Sync complete.", RNS.LOG_DEBUG) + return + if len(self.unhandled_messages) > 0: if self.currently_transferring_messages != None: RNS.log(f"Sync requested for {self}, but current message transfer index was not clear. Aborting.", RNS.LOG_ERROR) @@ -236,23 +319,31 @@ class LXMPeer: self.alive = True self.last_heard = time.time() self.sync_backoff = 0 + min_accepted_cost = min(0, self.propagation_stamp_cost-self.propagation_stamp_cost_flexibility) - RNS.log("Synchronisation link to peer "+RNS.prettyhexrep(self.destination_hash)+" established, preparing request...", RNS.LOG_DEBUG) + RNS.log("Synchronisation link to peer "+RNS.prettyhexrep(self.destination_hash)+" established, preparing sync offer...", RNS.LOG_DEBUG) unhandled_entries = [] - unhandled_ids = [] - purged_ids = [] + unhandled_ids = [] + purged_ids = [] + low_value_ids = [] for transient_id in self.unhandled_messages: if transient_id in self.router.propagation_entries: - unhandled_entry = [ transient_id, - self.router.get_weight(transient_id), - self.router.get_size(transient_id) ] - - unhandled_entries.append(unhandled_entry) + if self.router.get_stamp_value(transient_id) < min_accepted_cost: low_value_ids.append(transient_id) + else: + unhandled_entry = [ transient_id, + self.router.get_weight(transient_id), + self.router.get_size(transient_id) ] + + unhandled_entries.append(unhandled_entry) else: purged_ids.append(transient_id) for transient_id in purged_ids: - RNS.log("Dropping unhandled message "+RNS.prettyhexrep(transient_id)+" for peer "+RNS.prettyhexrep(self.destination_hash)+" since it no longer exists in the message store.", RNS.LOG_DEBUG) + RNS.log(f"Dropping unhandled message {RNS.prettyhexrep(transient_id)} for peer {RNS.prettyhexrep(self.destination_hash)} since it no longer exists in the message store.", RNS.LOG_DEBUG) + self.remove_unhandled_message(transient_id) + + for transient_id in low_value_ids: + RNS.log(f"Dropping unhandled message {RNS.prettyhexrep(transient_id)} for peer {RNS.prettyhexrep(self.destination_hash)} since its stamp value is lower than peer requirement of {min_accepted_cost}.", RNS.LOG_DEBUG) self.remove_unhandled_message(transient_id) unhandled_entries.sort(key=lambda e: e[1], reverse=False) @@ -284,11 +375,7 @@ class LXMPeer: self.state = LXMPeer.REQUEST_SENT else: - RNS.log("Could not request sync to peer "+RNS.prettyhexrep(self.destination_hash)+" since its identity could not be recalled.", RNS.LOG_ERROR) - - else: - RNS.log("Postponing sync with peer "+RNS.prettyhexrep(self.destination_hash)+" for "+RNS.prettytime(self.next_sync_attempt-time.time())+" due to previous 
failures", RNS.LOG_DEBUG) - if self.last_sync_attempt > self.last_heard: self.alive = False + RNS.log(f"Could not request sync to peer {RNS.prettyhexrep(self.destination_hash)} since its identity could not be recalled.", RNS.LOG_ERROR) def request_failed(self, request_receipt): RNS.log(f"Sync request to peer {self.destination} failed", RNS.LOG_DEBUG) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index d4108fb..ce6b685 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -45,9 +45,11 @@ class LXMRouter: ROTATION_HEADROOM_PCT = 10 ROTATION_AR_MAX = 0.5 - PROPAGATION_COST = 12 - PROPAGATION_COST_MIN = 10 + PEERING_COST = 10 + MAX_PEERING_COST = 12 + PROPAGATION_COST_MIN = 13 PROPAGATION_COST_FLEX = 3 + PROPAGATION_COST = 16 PROPAGATION_LIMIT = 256 SYNC_LIMIT = PROPAGATION_LIMIT*40 DELIVERY_LIMIT = 1000 @@ -81,7 +83,8 @@ class LXMRouter: propagation_limit=PROPAGATION_LIMIT, delivery_limit=DELIVERY_LIMIT, sync_limit=SYNC_LIMIT, enforce_ratchets=False, enforce_stamps=False, static_peers = [], max_peers=None, from_static_only=False, sync_strategy=LXMPeer.STRATEGY_PERSISTENT, - propagation_cost=PROPAGATION_COST, propagation_cost_flexibility=PROPAGATION_COST_FLEX): + propagation_cost=PROPAGATION_COST, propagation_cost_flexibility=PROPAGATION_COST_FLEX, + peering_cost=PEERING_COST): random.seed(os.urandom(10)) @@ -115,17 +118,20 @@ class LXMRouter: self.outbound_propagation_link = None if delivery_limit == None: delivery_limit = LXMRouter.DELIVERY_LIMIT + if propagation_cost < LXMRouter.PROPAGATION_COST_MIN: propagation_cost = LXMRouter.PROPAGATION_COST_MIN - self.message_storage_limit = None - self.information_storage_limit = None - self.propagation_per_transfer_limit = propagation_limit - self.propagation_per_sync_limit = sync_limit - self.delivery_per_transfer_limit = delivery_limit - self.propagation_stamp_cost = propagation_cost + self.message_storage_limit = None + self.information_storage_limit = None + self.propagation_per_transfer_limit = propagation_limit + self.propagation_per_sync_limit = sync_limit + self.delivery_per_transfer_limit = delivery_limit + self.propagation_stamp_cost = propagation_cost self.propagation_stamp_cost_flexibility = propagation_cost_flexibility - self.enforce_ratchets = enforce_ratchets - self._enforce_stamps = enforce_stamps - self.pending_deferred_stamps = {} + self.peering_cost = peering_cost + self.max_peering_cost = LXMRouter.MAX_PEERING_COST + self.enforce_ratchets = enforce_ratchets + self._enforce_stamps = enforce_stamps + self.pending_deferred_stamps = {} if sync_limit == None or self.propagation_per_sync_limit < self.propagation_per_transfer_limit: self.propagation_per_sync_limit = self.propagation_per_transfer_limit @@ -284,7 +290,7 @@ class LXMRouter: def delayed_announce(): time.sleep(LXMRouter.NODE_ANNOUNCE_DELAY) node_state = self.propagation_node and not self.from_static_only - stamp_cost = [self.propagation_stamp_cost, self.propagation_stamp_cost_flexibility] + stamp_cost = [self.propagation_stamp_cost, self.propagation_stamp_cost_flexibility, self.peering_cost] metadata = {} announce_data = [ False, # 0: Legacy LXMF PN support int(time.time()), # 1: Current node timebase @@ -719,6 +725,8 @@ class LXMRouter: "sync_limit": peer.propagation_sync_limit, "target_stamp_cost": peer.propagation_stamp_cost, "stamp_cost_flexibility": peer.propagation_stamp_cost_flexibility, + "peering_cost": peer.peering_cost, + "peering_key": peer.peering_key_value(), "network_distance": RNS.Transport.hops_to(peer_id), "rx_bytes": peer.rx_bytes, "tx_bytes": 
peer.tx_bytes, @@ -739,6 +747,8 @@ class LXMRouter: "sync_limit": self.propagation_per_sync_limit, "target_stamp_cost": self.propagation_stamp_cost, "stamp_cost_flexibility": self.propagation_stamp_cost_flexibility, + "peering_cost": self.peering_cost, + "max_peering_cost": self.max_peering_cost, "autopeer_maxdepth": self.autopeer_maxdepth, "from_static_only": self.from_static_only, "messagestore": { @@ -1782,39 +1792,48 @@ class LXMRouter: ### Peer Sync & Propagation ########################### ####################################################### - def peer(self, destination_hash, timestamp, propagation_transfer_limit, propagation_sync_limit, propagation_stamp_cost, propagation_stamp_cost_flexibility): - if destination_hash in self.peers: - peer = self.peers[destination_hash] - if timestamp > peer.peering_timebase: - peer.alive = True - peer.sync_backoff = 0 - peer.next_sync_attempt = 0 - peer.peering_timebase = timestamp - peer.last_heard = time.time() - peer.propagation_stamp_cost = propagation_stamp_cost - peer.propagation_stamp_cost_flexibility = propagation_stamp_cost_flexibility - peer.propagation_transfer_limit = propagation_transfer_limit - if propagation_sync_limit != None: peer.propagation_sync_limit = propagation_sync_limit - else: peer.propagation_sync_limit = propagation_transfer_limit - - RNS.log(f"Peering config updated for {RNS.prettyhexrep(destination_hash)}", RNS.LOG_VERBOSE) - - else: - if len(self.peers) < self.max_peers: - peer = LXMPeer(self, destination_hash, sync_strategy=self.default_sync_strategy) - peer.alive = True - peer.last_heard = time.time() - peer.propagation_stamp_cost = propagation_stamp_cost - peer.propagation_stamp_cost_flexibility = propagation_stamp_cost_flexibility - peer.propagation_transfer_limit = propagation_transfer_limit - if propagation_sync_limit != None: peer.propagation_sync_limit = propagation_sync_limit - else: peer.propagation_sync_limit = propagation_transfer_limit - - self.peers[destination_hash] = peer - RNS.log(f"Peered with {RNS.prettyhexrep(destination_hash)}", RNS.LOG_NOTICE) - + def peer(self, destination_hash, timestamp, propagation_transfer_limit, propagation_sync_limit, propagation_stamp_cost, propagation_stamp_cost_flexibility, peering_cost, metadata): + if peering_cost > self.max_peering_cost: + if destination_hash in self.peers: + RNS.log(f"Peer {RNS.prettyhexrep(destination_hash)} increased peering cost beyond local accepted maximum, breaking peering...", RNS.LOG_NOTICE) + self.unpeer(destination_hash, timestamp) else: - RNS.log(f"Max peers reached, not peering with {RNS.prettyhexrep(destination_hash)}", RNS.LOG_DEBUG) + RNS.log(f"Not peering with {RNS.prettyhexrep(destination_hash)}, since its peering cost of {peering_cost} exceeds local maximum of {self.max_peering_cost}", RNS.LOG_NOTICE) + + else: + if destination_hash in self.peers: + peer = self.peers[destination_hash] + if timestamp > peer.peering_timebase: + peer.alive = True + peer.sync_backoff = 0 + peer.next_sync_attempt = 0 + peer.peering_timebase = timestamp + peer.last_heard = time.time() + peer.propagation_stamp_cost = propagation_stamp_cost + peer.propagation_stamp_cost_flexibility = propagation_stamp_cost_flexibility + peer.peering_cost = peering_cost + peer.propagation_transfer_limit = propagation_transfer_limit + if propagation_sync_limit != None: peer.propagation_sync_limit = propagation_sync_limit + else: peer.propagation_sync_limit = propagation_transfer_limit + + RNS.log(f"Peering config updated for {RNS.prettyhexrep(destination_hash)}", 
RNS.LOG_VERBOSE) + + else: + if len(self.peers) >= self.max_peers: RNS.log(f"Max peers reached, not peering with {RNS.prettyhexrep(destination_hash)}", RNS.LOG_DEBUG) + else: + peer = LXMPeer(self, destination_hash, sync_strategy=self.default_sync_strategy) + peer.alive = True + peer.last_heard = time.time() + peer.propagation_stamp_cost = propagation_stamp_cost + peer.propagation_stamp_cost_flexibility = propagation_stamp_cost_flexibility + peer.peering_cost = peering_cost + peer.propagation_transfer_limit = propagation_transfer_limit + if propagation_sync_limit != None: peer.propagation_sync_limit = propagation_sync_limit + else: peer.propagation_sync_limit = propagation_transfer_limit + + self.peers[destination_hash] = peer + RNS.log(f"Peered with {RNS.prettyhexrep(destination_hash)}", RNS.LOG_NOTICE) + def unpeer(self, destination_hash, timestamp = None): if timestamp == None: @@ -2000,8 +2019,8 @@ class LXMRouter: ####################################### # TODO: Check propagation stamps here # ####################################### - target_cost = max(0, self.propagation_stamp_cost-self.propagation_stamp_cost_flexibility) - validated_messages = LXStamper.validate_pn_stamps(messages, target_cost) + min_accepted_cost = max(0, self.propagation_stamp_cost-self.propagation_stamp_cost_flexibility) + validated_messages = LXStamper.validate_pn_stamps(messages, min_accepted_cost) for validated_entry in validated_messages: lxmf_data = validated_entry[1] @@ -2077,7 +2096,7 @@ class LXMRouter: # 2: Boolean flag signalling propagation node state # 3: Per-transfer limit for message propagation in kilobytes # 4: Limit for incoming propagation node syncs - # 5: Propagation stamp cost for this node + # 5: Propagation stamp costs for this node # 6: Node metadata if remote_app_data[2] and self.autopeer and RNS.Transport.hops_to(remote_hash) <= self.autopeer_maxdepth: remote_timebase = remote_app_data[1] @@ -2085,10 +2104,11 @@ class LXMRouter: remote_sync_limit = remote_app_data[4] remote_stamp_cost = remote_app_data[5][0] remote_stamp_flex = remote_app_data[5][1] + remote_peering_cost = remote_app_data[5][2] remote_metadata = remote_app_data[6] RNS.log(f"Auto-peering with {remote_str} discovered via incoming sync", RNS.LOG_DEBUG) # TODO: Remove debug - self.peer(remote_hash, remote_timebase, remote_transfer_limit, remote_sync_limit, remote_stamp_cost, remote_stamp_flex, remote_metadata) + self.peer(remote_hash, remote_timebase, remote_transfer_limit, remote_sync_limit, remote_stamp_cost, remote_stamp_flex, remote_peering_cost, remote_metadata) ms = "" if len(messages) == 1 else "s" RNS.log(f"Received {len(messages)} message{ms} from {remote_str}", RNS.LOG_VERBOSE) @@ -2096,8 +2116,8 @@ class LXMRouter: ####################################### # TODO: Check propagation stamps here # ####################################### - target_cost = max(0, self.propagation_stamp_cost-self.propagation_stamp_cost_flexibility) - validated_messages = LXStamper.validate_pn_stamps(messages, target_cost) + min_accepted_cost = max(0, self.propagation_stamp_cost-self.propagation_stamp_cost_flexibility) + validated_messages = LXStamper.validate_pn_stamps(messages, min_accepted_cost) for validated_entry in validated_messages: transient_id = validated_entry[0] @@ -2177,13 +2197,13 @@ class LXMRouter: msg_file = open(file_path, "wb") msg_file.write(lxmf_data); msg_file.close() - RNS.log("Received propagated LXMF message "+RNS.prettyhexrep(transient_id)+", adding to peer distribution queues...", RNS.LOG_EXTREME) + 
RNS.log(f"Received propagated LXMF message {RNS.prettyhexrep(transient_id)} with stamp value {stamp_value}, adding to peer distribution queues...", RNS.LOG_EXTREME) self.propagation_entries[transient_id] = [destination_hash, file_path, time.time(), len(lxmf_data), [], [], stamp_value] self.enqueue_peer_distribution(transient_id, from_peer) else: # TODO: Add message to sneakernet queues when implemented - RNS.log("Received propagated LXMF message "+RNS.prettyhexrep(transient_id)+", but this instance is not hosting a propagation node, discarding message.", RNS.LOG_DEBUG) + RNS.log(f"Received propagated LXMF message {RNS.prettyhexrep(transient_id)}, but this instance is not hosting a propagation node, discarding message.", RNS.LOG_DEBUG) return True diff --git a/LXMF/LXStamper.py b/LXMF/LXStamper.py index 9d85329..4d2e38c 100644 --- a/LXMF/LXStamper.py +++ b/LXMF/LXStamper.py @@ -7,23 +7,24 @@ import math import itertools import multiprocessing -WORKBLOCK_EXPAND_ROUNDS = 3000 -WORKBLOCK_EXPAND_ROUNDS_PN = 1000 -STAMP_SIZE = RNS.Identity.HASHLENGTH -PN_VALIDATION_POOL_MIN_SIZE = 256 +WORKBLOCK_EXPAND_ROUNDS = 3000 +WORKBLOCK_EXPAND_ROUNDS_PEERING = 20000 +WORKBLOCK_EXPAND_ROUNDS_PN = 1000 +STAMP_SIZE = RNS.Identity.HASHLENGTH +PN_VALIDATION_POOL_MIN_SIZE = 256 active_jobs = {} -def stamp_workblock(message_id, expand_rounds=WORKBLOCK_EXPAND_ROUNDS): +def stamp_workblock(material, expand_rounds=WORKBLOCK_EXPAND_ROUNDS): wb_st = time.time() workblock = b"" for n in range(expand_rounds): workblock += RNS.Cryptography.hkdf(length=256, - derive_from=message_id, - salt=RNS.Identity.full_hash(message_id+msgpack.packb(n)), + derive_from=material, + salt=RNS.Identity.full_hash(material+msgpack.packb(n)), context=None) wb_time = time.time() - wb_st - # RNS.log(f"Stamp workblock size {RNS.prettysize(len(workblock))}, generated in {round(wb_time*1000,2)}ms", RNS.LOG_DEBUG) + RNS.log(f"Stamp workblock size {RNS.prettysize(len(workblock))}, generated in {round(wb_time*1000,2)}ms", RNS.LOG_DEBUG) return workblock @@ -81,9 +82,9 @@ def validate_pn_stamps(transient_list, target_cost): if len(transient_list) <= PN_VALIDATION_POOL_MIN_SIZE or non_mp_platform: return validate_pn_stamps_job_simple(transient_list, target_cost) else: return validate_pn_stamps_job_multip(transient_list, target_cost) -def generate_stamp(message_id, stamp_cost): +def generate_stamp(message_id, stamp_cost, expand_rounds=WORKBLOCK_EXPAND_ROUNDS): RNS.log(f"Generating stamp with cost {stamp_cost} for {RNS.prettyhexrep(message_id)}...", RNS.LOG_DEBUG) - workblock = stamp_workblock(message_id) + workblock = stamp_workblock(message_id, expand_rounds=expand_rounds) start_time = time.time() stamp = None @@ -362,4 +363,12 @@ if __name__ == "__main__": RNS.loglevel = RNS.LOG_DEBUG RNS.log("Testing LXMF stamp generation", RNS.LOG_DEBUG) message_id = os.urandom(32) - generate_stamp(message_id, cost) \ No newline at end of file + generate_stamp(message_id, cost) + + RNS.log("Testing propagation stamp generation", RNS.LOG_DEBUG) + message_id = os.urandom(32) + generate_stamp(message_id, cost, expand_rounds=WORKBLOCK_EXPAND_ROUNDS_PN) + + RNS.log("Testing peering key generation", RNS.LOG_DEBUG) + message_id = os.urandom(32) + generate_stamp(message_id, cost, expand_rounds=WORKBLOCK_EXPAND_ROUNDS_PEERING) \ No newline at end of file diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index 03d1282..b2bc302 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -164,6 +164,20 @@ def apply_config(): else: 
active_configuration["propagation_stamp_cost_flexibility"] = LXMF.LXMRouter.PROPAGATION_COST_FLEX + if "propagation" in lxmd_config and "peering_cost" in lxmd_config["propagation"]: + active_configuration["peering_cost"] = lxmd_config["propagation"].as_int("peering_cost") + if active_configuration["peering_cost"] < 0: + active_configuration["peering_cost"] = 0 + else: + active_configuration["peering_cost"] = LXMF.LXMRouter.PEERING_COST + + if "propagation" in lxmd_config and "remote_peering_cost_max" in lxmd_config["propagation"]: + active_configuration["remote_peering_cost_max"] = lxmd_config["propagation"].as_int("remote_peering_cost_max") + if active_configuration["remote_peering_cost_max"] < 0: + active_configuration["remote_peering_cost_max"] = 0 + else: + active_configuration["remote_peering_cost_max"] = LXMF.LXMRouter.MAX_PEERING_COST + if "propagation" in lxmd_config and "prioritise_destinations" in lxmd_config["propagation"]: active_configuration["prioritised_lxmf_destinations"] = lxmd_config["propagation"].as_list("prioritise_destinations") else: @@ -579,9 +593,11 @@ def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = ssp = s["static_peers"]; cprr = s["clients"]["client_propagation_messages_received"] cprs = s["clients"]["client_propagation_messages_served"]; upi = s["unpeered_propagation_incoming"] psc = s["target_stamp_cost"]; scf = s["stamp_cost_flexibility"] + pc = s["peering_cost"]; pcm = s["max_peering_cost"] print(f"Messagestore contains {mscnt} messages, {msb} ({ms_util} utilised of {msl})") print(f"Accepting propagated messages from {who_str}, {ptl} per-transfer limit") print(f"Required propagation stamp cost is {psc}, flexibility is {scf}") + print(f"Peering cost is {pc}, max remote peering cost is {pcm}") print(f"") print(f"Peers : {stp} total (peer limit is {smp})") print(f" {sdp} discovered, {ssp} static") @@ -613,7 +629,13 @@ def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = h = max(time.time()-p["last_heard"], 0) hops = p["network_distance"] hs = "hops unknown" if hops == RNS.Transport.PATHFINDER_M else f"{hops} hop away" if hops == 1 else f"{hops} hops away" - pm = p["messages"] + pm = p["messages"]; pk = p["peering_key"] + pc = p["peering_cost"]; psc = p["target_stamp_cost"]; psf = p["stamp_cost_flexibility"] + if pc == None: pc = "unknown" + if psc == None: psc = "unknown" + if psf == None: psf = "unknown" + if pk == None: pk = "Not generated" + else: pk = f"Generated, value is {pk}" if p["last_sync_attempt"] != 0: lsa = p["last_sync_attempt"] ls = f"last synced {RNS.prettytime(max(time.time()-lsa, 0))} ago" @@ -622,9 +644,11 @@ def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = sstr = RNS.prettyspeed(p["str"]); sler = RNS.prettyspeed(p["ler"]); stl = RNS.prettysize(p["transfer_limit"]*1000) srxb = RNS.prettysize(p["rx_bytes"]); stxb = RNS.prettysize(p["tx_bytes"]); pmo = pm["offered"]; pmout = pm["outgoing"] - pmi = pm["incoming"]; pmuh = pm["unhandled"] + pmi = pm["incoming"]; pmuh = pm["unhandled"] print(f"{ind}{t}{RNS.prettyhexrep(peer_id)}") print(f"{ind*2}Status : {a}, {hs}, last heard {RNS.prettytime(h)} ago") + print(f"{ind*2}Costs : Propagation {psc} (flex {psf}), peering {pc}") + print(f"{ind*2}Sync key : {pk}") print(f"{ind*2}Speeds : {sstr} STR, {sler} LER, {stl} transfer limit") print(f"{ind*2}Messages : {pmo} offered, {pmout} outgoing, {pmi} incoming") print(f"{ind*2}Traffic : {srxb} received, {stxb} sent") @@ -752,6 +776,22 @@ autopeer_maxdepth = 4 # 
propagation_stamp_cost_flexibility = 3 +# The peering_cost option configures the target +# value required for a remote node to peer with +# and deliver messages to this node. + +# peering_cost = 10 + +# You can configure the maximum peering cost +# of remote nodes that this node will peer with. +# Setting this to a higher number will allow +# this node to peer with other nodes requiring +# a high peering key value, but will require +# more computation time during initial peering +# when generating the peering key. + +# remote_peering_cost_max = 12 + # You can tell the LXMF message router to # prioritise storage for one or more # destinations. If the message store reaches From a44c1f368a28e7a73a4d083ec622e52865d43923 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Fri, 31 Oct 2025 17:02:34 +0100 Subject: [PATCH 080/123] Validate peering key on incoming sync offer --- LXMF/LXMPeer.py | 8 ++++++-- LXMF/LXMRouter.py | 36 +++++++++++++++++++++--------------- LXMF/LXStamper.py | 16 +++++++++++++++- 3 files changed, 42 insertions(+), 18 deletions(-) diff --git a/LXMF/LXMPeer.py b/LXMF/LXMPeer.py index 0fe1e74..7d851f2 100644 --- a/LXMF/LXMPeer.py +++ b/LXMF/LXMPeer.py @@ -22,7 +22,9 @@ class LXMPeer: ERROR_NO_IDENTITY = 0xf0 ERROR_NO_ACCESS = 0xf1 - ERROR_THROTTLED = 0xf2 + ERROR_INVALID_KEY = 0xf3 + ERROR_INVALID_DATA = 0xf4 + ERROR_THROTTLED = 0xf5 ERROR_TIMEOUT = 0xfe STRATEGY_LAZY = 0x01 @@ -369,9 +371,11 @@ class LXMPeer: cumulative_size += lxm_transfer_size unhandled_ids.append(transient_id) + offer = [self.peering_key[0], unhandled_ids] + RNS.log(f"Offering {len(unhandled_ids)} messages to peer {RNS.prettyhexrep(self.destination.hash)} ({RNS.prettysize(len(msgpack.packb(unhandled_ids)))})", RNS.LOG_VERBOSE) self.last_offer = unhandled_ids - self.link.request(LXMPeer.OFFER_REQUEST_PATH, unhandled_ids, response_callback=self.offer_response, failed_callback=self.request_failed) + self.link.request(LXMPeer.OFFER_REQUEST_PATH, offer, response_callback=self.offer_response, failed_callback=self.request_failed) self.state = LXMPeer.REQUEST_SENT else: diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index ce6b685..bac498b 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -45,8 +45,8 @@ class LXMRouter: ROTATION_HEADROOM_PCT = 10 ROTATION_AR_MAX = 0.5 - PEERING_COST = 10 - MAX_PEERING_COST = 12 + PEERING_COST = 18 + MAX_PEERING_COST = 24 PROPAGATION_COST_MIN = 13 PROPAGATION_COST_FLEX = 3 PROPAGATION_COST = 16 @@ -2047,21 +2047,30 @@ class LXMRouter: return LXMPeer.ERROR_NO_ACCESS try: - transient_ids = data - wanted_ids = [] + if type(data) != list and len(data) < 2: return LXMPeer.ERROR_INVALID_DATA - for transient_id in transient_ids: - if not transient_id in self.propagation_entries: - wanted_ids.append(transient_id) + peering_id = self.identity.hash+remote_identity + target_cost = self.peering_cost + peering_key = data[0] + transient_ids = data[1] + wanted_ids = [] - if len(wanted_ids) == 0: - return False + ts = time.time() + peering_key_valid = LXStamper.validate_peering_key(peering_id, peering_key, target_cost) + td = time.time() - ts - elif len(wanted_ids) == len(transient_ids): - return True + if not peering_key_valid: + RNS.log(f"Invalid peering key for incoming sync offer", RNS.LOG_DEBUG) + return LXMPeer.ERROR_INVALID_KEY else: - return wanted_ids + RNS.log(f"Peering key validated for incoming offer in {RNS.prettytime(td)}", RNS.LOG_DEBUG) + for transient_id in transient_ids: + if not transient_id in self.propagation_entries: wanted_ids.append(transient_id) + + if 
len(wanted_ids) == 0: return False + elif len(wanted_ids) == len(transient_ids): return True + else: return wanted_ids except Exception as e: RNS.log("Error occurred while generating response for sync request, the contained exception was: "+str(e), RNS.LOG_DEBUG) @@ -2069,9 +2078,6 @@ class LXMRouter: def propagation_resource_concluded(self, resource): if resource.status == RNS.Resource.COMPLETE: - # TODO: The peer this was received from should - # have the transient id added to its list of - # already handled messages. try: data = msgpack.unpackb(resource.data.read()) diff --git a/LXMF/LXStamper.py b/LXMF/LXStamper.py index 4d2e38c..2a74295 100644 --- a/LXMF/LXStamper.py +++ b/LXMF/LXStamper.py @@ -8,8 +8,8 @@ import itertools import multiprocessing WORKBLOCK_EXPAND_ROUNDS = 3000 -WORKBLOCK_EXPAND_ROUNDS_PEERING = 20000 WORKBLOCK_EXPAND_ROUNDS_PN = 1000 +WORKBLOCK_EXPAND_ROUNDS_PEERING = 25 STAMP_SIZE = RNS.Identity.HASHLENGTH PN_VALIDATION_POOL_MIN_SIZE = 256 @@ -45,6 +45,11 @@ def stamp_valid(stamp, target_cost, workblock): if int.from_bytes(result, byteorder="big") > target: return False else: return True +def validate_peering_key(peering_id, peering_key, target_cost): + workblock = stamp_workblock(peering_id, expand_rounds=WORKBLOCK_EXPAND_ROUNDS_PEERING) + if not stamp_valid(peering_key, target_cost, workblock): return False + else: return True + def validate_pn_stamp(transient_data, target_cost): from .LXMessage import LXMessage if len(transient_data) <= LXMessage.LXMF_OVERHEAD+STAMP_SIZE: return False, None, None @@ -348,6 +353,13 @@ def job_android(stamp_cost, workblock, message_id): return stamp, total_rounds +# def stamp_value_linear(workblock, stamp): +# value = 0 +# bits = 256 +# material = RNS.Identity.full_hash(workblock+stamp) +# s = int.from_bytes(material, byteorder="big") +# return s.bit_count() + if __name__ == "__main__": import sys if len(sys.argv) < 2: @@ -365,10 +377,12 @@ if __name__ == "__main__": message_id = os.urandom(32) generate_stamp(message_id, cost) + RNS.log("", RNS.LOG_DEBUG) RNS.log("Testing propagation stamp generation", RNS.LOG_DEBUG) message_id = os.urandom(32) generate_stamp(message_id, cost, expand_rounds=WORKBLOCK_EXPAND_ROUNDS_PN) + RNS.log("", RNS.LOG_DEBUG) RNS.log("Testing peering key generation", RNS.LOG_DEBUG) message_id = os.urandom(32) generate_stamp(message_id, cost, expand_rounds=WORKBLOCK_EXPAND_ROUNDS_PEERING) \ No newline at end of file From 606a723e315faf404a5da4bc42cbe565dd4af8a1 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Fri, 31 Oct 2025 17:11:17 +0100 Subject: [PATCH 081/123] Implemented getting static peer configuration from network if unknown at launch --- LXMF/Handlers.py | 75 ++++++++++++++++++++++++----------------------- LXMF/LXMRouter.py | 17 +++-------- 2 files changed, 42 insertions(+), 50 deletions(-) diff --git a/LXMF/Handlers.py b/LXMF/Handlers.py index f55cc76..a65978c 100644 --- a/LXMF/Handlers.py +++ b/LXMF/Handlers.py @@ -39,22 +39,23 @@ class LXMFPropagationAnnounceHandler: self.lxmrouter = lxmrouter def received_announce(self, destination_hash, announced_identity, app_data, announce_packet_hash, is_path_response): - if not is_path_response: - try: - if type(app_data) == bytes: - if self.lxmrouter.propagation_node: - data = msgpack.unpackb(app_data) - if pn_announce_data_is_valid(data): - node_timebase = int(data[1]) - propagation_enabled = data[2] - propagation_transfer_limit = int(data[3]) - propagation_sync_limit = int(data[4]) - propagation_stamp_cost = int(data[5][0]) - propagation_stamp_cost_flexibility 
= int(data[5][1]) - peering_cost = int(data[5][2]) - metadata = data[6] - - if destination_hash in self.lxmrouter.static_peers: + try: + if type(app_data) == bytes: + if self.lxmrouter.propagation_node: + data = msgpack.unpackb(app_data) + if pn_announce_data_is_valid(data): + node_timebase = int(data[1]) + propagation_enabled = data[2] + propagation_transfer_limit = int(data[3]) + propagation_sync_limit = int(data[4]) + propagation_stamp_cost = int(data[5][0]) + propagation_stamp_cost_flexibility = int(data[5][1]) + peering_cost = int(data[5][2]) + metadata = data[6] + + if destination_hash in self.lxmrouter.static_peers: + static_peer = self.lxmrouter.static_peers[destination_hash] + if not is_path_response or static_peer.last_heard == 0: self.lxmrouter.peer(destination_hash=destination_hash, timestamp=node_timebase, propagation_transfer_limit=propagation_transfer_limit, @@ -64,27 +65,27 @@ class LXMFPropagationAnnounceHandler: peering_cost=peering_cost, metadata=metadata) - else: - if self.lxmrouter.autopeer: - if propagation_enabled == True: - if RNS.Transport.hops_to(destination_hash) <= self.lxmrouter.autopeer_maxdepth: - self.lxmrouter.peer(destination_hash=destination_hash, - timestamp=node_timebase, - propagation_transfer_limit=propagation_transfer_limit, - propagation_sync_limit=propagation_sync_limit, - propagation_stamp_cost=propagation_stamp_cost, - propagation_stamp_cost_flexibility=propagation_stamp_cost_flexibility, - peering_cost=peering_cost, - metadata=metadata) + else: + if self.lxmrouter.autopeer and not is_path_response: + if propagation_enabled == True: + if RNS.Transport.hops_to(destination_hash) <= self.lxmrouter.autopeer_maxdepth: + self.lxmrouter.peer(destination_hash=destination_hash, + timestamp=node_timebase, + propagation_transfer_limit=propagation_transfer_limit, + propagation_sync_limit=propagation_sync_limit, + propagation_stamp_cost=propagation_stamp_cost, + propagation_stamp_cost_flexibility=propagation_stamp_cost_flexibility, + peering_cost=peering_cost, + metadata=metadata) - else: - if destination_hash in self.lxmrouter.peers: - RNS.log(f"Peer {self.lxmrouter.peers[destination_hash]} moved outside auto-peering range, breaking peering...") - self.lxmrouter.unpeer(destination_hash, node_timebase) + else: + if destination_hash in self.lxmrouter.peers: + RNS.log(f"Peer {self.lxmrouter.peers[destination_hash]} moved outside auto-peering range, breaking peering...") + self.lxmrouter.unpeer(destination_hash, node_timebase) - elif propagation_enabled == False: - self.lxmrouter.unpeer(destination_hash, node_timebase) + elif propagation_enabled == False: + self.lxmrouter.unpeer(destination_hash, node_timebase) - except Exception as e: - RNS.log("Error while evaluating propagation node announce, ignoring announce.", RNS.LOG_DEBUG) - RNS.log(f"The contained exception was: {str(e)}", RNS.LOG_DEBUG) + except Exception as e: + RNS.log("Error while evaluating propagation node announce, ignoring announce.", RNS.LOG_DEBUG) + RNS.log(f"The contained exception was: {str(e)}", RNS.LOG_DEBUG) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index bac498b..798cd5a 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -540,9 +540,6 @@ class LXMRouter: peer = LXMPeer.from_bytes(serialised_peer, self) del serialised_peer if peer.destination_hash in self.static_peers and peer.last_heard == 0: - # TODO: Allow path request responses through announce handler - # momentarily here, so peering config can be updated even if - # the static peer is not available to directly send 
an announce. RNS.Transport.request_path(peer.destination_hash) if peer.identity != None: self.peers[peer.destination_hash] = peer @@ -2012,14 +2009,11 @@ class LXMRouter: try: if packet.destination_type != RNS.Destination.LINK: return else: - data = msgpack.unpackb(data) - remote_timebase = data[0] - messages = data[1] + data = msgpack.unpackb(data) + remote_timebase = data[0] + messages = data[1] - ####################################### - # TODO: Check propagation stamps here # - ####################################### - min_accepted_cost = max(0, self.propagation_stamp_cost-self.propagation_stamp_cost_flexibility) + min_accepted_cost = max(0, self.propagation_stamp_cost-self.propagation_stamp_cost_flexibility) validated_messages = LXStamper.validate_pn_stamps(messages, min_accepted_cost) for validated_entry in validated_messages: @@ -2119,9 +2113,6 @@ class LXMRouter: ms = "" if len(messages) == 1 else "s" RNS.log(f"Received {len(messages)} message{ms} from {remote_str}", RNS.LOG_VERBOSE) - ####################################### - # TODO: Check propagation stamps here # - ####################################### min_accepted_cost = max(0, self.propagation_stamp_cost-self.propagation_stamp_cost_flexibility) validated_messages = LXStamper.validate_pn_stamps(messages, min_accepted_cost) From 704b37dc167abe6dcc59d3afd36ed62b1882278f Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Fri, 31 Oct 2025 21:45:40 +0100 Subject: [PATCH 082/123] Implemented client-side propagation stamp generation and inclusion in outbound propagation messages --- LXMF/Handlers.py | 4 +- LXMF/LXMF.py | 2 +- LXMF/LXMPeer.py | 3 +- LXMF/LXMRouter.py | 191 +++++++++++++++++++++++++++++++---------- LXMF/LXMessage.py | 90 +++++++++++++------ LXMF/LXStamper.py | 16 ++-- LXMF/Utilities/lxmd.py | 12 ++- 7 files changed, 228 insertions(+), 90 deletions(-) diff --git a/LXMF/Handlers.py b/LXMF/Handlers.py index a65978c..aa39ea2 100644 --- a/LXMF/Handlers.py +++ b/LXMF/Handlers.py @@ -42,8 +42,8 @@ class LXMFPropagationAnnounceHandler: try: if type(app_data) == bytes: if self.lxmrouter.propagation_node: - data = msgpack.unpackb(app_data) - if pn_announce_data_is_valid(data): + if pn_announce_data_is_valid(app_data): + data = msgpack.unpackb(app_data) node_timebase = int(data[1]) propagation_enabled = data[2] propagation_transfer_limit = int(data[3]) diff --git a/LXMF/LXMF.py b/LXMF/LXMF.py index 3a20b0e..fd2abf0 100644 --- a/LXMF/LXMF.py +++ b/LXMF/LXMF.py @@ -160,7 +160,7 @@ def pn_announce_data_is_valid(data): except: raise ValueError("Invalid announce data: Could not decode propagation transfer limit") try: int(data[4]) except: raise ValueError("Invalid announce data: Could not decode propagation sync limit") - if type(data[4]) != list: raise ValueError("Invalid announce data: Could not decode stamp costs") + if type(data[5]) != list: raise ValueError("Invalid announce data: Could not decode stamp costs") try: int(data[5][0]) except: raise ValueError("Invalid announce data: Could not decode target stamp cost") try: int(data[5][1]) diff --git a/LXMF/LXMPeer.py b/LXMF/LXMPeer.py index 7d851f2..199ee2d 100644 --- a/LXMF/LXMPeer.py +++ b/LXMF/LXMPeer.py @@ -24,7 +24,8 @@ class LXMPeer: ERROR_NO_ACCESS = 0xf1 ERROR_INVALID_KEY = 0xf3 ERROR_INVALID_DATA = 0xf4 - ERROR_THROTTLED = 0xf5 + ERROR_INVALID_STAMP = 0xf5 + ERROR_THROTTLED = 0xf6 ERROR_TIMEOUT = 0xfe STRATEGY_LAZY = 0x01 diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 798cd5a..39ee7ec 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -159,6 +159,7 @@ class 
LXMRouter: self.identity = identity self.propagation_destination = RNS.Destination(self.identity, RNS.Destination.IN, RNS.Destination.SINGLE, APP_NAME, "propagation") + self.propagation_destination.set_default_app_data(self.get_propagation_node_app_data) self.control_destination = None self.client_propagation_messages_received = 0 self.client_propagation_messages_served = 0 @@ -286,22 +287,24 @@ class LXMRouter: if destination_hash in self.delivery_destinations: self.delivery_destinations[destination_hash].announce(app_data=self.get_announce_app_data(destination_hash), attached_interface=attached_interface) + def get_propagation_node_app_data(self): + node_state = self.propagation_node and not self.from_static_only + stamp_cost = [self.propagation_stamp_cost, self.propagation_stamp_cost_flexibility, self.peering_cost] + metadata = {} + announce_data = [ False, # 0: Legacy LXMF PN support + int(time.time()), # 1: Current node timebase + node_state, # 2: Boolean flag signalling propagation node state + self.propagation_per_transfer_limit, # 3: Per-transfer limit for message propagation in kilobytes + self.propagation_per_sync_limit, # 4: Limit for incoming propagation node syncs + stamp_cost, # 5: Propagation stamp cost for this node + metadata ] # 6: Node metadata + + return msgpack.packb(announce_data) + def announce_propagation_node(self): def delayed_announce(): time.sleep(LXMRouter.NODE_ANNOUNCE_DELAY) - node_state = self.propagation_node and not self.from_static_only - stamp_cost = [self.propagation_stamp_cost, self.propagation_stamp_cost_flexibility, self.peering_cost] - metadata = {} - announce_data = [ False, # 0: Legacy LXMF PN support - int(time.time()), # 1: Current node timebase - node_state, # 2: Boolean flag signalling propagation node state - self.propagation_per_transfer_limit, # 3: Per-transfer limit for message propagation in kilobytes - self.propagation_per_sync_limit, # 4: Limit for incoming propagation node syncs - stamp_cost, # 5: Propagation stamp cost for this node - metadata ] # 6: Node metadata - - data = msgpack.packb(announce_data) - self.propagation_destination.announce(app_data=data) + self.propagation_destination.announce(app_data=self.get_propagation_node_app_data()) da_thread = threading.Thread(target=delayed_announce) da_thread.setDaemon(True) @@ -380,6 +383,29 @@ class LXMRouter: def get_outbound_propagation_node(self): return self.outbound_propagation_node + def get_outbound_propagation_cost(self): + target_propagation_cost = None + pn_destination_hash = self.get_outbound_propagation_node() + pn_app_data = RNS.Identity.recall_app_data(pn_destination_hash) + if pn_announce_data_is_valid(pn_app_data): + pn_config = msgpack.unpackb(pn_app_data) + target_propagation_cost = pn_config[5][0] + + if not target_propagation_cost: + RNS.log(f"Could not retrieve cached propagation node config. 
Requesting path to propagation node to get target propagation cost...", RNS.LOG_DEBUG) + RNS.Transport.request_path(pn_destination_hash) + timeout = time.time() + LXMRouter.PATH_REQUEST_WAIT + while not RNS.Identity.recall_app_data(pn_destination_hash) and time.time() < timeout: + time.sleep(0.5) + + pn_app_data = RNS.Identity.recall_app_data(pn_destination_hash) + if pn_announce_data_is_valid(pn_app_data): + pn_config = msgpack.unpackb(pn_app_data) + target_propagation_cost = pn_config[5][0] + + if not target_propagation_cost: RNS.log("Propagation node stamp cost still unavailable after path request", RNS.LOG_ERROR) + return target_propagation_cost + def set_inbound_propagation_node(self, destination_hash): # TODO: Implement raise NotImplementedError("Inbound/outbound propagation node differentiation is currently not implemented") @@ -1525,12 +1551,12 @@ class LXMRouter: else: return False - def cancel_outbound(self, message_id): + def cancel_outbound(self, message_id, cancel_state=LXMessage.CANCELLED): try: if message_id in self.pending_deferred_stamps: lxm = self.pending_deferred_stamps[message_id] RNS.log(f"Cancelling deferred stamp generation for {lxm}", RNS.LOG_DEBUG) - lxm.state = LXMessage.CANCELLED + lxm.state = cancel_state LXStamper.cancel_work(message_id) lxmessage = None @@ -1539,7 +1565,7 @@ class LXMRouter: lxmessage = lxm if lxmessage != None: - lxmessage.state = LXMessage.CANCELLED + lxmessage.state = cancel_state if lxmessage in self.pending_outbound: RNS.log(f"Cancelling {lxmessage} in outbound queue", RNS.LOG_DEBUG) if lxmessage.representation == LXMessage.RESOURCE: @@ -1574,11 +1600,9 @@ class LXMRouter: # destination to reply without generating a stamp. if lxmessage.include_ticket: ticket = self.generate_ticket(lxmessage.destination_hash) - if ticket: - lxmessage.fields[FIELD_TICKET] = ticket + if ticket: lxmessage.fields[FIELD_TICKET] = ticket - if not lxmessage.packed: - lxmessage.pack() + if not lxmessage.packed: lxmessage.pack() unknown_path_requested = False if not RNS.Transport.has_path(destination_hash) and lxmessage.method == LXMessage.OPPORTUNISTIC: @@ -1593,16 +1617,13 @@ class LXMRouter: RNS.log(f"Deferred stamp generation was requested for {lxmessage}, but no stamp is required, processing immediately", RNS.LOG_DEBUG) lxmessage.defer_stamp = False - if not lxmessage.defer_stamp: - while not unknown_path_requested and self.processing_outbound: - time.sleep(0.05) + if not lxmessage.defer_stamp and not (lxmessage.desired_method == LXMessage.PROPAGATED and lxmessage.defer_propagation_stamp): + while not unknown_path_requested and self.processing_outbound: time.sleep(0.05) self.pending_outbound.append(lxmessage) - if not unknown_path_requested: - self.process_outbound() + if not unknown_path_requested: self.process_outbound() - else: - self.pending_deferred_stamps[lxmessage.message_id] = lxmessage + else: self.pending_deferred_stamps[lxmessage.message_id] = lxmessage def get_outbound_progress(self, lxm_hash): for lxm in self.pending_outbound: @@ -1626,6 +1647,17 @@ class LXMRouter: return None + def get_outbound_lxm_propagation_stamp_cost(self, lxm_hash): + for lxm in self.pending_outbound: + if lxm.hash == lxm_hash: + return lxm.propagation_target_cost + + for lxm_id in self.pending_deferred_stamps: + if self.pending_deferred_stamps[lxm_id].hash == lxm_hash: + return self.pending_deferred_stamps[lxm_id].stamp_cost + + return None + ### Message Routing & Delivery ######################## ####################################################### @@ -2022,7 +2054,12 
@@ class LXMRouter: self.lxmf_propagation(lxmf_data, stamp_value=stamp_value) self.client_propagation_messages_received += 1 - if stamps_valid: packet.prove() + if len(validated_messages) == len(messages): packet.prove() + else: + RNS.log("Propagation transfer from client contained messages with invalid stamps", RNS.LOG_NOTICE) + reject_data = msgpack.packb([LXMPeer.ERROR_INVALID_STAMP]) + RNS.Packet(packet.link, reject_data).send() + packet.link.teardown() except Exception as e: RNS.log("Exception occurred while parsing incoming LXMF propagation data.", RNS.LOG_ERROR) @@ -2281,29 +2318,87 @@ class LXMRouter: return - RNS.log(f"Starting stamp generation for {selected_lxm}...", RNS.LOG_DEBUG) - generated_stamp = selected_lxm.get_stamp() - if generated_stamp: - selected_lxm.stamp = generated_stamp - selected_lxm.defer_stamp = False - selected_lxm.packed = None - selected_lxm.pack() - self.pending_deferred_stamps.pop(selected_message_id) - self.pending_outbound.append(selected_lxm) - RNS.log(f"Stamp generation completed for {selected_lxm}", RNS.LOG_DEBUG) - else: - if selected_lxm.state == LXMessage.CANCELLED: - RNS.log(f"Message cancelled during deferred stamp generation for {selected_lxm}.", RNS.LOG_DEBUG) - selected_lxm.stamp_generation_failed = True - self.pending_deferred_stamps.pop(selected_message_id) - if selected_lxm.failed_callback != None and callable(selected_lxm.failed_callback): - selected_lxm.failed_callback(lxmessage) + if selected_lxm.stamp == None: stamp_generation_success = False + else: stamp_generation_success = True + + if selected_lxm.desired_method == LXMessage.PROPAGATED: + if selected_lxm.propagation_stamp == None: propagation_stamp_generation_success = False + else: propagation_stamp_generation_success = True + else: propagation_stamp_generation_success = True + + if stamp_generation_success == False: + RNS.log(f"Starting stamp generation for {selected_lxm}...", RNS.LOG_DEBUG) + generated_stamp = selected_lxm.get_stamp() + if generated_stamp: + selected_lxm.stamp = generated_stamp + selected_lxm.defer_stamp = False + selected_lxm.packed = None + selected_lxm.pack() + stamp_generation_success = True + RNS.log(f"Stamp generation completed for {selected_lxm}", RNS.LOG_DEBUG) else: - RNS.log(f"Deferred stamp generation did not succeed. Failing {selected_lxm}.", RNS.LOG_ERROR) + if selected_lxm.state == LXMessage.CANCELLED: + RNS.log(f"Message cancelled during deferred stamp generation for {selected_lxm}.", RNS.LOG_DEBUG) + selected_lxm.stamp_generation_failed = True + self.pending_deferred_stamps.pop(selected_message_id) + if selected_lxm.failed_callback != None and callable(selected_lxm.failed_callback): + selected_lxm.failed_callback(lxmessage) + else: + RNS.log(f"Deferred stamp generation did not succeed. 
Failing {selected_lxm}.", RNS.LOG_ERROR) + selected_lxm.stamp_generation_failed = True + self.pending_deferred_stamps.pop(selected_message_id) + self.fail_message(selected_lxm) + + if propagation_stamp_generation_success == False: + RNS.log(f"Starting propagation stamp generation for {selected_lxm}...", RNS.LOG_DEBUG) + pn_target_cost = self.get_outbound_propagation_cost() + if pn_target_cost == None: + RNS.log("Failed to get propagation node stamp cost, cannot generate propagation stamp", RNS.LOG_ERROR) selected_lxm.stamp_generation_failed = True self.pending_deferred_stamps.pop(selected_message_id) self.fail_message(selected_lxm) + else: + propagation_stamp = selected_lxm.get_propagation_stamp(target_cost=pn_target_cost) + RNS.log(f"Generated propagation stamp: {RNS.hexrep(propagation_stamp)}") + if propagation_stamp: + selected_lxm.propagation_stamp = propagation_stamp + selected_lxm.defer_propagation_stamp = False + selected_lxm.packed = None + selected_lxm.pack() + propagation_stamp_generation_success = True + RNS.log(f"Propagation stamp generation completed for {selected_lxm}", RNS.LOG_DEBUG) + else: + if selected_lxm.state == LXMessage.CANCELLED: + RNS.log(f"Message cancelled during deferred propagation stamp generation for {selected_lxm}.", RNS.LOG_DEBUG) + selected_lxm.stamp_generation_failed = True + self.pending_deferred_stamps.pop(selected_message_id) + if selected_lxm.failed_callback != None and callable(selected_lxm.failed_callback): + selected_lxm.failed_callback(lxmessage) + else: + RNS.log(f"Deferred propagation stamp generation did not succeed. Failing {selected_lxm}.", RNS.LOG_ERROR) + selected_lxm.stamp_generation_failed = True + self.pending_deferred_stamps.pop(selected_message_id) + self.fail_message(selected_lxm) + + if stamp_generation_success and propagation_stamp_generation_success: + self.pending_deferred_stamps.pop(selected_message_id) + self.pending_outbound.append(selected_lxm) + + def propagation_transfer_signalling_packet(self, data, packet): + try: + unpacked = msgpack.unpackb(data) + if type(unpacked) == list and len(unpacked) >= 1: + signal = unpacked[0] + if signal == LXMPeer.ERROR_INVALID_STAMP: + RNS.log("Message rejected by propagation node", RNS.LOG_ERROR) + if hasattr(packet, "link") and hasattr(packet.link, "for_lxmessage"): + lxm = packet.link.for_lxmessage + RNS.log(f"Invalid propagation stamp on {lxm}", RNS.LOG_ERROR) + self.cancel_outbound(lxm.message_id, cancel_state=LXMessage.REJECTED) + + except Exception as e: + RNS.log(f"An error occurred while processing propagation transfer signalling. 
The contained exception was: {e}", RNS.LOG_ERROR) def process_outbound(self, sender = None): if self.processing_outbound: @@ -2347,7 +2442,7 @@ class LXMRouter: elif lxmessage.state == LXMessage.REJECTED: RNS.log("Receiver rejected "+str(lxmessage)+", removing from outbound queue", RNS.LOG_DEBUG) - self.pending_outbound.remove(lxmessage) + if lxmessage in self.pending_outbound: self.pending_outbound.remove(lxmessage) if lxmessage.failed_callback != None and callable(lxmessage.failed_callback): lxmessage.failed_callback(lxmessage) @@ -2512,6 +2607,8 @@ class LXMRouter: propagation_node_identity = RNS.Identity.recall(self.outbound_propagation_node) propagation_node_destination = RNS.Destination(propagation_node_identity, RNS.Destination.OUT, RNS.Destination.SINGLE, APP_NAME, "propagation") self.outbound_propagation_link = RNS.Link(propagation_node_destination, established_callback=self.process_outbound) + self.outbound_propagation_link.set_packet_callback(self.propagation_transfer_signalling_packet) + self.outbound_propagation_link.for_lxmessage = lxmessage else: RNS.log("No path known for propagation attempt "+str(lxmessage.delivery_attempts)+" to "+RNS.prettyhexrep(self.outbound_propagation_node)+". Requesting path...", RNS.LOG_DEBUG) RNS.Transport.request_path(self.outbound_propagation_node) diff --git a/LXMF/LXMessage.py b/LXMF/LXMessage.py index 4739f30..0533f07 100644 --- a/LXMF/LXMessage.py +++ b/LXMF/LXMessage.py @@ -145,26 +145,32 @@ class LXMessage: self.set_fields(fields) - self.payload = None - self.timestamp = None - self.signature = None - self.hash = None - self.packed = None - self.state = LXMessage.GENERATING - self.method = LXMessage.UNKNOWN - self.progress = 0.0 - self.rssi = None - self.snr = None - self.q = None + self.payload = None + self.timestamp = None + self.signature = None + self.hash = None + self.transient_id = None + self.packed = None + self.state = LXMessage.GENERATING + self.method = LXMessage.UNKNOWN + self.progress = 0.0 + self.rssi = None + self.snr = None + self.q = None - self.stamp = None - self.stamp_cost = stamp_cost - self.stamp_value = None - self.stamp_valid = False - self.stamp_checked = False - self.defer_stamp = True - self.outbound_ticket = None - self.include_ticket = include_ticket + self.stamp = None + self.stamp_cost = stamp_cost + self.stamp_value = None + self.stamp_valid = False + self.stamp_checked = False + self.propagation_stamp = None + self.propagation_stamp_value = None + self.propagation_stamp_valid = False + self.propagation_target_cost = None + self.defer_stamp = True + self.defer_propagation_stamp = True + self.outbound_ticket = None + self.include_ticket = include_ticket self.propagation_packed = None self.paper_packed = None @@ -184,6 +190,7 @@ class LXMessage: self.resource_representation = None self.__delivery_destination = None self.__delivery_callback = None + self.__pn_encrypted_data = None self.failed_callback = None self.deferred_stamp_generating = False @@ -324,10 +331,35 @@ class LXMessage: else: return None + def get_propagation_stamp(self, target_cost, timeout=None): + # If a stamp was already generated, return + # it immediately. + if self.propagation_stamp != None: + return self.propagation_stamp + + # Otherwise, we will need to generate a + # valid stamp according to the cost that + # the propagation node has specified. 
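An illustrative sketch, not taken from the patch itself: the search performed here amounts to expanding the transient ID into a workblock and trying random candidates until one satisfies the propagation node's advertised cost, using the LXStamper primitives shown earlier in this patch series.

    import os
    import LXMF.LXStamper as LXStamper

    def find_propagation_stamp(transient_id, target_cost):
        # Expand the transient ID into a workblock with the propagation-node round count
        workblock = LXStamper.stamp_workblock(transient_id, expand_rounds=LXStamper.WORKBLOCK_EXPAND_ROUNDS_PN)
        candidate = os.urandom(LXStamper.STAMP_SIZE)
        # Try random candidates until one meets the target cost
        while not LXStamper.stamp_valid(candidate, target_cost, workblock):
            candidate = os.urandom(LXStamper.STAMP_SIZE)
        return candidate

LXStamper.generate_stamp() called below performs this search and additionally returns the achieved stamp value.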
+ else: + self.propagation_target_cost = target_cost + if self.propagation_target_cost == None: + raise ValueError("Cannot generate propagation stamp without configured target propagation cost") + + + if not self.transient_id: self.pack() + generated_stamp, value = LXStamper.generate_stamp(self.transient_id, target_cost, expand_rounds=LXStamper.WORKBLOCK_EXPAND_ROUNDS_PN) + if generated_stamp: + self.propagation_stamp = generated_stamp + self.propagation_stamp_value = value + self.propagation_stamp_valid = True + return generated_stamp + + else: + return None + def pack(self): if not self.packed: - if self.timestamp == None: - self.timestamp = time.time() + if self.timestamp == None: self.timestamp = time.time() self.propagation_packed = None self.paper_packed = None @@ -343,9 +375,8 @@ class LXMessage: if not self.defer_stamp: self.stamp = self.get_stamp() - if self.stamp != None: - self.payload.append(self.stamp) - + if self.stamp != None: self.payload.append(self.stamp) + signed_part = b"" signed_part += hashed_part signed_part += self.hash @@ -400,9 +431,14 @@ class LXMessage: elif self.desired_method == LXMessage.PROPAGATED: single_packet_content_limit = LXMessage.LINK_PACKET_MAX_CONTENT - encrypted_data = self.__destination.encrypt(self.packed[LXMessage.DESTINATION_LENGTH:]) - self.ratchet_id = self.__destination.latest_ratchet_id - self.propagation_packed = msgpack.packb([time.time(), [self.packed[:LXMessage.DESTINATION_LENGTH]+encrypted_data]]) + if self.__pn_encrypted_data == None: + self.__pn_encrypted_data = self.__destination.encrypt(self.packed[LXMessage.DESTINATION_LENGTH:]) + self.ratchet_id = self.__destination.latest_ratchet_id + + lxmf_data = self.packed[:LXMessage.DESTINATION_LENGTH]+self.__pn_encrypted_data + self.transient_id = RNS.Identity.full_hash(lxmf_data) + if self.propagation_stamp != None: lxmf_data += self.propagation_stamp + self.propagation_packed = msgpack.packb([time.time(), [lxmf_data]]) content_size = len(self.propagation_packed) if content_size <= single_packet_content_limit: diff --git a/LXMF/LXStamper.py b/LXMF/LXStamper.py index 2a74295..de71101 100644 --- a/LXMF/LXStamper.py +++ b/LXMF/LXStamper.py @@ -10,7 +10,7 @@ import multiprocessing WORKBLOCK_EXPAND_ROUNDS = 3000 WORKBLOCK_EXPAND_ROUNDS_PN = 1000 WORKBLOCK_EXPAND_ROUNDS_PEERING = 25 -STAMP_SIZE = RNS.Identity.HASHLENGTH +STAMP_SIZE = RNS.Identity.HASHLENGTH//8 PN_VALIDATION_POOL_MIN_SIZE = 256 active_jobs = {} @@ -24,7 +24,7 @@ def stamp_workblock(material, expand_rounds=WORKBLOCK_EXPAND_ROUNDS): salt=RNS.Identity.full_hash(material+msgpack.packb(n)), context=None) wb_time = time.time() - wb_st - RNS.log(f"Stamp workblock size {RNS.prettysize(len(workblock))}, generated in {round(wb_time*1000,2)}ms", RNS.LOG_DEBUG) + # RNS.log(f"Stamp workblock size {RNS.prettysize(len(workblock))}, generated in {round(wb_time*1000,2)}ms", RNS.LOG_DEBUG) return workblock @@ -52,23 +52,23 @@ def validate_peering_key(peering_id, peering_key, target_cost): def validate_pn_stamp(transient_data, target_cost): from .LXMessage import LXMessage - if len(transient_data) <= LXMessage.LXMF_OVERHEAD+STAMP_SIZE: return False, None, None + if len(transient_data) <= LXMessage.LXMF_OVERHEAD+STAMP_SIZE: return None, None, None else: lxm_data = transient_data[:-STAMP_SIZE] stamp = transient_data[-STAMP_SIZE:] transient_id = RNS.Identity.full_hash(lxm_data) workblock = stamp_workblock(transient_id, expand_rounds=WORKBLOCK_EXPAND_ROUNDS_PN) - if not stamp_valid(stamp, target_cost, workblock): return False, None, None + if not 
stamp_valid(stamp, target_cost, workblock): return None, None, None else: value = stamp_value(workblock, stamp) - return True, transient_id, value + return transient_id, lxm_data, value def validate_pn_stamps_job_simple(transient_list, target_cost): validated_messages = [] for transient_data in transient_list: - stamp_valid, transient_id, value = validate_pn_stamp(transient_data, target_cost) - if stamp_valid: validated_messages.append([transient_id, transient_data, value]) + transient_id, lxm_data, value = validate_pn_stamp(transient_data, target_cost) + if transient_id: validated_messages.append([transient_id, lxm_data, value]) return validated_messages @@ -80,7 +80,7 @@ def validate_pn_stamps_job_multip(transient_list, target_cost): with multiprocessing.Pool(pool_count) as p: validated_entries = p.starmap(validate_pn_stamp, zip(transient_list, itertools.repeat(target_cost))) - return [e for e in validated_entries if e[0] == True] + return [e for e in validated_entries if e[0] != None] def validate_pn_stamps(transient_list, target_cost): non_mp_platform = RNS.vendor.platformutils.is_android() diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index b2bc302..69bb26e 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -588,16 +588,18 @@ def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = if show_status: msb = RNS.prettysize(s["messagestore"]["bytes"]); msl = RNS.prettysize(s["messagestore"]["limit"]) - ptl = RNS.prettysize(s["propagation_limit"]*1000); uprx = RNS.prettysize(s["unpeered_propagation_rx_bytes"]) + ptl = RNS.prettysize(s["propagation_limit"]*1000); psl = RNS.prettysize(s["sync_limit"]*1000); + uprx = RNS.prettysize(s["unpeered_propagation_rx_bytes"]) mscnt = s["messagestore"]["count"]; stp = s["total_peers"]; smp = s["max_peers"]; sdp = s["discovered_peers"] ssp = s["static_peers"]; cprr = s["clients"]["client_propagation_messages_received"] cprs = s["clients"]["client_propagation_messages_served"]; upi = s["unpeered_propagation_incoming"] psc = s["target_stamp_cost"]; scf = s["stamp_cost_flexibility"] pc = s["peering_cost"]; pcm = s["max_peering_cost"] print(f"Messagestore contains {mscnt} messages, {msb} ({ms_util} utilised of {msl})") - print(f"Accepting propagated messages from {who_str}, {ptl} per-transfer limit") print(f"Required propagation stamp cost is {psc}, flexibility is {scf}") print(f"Peering cost is {pc}, max remote peering cost is {pcm}") + print(f"Accepting propagated messages from {who_str}") + print(f"{ptl} message limit, {psl} sync limit") print(f"") print(f"Peers : {stp} total (peer limit is {smp})") print(f" {sdp} discovered, {ssp} static") @@ -642,14 +644,16 @@ def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = else: ls = "never synced" - sstr = RNS.prettyspeed(p["str"]); sler = RNS.prettyspeed(p["ler"]); stl = RNS.prettysize(p["transfer_limit"]*1000) + sstr = RNS.prettyspeed(p["str"]); sler = RNS.prettyspeed(p["ler"]) + stl = RNS.prettysize(p["transfer_limit"]*1000); ssl = RNS.prettysize(p["sync_limit"]*1000) srxb = RNS.prettysize(p["rx_bytes"]); stxb = RNS.prettysize(p["tx_bytes"]); pmo = pm["offered"]; pmout = pm["outgoing"] pmi = pm["incoming"]; pmuh = pm["unhandled"] print(f"{ind}{t}{RNS.prettyhexrep(peer_id)}") print(f"{ind*2}Status : {a}, {hs}, last heard {RNS.prettytime(h)} ago") print(f"{ind*2}Costs : Propagation {psc} (flex {psf}), peering {pc}") print(f"{ind*2}Sync key : {pk}") - print(f"{ind*2}Speeds : {sstr} STR, {sler} LER, {stl} transfer limit") + 
print(f"{ind*2}Speeds : {sstr} STR, {sler} LER") + print(f"{ind*2}Limits : {stl} message limit, {ssl} sync limit") print(f"{ind*2}Messages : {pmo} offered, {pmout} outgoing, {pmi} incoming") print(f"{ind*2}Traffic : {srxb} received, {stxb} sent") ms = "" if pm["unhandled"] == 1 else "s" From 0a5edb28952e1f17bc7485ce4adbcac1386b56ce Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Fri, 31 Oct 2025 22:24:55 +0100 Subject: [PATCH 083/123] Implemented propagation node name configuration and inclusion in announce metadata --- LXMF/LXMF.py | 22 ++++++++++++++++++++++ LXMF/LXMPeer.py | 14 ++++++++++++++ LXMF/LXMRouter.py | 16 +++++++++++++--- LXMF/Utilities/lxmd.py | 19 +++++++++++++++++-- 4 files changed, 66 insertions(+), 5 deletions(-) diff --git a/LXMF/LXMF.py b/LXMF/LXMF.py index fd2abf0..dbc0ab1 100644 --- a/LXMF/LXMF.py +++ b/LXMF/LXMF.py @@ -147,6 +147,28 @@ def stamp_cost_from_app_data(app_data=None): # Original announce format else: return None +def pn_name_from_app_data(app_data=None): + if app_data == None: return None + else: + if pn_announce_data_is_valid(app_data): + data = msgpack.unpackb(app_data) + metadata = data[6] + if not PN_META_NAME in metadata: return None + else: + try: return metadata[PN_META_NAME].decode("utf-8") + except: return None + + return None + +def pn_stamp_cost_from_app_data(app_data=None): + if app_data == None: return None + else: + if pn_announce_data_is_valid(app_data): + data = msgpack.unpackb(app_data) + return data[5][0] + else: + return None + def pn_announce_data_is_valid(data): try: if type(data) != bytes: return False diff --git a/LXMF/LXMPeer.py b/LXMF/LXMPeer.py index 199ee2d..a67d3b2 100644 --- a/LXMF/LXMPeer.py +++ b/LXMF/LXMPeer.py @@ -8,6 +8,7 @@ import LXMF.LXStamper as LXStamper from collections import deque from .LXMF import APP_NAME +from .LXMF import PN_META_NAME class LXMPeer: OFFER_REQUEST_PATH = "/offer" @@ -110,6 +111,8 @@ class LXMPeer: else: peer.last_sync_attempt = 0 if "peering_key" in dictionary: peer.peering_key = dictionary["peering_key"] else: peer.peering_key = None + if "metadata" in dictionary: peer.metadata = dictionary["metadata"] + else: peer.metadata = None hm_count = 0 for transient_id in dictionary["handled_ids"]: @@ -135,6 +138,7 @@ class LXMPeer: dictionary = {} dictionary["peering_timebase"] = self.peering_timebase dictionary["alive"] = self.alive + dictionary["metadata"] = self.metadata dictionary["last_heard"] = self.last_heard dictionary["sync_strategy"] = self.sync_strategy dictionary["peering_key"] = self.peering_key @@ -175,6 +179,7 @@ class LXMPeer: self.sync_strategy = sync_strategy self.peering_key = None self.peering_cost = None + self.metadata = None self.next_sync_attempt = 0 self.last_sync_attempt = 0 @@ -616,6 +621,15 @@ class LXMPeer: self.router.propagation_entries[transient_id][5].remove(self.destination_hash) self._um_counts_synced = False + @property + def name(self): + if type(self.metadata) != dict: return None + else: + if not PN_META_NAME in self.metadata: return None + else: + try: return self.metadata[PN_META_NAME].decode("utf-8") + except: return None + def __str__(self): if self.destination_hash: return RNS.prettyhexrep(self.destination_hash) else: return "" \ No newline at end of file diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 39ee7ec..3ab85e8 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -15,6 +15,7 @@ import RNS.vendor.umsgpack as msgpack from .LXMF import APP_NAME from .LXMF import FIELD_TICKET +from .LXMF import PN_META_NAME from .LXMF import 
pn_announce_data_is_valid from .LXMPeer import LXMPeer @@ -84,7 +85,7 @@ class LXMRouter: enforce_ratchets=False, enforce_stamps=False, static_peers = [], max_peers=None, from_static_only=False, sync_strategy=LXMPeer.STRATEGY_PERSISTENT, propagation_cost=PROPAGATION_COST, propagation_cost_flexibility=PROPAGATION_COST_FLEX, - peering_cost=PEERING_COST): + peering_cost=PEERING_COST, name=None): random.seed(os.urandom(10)) @@ -105,9 +106,10 @@ class LXMRouter: self.processing_outbound = False self.processing_inbound = False self.processing_count = 0 + self.name = name self.propagation_node = False - self.propagation_node_start_time = None + self.propagation_node_start_time = None if storagepath == None: raise ValueError("LXMF cannot be initialised without a storage path") else: @@ -287,10 +289,15 @@ class LXMRouter: if destination_hash in self.delivery_destinations: self.delivery_destinations[destination_hash].announce(app_data=self.get_announce_app_data(destination_hash), attached_interface=attached_interface) + def get_propagation_node_announce_metadata(self): + metadata = {} + if self.name: metadata[PN_META_NAME] = str(self.name).encode("utf-8") + return metadata + def get_propagation_node_app_data(self): + metadata = self.get_propagation_node_announce_metadata() node_state = self.propagation_node and not self.from_static_only stamp_cost = [self.propagation_stamp_cost, self.propagation_stamp_cost_flexibility, self.peering_cost] - metadata = {} announce_data = [ False, # 0: Legacy LXMF PN support int(time.time()), # 1: Current node timebase node_state, # 2: Boolean flag signalling propagation node state @@ -737,6 +744,7 @@ class LXMRouter: "type": "static" if peer_id in self.static_peers else "discovered", "state": peer.state, "alive": peer.alive, + "name": peer.name, "last_heard": int(peer.last_heard), "next_sync_attempt": peer.next_sync_attempt, "last_sync_attempt": peer.last_sync_attempt, @@ -1834,6 +1842,7 @@ class LXMRouter: peer = self.peers[destination_hash] if timestamp > peer.peering_timebase: peer.alive = True + peer.metadata = metadata peer.sync_backoff = 0 peer.next_sync_attempt = 0 peer.peering_timebase = timestamp @@ -1852,6 +1861,7 @@ class LXMRouter: else: peer = LXMPeer(self, destination_hash, sync_strategy=self.default_sync_strategy) peer.alive = True + peer.metadata = metadata peer.last_heard = time.time() peer.propagation_stamp_cost = propagation_stamp_cost peer.propagation_stamp_cost_flexibility = propagation_stamp_cost_flexibility diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index 69bb26e..5d21bd3 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -97,6 +97,11 @@ def apply_config(): else: active_configuration["enable_propagation_node"] = False + if "propagation" in lxmd_config and "node_name" in lxmd_config["propagation"]: + active_configuration["node_name"] = lxmd_config["propagation"].get("node_name") + else: + active_configuration["node_name"] = None + if "propagation" in lxmd_config and "auth_required" in lxmd_config["propagation"]: active_configuration["auth_required"] = lxmd_config["propagation"].as_bool("auth_required") else: @@ -371,7 +376,8 @@ def program_setup(configdir = None, rnsconfigdir = None, run_pn = False, on_inbo delivery_limit = active_configuration["delivery_transfer_max_accepted_size"], max_peers = active_configuration["max_peers"], static_peers = active_configuration["static_peers"], - from_static_only = active_configuration["from_static_only"]) + from_static_only = active_configuration["from_static_only"], + name 
= active_configuration["node_name"]) message_router.register_delivery_callback(lxmf_delivery) @@ -647,8 +653,12 @@ def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = sstr = RNS.prettyspeed(p["str"]); sler = RNS.prettyspeed(p["ler"]) stl = RNS.prettysize(p["transfer_limit"]*1000); ssl = RNS.prettysize(p["sync_limit"]*1000) srxb = RNS.prettysize(p["rx_bytes"]); stxb = RNS.prettysize(p["tx_bytes"]); pmo = pm["offered"]; pmout = pm["outgoing"] - pmi = pm["incoming"]; pmuh = pm["unhandled"] + pmi = pm["incoming"]; pmuh = pm["unhandled"]; + if p["name"] == None: nn = "" + else: nn = p["name"].strip().replace("\n", "").replace("\r", "") + if len(nn) > 45: nn = f"{nn[:45]}..." print(f"{ind}{t}{RNS.prettyhexrep(peer_id)}") + if len(nn): print(f"{ind*2}Name : {nn}") print(f"{ind*2}Status : {a}, {hs}, last heard {RNS.prettytime(h)} ago") print(f"{ind*2}Costs : Propagation {psc} (flex {psf}), peering {pc}") print(f"{ind*2}Sync key : {pk}") @@ -717,6 +727,11 @@ __default_lxmd_config__ = """# This is an example LXM Daemon config file. enable_node = no +# An optional name for this node, included +# in announces. + +# node_name = Anonymous Propagation Node + # Automatic announce interval in minutes. # 6 hours by default. From 4afb92bf3e724b3f81b8536dc83eb5a1eb78db14 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sat, 1 Nov 2025 00:09:23 +0100 Subject: [PATCH 084/123] Added peer sync option to lxmd command line interface --- LXMF/Utilities/lxmd.py | 130 ++++++++++++++++++++++++++++++++++++++--- 1 file changed, 123 insertions(+), 7 deletions(-) diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index 5d21bd3..88f08f7 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -467,6 +467,112 @@ def deferred_start_jobs(): last_node_announce = time.time() threading.Thread(target=jobs, daemon=True).start() +def _request_sync(identity, destination_hash, timeout=5, exit_on_fail=False): + control_destination = RNS.Destination(identity, RNS.Destination.OUT, RNS.Destination.SINGLE, APP_NAME, "propagation", "control") + + timeout = time.time()+timeout + def check_timeout(): + if time.time() > timeout: + if exit_on_fail: + RNS.log("Requesting lxmd peer sync timed out, exiting now", RNS.LOG_ERROR) + exit(200) + else: + return LXMF.LXMPeer.LXMPeer.ERROR_TIMEOUT + else: + time.sleep(0.1) + + if not RNS.Transport.has_path(control_destination.hash): + RNS.Transport.request_path(control_destination.hash) + while not RNS.Transport.has_path(control_destination.hash): + tc = check_timeout() + if tc: + return tc + + link = RNS.Link(control_destination) + while not link.status == RNS.Link.ACTIVE: + tc = check_timeout() + if tc: + return tc + + link.identify(identity) + request_receipt = link.request(LXMF.LXMRouter.SYNC_REQUEST_PATH, data=destination_hash, response_callback=None, failed_callback=None) + while not request_receipt.get_status() == RNS.RequestReceipt.READY: + tc = check_timeout() + if tc: + return tc + + link.teardown() + return request_receipt.get_response() + +def request_sync(target, configdir = None, rnsconfigdir = None, verbosity = 0, quietness = 0, timeout=15, identity_path=None): + global configpath, identitypath, storagedir, lxmdir + global lxmd_config, active_configuration, targetloglevel + targetlogdest = RNS.LOG_STDOUT + + if identity_path == None: + if configdir == None: + if os.path.isdir("/etc/lxmd") and os.path.isfile("/etc/lxmd/config"): + configdir = "/etc/lxmd" + elif os.path.isdir(RNS.Reticulum.userdir+"/.config/lxmd") and 
os.path.isfile(Reticulum.userdir+"/.config/lxmd/config"): + configdir = RNS.Reticulum.userdir+"/.config/lxmd" + else: + configdir = RNS.Reticulum.userdir+"/.lxmd" + + configpath = configdir+"/config" + identitypath = configdir+"/identity" + identity = None + + if not os.path.isdir(configdir): + RNS.log("Specified configuration directory does not exist, exiting now", RNS.LOG_ERROR) + exit(201) + if not os.path.isfile(identitypath): + RNS.log("Identity file not found in specified configuration directory, exiting now", RNS.LOG_ERROR) + exit(202) + else: + identity = RNS.Identity.from_file(identitypath) + if identity == None: + RNS.log("Could not load the Primary Identity from "+identitypath, RNS.LOG_ERROR) + exit(4) + + else: + if not os.path.isfile(identity_path): + RNS.log("Identity file not found in specified configuration directory, exiting now", RNS.LOG_ERROR) + exit(202) + else: + identity = RNS.Identity.from_file(identity_path) + if identity == None: + RNS.log("Could not load the Primary Identity from "+identity_path, RNS.LOG_ERROR) + exit(4) + + if targetloglevel == None: targetloglevel = 3 + if verbosity != 0 or quietness != 0: targetloglevel = targetloglevel+verbosity-quietness + + try: + destination_hash = bytes.fromhex(target) + if len(destination_hash) != RNS.Identity.TRUNCATED_HASHLENGTH//8: raise ValueError(f"Destination hash length must be {RNS.Identity.TRUNCATED_HASHLENGTH//8*2} characters") + except Exception as e: + print(f"Invalid peer destination hash: {e}") + exit(203) + + reticulum = RNS.Reticulum(configdir=rnsconfigdir, loglevel=targetloglevel, logdest=targetlogdest) + response = _request_sync(identity, destination_hash, timeout=timeout, exit_on_fail=True) + + if response == LXMF.LXMPeer.LXMPeer.ERROR_NO_IDENTITY: + print("Remote received no identity") + exit(203) + elif response == LXMF.LXMPeer.LXMPeer.ERROR_NO_ACCESS: + print("Access denied") + exit(204) + elif response == LXMF.LXMPeer.LXMPeer.ERROR_INVALID_DATA: + print("Invalid data received by remote") + exit(205) + elif response == LXMF.LXMPeer.LXMPeer.ERROR_NOT_FOUND: + print("The requested peer was not found") + exit(206) + else: + print(f"Sync requested for peer {RNS.prettyhexrep(destination_hash)}") + exit(0) + def query_status(identity, timeout=5, exit_on_fail=False): control_destination = RNS.Destination(identity, RNS.Destination.OUT, RNS.Destination.SINGLE, APP_NAME, "propagation", "control") @@ -683,6 +789,7 @@ def main(): parser.add_argument("-s", "--service", action="store_true", default=False, help="lxmd is running as a service and should log to file") parser.add_argument("--status", action="store_true", default=False, help="display node status") parser.add_argument("--peers", action="store_true", default=False, help="display peered nodes") + parser.add_argument("--sync", action="store", default=None, help="request a sync with the specified peer", type=str) parser.add_argument("--timeout", action="store", default=5, help="timeout in seconds for query operations", type=float) parser.add_argument("--identity", action="store", default=None, help="path to identity used for query request", type=str) parser.add_argument("--exampleconfig", action="store_true", default=False, help="print verbose configuration example to stdout and exit") @@ -696,15 +803,24 @@ def main(): if args.status or args.peers: get_status(configdir = args.config, - rnsconfigdir=args.rnsconfig, - verbosity=args.verbose, - quietness=args.quiet, - timeout=args.timeout, - show_status=args.status, - show_peers=args.peers, - 
identity_path=args.identity) + rnsconfigdir=args.rnsconfig, + verbosity=args.verbose, + quietness=args.quiet, + timeout=args.timeout, + show_status=args.status, + show_peers=args.peers, + identity_path=args.identity) exit() + if args.sync: + request_sync(target=args.sync, + configdir = args.config, + rnsconfigdir=args.rnsconfig, + verbosity=args.verbose, + quietness=args.quiet, + timeout=args.timeout, + identity_path=args.identity) + program_setup(configdir = args.config, rnsconfigdir=args.rnsconfig, run_pn=args.propagation_node, From df6271a02637087c78e0ab227d5e48ebd858258d Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sat, 1 Nov 2025 00:10:30 +0100 Subject: [PATCH 085/123] Handle client message download for stamped propagation messages --- LXMF/LXMPeer.py | 1 + LXMF/LXMRouter.py | 62 +++++++++++++++++++++++++++++------------------ LXMF/LXStamper.py | 10 ++++---- 3 files changed, 44 insertions(+), 29 deletions(-) diff --git a/LXMF/LXMPeer.py b/LXMF/LXMPeer.py index a67d3b2..786514d 100644 --- a/LXMF/LXMPeer.py +++ b/LXMF/LXMPeer.py @@ -27,6 +27,7 @@ class LXMPeer: ERROR_INVALID_DATA = 0xf4 ERROR_INVALID_STAMP = 0xf5 ERROR_THROTTLED = 0xf6 + ERROR_NOT_FOUND = 0xfd ERROR_TIMEOUT = 0xfe STRATEGY_LAZY = 0x01 diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 3ab85e8..c7c6051 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -75,6 +75,7 @@ class LXMRouter: PR_ALL_MESSAGES = 0x00 STATS_GET_PATH = "/pn/get/stats" + SYNC_REQUEST_PATH = "/pn/peer/sync" ### Developer-facing API ############################## @@ -627,6 +628,7 @@ class LXMRouter: self.control_destination = RNS.Destination(self.identity, RNS.Destination.IN, RNS.Destination.SINGLE, APP_NAME, "propagation", "control") self.control_destination.register_request_handler(LXMRouter.STATS_GET_PATH, self.stats_get_request, allow = RNS.Destination.ALLOW_LIST, allowed_list=[self.identity.hash]) + self.control_destination.register_request_handler(LXMRouter.SYNC_REQUEST_PATH, self.peer_sync_request, allow = RNS.Destination.ALLOW_LIST, allowed_list=[self.identity.hash]) if self.message_storage_limit != None: limit_str = ", limit is "+RNS.prettysize(self.message_storage_limit) @@ -807,6 +809,18 @@ class LXMRouter: elif remote_identity.hash != self.identity.hash: return LXMPeer.ERROR_NO_ACCESS else: return self.compile_stats() + def peer_sync_request(self, path, data, request_id, remote_identity, requested_at): + if remote_identity == None: return LXMPeer.ERROR_NO_IDENTITY + elif remote_identity.hash != self.identity.hash: return LXMPeer.ERROR_NO_ACCESS + else: + if type(data) != bytes: return LXMPeer.ERROR_INVALID_DATA + elif len(data) != RNS.Identity.TRUNCATED_HASHLENGTH//8: return LXMPeer.ERROR_INVALID_DATA + else: + if not data in self.peers: return LXMPeer.ERROR_NOT_FOUND + else: + self.peers[data].sync() + return True + ### Utility & Maintenance ############################# ####################################################### @@ -1364,12 +1378,8 @@ class LXMRouter: return True def message_get_request(self, path, data, request_id, remote_identity, requested_at): - if remote_identity == None: - return LXMPeer.ERROR_NO_IDENTITY - - elif not self.identity_allowed(remote_identity): - return LXMPeer.ERROR_NO_ACCESS - + if remote_identity == None: return LXMPeer.ERROR_NO_IDENTITY + elif not self.identity_allowed(remote_identity): return LXMPeer.ERROR_NO_ACCESS else: try: remote_destination = RNS.Destination(remote_identity, RNS.Destination.OUT, RNS.Destination.SINGLE, APP_NAME, "delivery") @@ -1388,9 +1398,7 @@ class LXMRouter: 
available_messages.sort(key=lambda e: e[1], reverse=False) transient_ids = [] - for available_entry in available_messages: - transient_ids.append(available_entry[0]) - + for available_entry in available_messages: transient_ids.append(available_entry[0]) return transient_ids else: @@ -1416,8 +1424,7 @@ class LXMRouter: try: client_transfer_limit = float(data[2])*1000 RNS.log("Client indicates transfer limit of "+RNS.prettysize(client_transfer_limit), RNS.LOG_DEBUG) - except: - pass + except: pass per_message_overhead = 16 # Really only 2 bytes, but set a bit higher for now cumulative_size = 24 # Initialised to highest reasonable binary structure overhead @@ -1434,10 +1441,9 @@ class LXMRouter: lxm_size = len(lxmf_data) next_size = cumulative_size + (lxm_size+per_message_overhead) - if client_transfer_limit != None and next_size > client_transfer_limit: - pass + if client_transfer_limit != None and next_size > client_transfer_limit: pass else: - response_messages.append(lxmf_data) + response_messages.append(lxmf_data[:-LXStamper.STAMP_SIZE]) cumulative_size += (lxm_size+per_message_overhead) except Exception as e: @@ -1446,7 +1452,6 @@ class LXMRouter: self.client_propagation_messages_served += len(response_messages) return response_messages - except Exception as e: RNS.log("Error occurred while generating response for download request, the contained exception was: "+str(e), RNS.LOG_DEBUG) return None @@ -2061,7 +2066,8 @@ class LXMRouter: for validated_entry in validated_messages: lxmf_data = validated_entry[1] stamp_value = validated_entry[2] - self.lxmf_propagation(lxmf_data, stamp_value=stamp_value) + stamp_data = validated_entry[3] + self.lxmf_propagation(lxmf_data, stamp_value=stamp_value, stamp_data=stamp_data) self.client_propagation_messages_received += 1 if len(validated_messages) == len(messages): packet.prove() @@ -2090,7 +2096,7 @@ class LXMRouter: try: if type(data) != list and len(data) < 2: return LXMPeer.ERROR_INVALID_DATA - peering_id = self.identity.hash+remote_identity + peering_id = self.identity.hash+remote_identity.hash target_cost = self.peering_cost peering_key = data[0] transient_ids = data[1] @@ -2115,6 +2121,7 @@ class LXMRouter: except Exception as e: RNS.log("Error occurred while generating response for sync request, the contained exception was: "+str(e), RNS.LOG_DEBUG) + RNS.trace_exception(e) return None def propagation_resource_concluded(self, resource): @@ -2158,15 +2165,18 @@ class LXMRouter: self.peer(remote_hash, remote_timebase, remote_transfer_limit, remote_sync_limit, remote_stamp_cost, remote_stamp_flex, remote_peering_cost, remote_metadata) ms = "" if len(messages) == 1 else "s" - RNS.log(f"Received {len(messages)} message{ms} from {remote_str}", RNS.LOG_VERBOSE) + RNS.log(f"Received {len(messages)} message{ms} from {remote_str}, validating stamps...", RNS.LOG_VERBOSE) min_accepted_cost = max(0, self.propagation_stamp_cost-self.propagation_stamp_cost_flexibility) validated_messages = LXStamper.validate_pn_stamps(messages, min_accepted_cost) + if len(validated_messages) == len(messages): RNS.log(f"All message stamps validated from {remote_str}", RNS.LOG_VERBOSE) + else: RNS.log(f"Transfer from {remote_str} contained {len(messages)-len(validated_messages)} invalid stamps", RNS.LOG_WARNING) for validated_entry in validated_messages: transient_id = validated_entry[0] lxmf_data = validated_entry[1] stamp_value = validated_entry[2] + stamp_data = validated_entry[3] peer = None if remote_hash != None and remote_hash in self.peers: @@ -2180,7 +2190,7 @@ 
class LXMRouter: else: self.client_propagation_messages_received += 1 - self.lxmf_propagation(lxmf_data, from_peer=peer, stamp_value=stamp_value) + self.lxmf_propagation(lxmf_data, from_peer=peer, stamp_value=stamp_value, stamp_data=stamp_data) if peer != None: peer.queue_handled_message(transient_id) else: @@ -2208,7 +2218,8 @@ class LXMRouter: if peer != from_peer: peer.queue_unhandled_message(transient_id) - def lxmf_propagation(self, lxmf_data, signal_local_delivery=None, signal_duplicate=None, allow_duplicate=False, is_paper_message=False, from_peer=None, stamp_value=None): + def lxmf_propagation(self, lxmf_data, signal_local_delivery=None, signal_duplicate=None, allow_duplicate=False, is_paper_message=False, + from_peer=None, stamp_value=None, stamp_data=None): if is_paper_message: no_stamp_enforcement = True else: no_stamp_enforcement = False @@ -2236,13 +2247,14 @@ class LXMRouter: else: if self.propagation_node: + stamped_data = lxmf_data+stamp_data value_component = f"_{stamp_value}" if stamp_value and stamp_value > 0 else "" file_path = f"{self.messagepath}/{RNS.hexrep(transient_id, delimit=False)}_{received}{value_component}" msg_file = open(file_path, "wb") - msg_file.write(lxmf_data); msg_file.close() + msg_file.write(stamped_data); msg_file.close() RNS.log(f"Received propagated LXMF message {RNS.prettyhexrep(transient_id)} with stamp value {stamp_value}, adding to peer distribution queues...", RNS.LOG_EXTREME) - self.propagation_entries[transient_id] = [destination_hash, file_path, time.time(), len(lxmf_data), [], [], stamp_value] + self.propagation_entries[transient_id] = [destination_hash, file_path, time.time(), len(stamped_data), [], [], stamp_value] self.enqueue_peer_distribution(transient_id, from_peer) else: @@ -2328,8 +2340,10 @@ class LXMRouter: return - if selected_lxm.stamp == None: stamp_generation_success = False - else: stamp_generation_success = True + if selected_lxm.defer_stamp: + if selected_lxm.stamp == None: stamp_generation_success = False + else: stamp_generation_success = True + else: stamp_generation_success = True if selected_lxm.desired_method == LXMessage.PROPAGATED: if selected_lxm.propagation_stamp == None: propagation_stamp_generation_success = False diff --git a/LXMF/LXStamper.py b/LXMF/LXStamper.py index de71101..8ebefd7 100644 --- a/LXMF/LXStamper.py +++ b/LXMF/LXStamper.py @@ -52,23 +52,23 @@ def validate_peering_key(peering_id, peering_key, target_cost): def validate_pn_stamp(transient_data, target_cost): from .LXMessage import LXMessage - if len(transient_data) <= LXMessage.LXMF_OVERHEAD+STAMP_SIZE: return None, None, None + if len(transient_data) <= LXMessage.LXMF_OVERHEAD+STAMP_SIZE: return None, None, None, None else: lxm_data = transient_data[:-STAMP_SIZE] stamp = transient_data[-STAMP_SIZE:] transient_id = RNS.Identity.full_hash(lxm_data) workblock = stamp_workblock(transient_id, expand_rounds=WORKBLOCK_EXPAND_ROUNDS_PN) - if not stamp_valid(stamp, target_cost, workblock): return None, None, None + if not stamp_valid(stamp, target_cost, workblock): return None, None, None, None else: value = stamp_value(workblock, stamp) - return transient_id, lxm_data, value + return transient_id, lxm_data, value, stamp def validate_pn_stamps_job_simple(transient_list, target_cost): validated_messages = [] for transient_data in transient_list: - transient_id, lxm_data, value = validate_pn_stamp(transient_data, target_cost) - if transient_id: validated_messages.append([transient_id, lxm_data, value]) + transient_id, lxm_data, value, stamp_data = 
validate_pn_stamp(transient_data, target_cost) + if transient_id: validated_messages.append([transient_id, lxm_data, value, stamp_data]) return validated_messages From b35b9213a6b92f9b54bf2defc31ca0fa04e74df1 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sat, 1 Nov 2025 01:35:15 +0100 Subject: [PATCH 086/123] Implemented throttling for naughty propagation node peers --- LXMF/LXMPeer.py | 8 +++++++- LXMF/LXMRouter.py | 45 ++++++++++++++++++++++++++++++++++----------- 2 files changed, 41 insertions(+), 12 deletions(-) diff --git a/LXMF/LXMPeer.py b/LXMF/LXMPeer.py index 786514d..0dbf8ce 100644 --- a/LXMF/LXMPeer.py +++ b/LXMF/LXMPeer.py @@ -286,7 +286,7 @@ class LXMPeer: threading.Thread(target=job, daemon=True).start() delay = self.next_sync_attempt-time.time() - postpone_delay = " for {RNS.prettytime({delay})}" if delay > 0 else "" + postpone_delay = f" for {RNS.prettytime(delay)}" if delay > 0 else "" RNS.log(f"Postponing sync with peer {RNS.prettyhexrep(self.destination_hash)}{postpone_delay}{postpone_reason}", RNS.LOG_DEBUG) except Exception as e: RNS.trace_exception(e) @@ -414,6 +414,12 @@ class LXMPeer: self.router.unpeer(self.destination_hash) return + elif response == LXMPeer.ERROR_THROTTLED: + throttle_time = self.router.PN_STAMP_THROTTLE + RNS.log(f"Remote indicated that we're throttled, postponing sync for {RNS.prettytime(throttle_time)}", RNS.LOG_VERBOSE) + self.next_sync_attempt = time.time()+throttle_time + return + elif response == False: # Peer already has all advertised messages for transient_id in self.last_offer: diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index c7c6051..f25c8f0 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -56,6 +56,7 @@ class LXMRouter: DELIVERY_LIMIT = 1000 PR_PATH_TIMEOUT = 10 + PN_STAMP_THROTTLE = 180 PR_IDLE = 0x00 PR_PATH_REQUESTED = 0x01 @@ -135,6 +136,7 @@ class LXMRouter: self.enforce_ratchets = enforce_ratchets self._enforce_stamps = enforce_stamps self.pending_deferred_stamps = {} + self.throttled_peers = {} if sync_limit == None or self.propagation_per_sync_limit < self.propagation_per_transfer_limit: self.propagation_per_sync_limit = self.propagation_per_transfer_limit @@ -850,20 +852,17 @@ class LXMRouter: self.clean_transient_id_caches() if self.processing_count % LXMRouter.JOB_STORE_INTERVAL == 0: - if self.propagation_node == True: - self.clean_message_store() + if self.propagation_node == True: self.clean_message_store() if self.processing_count % LXMRouter.JOB_PEERINGEST_INTERVAL == 0: - if self.propagation_node == True: - self.flush_queues() + if self.propagation_node == True: self.flush_queues() if self.processing_count % LXMRouter.JOB_ROTATE_INTERVAL == 0: - if self.propagation_node == True: - self.rotate_peers() + if self.propagation_node == True: self.rotate_peers() if self.processing_count % LXMRouter.JOB_PEERSYNC_INTERVAL == 0: - if self.propagation_node == True: - self.sync_peers() + if self.propagation_node == True: self.sync_peers() + self.clean_throttled_peers() def jobloop(self): while (True): @@ -1060,6 +1059,14 @@ class LXMRouter: else: return available_tickets + def clean_throttled_peers(self): + expired_entries = [] + now = time.time() + for peer_hash in self.throttled_peers: + if now > self.throttled_peers[peer_hash]: expired_entries.append(peer_hash) + + for peer_hash in expired_entries: self.throttled_peers.pop(peer_hash) + def clean_message_store(self): RNS.log("Cleaning message store", RNS.LOG_VERBOSE) # Check and remove expired messages @@ -2085,10 +2092,18 @@ class LXMRouter: if 
remote_identity == None: return LXMPeer.ERROR_NO_IDENTITY else: + remote_destination = RNS.Destination(remote_identity, RNS.Destination.OUT, RNS.Destination.SINGLE, APP_NAME, "propagation") + remote_hash = remote_destination.hash + remote_str = RNS.prettyhexrep(remote_hash) + + if remote_hash in self.throttled_peers: + throttle_remaining = self.throttled_peers[remote_hash]-time.time() + if throttle_remaining > 0: + RNS.log(f"Propagation offer from node {remote_str} rejected, throttled for {RNS.prettytime(throttle_remaining)} more", RNS.LOG_NOTICE) + return LXMPeer.ERROR_THROTTLED + else: self.throttled_peers.pop(remote_hash) + if self.from_static_only: - remote_destination = RNS.Destination(remote_identity, RNS.Destination.OUT, RNS.Destination.SINGLE, APP_NAME, "propagation") - remote_hash = remote_destination.hash - remote_str = RNS.prettyhexrep(remote_hash) if not remote_hash in self.static_peers: RNS.log(f"Rejecting propagation request from {remote_str} not in static peers list", RNS.LOG_DEBUG) return LXMPeer.ERROR_NO_ACCESS @@ -2193,6 +2208,14 @@ class LXMRouter: self.lxmf_propagation(lxmf_data, from_peer=peer, stamp_value=stamp_value, stamp_data=stamp_data) if peer != None: peer.queue_handled_message(transient_id) + invalid_message_count = len(messages) - len(validated_messages) + if invalid_message_count > 0: + resource.link.teardown() + throttle_time = LXMRouter.PN_STAMP_THROTTLE + self.throttled_peers[remote_hash] = time.time()+throttle_time + ms = "" if invalid_message_count == 1 else "s" + RNS.log(f"Propagation transfer from {remote_str} contained {invalid_message_count} message{ms} with invalid stamps, throttled for {RNS.prettytime(throttle_time)}", RNS.LOG_NOTICE) + else: RNS.log("Invalid data structure received at propagation destination, ignoring", RNS.LOG_DEBUG) From 0cebd5886d54b12b412a530f225e7a11862bd16c Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sat, 1 Nov 2025 12:49:32 +0100 Subject: [PATCH 087/123] Allow specifying remote lxmd destination for status and control --- LXMF/Utilities/lxmd.py | 219 ++++++++++++++++++++--------------------- 1 file changed, 105 insertions(+), 114 deletions(-) diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index 88f08f7..b4599df 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -467,14 +467,14 @@ def deferred_start_jobs(): last_node_announce = time.time() threading.Thread(target=jobs, daemon=True).start() -def _request_sync(identity, destination_hash, timeout=5, exit_on_fail=False): - control_destination = RNS.Destination(identity, RNS.Destination.OUT, RNS.Destination.SINGLE, APP_NAME, "propagation", "control") +def _request_sync(identity, destination_hash, remote_identity, timeout=15, exit_on_fail=False): + control_destination = RNS.Destination(remote_identity, RNS.Destination.OUT, RNS.Destination.SINGLE, APP_NAME, "propagation", "control") timeout = time.time()+timeout def check_timeout(): if time.time() > timeout: if exit_on_fail: - RNS.log("Requesting lxmd peer sync timed out, exiting now", RNS.LOG_ERROR) + print("Requesting lxmd peer sync timed out, exiting now") exit(200) else: return LXMF.LXMPeer.LXMPeer.ERROR_TIMEOUT @@ -504,58 +504,20 @@ def _request_sync(identity, destination_hash, timeout=5, exit_on_fail=False): link.teardown() return request_receipt.get_response() -def request_sync(target, configdir = None, rnsconfigdir = None, verbosity = 0, quietness = 0, timeout=15, identity_path=None): + +def request_sync(target, remote=None, configdir=None, rnsconfigdir=None, verbosity=0, quietness=0, 
timeout=15, identity_path=None): global configpath, identitypath, storagedir, lxmdir global lxmd_config, active_configuration, targetloglevel - targetlogdest = RNS.LOG_STDOUT - - if identity_path == None: - if configdir == None: - if os.path.isdir("/etc/lxmd") and os.path.isfile("/etc/lxmd/config"): - configdir = "/etc/lxmd" - elif os.path.isdir(RNS.Reticulum.userdir+"/.config/lxmd") and os.path.isfile(Reticulum.userdir+"/.config/lxmd/config"): - configdir = RNS.Reticulum.userdir+"/.config/lxmd" - else: - configdir = RNS.Reticulum.userdir+"/.lxmd" - - configpath = configdir+"/config" - identitypath = configdir+"/identity" - identity = None - - if not os.path.isdir(configdir): - RNS.log("Specified configuration directory does not exist, exiting now", RNS.LOG_ERROR) - exit(201) - if not os.path.isfile(identitypath): - RNS.log("Identity file not found in specified configuration directory, exiting now", RNS.LOG_ERROR) - exit(202) - else: - identity = RNS.Identity.from_file(identitypath) - if identity == None: - RNS.log("Could not load the Primary Identity from "+identitypath, RNS.LOG_ERROR) - exit(4) - - else: - if not os.path.isfile(identity_path): - RNS.log("Identity file not found in specified configuration directory, exiting now", RNS.LOG_ERROR) - exit(202) - else: - identity = RNS.Identity.from_file(identity_path) - if identity == None: - RNS.log("Could not load the Primary Identity from "+identity_path, RNS.LOG_ERROR) - exit(4) - - if targetloglevel == None: targetloglevel = 3 - if verbosity != 0 or quietness != 0: targetloglevel = targetloglevel+verbosity-quietness try: - destination_hash = bytes.fromhex(target) - if len(destination_hash) != RNS.Identity.TRUNCATED_HASHLENGTH//8: raise ValueError(f"Destination hash length must be {RNS.Identity.TRUNCATED_HASHLENGTH//8*2} characters") + peer_destination_hash = bytes.fromhex(target) + if len(peer_destination_hash) != RNS.Identity.TRUNCATED_HASHLENGTH//8: raise ValueError(f"Destination hash length must be {RNS.Identity.TRUNCATED_HASHLENGTH//8*2} characters") except Exception as e: print(f"Invalid peer destination hash: {e}") exit(203) - - reticulum = RNS.Reticulum(configdir=rnsconfigdir, loglevel=targetloglevel, logdest=targetlogdest) - response = _request_sync(identity, destination_hash, timeout=timeout, exit_on_fail=True) + remote + _remote_init(configdir, rnsconfigdir, verbosity, quietness, identity_path) + response = _request_sync(identity, peer_destination_hash, remote_identity=_get_target_identity(remote), timeout=timeout, exit_on_fail=True) if response == LXMF.LXMPeer.LXMPeer.ERROR_NO_IDENTITY: print("Remote received no identity") @@ -570,99 +532,53 @@ def request_sync(target, configdir = None, rnsconfigdir = None, verbosity = 0, q print("The requested peer was not found") exit(206) else: - print(f"Sync requested for peer {RNS.prettyhexrep(destination_hash)}") + print(f"Sync requested for peer {RNS.prettyhexrep(peer_destination_hash)}") exit(0) -def query_status(identity, timeout=5, exit_on_fail=False): - control_destination = RNS.Destination(identity, RNS.Destination.OUT, RNS.Destination.SINGLE, APP_NAME, "propagation", "control") +def query_status(identity, remote_identity, timeout=5, exit_on_fail=False): + control_destination = RNS.Destination(remote_identity, RNS.Destination.OUT, RNS.Destination.SINGLE, APP_NAME, "propagation", "control") timeout = time.time()+timeout def check_timeout(): if time.time() > timeout: if exit_on_fail: - RNS.log("Getting lxmd statistics timed out, exiting now", RNS.LOG_ERROR) + print("Getting lxmd 
statistics timed out, exiting now") exit(200) - else: - return LXMF.LXMPeer.LXMPeer.ERROR_TIMEOUT - else: - time.sleep(0.1) + else: return LXMF.LXMPeer.LXMPeer.ERROR_TIMEOUT + else: time.sleep(0.1) if not RNS.Transport.has_path(control_destination.hash): RNS.Transport.request_path(control_destination.hash) while not RNS.Transport.has_path(control_destination.hash): tc = check_timeout() - if tc: - return tc + if tc: return tc link = RNS.Link(control_destination) while not link.status == RNS.Link.ACTIVE: tc = check_timeout() - if tc: - return tc + if tc: return tc link.identify(identity) request_receipt = link.request(LXMF.LXMRouter.STATS_GET_PATH, data=None, response_callback=None, failed_callback=None) while not request_receipt.get_status() == RNS.RequestReceipt.READY: tc = check_timeout() - if tc: - return tc + if tc: return tc link.teardown() return request_receipt.get_response() -def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = 0, timeout=5, show_status=False, show_peers=False, identity_path=None): - global configpath, identitypath, storagedir, lxmdir - global lxmd_config, active_configuration, targetloglevel - targetlogdest = RNS.LOG_STDOUT +def get_status(remote=None, configdir=None, rnsconfigdir=None, verbosity=0, quietness=0, timeout=5, + show_status=False, show_peers=False, identity_path=None): - if identity_path == None: - if configdir == None: - if os.path.isdir("/etc/lxmd") and os.path.isfile("/etc/lxmd/config"): - configdir = "/etc/lxmd" - elif os.path.isdir(RNS.Reticulum.userdir+"/.config/lxmd") and os.path.isfile(Reticulum.userdir+"/.config/lxmd/config"): - configdir = RNS.Reticulum.userdir+"/.config/lxmd" - else: - configdir = RNS.Reticulum.userdir+"/.lxmd" - - configpath = configdir+"/config" - identitypath = configdir+"/identity" - identity = None - - if not os.path.isdir(configdir): - RNS.log("Specified configuration directory does not exist, exiting now", RNS.LOG_ERROR) - exit(201) - if not os.path.isfile(identitypath): - RNS.log("Identity file not found in specified configuration directory, exiting now", RNS.LOG_ERROR) - exit(202) - else: - identity = RNS.Identity.from_file(identitypath) - if identity == None: - RNS.log("Could not load the Primary Identity from "+identitypath, RNS.LOG_ERROR) - exit(4) - - else: - if not os.path.isfile(identity_path): - RNS.log("Identity file not found in specified configuration directory, exiting now", RNS.LOG_ERROR) - exit(202) - else: - identity = RNS.Identity.from_file(identity_path) - if identity == None: - RNS.log("Could not load the Primary Identity from "+identity_path, RNS.LOG_ERROR) - exit(4) - - if targetloglevel == None: - targetloglevel = 3 - if verbosity != 0 or quietness != 0: - targetloglevel = targetloglevel+verbosity-quietness - - reticulum = RNS.Reticulum(configdir=rnsconfigdir, loglevel=targetloglevel, logdest=targetlogdest) - response = query_status(identity, timeout=timeout, exit_on_fail=True) + global identity + _remote_init(configdir, rnsconfigdir, verbosity, quietness, identity_path) + response = query_status(identity, remote_identity=_get_target_identity(remote), timeout=timeout, exit_on_fail=True) if response == LXMF.LXMPeer.LXMPeer.ERROR_NO_IDENTITY: - RNS.log("Remote received no identity") + print("Remote received no identity") exit(203) if response == LXMF.LXMPeer.LXMPeer.ERROR_NO_ACCESS: - RNS.log("Access denied") + print("Access denied") exit(204) else: s = response @@ -776,6 +692,76 @@ def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = 
print(f"{ind*2}Sync state : {pmuh} unhandled message{ms}, {ls}") print("") +def _get_target_identity(remote=None, timeout=5): + global identity + timeout = time.time()+timeout + def check_timeout(): + if time.time() > timeout: + print("Resolving remote identity timed out, exiting now") + exit(200) + else: time.sleep(0.1) + + if remote == None: return identity + else: + try: + destination_hash = bytes.fromhex(remote) + if len(destination_hash) != RNS.Identity.TRUNCATED_HASHLENGTH//8: raise ValueError(f"Destination hash length must be {RNS.Identity.TRUNCATED_HASHLENGTH//8*2} characters") + except Exception as e: + print(f"Invalid remote destination hash: {e}") + exit(203) + + remote_identity = RNS.Identity.recall(destination_hash) + if remote_identity: return remote_identity + else: + if not RNS.Transport.has_path(destination_hash): + RNS.Transport.request_path(destination_hash) + while not RNS.Transport.has_path(destination_hash): + tc = check_timeout() + if tc: return tc + + return RNS.Identity.recall(destination_hash) + +def _remote_init(configdir=None, rnsconfigdir=None, verbosity=0, quietness=0, identity_path=None): + global configpath, identitypath, storagedir, lxmdir, identity + global lxmd_config, active_configuration, targetloglevel + targetlogdest = RNS.LOG_STDOUT + + if identity_path == None: + if configdir == None: + if os.path.isdir("/etc/lxmd") and os.path.isfile("/etc/lxmd/config"): configdir = "/etc/lxmd" + elif os.path.isdir(RNS.Reticulum.userdir+"/.config/lxmd") and os.path.isfile(Reticulum.userdir+"/.config/lxmd/config"): configdir = RNS.Reticulum.userdir+"/.config/lxmd" + else: configdir = RNS.Reticulum.userdir+"/.lxmd" + + configpath = configdir+"/config" + identitypath = configdir+"/identity" + identity = None + + if not os.path.isdir(configdir): + RNS.log("Specified configuration directory does not exist, exiting now", RNS.LOG_ERROR) + exit(201) + if not os.path.isfile(identitypath): + RNS.log("Identity file not found in specified configuration directory, exiting now", RNS.LOG_ERROR) + exit(202) + else: + identity = RNS.Identity.from_file(identitypath) + if identity == None: + RNS.log("Could not load the Primary Identity from "+identitypath, RNS.LOG_ERROR) + exit(4) + + else: + if not os.path.isfile(identity_path): + RNS.log("Identity file not found in specified configuration directory, exiting now", RNS.LOG_ERROR) + exit(202) + else: + identity = RNS.Identity.from_file(identity_path) + if identity == None: + RNS.log("Could not load the Primary Identity from "+identity_path, RNS.LOG_ERROR) + exit(4) + + if targetloglevel == None: targetloglevel = 3 + if verbosity != 0 or quietness != 0: targetloglevel = targetloglevel+verbosity-quietness + + reticulum = RNS.Reticulum(configdir=rnsconfigdir, loglevel=targetloglevel, logdest=targetlogdest) def main(): try: @@ -790,8 +776,9 @@ def main(): parser.add_argument("--status", action="store_true", default=False, help="display node status") parser.add_argument("--peers", action="store_true", default=False, help="display peered nodes") parser.add_argument("--sync", action="store", default=None, help="request a sync with the specified peer", type=str) - parser.add_argument("--timeout", action="store", default=5, help="timeout in seconds for query operations", type=float) - parser.add_argument("--identity", action="store", default=None, help="path to identity used for query request", type=str) + parser.add_argument("--timeout", action="store", default=None, help="timeout in seconds for query operations", type=float) + 
parser.add_argument("-r", "--remote", action="store", default=None, help="remote propagation node destination hash", type=str) + parser.add_argument("--identity", action="store", default=None, help="path to identity used for remote requests", type=str) parser.add_argument("--exampleconfig", action="store_true", default=False, help="print verbose configuration example to stdout and exit") parser.add_argument("--version", action="version", version="lxmd {version}".format(version=__version__)) @@ -802,6 +789,7 @@ def main(): exit() if args.status or args.peers: + if not args.timeout: args.timeout = 5 get_status(configdir = args.config, rnsconfigdir=args.rnsconfig, verbosity=args.verbose, @@ -809,17 +797,20 @@ def main(): timeout=args.timeout, show_status=args.status, show_peers=args.peers, - identity_path=args.identity) + identity_path=args.identity, + remote=args.remote) exit() if args.sync: + if not args.timeout: args.timeout = 10 request_sync(target=args.sync, configdir = args.config, rnsconfigdir=args.rnsconfig, verbosity=args.verbose, quietness=args.quiet, timeout=args.timeout, - identity_path=args.identity) + identity_path=args.identity, + remote=args.remote) program_setup(configdir = args.config, rnsconfigdir=args.rnsconfig, From fa9fd2ae013e39bc604d68d1e516a652b5c66916 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sat, 1 Nov 2025 13:10:28 +0100 Subject: [PATCH 088/123] Added remote status and control by allow-list for lxmd --- LXMF/LXMRouter.py | 28 ++++++++++++++++++++-------- LXMF/Utilities/lxmd.py | 24 +++++++++++++++++++----- 2 files changed, 39 insertions(+), 13 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index f25c8f0..4f033b7 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -101,6 +101,7 @@ class LXMRouter: self.prioritised_list = [] self.ignored_list = [] self.allowed_list = [] + self.control_allowed_list = [] self.auth_required = False self.retain_synced_on_node = False @@ -450,6 +451,16 @@ class LXMRouter: else: raise ValueError("Disallowed identity hash must be "+str(RNS.Identity.TRUNCATED_HASHLENGTH//8)+" bytes") + def allow_control(self, identity_hash=None): + if isinstance(identity_hash, bytes) and len(identity_hash) == RNS.Identity.TRUNCATED_HASHLENGTH//8: + if not identity_hash in self.control_allowed_list: self.control_allowed_list.append(identity_hash) + else: raise ValueError("Allowed identity hash must be "+str(RNS.Identity.TRUNCATED_HASHLENGTH//8)+" bytes") + + def disallow_control(self, identity_hash=None): + if isinstance(identity_hash, bytes) and len(identity_hash) == RNS.Identity.TRUNCATED_HASHLENGTH//8: + if identity_hash in self.control_allowed_list: self.control_allowed_list.pop(identity_hash) + else: raise ValueError("Disallowed identity hash must be "+str(RNS.Identity.TRUNCATED_HASHLENGTH//8)+" bytes") + def prioritise(self, destination_hash=None): if isinstance(destination_hash, bytes) and len(destination_hash) == RNS.Reticulum.TRUNCATED_HASHLENGTH//8: if not destination_hash in self.prioritised_list: @@ -628,9 +639,10 @@ class LXMRouter: self.propagation_destination.register_request_handler(LXMPeer.OFFER_REQUEST_PATH, self.offer_request, allow = RNS.Destination.ALLOW_ALL) self.propagation_destination.register_request_handler(LXMPeer.MESSAGE_GET_PATH, self.message_get_request, allow = RNS.Destination.ALLOW_ALL) - self.control_destination = RNS.Destination(self.identity, RNS.Destination.IN, RNS.Destination.SINGLE, APP_NAME, "propagation", "control") - 
self.control_destination.register_request_handler(LXMRouter.STATS_GET_PATH, self.stats_get_request, allow = RNS.Destination.ALLOW_LIST, allowed_list=[self.identity.hash]) - self.control_destination.register_request_handler(LXMRouter.SYNC_REQUEST_PATH, self.peer_sync_request, allow = RNS.Destination.ALLOW_LIST, allowed_list=[self.identity.hash]) + self.control_allowed_list = [self.identity.hash] + self.control_destination = RNS.Destination(self.identity, RNS.Destination.IN, RNS.Destination.SINGLE, APP_NAME, "propagation", "control") + self.control_destination.register_request_handler(LXMRouter.STATS_GET_PATH, self.stats_get_request, allow = RNS.Destination.ALLOW_LIST, allowed_list=self.control_allowed_list) + self.control_destination.register_request_handler(LXMRouter.SYNC_REQUEST_PATH, self.peer_sync_request, allow = RNS.Destination.ALLOW_LIST, allowed_list=self.control_allowed_list) if self.message_storage_limit != None: limit_str = ", limit is "+RNS.prettysize(self.message_storage_limit) @@ -807,13 +819,13 @@ class LXMRouter: return node_stats def stats_get_request(self, path, data, request_id, remote_identity, requested_at): - if remote_identity == None: return LXMPeer.ERROR_NO_IDENTITY - elif remote_identity.hash != self.identity.hash: return LXMPeer.ERROR_NO_ACCESS - else: return self.compile_stats() + if remote_identity == None: return LXMPeer.ERROR_NO_IDENTITY + elif remote_identity.hash not in self.control_allowed_list: return LXMPeer.ERROR_NO_ACCESS + else: return self.compile_stats() def peer_sync_request(self, path, data, request_id, remote_identity, requested_at): - if remote_identity == None: return LXMPeer.ERROR_NO_IDENTITY - elif remote_identity.hash != self.identity.hash: return LXMPeer.ERROR_NO_ACCESS + if remote_identity == None: return LXMPeer.ERROR_NO_IDENTITY + elif remote_identity.hash not in self.control_allowed_list: return LXMPeer.ERROR_NO_ACCESS else: if type(data) != bytes: return LXMPeer.ERROR_INVALID_DATA elif len(data) != RNS.Identity.TRUNCATED_HASHLENGTH//8: return LXMPeer.ERROR_INVALID_DATA diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index b4599df..1d95fd7 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -187,6 +187,11 @@ def apply_config(): active_configuration["prioritised_lxmf_destinations"] = lxmd_config["propagation"].as_list("prioritise_destinations") else: active_configuration["prioritised_lxmf_destinations"] = [] + + if "propagation" in lxmd_config and "control_allowed" in lxmd_config["propagation"]: + active_configuration["control_allowed_identities"] = lxmd_config["propagation"].as_list("control_allowed") + else: + active_configuration["control_allowed_identities"] = [] if "propagation" in lxmd_config and "static_peers" in lxmd_config["propagation"]: static_peers = lxmd_config["propagation"].as_list("static_peers") @@ -410,13 +415,16 @@ def program_setup(configdir = None, rnsconfigdir = None, run_pn = False, on_inbo for dest_str in active_configuration["prioritised_lxmf_destinations"]: try: dest_hash = bytes.fromhex(dest_str) - if len(dest_hash) == RNS.Reticulum.TRUNCATED_HASHLENGTH//8: - message_router.prioritise(dest_hash) - - except Exception as e: - RNS.log("Cannot prioritise "+str(dest_str)+", it is not a valid destination hash", RNS.LOG_ERROR) + if len(dest_hash) == RNS.Reticulum.TRUNCATED_HASHLENGTH//8: message_router.prioritise(dest_hash) + except Exception as e: RNS.log("Cannot prioritise "+str(dest_str)+", it is not a valid destination hash", RNS.LOG_ERROR) message_router.enable_propagation() + + for 
ident_str in active_configuration["control_allowed_identities"]: + try: + identity_hash = bytes.fromhex(ident_str) + if len(identity_hash) == RNS.Reticulum.TRUNCATED_HASHLENGTH//8: message_router.allow_control(identity_hash) + except Exception as e: RNS.log(f"Cannot allow control from {ident_str}, it is not a valid identity hash", RNS.LOG_ERROR) RNS.log("LXMF Propagation Node started on "+RNS.prettyhexrep(message_router.propagation_destination.hash)) @@ -834,6 +842,12 @@ __default_lxmd_config__ = """# This is an example LXM Daemon config file. enable_node = no +# You can specify identity hashes for remotes +# that are allowed to control and query status +# for this propagation node. + +# control_allowed = 7d7e542829b40f32364499b27438dba8, 437229f8e29598b2282b88bad5e44698 + # An optional name for this node, included # in announces. From 9dc998f149780b3b9acc0c791a759b6caaf09675 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sat, 1 Nov 2025 17:32:45 +0100 Subject: [PATCH 089/123] Added peering break option to lxmd command line interface --- LXMF/LXMRouter.py | 18 +++++++- LXMF/Utilities/lxmd.py | 93 +++++++++++++++++++++++++++++++++++++++++- 2 files changed, 108 insertions(+), 3 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 4f033b7..3e29938 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -39,7 +39,7 @@ class LXMRouter: NODE_ANNOUNCE_DELAY = 20 - MAX_PEERS = 50 + MAX_PEERS = 20 AUTOPEER = True AUTOPEER_MAXDEPTH = 4 FASTEST_N_RANDOM_POOL = 2 @@ -77,6 +77,7 @@ class LXMRouter: STATS_GET_PATH = "/pn/get/stats" SYNC_REQUEST_PATH = "/pn/peer/sync" + UNPEER_REQUEST_PATH = "/pn/peer/unpeer" ### Developer-facing API ############################## @@ -643,6 +644,7 @@ class LXMRouter: self.control_destination = RNS.Destination(self.identity, RNS.Destination.IN, RNS.Destination.SINGLE, APP_NAME, "propagation", "control") self.control_destination.register_request_handler(LXMRouter.STATS_GET_PATH, self.stats_get_request, allow = RNS.Destination.ALLOW_LIST, allowed_list=self.control_allowed_list) self.control_destination.register_request_handler(LXMRouter.SYNC_REQUEST_PATH, self.peer_sync_request, allow = RNS.Destination.ALLOW_LIST, allowed_list=self.control_allowed_list) + self.control_destination.register_request_handler(LXMRouter.UNPEER_REQUEST_PATH, self.peer_unpeer_request, allow = RNS.Destination.ALLOW_LIST, allowed_list=self.control_allowed_list) if self.message_storage_limit != None: limit_str = ", limit is "+RNS.prettysize(self.message_storage_limit) @@ -835,6 +837,18 @@ class LXMRouter: self.peers[data].sync() return True + def peer_unpeer_request(self, path, data, request_id, remote_identity, requested_at): + if remote_identity == None: return LXMPeer.ERROR_NO_IDENTITY + elif remote_identity.hash not in self.control_allowed_list: return LXMPeer.ERROR_NO_ACCESS + else: + if type(data) != bytes: return LXMPeer.ERROR_INVALID_DATA + elif len(data) != RNS.Identity.TRUNCATED_HASHLENGTH//8: return LXMPeer.ERROR_INVALID_DATA + else: + if not data in self.peers: return LXMPeer.ERROR_NOT_FOUND + else: + self.unpeer(data) + return True + ### Utility & Maintenance ############################# ####################################################### @@ -1318,6 +1332,8 @@ class LXMRouter: self.propagation_destination.deregister_request_handler(LXMPeer.OFFER_REQUEST_PATH) self.propagation_destination.deregister_request_handler(LXMPeer.MESSAGE_GET_PATH) self.propagation_destination.deregister_request_handler(LXMRouter.STATS_GET_PATH) + 
self.propagation_destination.deregister_request_handler(LXMRouter.SYNC_REQUEST_PATH) + self.propagation_destination.deregister_request_handler(LXMRouter.UNPEER_REQUEST_PATH) for link in self.active_propagation_links: try: if link.status == RNS.Link.ACTIVE: diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index 1d95fd7..5a8c9aa 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -539,10 +539,82 @@ def request_sync(target, remote=None, configdir=None, rnsconfigdir=None, verbosi elif response == LXMF.LXMPeer.LXMPeer.ERROR_NOT_FOUND: print("The requested peer was not found") exit(206) + elif response == None: + print("Empty response received") + exit(207) else: print(f"Sync requested for peer {RNS.prettyhexrep(peer_destination_hash)}") exit(0) +def _request_unpeer(identity, destination_hash, remote_identity, timeout=15, exit_on_fail=False): + control_destination = RNS.Destination(remote_identity, RNS.Destination.OUT, RNS.Destination.SINGLE, APP_NAME, "propagation", "control") + + timeout = time.time()+timeout + def check_timeout(): + if time.time() > timeout: + if exit_on_fail: + print("Requesting lxmd peering break timed out, exiting now") + exit(200) + else: return LXMF.LXMPeer.LXMPeer.ERROR_TIMEOUT + else: time.sleep(0.1) + + if not RNS.Transport.has_path(control_destination.hash): + RNS.Transport.request_path(control_destination.hash) + while not RNS.Transport.has_path(control_destination.hash): + tc = check_timeout() + if tc: + return tc + + link = RNS.Link(control_destination) + while not link.status == RNS.Link.ACTIVE: + tc = check_timeout() + if tc: + return tc + + link.identify(identity) + request_receipt = link.request(LXMF.LXMRouter.UNPEER_REQUEST_PATH, data=destination_hash, response_callback=None, failed_callback=None) + while not request_receipt.get_status() == RNS.RequestReceipt.READY: + tc = check_timeout() + if tc: + return tc + + link.teardown() + return request_receipt.get_response() + + +def request_unpeer(target, remote=None, configdir=None, rnsconfigdir=None, verbosity=0, quietness=0, timeout=15, identity_path=None): + global configpath, identitypath, storagedir, lxmdir + global lxmd_config, active_configuration, targetloglevel + + try: + peer_destination_hash = bytes.fromhex(target) + if len(peer_destination_hash) != RNS.Identity.TRUNCATED_HASHLENGTH//8: raise ValueError(f"Destination hash length must be {RNS.Identity.TRUNCATED_HASHLENGTH//8*2} characters") + except Exception as e: + print(f"Invalid peer destination hash: {e}") + exit(203) + remote + _remote_init(configdir, rnsconfigdir, verbosity, quietness, identity_path) + response = _request_unpeer(identity, peer_destination_hash, remote_identity=_get_target_identity(remote), timeout=timeout, exit_on_fail=True) + + if response == LXMF.LXMPeer.LXMPeer.ERROR_NO_IDENTITY: + print("Remote received no identity") + exit(203) + elif response == LXMF.LXMPeer.LXMPeer.ERROR_NO_ACCESS: + print("Access denied") + exit(204) + elif response == LXMF.LXMPeer.LXMPeer.ERROR_INVALID_DATA: + print("Invalid data received by remote") + exit(205) + elif response == LXMF.LXMPeer.LXMPeer.ERROR_NOT_FOUND: + print("The requested peer was not found") + exit(206) + elif response == None: + print("Empty response received") + exit(207) + else: + print(f"Broke peering with {RNS.prettyhexrep(peer_destination_hash)}") + exit(0) + def query_status(identity, remote_identity, timeout=5, exit_on_fail=False): control_destination = RNS.Destination(remote_identity, RNS.Destination.OUT, RNS.Destination.SINGLE, APP_NAME, 
"propagation", "control") @@ -588,6 +660,9 @@ def get_status(remote=None, configdir=None, rnsconfigdir=None, verbosity=0, quie if response == LXMF.LXMPeer.LXMPeer.ERROR_NO_ACCESS: print("Access denied") exit(204) + elif response == None: + print("Empty response received") + exit(207) else: s = response mutil = round((s["messagestore"]["bytes"]/s["messagestore"]["limit"])*100, 2) @@ -784,6 +859,7 @@ def main(): parser.add_argument("--status", action="store_true", default=False, help="display node status") parser.add_argument("--peers", action="store_true", default=False, help="display peered nodes") parser.add_argument("--sync", action="store", default=None, help="request a sync with the specified peer", type=str) + parser.add_argument("-b", "--break", dest="unpeer", action="store", default=None, help="break peering with the specified peer", type=str) parser.add_argument("--timeout", action="store", default=None, help="timeout in seconds for query operations", type=float) parser.add_argument("-r", "--remote", action="store", default=None, help="remote propagation node destination hash", type=str) parser.add_argument("--identity", action="store", default=None, help="path to identity used for remote requests", type=str) @@ -819,6 +895,19 @@ def main(): timeout=args.timeout, identity_path=args.identity, remote=args.remote) + exit() + + if args.unpeer: + if not args.timeout: args.timeout = 10 + request_unpeer(target=args.unpeer, + configdir = args.config, + rnsconfigdir=args.rnsconfig, + verbosity=args.verbose, + quietness=args.quiet, + timeout=args.timeout, + identity_path=args.identity, + remote=args.remote) + exit() program_setup(configdir = args.config, rnsconfigdir=args.rnsconfig, @@ -944,9 +1033,9 @@ autopeer_maxdepth = 4 # You can configure the maximum number of other # propagation nodes that this node will peer -# with automatically. The default is 50. +# with automatically. The default is 20. 
-# max_peers = 25 +# max_peers = 20 # You can configure a list of static propagation # node peers, that this node will always be From 4350a239e4d7e09eaea5f31aaa1769081c892719 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sat, 1 Nov 2025 17:35:36 +0100 Subject: [PATCH 090/123] Cleanup --- LXMF/LXMRouter.py | 35 ++++++++++------------------------- 1 file changed, 10 insertions(+), 25 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 3e29938..b1676f0 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -713,27 +713,15 @@ class LXMRouter: def set_information_storage_limit(self, kilobytes = None, megabytes = None, gigabytes = None): limit_bytes = 0 - - if kilobytes != None: - limit_bytes += kilobytes*1000 - - if megabytes != None: - limit_bytes += megabytes*1000*1000 - - if gigabytes != None: - limit_bytes += gigabytes*1000*1000*1000 - - if limit_bytes == 0: - limit_bytes = None + if kilobytes != None: limit_bytes += kilobytes*1000 + if megabytes != None: limit_bytes += megabytes*1000*1000 + if gigabytes != None: limit_bytes += gigabytes*1000*1000*1000 + if limit_bytes == 0: limit_bytes = None try: - if limit_bytes == None or int(limit_bytes) > 0: - self.information_storage_limit = int(limit_bytes) - else: - raise ValueError("Cannot set LXMF information storage limit to "+str(limit_bytes)) - - except Exception as e: - raise ValueError("Cannot set LXMF information storage limit to "+str(limit_bytes)) + if limit_bytes == None or int(limit_bytes) > 0: self.information_storage_limit = int(limit_bytes) + else: raise ValueError("Cannot set LXMF information storage limit to "+str(limit_bytes)) + except Exception as e: raise ValueError("Cannot set LXMF information storage limit to "+str(limit_bytes)) def information_storage_limit(self): return self.information_storage_limit @@ -742,18 +730,15 @@ class LXMRouter: pass def delivery_link_available(self, destination_hash): - if destination_hash in self.direct_links or destination_hash in self.backchannel_links: - return True - else: - return False + if destination_hash in self.direct_links or destination_hash in self.backchannel_links: return True + else: return False ### Propagation Node Control ########################## ####################################################### def compile_stats(self): - if not self.propagation_node: - return None + if not self.propagation_node: return None else: peer_stats = {} for peer_id in self.peers.copy(): From 401328fa1689a130a017dcc8d2cbadf438d9bb71 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sat, 1 Nov 2025 18:27:11 +0100 Subject: [PATCH 091/123] Allow configuring max remote peering cost --- LXMF/LXMF.py | 2 +- LXMF/LXMRouter.py | 5 ++--- LXMF/Utilities/lxmd.py | 6 ++++-- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/LXMF/LXMF.py b/LXMF/LXMF.py index dbc0ab1..ede9c3a 100644 --- a/LXMF/LXMF.py +++ b/LXMF/LXMF.py @@ -173,7 +173,7 @@ def pn_announce_data_is_valid(data): try: if type(data) != bytes: return False else: data = msgpack.unpackb(data) - if len(data) < 7: raise ValueError("Invalid announce data: Insufficient peer data") + if len(data) < 7: raise ValueError("Invalid announce data: Insufficient peer data, likely from deprecated LXMF version") else: try: int(data[1]) except: raise ValueError("Invalid announce data: Could not decode timebase") diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index b1676f0..1d9cd5c 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -88,7 +88,7 @@ class LXMRouter: enforce_ratchets=False, enforce_stamps=False, static_peers = 
[], max_peers=None, from_static_only=False, sync_strategy=LXMPeer.STRATEGY_PERSISTENT, propagation_cost=PROPAGATION_COST, propagation_cost_flexibility=PROPAGATION_COST_FLEX, - peering_cost=PEERING_COST, name=None): + peering_cost=PEERING_COST, max_peering_cost=MAX_PEERING_COST, name=None): random.seed(os.urandom(10)) @@ -134,7 +134,7 @@ class LXMRouter: self.propagation_stamp_cost = propagation_cost self.propagation_stamp_cost_flexibility = propagation_cost_flexibility self.peering_cost = peering_cost - self.max_peering_cost = LXMRouter.MAX_PEERING_COST + self.max_peering_cost = max_peering_cost self.enforce_ratchets = enforce_ratchets self._enforce_stamps = enforce_stamps self.pending_deferred_stamps = {} @@ -2420,7 +2420,6 @@ class LXMRouter: else: propagation_stamp = selected_lxm.get_propagation_stamp(target_cost=pn_target_cost) - RNS.log(f"Generated propagation stamp: {RNS.hexrep(propagation_stamp)}") if propagation_stamp: selected_lxm.propagation_stamp = propagation_stamp selected_lxm.defer_propagation_stamp = False diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index 5a8c9aa..59736db 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -377,6 +377,8 @@ def program_setup(configdir = None, rnsconfigdir = None, run_pn = False, on_inbo propagation_limit = active_configuration["propagation_transfer_max_accepted_size"], propagation_cost = active_configuration["propagation_stamp_cost_target"], propagation_cost_flexibility = active_configuration["propagation_stamp_cost_flexibility"], + peering_cost = active_configuration["peering_cost"], + max_peering_cost = active_configuration["remote_peering_cost_max"], sync_limit = active_configuration["propagation_sync_max_accepted_size"], delivery_limit = active_configuration["delivery_transfer_max_accepted_size"], max_peers = active_configuration["max_peers"], @@ -1009,7 +1011,7 @@ autopeer_maxdepth = 4 # value required for a remote node to peer with # and deliver messages to this node. -# peering_cost = 10 +# peering_cost = 18 # You can configure the maximum peering cost # of remote nodes that this node will peer with. @@ -1019,7 +1021,7 @@ autopeer_maxdepth = 4 # more computation time during initial peering # when generating the peering key. 
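
How the maximum interacts with a remote node's announced requirement is not shown in this hunk; a rough sketch of the assumed comparison (names and values are illustrative, taken from the defaults above, and this is not code from these patches):

    PEERING_COST = 18        # value this node requires from remotes peering with it
    MAX_PEERING_COST = 24    # highest remote requirement this node is willing to satisfy

    def acceptable_peering_cost(remote_peering_cost, max_cost=MAX_PEERING_COST):
        # Meeting a remote's requirement means generating a peering key of at
        # least that value, so higher costs translate to more computation time.
        return remote_peering_cost <= max_cost

    print(acceptable_peering_cost(18))   # True
    print(acceptable_peering_cost(26))   # False: key generation would be too expensive
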
-# remote_peering_cost_max = 12 +# remote_peering_cost_max = 24 # You can tell the LXMF message router to # prioritise storage for one or more From d0f3385f75572d20dccf66e14a27068259057e40 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sat, 1 Nov 2025 18:48:31 +0100 Subject: [PATCH 092/123] Added acceptance rate to lxmd stats output --- LXMF/LXMRouter.py | 4 +++- LXMF/Utilities/lxmd.py | 4 ++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 1d9cd5c..10e1892 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -764,6 +764,7 @@ class LXMRouter: "network_distance": RNS.Transport.hops_to(peer_id), "rx_bytes": peer.rx_bytes, "tx_bytes": peer.tx_bytes, + "acceptance_rate": peer.acceptance_rate, "messages": { "offered": peer.offered, "outgoing": peer.outgoing, @@ -1430,7 +1431,8 @@ class LXMRouter: filepath = self.propagation_entries[transient_id][1] self.propagation_entries.pop(transient_id) os.unlink(filepath) - RNS.log("Client "+RNS.prettyhexrep(remote_destination.hash)+" purged message "+RNS.prettyhexrep(transient_id)+" at "+str(filepath), RNS.LOG_DEBUG) + # TODO: Remove debug + # RNS.log("Client "+RNS.prettyhexrep(remote_destination.hash)+" purged message "+RNS.prettyhexrep(transient_id)+" at "+str(filepath), RNS.LOG_DEBUG) except Exception as e: RNS.log("Error while processing message purge request from "+RNS.prettyhexrep(remote_destination.hash)+". The contained exception was: "+str(e), RNS.LOG_ERROR) diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index 59736db..8c7cd82 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -760,7 +760,7 @@ def get_status(remote=None, configdir=None, rnsconfigdir=None, verbosity=0, quie sstr = RNS.prettyspeed(p["str"]); sler = RNS.prettyspeed(p["ler"]) stl = RNS.prettysize(p["transfer_limit"]*1000); ssl = RNS.prettysize(p["sync_limit"]*1000) srxb = RNS.prettysize(p["rx_bytes"]); stxb = RNS.prettysize(p["tx_bytes"]); pmo = pm["offered"]; pmout = pm["outgoing"] - pmi = pm["incoming"]; pmuh = pm["unhandled"]; + pmi = pm["incoming"]; pmuh = pm["unhandled"]; ar = round(p["acceptance_rate"]*100, 2) if p["name"] == None: nn = "" else: nn = p["name"].strip().replace("\n", "").replace("\r", "") if len(nn) > 45: nn = f"{nn[:45]}..." 
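
The hunk above reads peer.acceptance_rate without showing its definition; a plausible, purely illustrative reading is the share of offered messages the peer actually accepted:

    class PeerStats:
        def __init__(self):
            self.offered = 0    # messages offered to the peer
            self.outgoing = 0   # offered messages the peer accepted for transfer

        @property
        def acceptance_rate(self):
            # Report full acceptance until the first offer has been made,
            # avoiding a division by zero.
            return 1.0 if self.offered == 0 else self.outgoing / self.offered

    p = PeerStats()
    p.offered, p.outgoing = 40, 30
    print(f"{round(p.acceptance_rate*100, 2)}% acceptance rate")   # 75.0% acceptance rate
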
@@ -771,7 +771,7 @@ def get_status(remote=None, configdir=None, rnsconfigdir=None, verbosity=0, quie print(f"{ind*2}Sync key : {pk}") print(f"{ind*2}Speeds : {sstr} STR, {sler} LER") print(f"{ind*2}Limits : {stl} message limit, {ssl} sync limit") - print(f"{ind*2}Messages : {pmo} offered, {pmout} outgoing, {pmi} incoming") + print(f"{ind*2}Messages : {pmo} offered, {pmout} outgoing, {pmi} incoming, {ar}% acceptance rate") print(f"{ind*2}Traffic : {srxb} received, {stxb} sent") ms = "" if pm["unhandled"] == 1 else "s" print(f"{ind*2}Sync state : {pmuh} unhandled message{ms}, {ls}") From 747ddbddd528b6492a98f6eccd719cdce1df99a3 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sun, 2 Nov 2025 02:27:57 +0100 Subject: [PATCH 093/123] Implemented duplicate signalling on PN message download --- LXMF/Handlers.py | 2 +- LXMF/LXMRouter.py | 16 ++++++++++++---- LXMF/Utilities/lxmd.py | 3 ++- 3 files changed, 15 insertions(+), 6 deletions(-) diff --git a/LXMF/Handlers.py b/LXMF/Handlers.py index aa39ea2..01841f9 100644 --- a/LXMF/Handlers.py +++ b/LXMF/Handlers.py @@ -54,7 +54,7 @@ class LXMFPropagationAnnounceHandler: metadata = data[6] if destination_hash in self.lxmrouter.static_peers: - static_peer = self.lxmrouter.static_peers[destination_hash] + static_peer = self.lxmrouter.peers[destination_hash] if not is_path_response or static_peer.last_heard == 0: self.lxmrouter.peer(destination_hash=destination_hash, timestamp=node_timebase, diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 10e1892..0cc59b1 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -75,6 +75,8 @@ class LXMRouter: PR_ALL_MESSAGES = 0x00 + DUPLICATE_SIGNAL = "lxmf_duplicate" + STATS_GET_PATH = "/pn/get/stats" SYNC_REQUEST_PATH = "/pn/peer/sync" UNPEER_REQUEST_PATH = "/pn/peer/unpeer" @@ -1537,10 +1539,12 @@ class LXMRouter: self.propagation_transfer_state = LXMRouter.PR_NO_ACCESS else: + duplicates = 0 if request_receipt.response != None and len(request_receipt.response) > 0: haves = [] for lxmf_data in request_receipt.response: - self.lxmf_propagation(lxmf_data) + result = self.lxmf_propagation(lxmf_data, signal_duplicate=LXMRouter.DUPLICATE_SIGNAL) + if result == LXMRouter.DUPLICATE_SIGNAL: duplicates += 1 haves.append(RNS.Identity.full_hash(lxmf_data)) # Return a list of successfully received messages to the node. 
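
Reduced to a self-contained sketch, the sentinel pattern used in the download loop above looks roughly like this (ingest and the use of hash() are stand-ins for lxmf_propagation and the real transient ID):

    DUPLICATE_SIGNAL = "lxmf_duplicate"

    def ingest(lxmf_data, seen_ids, signal_duplicate=None):
        transient_id = hash(lxmf_data)      # stand-in for the real transient ID
        if transient_id in seen_ids:
            return signal_duplicate         # message already held locally
        seen_ids.add(transient_id)
        return transient_id

    seen, duplicates = set(), 0
    for lxmf_data in [b"msg-a", b"msg-b", b"msg-a"]:
        if ingest(lxmf_data, seen, signal_duplicate=DUPLICATE_SIGNAL) == DUPLICATE_SIGNAL:
            duplicates += 1
    print(duplicates)   # 1, surfaced via propagation_transfer_last_duplicates
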
@@ -1556,6 +1560,7 @@ class LXMRouter: self.propagation_transfer_state = LXMRouter.PR_COMPLETE self.propagation_transfer_progress = 1.0 + self.propagation_transfer_last_duplicates = duplicates self.propagation_transfer_last_result = len(request_receipt.response) self.save_locally_delivered_transient_ids() @@ -1674,11 +1679,14 @@ class LXMRouter: def get_outbound_lxm_stamp_cost(self, lxm_hash): for lxm in self.pending_outbound: if lxm.hash == lxm_hash: - return lxm.stamp_cost + if lxm.outbound_ticket: return None + else: return lxm.stamp_cost for lxm_id in self.pending_deferred_stamps: if self.pending_deferred_stamps[lxm_id].hash == lxm_hash: - return self.pending_deferred_stamps[lxm_id].stamp_cost + lxm = self.pending_deferred_stamps[lxm_id] + if lxm.outbound_ticket: return None + else: return lxm.stamp_cost return None @@ -1689,7 +1697,7 @@ class LXMRouter: for lxm_id in self.pending_deferred_stamps: if self.pending_deferred_stamps[lxm_id].hash == lxm_hash: - return self.pending_deferred_stamps[lxm_id].stamp_cost + return self.pending_deferred_stamps[lxm_id].propagation_target_cost return None diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index 8c7cd82..de69ab1 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -758,7 +758,8 @@ def get_status(remote=None, configdir=None, rnsconfigdir=None, verbosity=0, quie ls = "never synced" sstr = RNS.prettyspeed(p["str"]); sler = RNS.prettyspeed(p["ler"]) - stl = RNS.prettysize(p["transfer_limit"]*1000); ssl = RNS.prettysize(p["sync_limit"]*1000) + stl = RNS.prettysize(p["transfer_limit"]*1000) if p["transfer_limit"] else "Unknown" + ssl = RNS.prettysize(p["sync_limit"]*1000) if p["sync_limit"] else "unknown" srxb = RNS.prettysize(p["rx_bytes"]); stxb = RNS.prettysize(p["tx_bytes"]); pmo = pm["offered"]; pmout = pm["outgoing"] pmi = pm["incoming"]; pmuh = pm["unhandled"]; ar = round(p["acceptance_rate"]*100, 2) if p["name"] == None: nn = "" From f383450b3716b589bdf220fc70b50ab8fd29eb28 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sun, 2 Nov 2025 02:29:32 +0100 Subject: [PATCH 094/123] Implemented duplicate signalling on PN message download --- LXMF/LXMRouter.py | 1 + 1 file changed, 1 insertion(+) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 0cc59b1..6c215f8 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -150,6 +150,7 @@ class LXMRouter: self.propagation_transfer_state = LXMRouter.PR_IDLE self.propagation_transfer_progress = 0.0 self.propagation_transfer_last_result = None + self.propagation_transfer_last_duplicates = None self.propagation_transfer_max_messages = None self.prioritise_rotating_unreachable_peers = False self.active_propagation_links = [] From 8e3ffb0d2ae9f2dac7480d4d6e7cdd86d797fd09 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sun, 2 Nov 2025 02:48:19 +0100 Subject: [PATCH 095/123] Stat query default identity as local --- LXMF/Utilities/lxmd.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index de69ab1..af667a5 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -617,7 +617,8 @@ def request_unpeer(target, remote=None, configdir=None, rnsconfigdir=None, verbo print(f"Broke peering with {RNS.prettyhexrep(peer_destination_hash)}") exit(0) -def query_status(identity, remote_identity, timeout=5, exit_on_fail=False): +def query_status(identity, remote_identity=None, timeout=5, exit_on_fail=False): + if remote_identity == None: remote_identity = identity control_destination = 
RNS.Destination(remote_identity, RNS.Destination.OUT, RNS.Destination.SINGLE, APP_NAME, "propagation", "control") timeout = time.time()+timeout From e1905b85d7de82d92bab2a278795270c9abecbbe Mon Sep 17 00:00:00 2001 From: bus Date: Sun, 2 Nov 2025 08:12:33 +0000 Subject: [PATCH 096/123] Fix stored messages always getting deleted if config path has underscores --- LXMF/LXMRouter.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 6c215f8..27ea071 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -1090,9 +1090,10 @@ class LXMRouter: for transient_id in self.propagation_entries.copy(): entry = self.propagation_entries[transient_id] filepath = entry[1] - components = filepath.split("_") + filename = os.path.split(filepath)[-1] + components = filename.split("_") - if len(components) == 2 and float(components[1]) > 0 and len(os.path.split(components[0])[1]) == (RNS.Identity.HASHLENGTH//8)*2: + if len(components) == 2 and float(components[1]) > 0 and len(components[0]) == (RNS.Identity.HASHLENGTH//8)*2: timestamp = float(components[1]) if now > timestamp+LXMRouter.MESSAGE_EXPIRY: RNS.log("Purging message "+RNS.prettyhexrep(transient_id)+" due to expiry", RNS.LOG_EXTREME) From 383d953e064d88d1d057fc29cc4e3e3c5adf8829 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sun, 2 Nov 2025 11:34:22 +0100 Subject: [PATCH 097/123] Don't hog all cores on high core-count machines --- LXMF/LXStamper.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/LXMF/LXStamper.py b/LXMF/LXStamper.py index 8ebefd7..56e2500 100644 --- a/LXMF/LXStamper.py +++ b/LXMF/LXStamper.py @@ -178,7 +178,8 @@ def job_linux(stamp_cost, workblock, message_id): allow_kill = True stamp = None total_rounds = 0 - jobs = multiprocessing.cpu_count() + cores = multiprocessing.cpu_count() + jobs = cores if cores <= 12 else int(cores/2) stop_event = multiprocessing.Event() result_queue = multiprocessing.Queue(1) rounds_queue = multiprocessing.Queue() From 9c5fa4a0667b8db75b592ee79e58477d9f7680ef Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sun, 2 Nov 2025 11:35:12 +0100 Subject: [PATCH 098/123] Take stamp value into account when cleaning message store --- LXMF/LXMRouter.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 6c215f8..41b9618 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -1090,9 +1090,10 @@ class LXMRouter: for transient_id in self.propagation_entries.copy(): entry = self.propagation_entries[transient_id] filepath = entry[1] + stamp_value = entry[6] components = filepath.split("_") - if len(components) == 2 and float(components[1]) > 0 and len(os.path.split(components[0])[1]) == (RNS.Identity.HASHLENGTH//8)*2: + if len(components) >= 3 and float(components[1]) > 0 and len(os.path.split(components[0])[1]) == (RNS.Identity.HASHLENGTH//8)*2 and int(components[2]) == stamp_value: timestamp = float(components[1]) if now > timestamp+LXMRouter.MESSAGE_EXPIRY: RNS.log("Purging message "+RNS.prettyhexrep(transient_id)+" due to expiry", RNS.LOG_EXTREME) From 99830b6e8bcdee4c4cbe830c79c60aa866baad6b Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sun, 2 Nov 2025 12:02:21 +0100 Subject: [PATCH 099/123] Only index messages with stamp value set --- LXMF/LXMRouter.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index bde1cda..b3bf0f2 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -544,21 +544,19 @@ class 
LXMRouter: st = time.time(); RNS.log("Indexing messagestore...", RNS.LOG_NOTICE) for filename in os.listdir(self.messagepath): components = filename.split("_") - if len(components) >= 2: + if len(components) >= 3: if float(components[1]) > 0: if len(components[0]) == RNS.Identity.HASHLENGTH//8*2: try: transient_id = bytes.fromhex(components[0]) received = float(components[1]) + stamp_value = int(components[2]) filepath = self.messagepath+"/"+filename msg_size = os.path.getsize(filepath) file = open(filepath, "rb") destination_hash = file.read(LXMessage.DESTINATION_LENGTH) file.close() - if len(components) >= 3: stamp_value = int(components[2]) - else: stamp_value = None - self.propagation_entries[transient_id] = [ destination_hash, # 0: Destination hash filepath, # 1: Storage location From bc3f4ecff528205aa29dc7fae829f33838c7c377 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sun, 2 Nov 2025 18:35:15 +0100 Subject: [PATCH 100/123] Handle client propagation transfer limits separately from PN peers --- LXMF/LXMRouter.py | 92 ++++++++++++++++++++++++++++------------------- 1 file changed, 56 insertions(+), 36 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index b3bf0f2..792fa08 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -154,6 +154,7 @@ class LXMRouter: self.propagation_transfer_max_messages = None self.prioritise_rotating_unreachable_peers = False self.active_propagation_links = [] + self.validated_peer_links = {} self.locally_delivered_transient_ids = {} self.locally_processed_transient_ids = {} self.outbound_stamp_costs = {} @@ -910,6 +911,8 @@ class LXMRouter: if inactive_time > LXMRouter.LINK_MAX_INACTIVITY: link.teardown() closed_links.append(link_hash) + if link.link_id in self.validated_peer_links: + self.validated_peer_links.pop(link.link_id) for link_hash in closed_links: cleaned_link = self.direct_links.pop(link_hash) @@ -2101,7 +2104,10 @@ class LXMRouter: self.lxmf_propagation(lxmf_data, stamp_value=stamp_value, stamp_data=stamp_data) self.client_propagation_messages_received += 1 - if len(validated_messages) == len(messages): packet.prove() + if len(validated_messages) == len(messages): + ms = "" if len(messages) == 1 else "s" + RNS.log(f"Received {len(messages)} propagation message{ms} from client with valid stamp{ms}", RNS.LOG_DEBUG) + packet.prove() else: RNS.log("Propagation transfer from client contained messages with invalid stamps", RNS.LOG_NOTICE) reject_data = msgpack.packb([LXMPeer.ERROR_INVALID_STAMP]) @@ -2151,6 +2157,7 @@ class LXMRouter: else: RNS.log(f"Peering key validated for incoming offer in {RNS.prettytime(td)}", RNS.LOG_DEBUG) + self.validated_peer_links[link_id] = True for transient_id in transient_ids: if not transient_id in self.propagation_entries: wanted_ids.append(transient_id) @@ -2174,7 +2181,7 @@ class LXMRouter: remote_timebase = data[0] messages = data[1] remote_hash = None - remote_str = "unknown peer" + remote_str = "unknown client" if remote_identity != None: remote_destination = RNS.Destination(remote_identity, RNS.Destination.OUT, RNS.Destination.SINGLE, APP_NAME, "propagation") @@ -2203,42 +2210,55 @@ class LXMRouter: RNS.log(f"Auto-peering with {remote_str} discovered via incoming sync", RNS.LOG_DEBUG) # TODO: Remove debug self.peer(remote_hash, remote_timebase, remote_transfer_limit, remote_sync_limit, remote_stamp_cost, remote_stamp_flex, remote_peering_cost, remote_metadata) - ms = "" if len(messages) == 1 else "s" - RNS.log(f"Received {len(messages)} message{ms} from {remote_str}, validating stamps...", 
RNS.LOG_VERBOSE) + peering_key_valid = False + if remote_identity != None: + if resource.link.link_id in self.validated_peer_links and self.validated_peer_links[resource.link.link_id] == True: + peering_key_valid = True - min_accepted_cost = max(0, self.propagation_stamp_cost-self.propagation_stamp_cost_flexibility) - validated_messages = LXStamper.validate_pn_stamps(messages, min_accepted_cost) - if len(validated_messages) == len(messages): RNS.log(f"All message stamps validated from {remote_str}", RNS.LOG_VERBOSE) - else: RNS.log(f"Transfer from {remote_str} contained {len(messages)-len(validated_messages)} invalid stamps", RNS.LOG_WARNING) - - for validated_entry in validated_messages: - transient_id = validated_entry[0] - lxmf_data = validated_entry[1] - stamp_value = validated_entry[2] - stamp_data = validated_entry[3] - peer = None - - if remote_hash != None and remote_hash in self.peers: - peer = self.peers[remote_hash] - peer.incoming += 1 - peer.rx_bytes += len(lxmf_data) - else: - if remote_identity != None: - self.unpeered_propagation_incoming += 1 - self.unpeered_propagation_rx_bytes += len(lxmf_data) - else: - self.client_propagation_messages_received += 1 - - self.lxmf_propagation(lxmf_data, from_peer=peer, stamp_value=stamp_value, stamp_data=stamp_data) - if peer != None: peer.queue_handled_message(transient_id) - - invalid_message_count = len(messages) - len(validated_messages) - if invalid_message_count > 0: + if not peering_key_valid and len(messages) > 1: resource.link.teardown() - throttle_time = LXMRouter.PN_STAMP_THROTTLE - self.throttled_peers[remote_hash] = time.time()+throttle_time - ms = "" if invalid_message_count == 1 else "s" - RNS.log(f"Propagation transfer from {remote_str} contained {invalid_message_count} message{ms} with invalid stamps, throttled for {RNS.prettytime(throttle_time)}", RNS.LOG_NOTICE) + RNS.log(f"Received multiple propagation messages from {remote_str} without valid peering key presentation. 
This is not supposed to happen, ignoring.", RNS.LOG_WARNING) + RNS.log(f"Clients and peers without a valid peering key can only deliver 1 message per transfer.", RNS.LOG_WARNING) + else: + ms = "" if len(messages) == 1 else "s" + RNS.log(f"Received {len(messages)} message{ms} from {remote_str}, validating stamps...", RNS.LOG_VERBOSE) + + min_accepted_cost = max(0, self.propagation_stamp_cost-self.propagation_stamp_cost_flexibility) + validated_messages = LXStamper.validate_pn_stamps(messages, min_accepted_cost) + invalid_stamps = len(messages)-len(validated_messages) + ms = "" if invalid_stamps == 1 else "s" + if len(validated_messages) == len(messages): RNS.log(f"All message stamps validated from {remote_str}", RNS.LOG_VERBOSE) + else: RNS.log(f"Transfer from {remote_str} contained {invalid_stamps} invalid stamp{ms}", RNS.LOG_WARNING) + + for validated_entry in validated_messages: + transient_id = validated_entry[0] + lxmf_data = validated_entry[1] + stamp_value = validated_entry[2] + stamp_data = validated_entry[3] + peer = None + + if remote_hash != None and remote_hash in self.peers: + peer = self.peers[remote_hash] + peer.incoming += 1 + peer.rx_bytes += len(lxmf_data) + else: + if remote_identity != None: + self.unpeered_propagation_incoming += 1 + self.unpeered_propagation_rx_bytes += len(lxmf_data) + else: + self.client_propagation_messages_received += 1 + + self.lxmf_propagation(lxmf_data, from_peer=peer, stamp_value=stamp_value, stamp_data=stamp_data) + if peer != None: peer.queue_handled_message(transient_id) + + invalid_message_count = len(messages) - len(validated_messages) + if invalid_message_count > 0: + resource.link.teardown() + if remote_identity != None: + throttle_time = LXMRouter.PN_STAMP_THROTTLE + self.throttled_peers[remote_hash] = time.time()+throttle_time + ms = "" if invalid_message_count == 1 else "s" + RNS.log(f"Propagation transfer from {remote_str} contained {invalid_message_count} message{ms} with invalid stamps, throttled for {RNS.prettytime(throttle_time)}", RNS.LOG_NOTICE) else: RNS.log("Invalid data structure received at propagation destination, ignoring", RNS.LOG_DEBUG) From 0b067914ea27ba0a42a674589d4207d199ef686b Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sun, 2 Nov 2025 20:41:03 +0100 Subject: [PATCH 101/123] Adjusted default max peering cost --- LXMF/LXMRouter.py | 2 +- LXMF/Utilities/lxmd.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 792fa08..08c11f8 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -47,7 +47,7 @@ class LXMRouter: ROTATION_AR_MAX = 0.5 PEERING_COST = 18 - MAX_PEERING_COST = 24 + MAX_PEERING_COST = 26 PROPAGATION_COST_MIN = 13 PROPAGATION_COST_FLEX = 3 PROPAGATION_COST = 16 diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index af667a5..032f151 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -1023,7 +1023,7 @@ autopeer_maxdepth = 4 # more computation time during initial peering # when generating the peering key. 
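
Condensed to a standalone sketch, the admission rule introduced in the transfer handler above (the function name, link IDs and tracking dict are simplified stand-ins):

    def admit_transfer(messages, link_id, validated_peer_links):
        # Only remotes that validated a peering key on this link may deliver
        # batches; unidentified clients get one message per transfer.
        peering_key_valid = validated_peer_links.get(link_id, False)
        if not peering_key_valid and len(messages) > 1:
            return None                       # the real handler tears the link down
        return messages                       # proceed to stamp validation

    print(admit_transfer([b"m1", b"m2"], "link-a", {}))                  # None
    print(admit_transfer([b"m1"], "link-a", {}))                         # [b'm1']
    print(admit_transfer([b"m1", b"m2"], "link-b", {"link-b": True}))    # both pass
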
-# remote_peering_cost_max = 24 +# remote_peering_cost_max = 26 # You can tell the LXMF message router to # prioritise storage for one or more From d7a2979dd0a2fc6dcee6b1d413b58150da49838a Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sun, 2 Nov 2025 23:07:43 +0100 Subject: [PATCH 102/123] Cleanup --- LXMF/Utilities/lxmd.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index 032f151..b09e7ae 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -998,7 +998,7 @@ autopeer_maxdepth = 4 # You can configure the target stamp cost # required to deliver messages via this node. -# propagation_stamp_cost_target = 12 +# propagation_stamp_cost_target = 16 # If set higher than 0, the stamp cost flexi- # bility option will make this node accept @@ -1019,7 +1019,7 @@ autopeer_maxdepth = 4 # of remote nodes that this node will peer with. # Setting this to a higher number will allow # this node to peer with other nodes requiring -# a high peering key value, but will require +# a higher peering key value, but will require # more computation time during initial peering # when generating the peering key. From f18ce9ea99c6376872f07ce8d2629e44aa59fc92 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Mon, 3 Nov 2025 00:08:50 +0100 Subject: [PATCH 103/123] Cleanup --- LXMF/Utilities/lxmd.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index b09e7ae..ab8e30f 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -1,8 +1,8 @@ #!/usr/bin/env python3 -# MIT License +# Reticulum License # -# Copyright (c) 2016-2022 Mark Qvist / unsigned.io +# Copyright (c) 2020-2025 Mark Qvist # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal @@ -11,8 +11,16 @@ # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. +# - The Software shall not be used in any kind of system which includes amongst +# its functions the ability to purposefully do harm to human beings. +# +# - The Software shall not be used, directly or indirectly, in the creation of +# an artificial intelligence, machine learning or language model training +# dataset, including but not limited to any use that contributes to the +# training or development of such a model or algorithm. +# +# - The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. 
# # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, From fa2d78c3510c47d4903cdfca1dacb49e69164ae5 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Mon, 3 Nov 2025 22:19:20 +0100 Subject: [PATCH 104/123] Fixed message stamps getting overwritten if propagation stamp was also present --- LXMF/LXMRouter.py | 2 +- LXMF/LXMessage.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 08c11f8..7afbc73 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -2424,7 +2424,7 @@ class LXMRouter: selected_lxm.stamp = generated_stamp selected_lxm.defer_stamp = False selected_lxm.packed = None - selected_lxm.pack() + selected_lxm.pack(payload_updated=True) stamp_generation_success = True RNS.log(f"Stamp generation completed for {selected_lxm}", RNS.LOG_DEBUG) else: diff --git a/LXMF/LXMessage.py b/LXMF/LXMessage.py index 0533f07..baf951a 100644 --- a/LXMF/LXMessage.py +++ b/LXMF/LXMessage.py @@ -357,7 +357,7 @@ class LXMessage: else: return None - def pack(self): + def pack(self, payload_updated=False): if not self.packed: if self.timestamp == None: self.timestamp = time.time() @@ -431,7 +431,7 @@ class LXMessage: elif self.desired_method == LXMessage.PROPAGATED: single_packet_content_limit = LXMessage.LINK_PACKET_MAX_CONTENT - if self.__pn_encrypted_data == None: + if self.__pn_encrypted_data == None or payload_updated: self.__pn_encrypted_data = self.__destination.encrypt(self.packed[LXMessage.DESTINATION_LENGTH:]) self.ratchet_id = self.__destination.latest_ratchet_id From 62038573f1b249ef92c7005ff66ecfc1ef47d566 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Mon, 3 Nov 2025 22:21:13 +0100 Subject: [PATCH 105/123] Updated version --- LXMF/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LXMF/_version.py b/LXMF/_version.py index 3e2f46a..d69d16e 100644 --- a/LXMF/_version.py +++ b/LXMF/_version.py @@ -1 +1 @@ -__version__ = "0.9.0" +__version__ = "0.9.1" From dca6cc2adc733ebfb9aaef721273982195ba36ae Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Fri, 7 Nov 2025 23:10:30 +0100 Subject: [PATCH 106/123] Ensure LXMF and RNS exit handlers are called on SIGINT and SIGTERM, since for some ungodly reason atexit events are not always called on some combinations of Python version and platforms, even though they have been registered. 
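
As a generic, self-contained illustration of the pattern in the diff below (standard-library signal handling only; exit_handler stands in for the router's storage persistence):

    import signal, sys

    def exit_handler():
        print("persisting state before exit")

    def terminate(signum, frame):
        # atexit callbacks are not reliably invoked on every platform and
        # Python version, so run the cleanup explicitly before exiting.
        exit_handler()
        sys.exit(0)

    signal.signal(signal.SIGINT, terminate)
    signal.signal(signal.SIGTERM, terminate)
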
--- LXMF/LXMRouter.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 7afbc73..4c247be 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -1360,14 +1360,16 @@ class LXMRouter: def sigint_handler(self, signal, frame): if not self.exit_handler_running: RNS.log("Received SIGINT, shutting down now!", RNS.LOG_WARNING) - sys.exit(0) + self.exit_handler() + RNS.exit(0) else: RNS.log("Received SIGINT, but exit handler is running, keeping process alive until storage persist is complete", RNS.LOG_WARNING) def sigterm_handler(self, signal, frame): if not self.exit_handler_running: RNS.log("Received SIGTERM, shutting down now!", RNS.LOG_WARNING) - sys.exit(0) + self.exit_handler() + RNS.exit(0) else: RNS.log("Received SIGTERM, but exit handler is running, keeping process alive until storage persist is complete", RNS.LOG_WARNING) From 00ffbc09febd3df2a548c9c60625140e09b15c9b Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sat, 8 Nov 2025 01:20:31 +0100 Subject: [PATCH 107/123] Using multiprocessing start method fork on Linux to avoid issues with Python 3.14. Fixes #35. --- LXMF/LXStamper.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/LXMF/LXStamper.py b/LXMF/LXStamper.py index 56e2500..39b541b 100644 --- a/LXMF/LXStamper.py +++ b/LXMF/LXStamper.py @@ -15,6 +15,8 @@ PN_VALIDATION_POOL_MIN_SIZE = 256 active_jobs = {} +if RNS.vendor.platformutils.is_linux(): multiprocessing.set_start_method("fork") + def stamp_workblock(material, expand_rounds=WORKBLOCK_EXPAND_ROUNDS): wb_st = time.time() workblock = b"" From ee15e9f0b6dfbe6c6f1f87dc5ca1de4ca9de8d31 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sat, 8 Nov 2025 14:30:47 +0100 Subject: [PATCH 108/123] Updated version --- LXMF/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LXMF/_version.py b/LXMF/_version.py index d69d16e..a2fecb4 100644 --- a/LXMF/_version.py +++ b/LXMF/_version.py @@ -1 +1 @@ -__version__ = "0.9.1" +__version__ = "0.9.2" From 39e398be65f7c4a46c6d93badb072f335c4580d4 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 13 Nov 2025 17:48:10 +0100 Subject: [PATCH 109/123] Fixed missing PN config unpack on incoming sync auto-peering --- LXMF/LXMRouter.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 4c247be..3016e89 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -2200,14 +2200,15 @@ class LXMRouter: # 4: Limit for incoming propagation node syncs # 5: Propagation stamp costs for this node # 6: Node metadata - if remote_app_data[2] and self.autopeer and RNS.Transport.hops_to(remote_hash) <= self.autopeer_maxdepth: - remote_timebase = remote_app_data[1] - remote_transfer_limit = remote_app_data[3] - remote_sync_limit = remote_app_data[4] - remote_stamp_cost = remote_app_data[5][0] - remote_stamp_flex = remote_app_data[5][1] - remote_peering_cost = remote_app_data[5][2] - remote_metadata = remote_app_data[6] + pn_config = msgpack.unpackb(remote_app_data) + if pn_config[2] and self.autopeer and RNS.Transport.hops_to(remote_hash) <= self.autopeer_maxdepth: + remote_timebase = pn_config[1] + remote_transfer_limit = pn_config[3] + remote_sync_limit = pn_config[4] + remote_stamp_cost = pn_config[5][0] + remote_stamp_flex = pn_config[5][1] + remote_peering_cost = pn_config[5][2] + remote_metadata = pn_config[6] RNS.log(f"Auto-peering with {remote_str} discovered via incoming sync", RNS.LOG_DEBUG) # TODO: Remove debug self.peer(remote_hash, 
remote_timebase, remote_transfer_limit, remote_sync_limit, remote_stamp_cost, remote_stamp_flex, remote_peering_cost, remote_metadata) From bc7522b63d9c1f4ca275c78998e6b7fda362158c Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 13 Nov 2025 19:42:24 +0100 Subject: [PATCH 110/123] Updated version --- LXMF/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LXMF/_version.py b/LXMF/_version.py index a2fecb4..c598173 100644 --- a/LXMF/_version.py +++ b/LXMF/_version.py @@ -1 +1 @@ -__version__ = "0.9.2" +__version__ = "0.9.3" From 7c71eb1df46b2f53481199d46a3f84dd195d8bea Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Mon, 24 Nov 2025 22:02:30 +0100 Subject: [PATCH 111/123] Cleanup --- LXMF/LXMRouter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 3016e89..9abef7c 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -219,7 +219,7 @@ class LXMRouter: self.locally_delivered_transient_ids = {} except Exception as e: - RNS.log("Could not load locally delivered message ID cache from storage. The contained exception was: "+str(e), RNS.LOG_ERROR) + RNS.log(f"Could not load locally delivered message ID cache from storage. The contained exception was: {e}", RNS.LOG_ERROR) self.locally_delivered_transient_ids = {} try: From f4c805ea35bcf549c35a4a2b713492b17188bf81 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 27 Nov 2025 18:38:52 +0100 Subject: [PATCH 112/123] Updated makefile --- Makefile | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index c0b53da..c00cde9 100644 --- a/Makefile +++ b/Makefile @@ -16,7 +16,12 @@ create_symlinks: -ln -s ../../LXMF ./LXMF/Utilities/LXMF build_wheel: - python3 setup.py sdist bdist_wheel + python3 setup.py bdist_wheel + +build_sdist: + python3 setup.py sdist + +build_spkg: remove_symlinks build_sdist create_symlinks release: remove_symlinks build_wheel create_symlinks From a6f5a56a38f076708923f3a27ba30f88c6ab5def Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Tue, 2 Dec 2025 20:17:46 +0100 Subject: [PATCH 113/123] Improved outbound message processing speed --- LXMF/Handlers.py | 22 +-- LXMF/LXMRouter.py | 374 +++++++++++++++++++++++----------------------- 2 files changed, 198 insertions(+), 198 deletions(-) diff --git a/LXMF/Handlers.py b/LXMF/Handlers.py index 01841f9..871cc56 100644 --- a/LXMF/Handlers.py +++ b/LXMF/Handlers.py @@ -13,17 +13,6 @@ class LXMFDeliveryAnnounceHandler: self.lxmrouter = lxmrouter def received_announce(self, destination_hash, announced_identity, app_data): - for lxmessage in self.lxmrouter.pending_outbound: - if destination_hash == lxmessage.destination_hash: - if lxmessage.method == LXMessage.DIRECT or lxmessage.method == LXMessage.OPPORTUNISTIC: - lxmessage.next_delivery_attempt = time.time() - - def outbound_trigger(): - while self.lxmrouter.processing_outbound: time.sleep(0.1) - self.lxmrouter.process_outbound() - - threading.Thread(target=outbound_trigger, daemon=True).start() - try: stamp_cost = stamp_cost_from_app_data(app_data) self.lxmrouter.update_stamp_cost(destination_hash, stamp_cost) @@ -31,6 +20,17 @@ class LXMFDeliveryAnnounceHandler: except Exception as e: RNS.log(f"An error occurred while trying to decode announced stamp cost. 
The contained exception was: {e}", RNS.LOG_ERROR) + for lxmessage in self.lxmrouter.pending_outbound: + if destination_hash == lxmessage.destination_hash: + if lxmessage.method == LXMessage.DIRECT or lxmessage.method == LXMessage.OPPORTUNISTIC: + lxmessage.next_delivery_attempt = time.time() + + def outbound_trigger(): + while self.lxmrouter.outbound_processing_lock.locked(): time.sleep(0.1) + self.lxmrouter.process_outbound() + + threading.Thread(target=outbound_trigger, daemon=True).start() + class LXMFPropagationAnnounceHandler: def __init__(self, lxmrouter): diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 9abef7c..6f44f95 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -109,7 +109,6 @@ class LXMRouter: self.retain_synced_on_node = False self.default_sync_strategy = sync_strategy - self.processing_outbound = False self.processing_inbound = False self.processing_count = 0 self.name = name @@ -160,6 +159,7 @@ class LXMRouter: self.outbound_stamp_costs = {} self.available_tickets = {"outbound": {}, "inbound": {}, "last_deliveries": {}} + self.outbound_processing_lock = threading.Lock() self.cost_file_lock = threading.Lock() self.ticket_file_lock = threading.Lock() self.stamp_gen_lock = threading.Lock() @@ -1664,10 +1664,10 @@ class LXMRouter: lxmessage.defer_stamp = False if not lxmessage.defer_stamp and not (lxmessage.desired_method == LXMessage.PROPAGATED and lxmessage.defer_propagation_stamp): - while not unknown_path_requested and self.processing_outbound: time.sleep(0.05) + while not unknown_path_requested and self.outbound_processing_lock.locked(): time.sleep(0.05) self.pending_outbound.append(lxmessage) - if not unknown_path_requested: self.process_outbound() + if not unknown_path_requested: threading.Thread(target=self.process_outbound, daemon=True).start() else: self.pending_deferred_stamps[lxmessage.message_id] = lxmessage @@ -2373,6 +2373,7 @@ class LXMRouter: def fail_message(self, lxmessage): RNS.log(str(lxmessage)+" failed to send", RNS.LOG_DEBUG) + lxmessage.progress = 0.0 if lxmessage in self.pending_outbound: self.pending_outbound.remove(lxmessage) @@ -2494,198 +2495,141 @@ class LXMRouter: RNS.log(f"An error occurred while processing propagation transfer signalling. 
The contained exception was: {e}", RNS.LOG_ERROR) def process_outbound(self, sender = None): - if self.processing_outbound: - return + if self.outbound_processing_lock.locked(): return + with self.outbound_processing_lock: + for lxmessage in self.pending_outbound: + if lxmessage.state == LXMessage.DELIVERED: + RNS.log("Delivery has occurred for "+str(lxmessage)+", removing from outbound queue", RNS.LOG_DEBUG) + self.pending_outbound.remove(lxmessage) - for lxmessage in self.pending_outbound: - if lxmessage.state == LXMessage.DELIVERED: - RNS.log("Delivery has occurred for "+str(lxmessage)+", removing from outbound queue", RNS.LOG_DEBUG) - self.pending_outbound.remove(lxmessage) + # Udate ticket delivery stats + if lxmessage.include_ticket and FIELD_TICKET in lxmessage.fields: + RNS.log(f"Updating latest ticket delivery for {RNS.prettyhexrep(lxmessage.destination_hash)}", RNS.LOG_DEBUG) + self.available_tickets["last_deliveries"][lxmessage.destination_hash] = time.time() + self.save_available_tickets() - # Udate ticket delivery stats - if lxmessage.include_ticket and FIELD_TICKET in lxmessage.fields: - RNS.log(f"Updating latest ticket delivery for {RNS.prettyhexrep(lxmessage.destination_hash)}", RNS.LOG_DEBUG) - self.available_tickets["last_deliveries"][lxmessage.destination_hash] = time.time() - self.save_available_tickets() + # Prepare link for backchannel communications + delivery_destination_hash = lxmessage.get_destination().hash + if lxmessage.method == LXMessage.DIRECT and delivery_destination_hash in self.direct_links: + direct_link = self.direct_links[delivery_destination_hash] + if not hasattr(direct_link, "backchannel_identified") or direct_link.backchannel_identified == False: + if direct_link.initiator == True: + source_destination_hash = lxmessage.get_source().hash + if source_destination_hash in self.delivery_destinations: + backchannel_identity = self.delivery_destinations[source_destination_hash].identity + backchannel_desthash = RNS.Destination.hash_from_name_and_identity("lxmf.delivery", backchannel_identity) + direct_link.identify(backchannel_identity) + direct_link.backchannel_identified = True + self.delivery_link_established(direct_link) + RNS.log(f"Performed backchannel identification as {RNS.prettyhexrep(backchannel_desthash)} on {direct_link}", RNS.LOG_DEBUG) - # Prepare link for backchannel communications - delivery_destination_hash = lxmessage.get_destination().hash - if lxmessage.method == LXMessage.DIRECT and delivery_destination_hash in self.direct_links: - direct_link = self.direct_links[delivery_destination_hash] - if not hasattr(direct_link, "backchannel_identified") or direct_link.backchannel_identified == False: - if direct_link.initiator == True: - source_destination_hash = lxmessage.get_source().hash - if source_destination_hash in self.delivery_destinations: - backchannel_identity = self.delivery_destinations[source_destination_hash].identity - backchannel_desthash = RNS.Destination.hash_from_name_and_identity("lxmf.delivery", backchannel_identity) - direct_link.identify(backchannel_identity) - direct_link.backchannel_identified = True - self.delivery_link_established(direct_link) - RNS.log(f"Performed backchannel identification as {RNS.prettyhexrep(backchannel_desthash)} on {direct_link}", RNS.LOG_DEBUG) + elif lxmessage.method == LXMessage.PROPAGATED and lxmessage.state == LXMessage.SENT: + RNS.log("Propagation has occurred for "+str(lxmessage)+", removing from outbound queue", RNS.LOG_DEBUG) + self.pending_outbound.remove(lxmessage) - elif 
lxmessage.method == LXMessage.PROPAGATED and lxmessage.state == LXMessage.SENT: - RNS.log("Propagation has occurred for "+str(lxmessage)+", removing from outbound queue", RNS.LOG_DEBUG) - self.pending_outbound.remove(lxmessage) + elif lxmessage.state == LXMessage.CANCELLED: + RNS.log("Cancellation requested for "+str(lxmessage)+", removing from outbound queue", RNS.LOG_DEBUG) + self.pending_outbound.remove(lxmessage) + if lxmessage.failed_callback != None and callable(lxmessage.failed_callback): + lxmessage.failed_callback(lxmessage) - elif lxmessage.state == LXMessage.CANCELLED: - RNS.log("Cancellation requested for "+str(lxmessage)+", removing from outbound queue", RNS.LOG_DEBUG) - self.pending_outbound.remove(lxmessage) - if lxmessage.failed_callback != None and callable(lxmessage.failed_callback): - lxmessage.failed_callback(lxmessage) + elif lxmessage.state == LXMessage.REJECTED: + RNS.log("Receiver rejected "+str(lxmessage)+", removing from outbound queue", RNS.LOG_DEBUG) + if lxmessage in self.pending_outbound: self.pending_outbound.remove(lxmessage) + if lxmessage.failed_callback != None and callable(lxmessage.failed_callback): + lxmessage.failed_callback(lxmessage) - elif lxmessage.state == LXMessage.REJECTED: - RNS.log("Receiver rejected "+str(lxmessage)+", removing from outbound queue", RNS.LOG_DEBUG) - if lxmessage in self.pending_outbound: self.pending_outbound.remove(lxmessage) - if lxmessage.failed_callback != None and callable(lxmessage.failed_callback): - lxmessage.failed_callback(lxmessage) + else: + RNS.log("Outbound processing for "+str(lxmessage)+" to "+RNS.prettyhexrep(lxmessage.get_destination().hash), RNS.LOG_DEBUG) - else: - RNS.log("Outbound processing for "+str(lxmessage)+" to "+RNS.prettyhexrep(lxmessage.get_destination().hash), RNS.LOG_DEBUG) + if lxmessage.progress == None or lxmessage.progress < 0.01: lxmessage.progress = 0.01 - if lxmessage.progress == None or lxmessage.progress < 0.01: lxmessage.progress = 0.01 - - # Outbound handling for opportunistic messages - if lxmessage.method == LXMessage.OPPORTUNISTIC: - if lxmessage.delivery_attempts <= LXMRouter.MAX_DELIVERY_ATTEMPTS: - if lxmessage.delivery_attempts >= LXMRouter.MAX_PATHLESS_TRIES and not RNS.Transport.has_path(lxmessage.get_destination().hash): - RNS.log(f"Requesting path to {RNS.prettyhexrep(lxmessage.get_destination().hash)} after {lxmessage.delivery_attempts} pathless tries for {lxmessage}", RNS.LOG_DEBUG) - lxmessage.delivery_attempts += 1 - RNS.Transport.request_path(lxmessage.get_destination().hash) - lxmessage.next_delivery_attempt = time.time() + LXMRouter.PATH_REQUEST_WAIT - lxmessage.progress = 0.01 - elif lxmessage.delivery_attempts == LXMRouter.MAX_PATHLESS_TRIES+1 and RNS.Transport.has_path(lxmessage.get_destination().hash): - RNS.log(f"Opportunistic delivery for {lxmessage} still unsuccessful after {lxmessage.delivery_attempts} attempts, trying to rediscover path to {RNS.prettyhexrep(lxmessage.get_destination().hash)}", RNS.LOG_DEBUG) - lxmessage.delivery_attempts += 1 - RNS.Reticulum.get_instance().drop_path(lxmessage.get_destination().hash) - def rediscover_job(): - time.sleep(0.5) - RNS.Transport.request_path(lxmessage.get_destination().hash) - threading.Thread(target=rediscover_job, daemon=True).start() - lxmessage.next_delivery_attempt = time.time() + LXMRouter.PATH_REQUEST_WAIT - lxmessage.progress = 0.01 - else: - if not hasattr(lxmessage, "next_delivery_attempt") or time.time() > lxmessage.next_delivery_attempt: - lxmessage.delivery_attempts += 1 - 
lxmessage.next_delivery_attempt = time.time() + LXMRouter.DELIVERY_RETRY_WAIT - RNS.log("Opportunistic delivery attempt "+str(lxmessage.delivery_attempts)+" for "+str(lxmessage)+" to "+RNS.prettyhexrep(lxmessage.get_destination().hash), RNS.LOG_DEBUG) - lxmessage.send() - else: - RNS.log("Max delivery attempts reached for oppertunistic "+str(lxmessage)+" to "+RNS.prettyhexrep(lxmessage.get_destination().hash), RNS.LOG_DEBUG) - self.fail_message(lxmessage) - - # Outbound handling for messages transferred - # over a direct link to the final recipient - elif lxmessage.method == LXMessage.DIRECT: - if lxmessage.delivery_attempts <= LXMRouter.MAX_DELIVERY_ATTEMPTS: - delivery_destination_hash = lxmessage.get_destination().hash - direct_link = None - - if delivery_destination_hash in self.direct_links: - # An established direct link already exists to - # the destination, so we'll try to use it for - # delivering the message - direct_link = self.direct_links[delivery_destination_hash] - RNS.log(f"Using available direct link {direct_link} to {RNS.prettyhexrep(delivery_destination_hash)}", RNS.LOG_DEBUG) - - elif delivery_destination_hash in self.backchannel_links: - # An established backchannel link exists to - # the destination, so we'll try to use it for - # delivering the message - direct_link = self.backchannel_links[delivery_destination_hash] - RNS.log(f"Using available backchannel link {direct_link} to {RNS.prettyhexrep(delivery_destination_hash)}", RNS.LOG_DEBUG) - - if direct_link != None: - if direct_link.status == RNS.Link.ACTIVE: - if lxmessage.progress == None or lxmessage.progress < 0.05: - lxmessage.progress = 0.05 - if lxmessage.state != LXMessage.SENDING: - RNS.log("Starting transfer of "+str(lxmessage)+" to "+RNS.prettyhexrep(lxmessage.get_destination().hash)+" on link "+str(direct_link), RNS.LOG_DEBUG) - lxmessage.set_delivery_destination(direct_link) - lxmessage.send() - else: - if lxmessage.representation == LXMessage.RESOURCE: - RNS.log("The transfer of "+str(lxmessage)+" is in progress ("+str(round(lxmessage.progress*100, 1))+"%)", RNS.LOG_DEBUG) - else: - RNS.log("Waiting for proof for "+str(lxmessage)+" sent as link packet", RNS.LOG_DEBUG) - elif direct_link.status == RNS.Link.CLOSED: - if direct_link.activated_at != None: - RNS.log("The link to "+RNS.prettyhexrep(lxmessage.get_destination().hash)+" was closed unexpectedly, retrying path request...", RNS.LOG_DEBUG) - RNS.Transport.request_path(lxmessage.get_destination().hash) - else: - if not hasattr(lxmessage, "path_request_retried"): - RNS.log("The link to "+RNS.prettyhexrep(lxmessage.get_destination().hash)+" was never activated, retrying path request...", RNS.LOG_DEBUG) - RNS.Transport.request_path(lxmessage.get_destination().hash) - lxmessage.path_request_retried = True - else: - RNS.log("The link to "+RNS.prettyhexrep(lxmessage.get_destination().hash)+" was never activated", RNS.LOG_DEBUG) - - lxmessage.next_delivery_attempt = time.time() + LXMRouter.PATH_REQUEST_WAIT - - lxmessage.set_delivery_destination(None) - if delivery_destination_hash in self.direct_links: - self.direct_links.pop(delivery_destination_hash) - if delivery_destination_hash in self.backchannel_links: - self.backchannel_links.pop(delivery_destination_hash) - lxmessage.next_delivery_attempt = time.time() + LXMRouter.DELIVERY_RETRY_WAIT - else: - # Simply wait for the link to become active or close - RNS.log("The link to "+RNS.prettyhexrep(lxmessage.get_destination().hash)+" is pending, waiting for link to become active", RNS.LOG_DEBUG) - else: - # 
No link exists, so we'll try to establish one, but - # only if we've never tried before, or the retry wait - # period has elapsed. - if not hasattr(lxmessage, "next_delivery_attempt") or time.time() > lxmessage.next_delivery_attempt: - lxmessage.delivery_attempts += 1 - lxmessage.next_delivery_attempt = time.time() + LXMRouter.DELIVERY_RETRY_WAIT - - if lxmessage.delivery_attempts < LXMRouter.MAX_DELIVERY_ATTEMPTS: - if RNS.Transport.has_path(lxmessage.get_destination().hash): - RNS.log("Establishing link to "+RNS.prettyhexrep(lxmessage.get_destination().hash)+" for delivery attempt "+str(lxmessage.delivery_attempts)+" to "+RNS.prettyhexrep(lxmessage.get_destination().hash), RNS.LOG_DEBUG) - delivery_link = RNS.Link(lxmessage.get_destination()) - delivery_link.set_link_established_callback(self.process_outbound) - self.direct_links[delivery_destination_hash] = delivery_link - lxmessage.progress = 0.03 - else: - RNS.log("No path known for delivery attempt "+str(lxmessage.delivery_attempts)+" to "+RNS.prettyhexrep(lxmessage.get_destination().hash)+". Requesting path...", RNS.LOG_DEBUG) - RNS.Transport.request_path(lxmessage.get_destination().hash) - lxmessage.next_delivery_attempt = time.time() + LXMRouter.PATH_REQUEST_WAIT - lxmessage.progress = 0.01 - else: - RNS.log("Max delivery attempts reached for direct "+str(lxmessage)+" to "+RNS.prettyhexrep(lxmessage.get_destination().hash), RNS.LOG_DEBUG) - self.fail_message(lxmessage) - - # Outbound handling for messages transported via - # propagation to a LXMF router network. - elif lxmessage.method == LXMessage.PROPAGATED: - RNS.log("Attempting propagated delivery for "+str(lxmessage)+" to "+RNS.prettyhexrep(lxmessage.get_destination().hash), RNS.LOG_DEBUG) - - if self.outbound_propagation_node == None: - RNS.log("No outbound propagation node specified for propagated "+str(lxmessage)+" to "+RNS.prettyhexrep(lxmessage.get_destination().hash), RNS.LOG_ERROR) - self.fail_message(lxmessage) - else: + # Outbound handling for opportunistic messages + if lxmessage.method == LXMessage.OPPORTUNISTIC: if lxmessage.delivery_attempts <= LXMRouter.MAX_DELIVERY_ATTEMPTS: + if lxmessage.delivery_attempts >= LXMRouter.MAX_PATHLESS_TRIES and not RNS.Transport.has_path(lxmessage.get_destination().hash): + RNS.log(f"Requesting path to {RNS.prettyhexrep(lxmessage.get_destination().hash)} after {lxmessage.delivery_attempts} pathless tries for {lxmessage}", RNS.LOG_DEBUG) + lxmessage.delivery_attempts += 1 + RNS.Transport.request_path(lxmessage.get_destination().hash) + lxmessage.next_delivery_attempt = time.time() + LXMRouter.PATH_REQUEST_WAIT + lxmessage.progress = 0.01 + elif lxmessage.delivery_attempts == LXMRouter.MAX_PATHLESS_TRIES+1 and RNS.Transport.has_path(lxmessage.get_destination().hash): + RNS.log(f"Opportunistic delivery for {lxmessage} still unsuccessful after {lxmessage.delivery_attempts} attempts, trying to rediscover path to {RNS.prettyhexrep(lxmessage.get_destination().hash)}", RNS.LOG_DEBUG) + lxmessage.delivery_attempts += 1 + RNS.Reticulum.get_instance().drop_path(lxmessage.get_destination().hash) + def rediscover_job(): + time.sleep(0.5) + RNS.Transport.request_path(lxmessage.get_destination().hash) + threading.Thread(target=rediscover_job, daemon=True).start() + lxmessage.next_delivery_attempt = time.time() + LXMRouter.PATH_REQUEST_WAIT + lxmessage.progress = 0.01 + else: + if not hasattr(lxmessage, "next_delivery_attempt") or time.time() > lxmessage.next_delivery_attempt: + lxmessage.delivery_attempts += 1 + lxmessage.next_delivery_attempt 
= time.time() + LXMRouter.DELIVERY_RETRY_WAIT + RNS.log("Opportunistic delivery attempt "+str(lxmessage.delivery_attempts)+" for "+str(lxmessage)+" to "+RNS.prettyhexrep(lxmessage.get_destination().hash), RNS.LOG_DEBUG) + lxmessage.send() + else: + RNS.log("Max delivery attempts reached for oppertunistic "+str(lxmessage)+" to "+RNS.prettyhexrep(lxmessage.get_destination().hash), RNS.LOG_DEBUG) + self.fail_message(lxmessage) - if self.outbound_propagation_link != None: - # A link already exists, so we'll try to use it - # to deliver the message - if self.outbound_propagation_link.status == RNS.Link.ACTIVE: + # Outbound handling for messages transferred + # over a direct link to the final recipient + elif lxmessage.method == LXMessage.DIRECT: + if lxmessage.delivery_attempts <= LXMRouter.MAX_DELIVERY_ATTEMPTS: + delivery_destination_hash = lxmessage.get_destination().hash + direct_link = None + + if delivery_destination_hash in self.direct_links: + # An established direct link already exists to + # the destination, so we'll try to use it for + # delivering the message + direct_link = self.direct_links[delivery_destination_hash] + RNS.log(f"Using available direct link {direct_link} to {RNS.prettyhexrep(delivery_destination_hash)}", RNS.LOG_DEBUG) + + elif delivery_destination_hash in self.backchannel_links: + # An established backchannel link exists to + # the destination, so we'll try to use it for + # delivering the message + direct_link = self.backchannel_links[delivery_destination_hash] + RNS.log(f"Using available backchannel link {direct_link} to {RNS.prettyhexrep(delivery_destination_hash)}", RNS.LOG_DEBUG) + + if direct_link != None: + if direct_link.status == RNS.Link.ACTIVE: + if lxmessage.progress == None or lxmessage.progress < 0.05: + lxmessage.progress = 0.05 if lxmessage.state != LXMessage.SENDING: - RNS.log("Starting propagation transfer of "+str(lxmessage)+" to "+RNS.prettyhexrep(lxmessage.get_destination().hash)+" via "+RNS.prettyhexrep(self.outbound_propagation_node), RNS.LOG_DEBUG) - lxmessage.set_delivery_destination(self.outbound_propagation_link) + RNS.log("Starting transfer of "+str(lxmessage)+" to "+RNS.prettyhexrep(lxmessage.get_destination().hash)+" on link "+str(direct_link), RNS.LOG_DEBUG) + lxmessage.set_delivery_destination(direct_link) lxmessage.send() else: if lxmessage.representation == LXMessage.RESOURCE: RNS.log("The transfer of "+str(lxmessage)+" is in progress ("+str(round(lxmessage.progress*100, 1))+"%)", RNS.LOG_DEBUG) else: RNS.log("Waiting for proof for "+str(lxmessage)+" sent as link packet", RNS.LOG_DEBUG) - elif self.outbound_propagation_link.status == RNS.Link.CLOSED: - RNS.log("The link to "+RNS.prettyhexrep(self.outbound_propagation_node)+" was closed", RNS.LOG_DEBUG) - self.outbound_propagation_link = None + elif direct_link.status == RNS.Link.CLOSED: + if direct_link.activated_at != None: + RNS.log("The link to "+RNS.prettyhexrep(lxmessage.get_destination().hash)+" was closed unexpectedly, retrying path request...", RNS.LOG_DEBUG) + RNS.Transport.request_path(lxmessage.get_destination().hash) + else: + if not hasattr(lxmessage, "path_request_retried"): + RNS.log("The link to "+RNS.prettyhexrep(lxmessage.get_destination().hash)+" was never activated, retrying path request...", RNS.LOG_DEBUG) + RNS.Transport.request_path(lxmessage.get_destination().hash) + lxmessage.path_request_retried = True + else: + RNS.log("The link to "+RNS.prettyhexrep(lxmessage.get_destination().hash)+" was never activated", RNS.LOG_DEBUG) + + 
lxmessage.next_delivery_attempt = time.time() + LXMRouter.PATH_REQUEST_WAIT + + lxmessage.set_delivery_destination(None) + if delivery_destination_hash in self.direct_links: + self.direct_links.pop(delivery_destination_hash) + if delivery_destination_hash in self.backchannel_links: + self.backchannel_links.pop(delivery_destination_hash) lxmessage.next_delivery_attempt = time.time() + LXMRouter.DELIVERY_RETRY_WAIT else: - # Simply wait for the link to become - # active or close - RNS.log("The propagation link to "+RNS.prettyhexrep(self.outbound_propagation_node)+" is pending, waiting for link to become active", RNS.LOG_DEBUG) + # Simply wait for the link to become active or close + RNS.log("The link to "+RNS.prettyhexrep(lxmessage.get_destination().hash)+" is pending, waiting for link to become active", RNS.LOG_DEBUG) else: # No link exists, so we'll try to establish one, but # only if we've never tried before, or the retry wait @@ -2695,18 +2639,74 @@ class LXMRouter: lxmessage.next_delivery_attempt = time.time() + LXMRouter.DELIVERY_RETRY_WAIT if lxmessage.delivery_attempts < LXMRouter.MAX_DELIVERY_ATTEMPTS: - if RNS.Transport.has_path(self.outbound_propagation_node): - RNS.log("Establishing link to "+RNS.prettyhexrep(self.outbound_propagation_node)+" for propagation attempt "+str(lxmessage.delivery_attempts)+" to "+RNS.prettyhexrep(lxmessage.get_destination().hash), RNS.LOG_DEBUG) - propagation_node_identity = RNS.Identity.recall(self.outbound_propagation_node) - propagation_node_destination = RNS.Destination(propagation_node_identity, RNS.Destination.OUT, RNS.Destination.SINGLE, APP_NAME, "propagation") - self.outbound_propagation_link = RNS.Link(propagation_node_destination, established_callback=self.process_outbound) - self.outbound_propagation_link.set_packet_callback(self.propagation_transfer_signalling_packet) - self.outbound_propagation_link.for_lxmessage = lxmessage + if RNS.Transport.has_path(lxmessage.get_destination().hash): + RNS.log("Establishing link to "+RNS.prettyhexrep(lxmessage.get_destination().hash)+" for delivery attempt "+str(lxmessage.delivery_attempts)+" to "+RNS.prettyhexrep(lxmessage.get_destination().hash), RNS.LOG_DEBUG) + delivery_link = RNS.Link(lxmessage.get_destination()) + delivery_link.set_link_established_callback(self.process_outbound) + self.direct_links[delivery_destination_hash] = delivery_link + lxmessage.progress = 0.03 else: - RNS.log("No path known for propagation attempt "+str(lxmessage.delivery_attempts)+" to "+RNS.prettyhexrep(self.outbound_propagation_node)+". Requesting path...", RNS.LOG_DEBUG) - RNS.Transport.request_path(self.outbound_propagation_node) + RNS.log("No path known for delivery attempt "+str(lxmessage.delivery_attempts)+" to "+RNS.prettyhexrep(lxmessage.get_destination().hash)+". Requesting path...", RNS.LOG_DEBUG) + RNS.Transport.request_path(lxmessage.get_destination().hash) lxmessage.next_delivery_attempt = time.time() + LXMRouter.PATH_REQUEST_WAIT - + lxmessage.progress = 0.01 else: - RNS.log("Max delivery attempts reached for propagated "+str(lxmessage)+" to "+RNS.prettyhexrep(lxmessage.get_destination().hash), RNS.LOG_DEBUG) + RNS.log("Max delivery attempts reached for direct "+str(lxmessage)+" to "+RNS.prettyhexrep(lxmessage.get_destination().hash), RNS.LOG_DEBUG) self.fail_message(lxmessage) + + # Outbound handling for messages transported via + # propagation to a LXMF router network. 
+ elif lxmessage.method == LXMessage.PROPAGATED: + RNS.log("Attempting propagated delivery for "+str(lxmessage)+" to "+RNS.prettyhexrep(lxmessage.get_destination().hash), RNS.LOG_DEBUG) + + if self.outbound_propagation_node == None: + RNS.log("No outbound propagation node specified for propagated "+str(lxmessage)+" to "+RNS.prettyhexrep(lxmessage.get_destination().hash), RNS.LOG_ERROR) + self.fail_message(lxmessage) + else: + if lxmessage.delivery_attempts <= LXMRouter.MAX_DELIVERY_ATTEMPTS: + + if self.outbound_propagation_link != None: + # A link already exists, so we'll try to use it + # to deliver the message + if self.outbound_propagation_link.status == RNS.Link.ACTIVE: + if lxmessage.state != LXMessage.SENDING: + RNS.log("Starting propagation transfer of "+str(lxmessage)+" to "+RNS.prettyhexrep(lxmessage.get_destination().hash)+" via "+RNS.prettyhexrep(self.outbound_propagation_node), RNS.LOG_DEBUG) + lxmessage.set_delivery_destination(self.outbound_propagation_link) + lxmessage.send() + else: + if lxmessage.representation == LXMessage.RESOURCE: + RNS.log("The transfer of "+str(lxmessage)+" is in progress ("+str(round(lxmessage.progress*100, 1))+"%)", RNS.LOG_DEBUG) + else: + RNS.log("Waiting for proof for "+str(lxmessage)+" sent as link packet", RNS.LOG_DEBUG) + elif self.outbound_propagation_link.status == RNS.Link.CLOSED: + RNS.log("The link to "+RNS.prettyhexrep(self.outbound_propagation_node)+" was closed", RNS.LOG_DEBUG) + self.outbound_propagation_link = None + lxmessage.next_delivery_attempt = time.time() + LXMRouter.DELIVERY_RETRY_WAIT + else: + # Simply wait for the link to become + # active or close + RNS.log("The propagation link to "+RNS.prettyhexrep(self.outbound_propagation_node)+" is pending, waiting for link to become active", RNS.LOG_DEBUG) + else: + # No link exists, so we'll try to establish one, but + # only if we've never tried before, or the retry wait + # period has elapsed. + if not hasattr(lxmessage, "next_delivery_attempt") or time.time() > lxmessage.next_delivery_attempt: + lxmessage.delivery_attempts += 1 + lxmessage.next_delivery_attempt = time.time() + LXMRouter.DELIVERY_RETRY_WAIT + + if lxmessage.delivery_attempts < LXMRouter.MAX_DELIVERY_ATTEMPTS: + if RNS.Transport.has_path(self.outbound_propagation_node): + RNS.log("Establishing link to "+RNS.prettyhexrep(self.outbound_propagation_node)+" for propagation attempt "+str(lxmessage.delivery_attempts)+" to "+RNS.prettyhexrep(lxmessage.get_destination().hash), RNS.LOG_DEBUG) + propagation_node_identity = RNS.Identity.recall(self.outbound_propagation_node) + propagation_node_destination = RNS.Destination(propagation_node_identity, RNS.Destination.OUT, RNS.Destination.SINGLE, APP_NAME, "propagation") + self.outbound_propagation_link = RNS.Link(propagation_node_destination, established_callback=self.process_outbound) + self.outbound_propagation_link.set_packet_callback(self.propagation_transfer_signalling_packet) + self.outbound_propagation_link.for_lxmessage = lxmessage + else: + RNS.log("No path known for propagation attempt "+str(lxmessage.delivery_attempts)+" to "+RNS.prettyhexrep(self.outbound_propagation_node)+". 
Requesting path...", RNS.LOG_DEBUG) + RNS.Transport.request_path(self.outbound_propagation_node) + lxmessage.next_delivery_attempt = time.time() + LXMRouter.PATH_REQUEST_WAIT + + else: + RNS.log("Max delivery attempts reached for propagated "+str(lxmessage)+" to "+RNS.prettyhexrep(lxmessage.get_destination().hash), RNS.LOG_DEBUG) + self.fail_message(lxmessage) From 694f2413ea899f70b3bff7f079a879d2fa1d70d5 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Tue, 2 Dec 2025 20:43:44 +0100 Subject: [PATCH 114/123] Added more descriptive error if propagation node peers file is corrupt --- LXMF/LXMRouter.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 6f44f95..8506769 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -576,13 +576,20 @@ class LXMRouter: RNS.log("Rebuilding peer synchronisation states...", RNS.LOG_NOTICE) st = time.time() - if os.path.isfile(self.storagepath+"/peers"): - peers_file = open(self.storagepath+"/peers", "rb") + peers_storage_path = self.storagepath+"/peers" + if os.path.isfile(peers_storage_path): + peers_file = open(peers_storage_path, "rb") peers_data = peers_file.read() peers_file.close() if len(peers_data) > 0: - serialised_peers = msgpack.unpackb(peers_data) + try: serialised_peers = msgpack.unpackb(peers_data) + except Exception as e: + RNS.log(f"Could not load propagation node peering data from storage. The contained exception was: {e}", RNS.LOG_ERROR) + RNS.log(f"The peering data file located at {peers_storage_path} is likely corrupt.", RNS.LOG_ERROR) + RNS.log(f"You can delete this file and attempt starting the propagation node again, but peer synchronization states will be lost.", RNS.LOG_ERROR) + raise e + del peers_data while len(serialised_peers) > 0: From 6ecd271e48e9085031ef5a1c7d802958191170db Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Mon, 22 Dec 2025 22:22:17 +0100 Subject: [PATCH 115/123] Updated readme --- README.md | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/README.md b/README.md index ed7e4f0..cd1a6c5 100644 --- a/README.md +++ b/README.md @@ -183,6 +183,26 @@ options: Or run `lxmd --exampleconfig` to generate a commented example configuration documenting all the available configuration directives. +## Support LXMF Development +You can help support the continued development of open, free and private communications systems by donating via one of the following channels: + +- Monero: + ``` + 84FpY1QbxHcgdseePYNmhTHcrgMX4nFfBYtz2GKYToqHVVhJp8Eaw1Z1EedRnKD19b3B8NiLCGVxzKV17UMmmeEsCrPyA5w + ``` +- Bitcoin + ``` + bc1pgqgu8h8xvj4jtafslq396v7ju7hkgymyrzyqft4llfslz5vp99psqfk3a6 + ``` +- Ethereum + ``` + 0x91C421DdfB8a30a49A71d63447ddb54cEBe3465E + ``` +- Liberapay: https://liberapay.com/Reticulum/ + +- Ko-Fi: https://ko-fi.com/markqvist + + ## Caveat Emptor LXMF is beta software, and should be considered experimental. While it has been built with cryptography best practices very foremost in mind, it _has not_ been externally security audited, and there could very well be privacy-breaking bugs. If you want to help out, or help sponsor an audit, please do get in touch. 
From 97f8c105afd80d1817f9cd1ad8a1cf723654c392 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sun, 28 Dec 2025 00:55:10 +0100 Subject: [PATCH 116/123] Updated readme --- MIRROR.md | 33 +++++++++++++++++++++++++++++++++ README.md | 2 ++ 2 files changed, 35 insertions(+) create mode 100644 MIRROR.md diff --git a/MIRROR.md b/MIRROR.md new file mode 100644 index 0000000..ef2fdd4 --- /dev/null +++ b/MIRROR.md @@ -0,0 +1,33 @@ +This repository is a public mirror. All potential future development is happening elsewhere. + +I am stepping back from all public-facing interaction with this project. Reticulum has always been primarily my work, and continuing in the current public, internet-facing model is no longer sustainable. + +The software remains available for use as-is. Occasional updates may appear at unpredictable intervals, but there will be no support, no responses to issues, no discussions, and no community management in this or any other public venue. If it doesn't work for you, it doesn't work. That is the entire extent of available troubleshooting assistance I can offer you. + +If you've followed this project for a while, you already know what this means. You know who designed, wrote and tested this, and you know how many years of my life it took. You'll also know about both my particular challenges and strengths, and how I believe anything worth building needs to be built and maintained with our own hands. + +Seven months ago, I said I needed to step back, that I was exhausted, and that I needed to recover. I believed a public resolve would be enough to effectuate that, but while striving to get just a few more useful features and protocols out, the unproductive requests and demands also ramped up, and I got pulled back into the same patterns and draining interactions that I'd explicitly said I couldn't sustain anymore. + +So here's what you might have already guessed: I'm done playing the game by rules I can't win at. + +Everything you need is right here, and by any sensible measure, it's done. Anyone who wants to invest the time, skill and persistence can build on it, or completely re-imagine it with different priorities. That was always the point. + +The people who actually contributed - you know who you are, and you know I mean it when I say: Thank you. All of you who've used this to build something real - that was the goal, and you did it without needing me to hold your hand. + +The rest of you: You have what you need. Use it or don't. I am not going to be the person who explains it to you anymore. + +This is not a temporary break. It's not "see you after some rest", but a recognition that the current model is fundamentally incompatible with my life, my health, and my reality. + +If you want to support continued work, you can do so at the donation links listed in this repository. But please understand, that this is not purchasing support or guaranteeing updates. It is support for work that happens on my timeline, according to my capacity, which at the moment is not what it was. + +If you want Reticulum to continue evolving, you have the power to make that happen. The protocol is public domain. The code is open source. Everything you need is right here. I've provided the tools, but building what comes next is not my responsibility anymore. It's yours. + +To the small group of people who has actually been here, and understood what this work was and what it cost - you already know where to find me if it actually matters. + +To everyone else: This is where we part ways. 
No hard feelings. It's just time. + +--- + +असतो मा सद्गमय +तमसो मा ज्योतिर्गमय +मृत्योर्मा अमृतं गमय diff --git a/README.md b/README.md index cd1a6c5..e435cec 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,7 @@ # Lightweight Extensible Message Format +*This repository is [a public mirror](./MIRROR.md). All development is happening elsewhere.* + LXMF is a simple and flexible messaging format and delivery protocol that allows a wide variety of implementations, while using as little bandwidth as possible. It is built on top of [Reticulum](https://reticulum.network) and offers zero-conf message routing, end-to-end encryption and Forward Secrecy, and can be transported over any kind of medium that Reticulum supports. LXMF is efficient enough that it can deliver messages over extremely low-bandwidth systems such as packet radio or LoRa. Encrypted LXMF messages can also be encoded as QR-codes or text-based URIs, allowing completely analog *paper message* transport. From 72853fcf77b1653dcfb33d83390b4539fd71178e Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sun, 4 Jan 2026 01:05:01 +0100 Subject: [PATCH 117/123] Updated versions --- LXMF/_version.py | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/LXMF/_version.py b/LXMF/_version.py index c598173..e94731c 100644 --- a/LXMF/_version.py +++ b/LXMF/_version.py @@ -1 +1 @@ -__version__ = "0.9.3" +__version__ = "0.9.4" diff --git a/setup.py b/setup.py index 16d8d3c..93e5cc0 100644 --- a/setup.py +++ b/setup.py @@ -26,6 +26,6 @@ setuptools.setup( 'lxmd=LXMF.Utilities.lxmd:main', ] }, - install_requires=["rns>=1.0.1"], + install_requires=["rns>=1.1.0"], python_requires=">=3.7", ) From 7c9bdd7fa2ef631a3200f18d3fa360d8f5c72a67 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sun, 4 Jan 2026 01:32:40 +0100 Subject: [PATCH 118/123] Fix --- LXMF/LXMRouter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 8506769..82f141d 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -1236,7 +1236,7 @@ class LXMRouter: os.makedirs(self.storagepath) outbound_stamp_costs_file = open(self.storagepath+"/outbound_stamp_costs", "wb") - outbound_stamp_costs_file.write(msgpack.packb(self.outbound_stamp_costs)) + outbound_stamp_costs_file.write(msgpack.packb(self.outbound_stamp_costs.copy())) outbound_stamp_costs_file.close() except Exception as e: From ef2e1234a5f34bceed0b446dbbf5110246011d71 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Tue, 6 Jan 2026 13:57:12 +0100 Subject: [PATCH 119/123] Fix --- LXMF/LXMRouter.py | 24 ++++++++++++++---------- LXMF/Utilities/lxmd.py | 10 ++++------ 2 files changed, 18 insertions(+), 16 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 82f141d..c1903cd 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -1365,20 +1365,24 @@ class LXMRouter: self.save_node_stats() def sigint_handler(self, signal, frame): - if not self.exit_handler_running: - RNS.log("Received SIGINT, shutting down now!", RNS.LOG_WARNING) - self.exit_handler() - RNS.exit(0) + if threading.current_thread() != threading.main_thread(): os._exit(0) else: - RNS.log("Received SIGINT, but exit handler is running, keeping process alive until storage persist is complete", RNS.LOG_WARNING) + if not self.exit_handler_running: + RNS.log("Received SIGINT, shutting down now!", RNS.LOG_WARNING) + self.exit_handler() + RNS.exit(0) + else: + RNS.log("Received SIGINT, but exit handler is running, keeping process alive until storage persist is complete", 
RNS.LOG_WARNING) def sigterm_handler(self, signal, frame): - if not self.exit_handler_running: - RNS.log("Received SIGTERM, shutting down now!", RNS.LOG_WARNING) - self.exit_handler() - RNS.exit(0) + if threading.current_thread() != threading.main_thread(): os._exit(0) else: - RNS.log("Received SIGTERM, but exit handler is running, keeping process alive until storage persist is complete", RNS.LOG_WARNING) + if not self.exit_handler_running: + RNS.log("Received SIGTERM, shutting down now!", RNS.LOG_WARNING) + self.exit_handler() + RNS.exit(0) + else: + RNS.log("Received SIGTERM, but exit handler is running, keeping process alive until storage persist is complete", RNS.LOG_WARNING) def __str__(self): return "" diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index ab8e30f..4838189 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -319,11 +319,8 @@ def program_setup(configdir = None, rnsconfigdir = None, run_pn = False, on_inbo storagedir = configdir+"/storage" lxmdir = storagedir+"/messages" - if not os.path.isdir(storagedir): - os.makedirs(storagedir) - - if not os.path.isdir(lxmdir): - os.makedirs(lxmdir) + if not os.path.isdir(storagedir): os.makedirs(storagedir) + if not os.path.isdir(lxmdir): os.makedirs(lxmdir) if not os.path.isfile(configpath): RNS.log("Could not load config file, creating default configuration file...") @@ -351,6 +348,7 @@ def program_setup(configdir = None, rnsconfigdir = None, run_pn = False, on_inbo # Start Reticulum RNS.log("Substantiating Reticulum...") reticulum = RNS.Reticulum(configdir=rnsconfigdir, loglevel=targetloglevel, logdest=targetlogdest) + if targetlogdest == RNS.LOG_FILE: RNS.logfile = configdir+"/logfile" # Generate or load primary identity if os.path.isfile(identitypath): @@ -971,7 +969,7 @@ autopeer = yes # The maximum peering depth (in hops) for # automatically peered nodes. 
-autopeer_maxdepth = 4 +autopeer_maxdepth = 6 # The maximum amount of storage to use for # the LXMF Propagation Node message store, From 2b7ba9558b1e8a39e522248c498e473452fe24ba Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Tue, 6 Jan 2026 17:03:09 +0100 Subject: [PATCH 120/123] Cleanup --- LXMF/LXMRouter.py | 8 ++++++-- LXMF/LXStamper.py | 24 ++++++++++++++---------- 2 files changed, 20 insertions(+), 12 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index c1903cd..4b593a9 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -1365,7 +1365,9 @@ class LXMRouter: self.save_node_stats() def sigint_handler(self, signal, frame): - if threading.current_thread() != threading.main_thread(): os._exit(0) + if threading.current_thread() != threading.main_thread(): + RNS.log(f"SIGINT on non-main thread {threading.current_thread()}, exiting immediately", RNS.LOG_WARNING) + os._exit(0) else: if not self.exit_handler_running: RNS.log("Received SIGINT, shutting down now!", RNS.LOG_WARNING) @@ -1375,7 +1377,9 @@ class LXMRouter: RNS.log("Received SIGINT, but exit handler is running, keeping process alive until storage persist is complete", RNS.LOG_WARNING) def sigterm_handler(self, signal, frame): - if threading.current_thread() != threading.main_thread(): os._exit(0) + if threading.current_thread() != threading.main_thread(): + RNS.log(f"SIGTERM on non-main thread {threading.current_thread()}, exiting immediately", RNS.LOG_WARNING) + os._exit(0) else: if not self.exit_handler_running: RNS.log("Received SIGTERM, shutting down now!", RNS.LOG_WARNING) diff --git a/LXMF/LXStamper.py b/LXMF/LXStamper.py index 39b541b..3d7a1c2 100644 --- a/LXMF/LXStamper.py +++ b/LXMF/LXStamper.py @@ -15,8 +15,6 @@ PN_VALIDATION_POOL_MIN_SIZE = 256 active_jobs = {} -if RNS.vendor.platformutils.is_linux(): multiprocessing.set_start_method("fork") - def stamp_workblock(material, expand_rounds=WORKBLOCK_EXPAND_ROUNDS): wb_st = time.time() workblock = b"" @@ -79,8 +77,10 @@ def validate_pn_stamps_job_multip(transient_list, target_cost): pool_count = min(cores, math.ceil(len(transient_list) / PN_VALIDATION_POOL_MIN_SIZE)) RNS.log(f"Validating {len(transient_list)} stamps using {pool_count} processes...", RNS.LOG_VERBOSE) - with multiprocessing.Pool(pool_count) as p: + with multiprocessing.get_context("spawn").Pool(pool_count) as p: validated_entries = p.starmap(validate_pn_stamp, zip(transient_list, itertools.repeat(target_cost))) + + RNS.log(f"Validation pool completed for {len(transient_list)} stamps", RNS.LOG_VERBOSE) return [e for e in validated_entries if e[0] != None] @@ -210,22 +210,19 @@ def job_linux(stamp_cost, workblock, message_id): job_procs = [] RNS.log(f"Starting {jobs} stamp generation workers", RNS.LOG_DEBUG) for jpn in range(jobs): - process = multiprocessing.Process(target=job, kwargs={"stop_event": stop_event, "pn": jpn, "sc": stamp_cost, "wb": workblock}, daemon=True) + process = multiprocessing.get_context("fork").Process(target=job, kwargs={"stop_event": stop_event, "pn": jpn, "sc": stamp_cost, "wb": workblock}, daemon=True) job_procs.append(process) process.start() active_jobs[message_id] = [stop_event, result_queue] stamp = result_queue.get() - RNS.log("Got stamp result from worker", RNS.LOG_DEBUG) # TODO: Remove # Collect any potential spurious # results from worker queue. 
try: - while True: - result_queue.get_nowait() - except: - pass + while True: result_queue.get_nowait() + except: pass for j in range(jobs): nrounds = 0 @@ -388,4 +385,11 @@ if __name__ == "__main__": RNS.log("", RNS.LOG_DEBUG) RNS.log("Testing peering key generation", RNS.LOG_DEBUG) message_id = os.urandom(32) - generate_stamp(message_id, cost, expand_rounds=WORKBLOCK_EXPAND_ROUNDS_PEERING) \ No newline at end of file + generate_stamp(message_id, cost, expand_rounds=WORKBLOCK_EXPAND_ROUNDS_PEERING) + + transient_list = [] + st = time.time(); count = 10000 + for i in range(count): transient_list.append(os.urandom(256)) + validate_pn_stamps(transient_list, 5) + dt = time.time()-st; mps = count/dt + RNS.log(f"Validated {count} PN stamps in {RNS.prettytime(dt)}, {round(mps,1)} m/s", RNS.LOG_DEBUG) From ba2c6b833e327f7bb7f215c0c46f749e7c11533d Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Tue, 6 Jan 2026 17:55:31 +0100 Subject: [PATCH 121/123] Versions --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 93e5cc0..0c8fe7e 100644 --- a/setup.py +++ b/setup.py @@ -26,6 +26,6 @@ setuptools.setup( 'lxmd=LXMF.Utilities.lxmd:main', ] }, - install_requires=["rns>=1.1.0"], + install_requires=["rns>=1.1.1"], python_requires=">=3.7", ) From 2ad82b68bde5d76dfee2b333e06079fd43bbc13f Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Tue, 6 Jan 2026 21:01:53 +0100 Subject: [PATCH 122/123] Announce control destination --- LXMF/LXMRouter.py | 1 + 1 file changed, 1 insertion(+) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 4b593a9..cccd1e4 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -321,6 +321,7 @@ class LXMRouter: def delayed_announce(): time.sleep(LXMRouter.NODE_ANNOUNCE_DELAY) self.propagation_destination.announce(app_data=self.get_propagation_node_app_data()) + if len(self.control_allowed_list) > 1: self.control_destination.announce() da_thread = threading.Thread(target=delayed_announce) da_thread.setDaemon(True) From 269ce43afc6552e934c212887c2450718311396a Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sat, 10 Jan 2026 21:50:42 +0100 Subject: [PATCH 123/123] Added Zen of Reticulum --- Zen of Reticulum.md | 415 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 415 insertions(+) create mode 100644 Zen of Reticulum.md diff --git a/Zen of Reticulum.md b/Zen of Reticulum.md new file mode 100644 index 0000000..aa286be --- /dev/null +++ b/Zen of Reticulum.md @@ -0,0 +1,415 @@ +# Zen of Reticulum + +## I: The Illusion Of The Center + +For the better part of a generation, we have been taught to visualize the digital world through the lens of hierarchy. The mental maps we carry are dominated by a single, misleading image: **The Cloud**. + +We imagine the network as a vast, ethereal space "up there" or "out there". A centralized repository of services and data to which we, the lowly clients, must connect. We build our software with this assumption hardcoded into our logic: *There is a server. The server has the authority. The server knows the way. I must find the server to function*. + +This is the Client-Server mental model, and it is the primary obstacle to understanding Reticulum. + +### Fallacy Of The Cloud + +The first step in the Zen of Reticulum is to realize that *there is no cloud*. There is only other people's computers. When you build for the cloud, you are building *for* a landlord. 
You are accepting that your application's existence is conditional on the permission, uptime, and continued goodwill of a central authority. + +In Reticulum, you must shift your thinking from "connecting to" to "being among". Reticulum is not a service you subscribe to - *it is a fabric you inhabit*. There is no "up there". There is only *here* and *there*, and the space between them is peer-to-peer. + +### Decentralization Or Uncentralizability? + +It is common to hear the word "decentralized" thrown around in modern tech circles. But often, this is merely a marketing term for "slightly distributed centralization". A blockchain with a few dominant miners, or a federated protocol with a few giant servers. *In practice*, it's still centralized. It simply has a few centers instead of one. + +Reticulum goes further. It wants **Uncentralizability**. + +This is not a wishful political stance, but a foundational mathematical characteristic of the protocol, onto which everything else has been built. Reticulum assumes that every peer on the network is potentially hostile, and every link is potentially compromised. It is designed with no "privileged" nodes. While some nodes may act as Transport Instances - forwarding traffic for others - they do so *blindly*, and they only know about their immediate surroundings, and nothing more. They route based on cryptographic proofs, not on administrative privilege. They cannot see who is talking to whom, nor can they selectively manipulate traffic without breaking their own ability to route entirely. + +The system is designed to make hierarchy structurally impossible. You cannot hijack an address, because there is no central registry to hijack. You cannot block a user, because there is no central switch to flip. You can offer paths through the network, but you can't force anyone to use them. + +### Death To The Address + +To break free of the center, you must also let go of the concept of the "Address". + +In the IP world, an address is a location. It is a coordinate in a *deeply hierarchical* and static grid. If you move your computer to a different house, your address changes. If your router reboots, your address might change. Your *identity* is bound to your *location*, and therefore, it is fragile, and easily controlled. + +Reticulum abolishes this link between *Identity* and *Location*. + +In Reticulum, an address is not a place; it is a **Hash of an Identity**. It is a cryptographic representation of *who* you are, not *where* you are. Because of this, your address is portable. You can take a laptop from a WiFi cafe in Berlin, to a LoRa mesh in the mountains, to a packet radio link on a boat, and your "address" - your *Destination Hash* - never changes. + +The network does not route to a place; it routes to a *person* (or a machine). When you send a packet, you are not targeting a coordinate in a grid; you are encrypting a message for a specific entity. The network dynamically discovers where that entity currently resides, and it does so in a way where no one really knows where that entity is actually located physically. + +**Consider:** + +- **The Old Way:** *"I am at `192.168.1.5`. Come find me"*. +- **The Zen Way:** *"I am `<327c1b2f87c9353e01769b01090b18f2>`. Wherever I am, my peers can reach me"*. + +Once you stop thinking about servers and start thinking about portable identities, where everyone can always reach everyone else directly, the illusion of the center fades away. You realize there *is* no center holding the network together. 
No coordinators or bureaucrats required. The network is simply the sum of its peers, communicating directly, sovereignly, and without a master. + + +## II: Physics Of Trust +*Paranoia Is A Great Design Principle* + +If we accept that there is no center - that the network is a chaotic, peer-to-peer mesh - we are forced to confront a terrifying reality: **There is no one guarding the door**. + +In the traditional networking mindset, we rely on the concept of the "trusted core". We assume our local coffee shop WiFi is safe, or that the backbone providers are neutral custodians. We build our security like a castle: strong walls on the outside, soft and trusting on the inside. We use encryption only when we step out into the "wild" internet. + +### Hostile Environments + +The Zen of Reticulum requires you to invert this. You must assume that *every* environment is hostile. This isn't cynicism, just uncaring physics. + +When you transmit information over radio waves, you are shouting into a crowded room. Anyone can listen. When you traverse the internet, your packets pass through routers controlled by strangers, corporations, and state actors. Assuming privacy in this environment without cryptographic protection is not optimism but gross negligence. + +Reticulum is built on the premise that every link is tapped, and every peer is a potential adversary. If your system cannot survive an adversary owning the physical layer, it cannot survive at all. + +But this is the paradox: By assuming the network is hostile, you make it safe. When you accept the dangers for what they are, they become manageable. When you stop trusting the infrastructure and start trusting the math, you eliminate the single point of failure: Human integrity. + +### Encryption Is Not A Feature + +In the world of TCP/IP, encryption is an afterthought. It is a layer we slap on top of the protocol (HTTPS, TLS) to patch the security holes of the original design. It is a "feature" you sometimes *enable* for "sensitive data". This is fundamentally flawed, since all data is sensitive. + +In Reticulum, encryption is **gravity**. + +It is not optional. It is not a plugin. It is the *fundamental force that allows the network to exist*. If you were to strip the encryption from Reticulum, the routing would break. The Transport system uses cryptographic signatures and entropy to verify paths and pass information. If packets were plaintext, intermediate nodes could not prove that a route was valid, nor could endpoints prevent spoofing or tampering. + +In Reticulum, the entropy of the encrypted packet *is* the routing logic. + +To ask for a version of Reticulum without encryption is like asking for a version of the ocean without liquid. You are not asking for a feature change; you're asking for a different physical universe. We design for a universe where information has mass, structure, and integrity. + +### Zero-Trust Architectures + +We must unlearn our reliance on **Institutional Trust**. + +For decades, we have been trained to trust authorities. We trust a website because a chain of Certificate Authorities (companies we don't know) vouches for it. We trust an app because it is in an app store (run by a corporation we don't control). We trust a message because it comes from a phone number assigned by a telecom. Yet, everything in our digital information sphere today is more untrustworthy and risky than a medieval second-hand underwear market. + +Reticulum replaces institutional trust with **Cryptographic Proof**. 
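Where the essay says proof rather than authority, the underlying mechanics are small enough to show. A hedged sketch using the RNS Python API; the application name and aspect are illustrative placeholders, and a reachable Reticulum instance is assumed:

```python
import RNS

reticulum = RNS.Reticulum()        # attach to (or start) a Reticulum instance

identity = RNS.Identity()          # a new cryptographic identity (keypair)
destination = RNS.Destination(identity, RNS.Destination.IN,
                              RNS.Destination.SINGLE, "example_app", "inbox")

# The address is a hash derived from the identity and destination name,
# not from a network location, so it stays the same whether the node sits
# on WiFi, LoRa, or a packet radio link.
RNS.log(f"Reachable as {RNS.prettyhexrep(destination.hash)}")
```

Verification runs the same way in reverse: a peer either holds the private key matching that hash and can produce valid signatures, or it cannot.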
+ +In Reticulum, you do not trust a node because it has a nice hostname or because it is listed in a directory. You trust it because it holds the private key corresponding to the Destination Hash you are communicating with. This trust is binary, mathematical, and **absolute**. Either the signature matches, or it does not. There is no "maybe". + +This shift moves the power from the institution to the individual. You become the ultimate arbiter of your own trust relationships. You decide which keys to accept, which paths to follow, and which identities to recognize. + +**Consider:** + +- **The Old Way:** *"I trust this site because the browser says the lock icon is green"*. +- **The Zen Way:** *"I trust this destination because I have verified its hash fingerprint out-of-band, and the math confirms the signature"*. + +When you internalize the Physics of Trust, you stop looking for protection from firewalls, VPNs, and Terms of Service agreements. You realize that true security comes from the design of the protocol itself. You can stop trusting the cloud, and you start trusting the code - because you can verify it yourself. + + +## III: Merits Of Scarcity +*Every Bit Counts* + +We have grown addicted to abundance. In the modern digital ecosystem, bandwidth is treated as an endless, flat ocean. We stream high-definition video without a thought, we ship entire libraries of code just to render a single button, and we measure performance in gigabits per second. This abundance has hollowed out our craft. When constraints vanish, efficiency dies, and with it, a certain kind of Clarity and Quality. + +Reticulum asks you to step out of the ocean and onto the tightrope. + +### The Bandwidth Fallacy + +The Zen of Reticulum requires the realization that **5 bits per second is a valid speed**. + +To a modern developer, this sounds like paralysis. But there is a profound freedom in limits: When you have a gigabit connection, you can be incredibly sloppy. You can be wasteful. You can push your problems onto the infrastructure. *"It’s slow? Get a faster router"*. + +But on a high-latency, low-bandwidth link (be it a noisy HF radio channel or a tenuous LoRa hop) you cannot push problems anywhere. You must solve them. The network does not negotiate with waste. + +This forces a shift from consumption to interaction. You are no longer, then, consuming a service provided by a fat pipe; you are engaging in a careful negotiation with the physical medium. The medium becomes a partner in the conversation, not just a dumb conduit. You suddenly need to *understand the world to be in it*. + +### Cost Of A Byte + +In a scarce economy, a byte is not just data, but energy, time, and space. + +Every byte you transmit consumes battery life on a solar-powered node. It occupies valuable airtime that could have been used by another peer. It represents a measurable slice of the electromagnetic spectrum. + +When you internalize this, you begin to write code differently. You stop asking, "How much data can I send?" and start asking, "What is the *minimum* amount of information required to convey this intent? How can I best utilize my informational entropy?" + +This is where the elegance of Reticulum shines. The protocol is designed to strip away the non-essential. A link establishment takes three very small packets. A destination hash fits in 16 bytes. The overhead is vanishingly small, leaving almost the entire channel for the message itself. + +**Consider:** + +- **The Old Way:** *"I need to send a status update. 
I'll send a JSON object with metadata, timestamps, and user profile info (15KB)."* +- **The Zen Way:** *"I need to send a status update. I'll send a single byte representing the state code. The context is already known."* + +This is, of course, an optimization, but more importantly, *it is a form of respect*. Efficiency in a shared medium is an act of stewardship. By taking only what you need from the network, you leave room for others. The network listens to those who speak with purpose. + +### Flow & Time + +Scarcity also teaches us about time. We have become addicted to the *synchronous* now - the instant ping, the real-time stream. But Reticulum embraces *asynchronous* time. + +When links are intermittent and latency is measured in minutes or hours, "real-time" is an illusion. Reticulum doesn't treat **Store and Forward** as a mere fallback, but as a primary mode of existence. You write a message, it propagates when it can, and it arrives when it arrives. + +This changes the psychological texture of communication. It removes the anxiety of the immediate response. It allows for contemplation. You are not demanding the recipient's attention *right now*; you are placing a gift in their path, to be found when they are ready. + +By designing for delay, you design for resilience. You are no longer building a house of cards that collapses when a single packet drops. You are building a stone arch that distributes the load *over time*. + +### Liberation From Limits + +There is a strange optimism in scarcity. When you are forced to work within strict constraints, you are forced to prioritize. *You* must decide what truly matters. *That* is the real core of agency. + +In the infinite fantasy world of The Cloud, everything is urgent, so nothing is. In the economy of Reticulum, the cost of transmission forces you to weigh the value of your message. Do you really need to send that heartbeat? Is that photo essential? + +When you strip away the noise, what remains is *signal*. + +This discipline creates a different kind of developer. It creates a craftsman who understands that the best code is the code you don't have to write. It creates a user who understands that the most powerful message is the one that is *understood*, not the one that is loudest. In the world of Reticulum, you are not a mere consumer of bandwidth; you are an architect of intent. + + +## IV: Sovereignty Through Infrastructure +**Be Your Own Network** + +We live in an era of digital tenancy. We lease our connectivity from ISPs. We rent our storage from cloud providers. We even borrow our identity from social media platforms. We are tenants in a house we did not build, governed by rules we did not write, subject to eviction at the whim of a landlord who has never met us. + +The Zen of Reticulum is the realization that you *can* own the house. + +### A Carrier-Grade Fallacy + +For decades, we have been gaslit into believing that networking is not just hard, but impossible. It is presented as a dark art reserved for telcos and billionaires, requiring millions of dollars of fiber optics, climate-controlled data centers, and armies of engineers. We are told that building reliable infrastructure is "too complex" for the individual or small organization. + +This is a big, fat lie. + +Physics is simple. A radio wave needs a transmitter and a receiver. A packet needs a path. 
The "complexity" of the modern internet is largely bureaucratic - a mountain of billing systems, regulatory capture, and legacy cruft designed to keep the gatekeepers in power. + +Reticulum strips away the bureaucracy. It runs on hardware that costs the price of a dinner. It runs on spectrum that is free to use. It demonstrates that a robust, planetary-scale network does not require a Fortune 500 company. It requires only the will to deploy, and the distributed, uncoordinated efforts of many individuals. + +### Personal Infrastructure + +This is where the rubber meets the road. You can read about Reticulum, you can understand the theory, but the insights only arrive when you plug in a radio and run a Transport Node. Suddenly, you are no longer a consumer. You're an operator. + +This shift is subtle but profound. When you run your own infrastructure, the network ceases to be a service that is provided *to* you. It becomes a space that you *inhabit*. You become responsible for the flow of information. You gain an intimate understanding of the medium - the way the weather affects the radio waves, the way the topology changes, the way the packets dance through the ether. + +There is a quiet competence that comes from this. You stop asking "Is the internet down?" and start asking "Is *my* links up?" You stop waiting for a technician and start checking the logs. This is a form of strength. To understand the system that carries your words is to be free from the mystery that keeps you dependent. + +### The Ability To Disconnect + +Why go to the trouble? Why buy the radio, write the config, and leave the Pi running in the corner? + +Because the old, centralized network is fragile. And because most of us doesn't even really want to be there anymore. + +The internet we rely on today is a chain of single points of failure. Cut the undersea cable, and a continent goes dark. Shut down the power grid, and the cloud evaporates. Deprioritize the "wrong" traffic, and the flow of information is strangled. + +Sovereignty is the ability to survive the cut, whether or not that cut was an accident or on purpose. + +When you build your own infrastructure, you build a lifeline. Reticulum is designed to function over media that the traditional internet cannot touch - bare wires, battery-powered radios, ad-hoc WiFi meshes. When the grid fails, or the censors arrive, or the bill goes unpaid, your Reticulum network continues to hum. + +This is not about "dropping out" of society. It is about building a substrate on which an actual *Society* can function. + +**Consider:** + +- **The Old Way:** "My connection is slow. I should call my ISP and complain." +- **The Zen Way:** "The path is noisy. I will adjust the antenna or find a better route." + +By taking ownership of the infrastructure, you take ownership of your voice. You stop shouting into someone else's megaphone and start building your own. The network is no longer something that happens to you; it is something you make happen. + + +# V: Identity and Nomadism +**A Fluid Self** + +In the old world, you are defined by your coordinates. If you are at `34.109.71.5`, you're *here*. If you unplug the cable and walk down the street, you vanish. Your digital self evaporates because it was tethered to the wall. You are a ghost in the endless machinations of gears, levers and transistors, bound to the hardware, and those that own it. + +This creates a subtle, constant anxiety. 
## V: Identity And Nomadism +**A Fluid Self** + +In the old world, you are defined by your coordinates. If you are at `34.109.71.5`, you're *here*. If you unplug the cable and walk down the street, you vanish. Your digital self evaporates because it was tethered to the wall. You are a ghost in the endless machinations of gears, levers and transistors, bound to the hardware, and those that own it. + +This creates a subtle, constant anxiety. We are terrified of disconnecting because, in the architecture of the old web, disconnecting is a kind of death. + +The Zen of Reticulum offers a different way to be. + +### Portable Existence + +In Reticulum, your identity is not a location or a username granted by a service. It is a cryptographic key - a complex, unique mathematical signature that exists independently of the physical world. You can carry it only in your mind, if you want to. + +Think of it less like a street address and more like a name. *A true name*. + +If you travel from Berlin to Tokyo, you do not change your name. You are still you. The people who know you can still recognize you. Reticulum applies this principle to the network layer. Your Destination Hash is **invariant**. It travels with you, stored securely on your device, *immutable as a stone*. + +This changes the relationship between you and the machine. You are not "logged into" the network via a specific gateway. You *are* the endpoint. The network does not connect to a place; *it converges on you*. + +### Roaming Nodes + +This freedom introduces a new concept of time and space: **Nomadism**. + +Because your identity is portable, your connectivity can be fluid. You can be sitting at a desk connected to a fiber backbone one moment, and walking through a field connected only to a long-range LoRa mesh the next. To the rest of the network, nothing has changed. Your friends do not need to update your contact info. The messages they send do not bounce back. The network senses the shift in the medium and reroutes the flow of data automatically. + +You are no longer a stationary node in a fixed grid. You are a wanderer in a fluid medium. + +The interface - whether it is WiFi, Ethernet, Packet Radio, or a physical wire - is merely the clothing your node wears. You change it to suit the environment. Underneath, you remain the same. This is the liberation of the protocol. It treats the physical medium as a transient circumstance, not a definition of self. + +**Consider:** + +- **The Old Way:** *"I lost connection. I have to reconnect to the VPN to tell them where I am now."* +- **The Zen Way:** *"I moved. The network subtly bends to accommodate this new reality."* + +### Announcing Presence + +How does the network find a wanderer? It listens. + +In the IP world, we query directories. We ask a server, "Where is Mark?" The server checks its database and gives us a coordinate. This means that someone, somewhere, is keeping track of you. It assumes and *requires* surveillance. + +Reticulum replaces surveillance with **Announces**. + +Instead of asking a central authority where you are, you simply state your presence. You broadcast a cryptographic proof: "I am here, and I am who I say I am". This ripples out through the mesh. Your neighbors hear it, update their path tables, and pass it on. + +This is a quiet, organic process. It is the digital equivalent of lighting lanterns in the dark. You do not need to chase the light; you let the light find you. It respects your autonomy. You choose when to announce, how often to speak, and to whom. You also choose when to disappear - for but a moment or perpetually.
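In the reference implementation, stating your presence is a single call. A minimal sketch, assuming the RNS Python API (the application name and aspect are illustrative, and a real application would load a persisted identity rather than create a new one):

```python
import RNS

reticulum = RNS.Reticulum()

# The destination hash stays the same no matter which medium this
# node happens to be using right now.
identity = RNS.Identity()  # illustrative; load your stored identity in practice
destination = RNS.Destination(
    identity, RNS.Destination.IN, RNS.Destination.SINGLE,
    "example_app", "presence")

# State your presence. No registry is asked, no directory is updated.
# The announce ripples outward, and peers update their own path tables.
destination.announce()
```

When, and how often, to call this is entirely up to you.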
### Anchor In The Flow + +There is a deep peace in this nomadism. It teaches you that stability does not come from standing still. Stability comes from *internal coherence*. + +By holding your own private key, you hold your own center of gravity. The world around you - the infrastructure, the topography, and the availability of links - can all shift chaotically. Storms can knock out towers. Cables can be cut. The internet can go down. + +But as long as you possess your key, you possess your identity. The entire infrastructure can be destroyed and rebuilt, and you are still you. Nothing lasts, yet nothing is lost. + +You become a sovereign entity moving through the noise, connected not by the rigidity of cables, but by the fluidity of recognition. The network becomes a place you inhabit, rather than a utility you subscribe to: You are at home in the ether. + + +## VI: Ethics Of The Tool +**Technology With Conscience** + +You have unlearned the center. You have accepted the physics of trust. You have embraced the economy of scarcity and the freedom of unbound nomadism. You are standing in a new space. Now, look at the tool in your hand. + +In the old world, we were taught that technology is neutral. We are told that "guns don't kill people, people do", or that a component is just a component, indifferent to its combinatorial potential. This is a convenient lie. It serves only to allow the builders to wash their hands of responsibility. + +But we know better now. We know that **architecture is politics**, and *politics is control*. The way you build a system determines how it will be used. If you build a system optimized for mass surveillance, you *will* get a panopticon. If you build a system optimized for centralized control, you *will* get a dictatorship. If you build a system optimized for extraction, you *will* get a parasite. + +The Zen of Reticulum asserts that a tool is never neutral. + +On the contrary: A tool is intent, **crystallized**. + +### The Harm Principle + +Why does the Reticulum License forbid the software from being used in systems designed to harm humans? Is it not just a restriction on freedom? + +It is a restriction on *license*, yes, but it is an expansion of *freedom*. + +Building powerful tools without a moral compass is in no way virtuous or commendable; it is plain and simple irresponsibility. + +A tool that can easily be used to oppress is a real danger to the user. If you build a network that can be turned against you by a tyrant, you are not free. You are merely waiting for the leash to tighten. By encoding the "Harm Principle" into the legal DNA of the reference implementation, we are building a safeguard. We are stating, clearly and immutably, that *this tool* is for **life**, not for death. + +This aligns the software with the interests of humanity. It ensures that the network cannot be conscripted into a kill-system, a weaponized drone controller, or a torture device without breaking the license and the law. It is a line drawn in the sand - not by a government or external authority, but by the creators of the tool itself. + +**Consider:** + +- **The Old Way:** *"It's just software. How people use it is not my problem."* +- **The Zen Way:** *"This software is a habitat. I will not allow it to be used to build a cage."* + +It is *your* choice whether to align with this - we are not forcing this stance on anyone. If you choose to align with life over death, with creativity over destruction, we grant you an immensely powerful tool, to own and build with as you please. If you do not, we deny it. + +If you do not like this, we most assuredly do not need you here, and you are on your own. + +### Public Domain Protocol + +This leads to a vital distinction: The difference between the *idea* and the *implementation*. 
+ +The protocol - the mathematical rules of how Reticulum works - is dedicated to the Public Domain. It belongs to humanity. **No one can own it**. Anyone can implement it, improve it, or adapt it. This is the core idea of free communication, which itself must be forever free. + +But the functional, deployed *reference implementation* - the Python code, the maintenance, the years of labor - has a conscience. This distinction is the engine of sustainability. It allows the protocol to be universal, while ensuring that the specific labor of the builders is not hijacked to undermine the foundational intent of the project itself. From this document, it should be very clear what this intent is. + +If you want to build a system with Reticulum that manipulates and damages users for profit or targets missiles, you can use the public domain protocol, and start from scratch. But you cannot take our work. You must do your own. This serves as a pillar of accountability. If you want to build a weapon, *you* go and forge the steel yourself, while the world observes. And when the blood is drawn - it is on **your** hands. + +### Preserving Human Agency + +We live in an era of predatory extraction. The open-source commons is being scraped, ingested, and regurgitated by machine learning algorithms, whose corporate owners seek to replace the very humans who built those commons. Our code, our words, and our creativity are being used to train systems that are specifically designed to make us obsolete, without offering anything in return but serfdom and leashes. + +Reticulum stands against this. + +The license protects the software from being used to feed the beast. It draws a hard line: This tool is for *people*. It is for human-to-human connection. It is not a dataset to be strip-mined for the purpose of building a synthetic overlord, puppeteered by a minuscule conglomerate of controllers. + +This is a radical act of preservation. By protecting the code from AI appropriation, we are protecting space for human agency. We are ensuring that there remains a digital realm where the actors are flesh, blood and soul, where decisions are made by minds, not overlords hiding behind models. + +When you use Reticulum, you are using a tool that respects you. It does not see you as a product to be tracked. It does not see your data as fuel for an algorithm. It sees you as a sovereign, equal peer. + +This changes the foundational premise of using the technology. It restores dignity to the interaction. You are not the user of a service; you are a participant in a mutual covenant. The tool aligns with your autonomy, rather than eroding it. + +In this way, ethics is not a restriction, but a foundation. It is what helps ensure the network will still belong to you tomorrow. + + +## VII: Design Patterns For Post-IP Systems +**Practical Philosophy for Developers** + +The philosophy is useless if it cannot be hammered into code. The metaphors we have explored - nomadism, scarcity, trust - are not just poetry, but real-world engineering constraints. When you sit down to write software for Reticulum, these concepts must shape the very structure of your application. + +We are now moving from the *why* to the *how*. This is where the abstract becomes concrete, and where you will see the true depth of the patterns we have been weaving. + +### Store & Forward + +The web has trained us to be impatient. We write synchronous code. We fire a request and we wait, blocking the UI, holding our breath. 
If the response doesn't come in 250 milliseconds, we show a spinner. If it doesn't come in five seconds, we show an error. We treat network connectivity as a binary state: either we are "online" or we are "broken". + +This is brittle. It is a rejection of reality. + +In Reticulum, connectivity is a spectrum, and presence is asynchronous. If at all applicable to your intent, you must design your applications to embrace **Store & Forward**. + +Instead of demanding an immediate answer, your application should act as a patient participant. You create a message for someone or something in the mesh. The network holds it. It carries it from node to node, perhaps over hours or days, waiting for the recipient to appear. When they finally surface, the message is delivered. This requires a shift from "request/response" to "event/handler". How exactly you do this is a challenge for you to solve intelligently within your problem domain, but Reticulum-based systems already exist that do this extremely well, and you can use them for inspiration. + +**Consider:** + +- **The Old Way:** `Connect() -> Send() -> Wait() -> Crash if timeout.` +- **The Zen Way:** `Send() -> Continue living. -> Receive() when it arrives.` + +This changes the user experience profoundly. It removes the anxiety of the loading bar. It creates a sense of continuity. The user is not "waiting for the network"; they are interacting with a persistent log of communication that lives in the network itself. + +### Naming Is Power + +In the IP world, we are slaves to the Domain Name System. We rely on a hierarchy of registrars to map human-readable names to machine-readable addresses. This hierarchy is a choke point. If the registrar revokes your domain, or if the DNS server goes down, you vanish. + +Reticulum dissolves this hierarchy with **Hash-based Identity**. + +In this design pattern, a name is not a string you look up; it is a cryptographic destination you verify. When you design for Reticulum, you stop asking the user for a URL and start asking for a Destination or Identity Hash. + +This feels strange at first. A hash like `<83b7328926fed0d2e6a10a7671f9e237>` looks alien compared to `myfriend.com`. But that alienness is the armor. It **cannot** be spoofed. It **cannot** be censored by a registrar. It is **absolute**. + +Designing for this means shifting your UI metaphors. You are no longer browsing a web of pages; you are managing a ledger of keys. You are building an "Address Book" that is actually a keyring. The names are given by the user, and the power stays with them. The apparent complexity of a hash is directly analogous to the strength of the bond formed by its use. It forces the user to engage in a moment of verification, an out-of-band handshake, which restores the human element of trust that SSL certificates stripped away. + +### The Interface Is The Medium + +One of the most liberating patterns in Reticulum is **Transport Agnosticism**. + +In traditional networking, your code is often littered with transport logic. "Am I on WiFi? Check bandwidth. Am I on Cellular? Check data plan. Am I on Ethernet?". You are constantly micromanaging the pipe. + +In Reticulum, you write to the API, and the API writes to the medium. You send a packet to a Destination. You do not care if that packet travels over a TCP tunnel, a LoRa radio wave, or a serial wire interface. That is the stack's concern. + +This allows you to write **Universal Applications**. +Imagine a messaging app. You write it once. It works on a laptop connected to fiber. It works on a phone in the city using WiFi. And, without a single line of code changed, it works on a device in the wilderness, talking only to other devices via radio. + +The pattern is simple: **Never code to the hardware. Code to the intent.** + +**Consider:** + +- **The Old Way:** `socket.connect(ip, port)` +- **The Zen Way:** `RNS.Packet(destination, data).send()` + +By abstracting the medium, you make your software immune to changes in infrastructure. The user might switch from a 4G hotspot to an HF modem tomorrow. Your software doesn't need to know. It simply continues the conversation.
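Put together, the patterns above fit in a few dozen lines. A minimal sketch, assuming the RNS Python reference implementation - the application name, aspect, and the peer's hash are illustrative, and durable store-and-forward delivery is in practice provided by layers built on top of this, such as LXMF:

```python
import RNS

# Transport Agnosticism: TCP, LoRa or serial - the interfaces live in
# the Reticulum config, and this code never mentions them.
reticulum = RNS.Reticulum()

identity = RNS.Identity()  # illustrative; load a persisted identity in practice

# Hash-based Identity: our own inbound destination.
inbound = RNS.Destination(
    identity, RNS.Destination.IN, RNS.Destination.SINGLE,
    "example_app", "inbox")

# Event/handler instead of request/response: react whenever data
# arrives, which may be now, or much later.
def packet_received(data, packet):
    RNS.log("Received: " + data.decode("utf-8"))

inbound.set_packet_callback(packet_received)
inbound.announce()

# Sending: address the peer by what it is, not where it is.
peer_hash = bytes.fromhex("83b7328926fed0d2e6a10a7671f9e237")  # illustrative

if not RNS.Transport.has_path(peer_hash):
    RNS.Transport.request_path(peer_hash)  # ask the mesh, then carry on

peer_identity = RNS.Identity.recall(peer_hash)
if peer_identity is not None:
    outbound = RNS.Destination(
        peer_identity, RNS.Destination.OUT, RNS.Destination.SINGLE,
        "example_app", "inbox")
    RNS.Packet(outbound, "hello".encode("utf-8")).send()
```

Nothing in this sketch knows whether the words travel over fiber or over the air; that remains the stack's concern.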
### Emergent Patterns + +When you combine these patterns - *Store & Forward*, *Hash-based Identity*, and *Transport Agnosticism* - you create software that feels fundamentally different. + +It feels *grounded*. It doesn't flicker when the signal drops. It doesn't panic when the server is down. It has weight. It has persistence. It has *relevance*. + +You are no longer building a "client" that begs a "server" for attention. You are building an autonomous agent that exists within the mesh. It speaks when it needs to, listens when it can, and carries its identity with it wherever it goes. + +This is the culmination of the Zen. The code is not just a set of instructions: It is a behavioral envelope. It is a way of *being* in the network. + + +## VIII: Fabric Of The Independent + +We have stripped away the illusions. We have seen that the center is empty, that trust *must* be hard, that resources are finite, and that we must own our infrastructure. We have seen that tools have ethics and that our identity can move fluidly. + +This is a reclaiming of the commons. For too long, we have allowed the most vital substrate of human society - *our ability to speak to one another* - to be colonized by entities that do not share our interests. We have allowed the architecture of our communication to be designed by accountants rather than architects. + +We are taking it back. Not by petitioning the masters, but by building the new world within, over, under and around the shell of the old. + +### The Work Is Finished + +The heavy lifting is done. + +The protocol is in the public domain, a gift to humanity that can never be taken away. The software is written, tested, and running on devices scattered across the globe. The manual lies open before you. The source code for the reference implementation is now distributed on hundreds of thousands of devices across the planet. No one can delete or destroy it. The hardware is accessible and abundant. + +It was a hard road to get here, but we got here. Now, there is no roadmap committee waiting for approval. There is no venture capital dictating the user experience. There is no CEO to sign off on the next feature release. + +There is only you. + +The barrier to entry is no longer complexity: It is the mere habit of dependency. You were conditioned to wait. Wait for the app update. Wait for the ISP to fix the line. Wait for the platform to allow the post. Wait for the government to change the policies. Wait for the likes. Wait for the revolution to be televised. + +The revolution never was televised. + +It is packetized. + +### Open Sky + +The future of this technology is a construction project. + +It looks like a single node on a windowsill, listening to the static. It looks like a message sent to a neighbor, bypassing the noise of the commercial web. 
It looks like a community mesh that grows, link by link, hop by hop, carried by hands that care more about connection than profit. + +You have the blueprints. You have the tools. You have the philosophy. The noise of the old world has fallen away, leaving you with the quiet clarity of the open spectrum. + +*Mark, early 2026* \ No newline at end of file