From 356cb6412fbfda050dd37d8a680bb6b13351b52f Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Tue, 21 Jan 2025 10:46:59 +0100 Subject: [PATCH 01/54] Optimise structure overhead --- LXMF/LXMPeer.py | 30 +++++++++++++++++------------- LXMF/LXMRouter.py | 14 +++++++++++++- 2 files changed, 30 insertions(+), 14 deletions(-) diff --git a/LXMF/LXMPeer.py b/LXMF/LXMPeer.py index a88f6da..2b10987 100644 --- a/LXMF/LXMPeer.py +++ b/LXMF/LXMPeer.py @@ -63,12 +63,13 @@ class LXMPeer: for transient_id in dictionary["handled_ids"]: if transient_id in router.propagation_entries: - peer.handled_messages[transient_id] = router.propagation_entries[transient_id] + peer.handled_messages.append(transient_id) for transient_id in dictionary["unhandled_ids"]: if transient_id in router.propagation_entries: - peer.unhandled_messages[transient_id] = router.propagation_entries[transient_id] + peer.unhandled_messages.append(transient_id) + del dictionary return peer def to_bytes(self): @@ -108,8 +109,8 @@ class LXMPeer: self.link = None self.state = LXMPeer.IDLE - self.unhandled_messages = {} - self.handled_messages = {} + self.unhandled_messages = [] + self.handled_messages = [] self.last_offer = [] self.router = router @@ -118,6 +119,7 @@ class LXMPeer: if self.identity != None: self.destination = RNS.Destination(self.identity, RNS.Destination.OUT, RNS.Destination.SINGLE, APP_NAME, "propagation") else: + self.destination = None RNS.log(f"Could not recall identity for LXMF propagation peer {RNS.prettyhexrep(self.destination_hash)}, will retry identity resolution on next sync", RNS.LOG_WARNING) def sync(self): @@ -171,7 +173,7 @@ class LXMPeer: for transient_id in purged_ids: RNS.log("Dropping unhandled message "+RNS.prettyhexrep(transient_id)+" for peer "+RNS.prettyhexrep(self.destination_hash)+" since it no longer exists in the message store.", RNS.LOG_DEBUG) - self.unhandled_messages.pop(transient_id) + self.unhandled_messages.remove(transient_id) unhandled_entries.sort(key=lambda e: e[1], reverse=False) per_message_overhead = 16 # Really only 2 bytes, but set a bit higher for now @@ -189,7 +191,7 @@ class LXMPeer: RNS.log("Sending sync request to peer "+str(self.destination), RNS.LOG_DEBUG) self.last_offer = unhandled_ids - self.link.request(LXMPeer.OFFER_REQUEST_PATH, self.last_offer, response_callback=self.offer_response, failed_callback=self.request_failed) + self.link.request(LXMPeer.OFFER_REQUEST_PATH, unhandled_ids, response_callback=self.offer_response, failed_callback=self.request_failed) self.state = LXMPeer.REQUEST_SENT else: @@ -226,13 +228,14 @@ class LXMPeer: # Peer already has all advertised messages for transient_id in self.last_offer: if transient_id in self.unhandled_messages: - self.handled_messages[transient_id] = self.unhandled_messages.pop(transient_id) + self.handled_messages.append(transient_id) + self.unhandled_messages.remove(transient_id) elif response == True: # Peer wants all advertised messages for transient_id in self.last_offer: - wanted_messages.append(self.unhandled_messages[transient_id]) + wanted_messages.append(self.router.propagation_entries[transient_id]) wanted_message_ids.append(transient_id) else: @@ -242,10 +245,11 @@ class LXMPeer: # already received it from another peer. 
if not transient_id in response: if transient_id in self.unhandled_messages: - self.handled_messages[transient_id] = self.unhandled_messages.pop(transient_id) + self.handled_messages.append(transient_id) + self.unhandled_messages.remove(transient_id) for transient_id in response: - wanted_messages.append(self.unhandled_messages[transient_id]) + wanted_messages.append(self.router.propagation_entries[transient_id]) wanted_message_ids.append(transient_id) if len(wanted_messages) > 0: @@ -288,8 +292,8 @@ class LXMPeer: def resource_concluded(self, resource): if resource.status == RNS.Resource.COMPLETE: for transient_id in resource.transferred_messages: - message = self.unhandled_messages.pop(transient_id) - self.handled_messages[transient_id] = message + self.handled_messages.append(transient_id) + self.unhandled_messages.remove(transient_id) if self.link != None: self.link.teardown() @@ -330,7 +334,7 @@ class LXMPeer: def handle_message(self, transient_id): if not transient_id in self.handled_messages and not transient_id in self.unhandled_messages: - self.unhandled_messages[transient_id] = self.router.propagation_entries[transient_id] + self.unhandled_messages.append(transient_id) def __str__(self): if self.destination_hash: diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 79678c6..a19f401 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -1,5 +1,6 @@ import os import time +import math import random import base64 import atexit @@ -427,6 +428,8 @@ class LXMRouter: os.makedirs(self.messagepath) self.propagation_entries = {} + + st = time.time(); RNS.log("Indexing messagestore...", RNS.LOG_NOTICE) for filename in os.listdir(self.messagepath): components = filename.split("_") if len(components) == 2: @@ -452,9 +455,13 @@ class LXMRouter: except Exception as e: RNS.log("Could not read LXM from message store. The contained exception was: "+str(e), RNS.LOG_ERROR) + et = time.time(); RNS.log(f"Indexed {len(self.propagation_entries)} messages in {RNS.prettytime(et-st)}, {math.floor(len(self.propagation_entries)/(et-st))} msgs/s", RNS.LOG_NOTICE) + st = time.time(); RNS.log("Loading propagation node peers...", RNS.LOG_NOTICE) + if os.path.isfile(self.storagepath+"/peers"): peers_file = open(self.storagepath+"/peers", "rb") peers_data = peers_file.read() + peers_file.close() if len(peers_data) > 0: serialised_peers = msgpack.unpackb(peers_data) @@ -468,8 +475,13 @@ class LXMRouter: lim_str = ", "+RNS.prettysize(peer.propagation_transfer_limit*1000)+" transfer limit" RNS.log("Loaded peer "+RNS.prettyhexrep(peer.destination_hash)+" with "+str(len(peer.unhandled_messages))+" unhandled messages"+lim_str, RNS.LOG_DEBUG) else: + del peer RNS.log("Peer "+RNS.prettyhexrep(peer.destination_hash)+" could not be loaded, because its identity could not be recalled. 
Dropping peer.", RNS.LOG_DEBUG) + del serialised_peers + del peers_data + + RNS.log(f"Loaded {len(self.peers)} peers in {RNS.prettytime(time.time()-st)}", RNS.LOG_NOTICE) self.propagation_node = True self.propagation_destination.set_link_established_callback(self.propagation_link_established) @@ -1676,7 +1688,7 @@ class LXMRouter: if remote_hash != None and remote_hash in self.peers: transient_id = RNS.Identity.full_hash(lxmf_data) peer = self.peers[remote_hash] - peer.handled_messages[transient_id] = [transient_id, remote_timebase, lxmf_data] + peer.handled_messages.append(transient_id) self.lxmf_propagation(lxmf_data) else: From 7701f326d99b20bfed3d64c3a80809e02755a06f Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Tue, 21 Jan 2025 16:33:39 +0100 Subject: [PATCH 02/54] Memory optimisations --- LXMF/LXMPeer.py | 120 +++++++++++++++++++++++++++++++------ LXMF/LXMRouter.py | 149 +++++++++++++++++++++++++++++++++------------- 2 files changed, 209 insertions(+), 60 deletions(-) diff --git a/LXMF/LXMPeer.py b/LXMF/LXMPeer.py index 2b10987..f4c522c 100644 --- a/LXMF/LXMPeer.py +++ b/LXMF/LXMPeer.py @@ -38,11 +38,16 @@ class LXMPeer: @staticmethod def from_bytes(peer_bytes, router): dictionary = msgpack.unpackb(peer_bytes) + peer_destination_hash = dictionary["destination_hash"] + peer_peering_timebase = dictionary["peering_timebase"] + peer_alive = dictionary["alive"] + peer_last_heard = dictionary["last_heard"] + + peer = LXMPeer(router, peer_destination_hash) + peer.peering_timebase = peer_peering_timebase + peer.alive = peer_alive + peer.last_heard = peer_last_heard - peer = LXMPeer(router, dictionary["destination_hash"]) - peer.peering_timebase = dictionary["peering_timebase"] - peer.alive = dictionary["alive"] - peer.last_heard = dictionary["last_heard"] if "link_establishment_rate" in dictionary: peer.link_establishment_rate = dictionary["link_establishment_rate"] else: @@ -61,13 +66,22 @@ class LXMPeer: else: peer.propagation_transfer_limit = None + hm_count = 0 for transient_id in dictionary["handled_ids"]: if transient_id in router.propagation_entries: - peer.handled_messages.append(transient_id) + peer.add_handled_message(transient_id) + hm_count += 1 + um_count = 0 for transient_id in dictionary["unhandled_ids"]: if transient_id in router.propagation_entries: - peer.unhandled_messages.append(transient_id) + peer.add_unhandled_message(transient_id) + um_count += 1 + + peer._hm_count = hm_count + peer._um_count = um_count + peer._hm_counts_synced = True + peer._um_counts_synced = True del dictionary return peer @@ -93,7 +107,10 @@ class LXMPeer: dictionary["handled_ids"] = handled_ids dictionary["unhandled_ids"] = unhandled_ids - return msgpack.packb(dictionary) + peer_bytes = msgpack.packb(dictionary) + del dictionary + + return peer_bytes def __init__(self, router, destination_hash): self.alive = False @@ -106,11 +123,14 @@ class LXMPeer: self.sync_transfer_rate = 0 self.propagation_transfer_limit = None + self._hm_count = 0 + self._um_count = 0 + self._hm_counts_synced = False + self._um_counts_synced = False + self.link = None self.state = LXMPeer.IDLE - self.unhandled_messages = [] - self.handled_messages = [] self.last_offer = [] self.router = router @@ -173,7 +193,7 @@ class LXMPeer: for transient_id in purged_ids: RNS.log("Dropping unhandled message "+RNS.prettyhexrep(transient_id)+" for peer "+RNS.prettyhexrep(self.destination_hash)+" since it no longer exists in the message store.", RNS.LOG_DEBUG) - self.unhandled_messages.remove(transient_id) + 
self.remove_unhandled_message(transient_id) unhandled_entries.sort(key=lambda e: e[1], reverse=False) per_message_overhead = 16 # Really only 2 bytes, but set a bit higher for now @@ -228,8 +248,8 @@ class LXMPeer: # Peer already has all advertised messages for transient_id in self.last_offer: if transient_id in self.unhandled_messages: - self.handled_messages.append(transient_id) - self.unhandled_messages.remove(transient_id) + self.add_handled_message(transient_id) + self.remove_unhandled_message(transient_id) elif response == True: @@ -244,9 +264,8 @@ class LXMPeer: # If the peer did not want the message, it has # already received it from another peer. if not transient_id in response: - if transient_id in self.unhandled_messages: - self.handled_messages.append(transient_id) - self.unhandled_messages.remove(transient_id) + self.add_handled_message(transient_id) + self.remove_unhandled_message(transient_id) for transient_id in response: wanted_messages.append(self.router.propagation_entries[transient_id]) @@ -292,8 +311,8 @@ class LXMPeer: def resource_concluded(self, resource): if resource.status == RNS.Resource.COMPLETE: for transient_id in resource.transferred_messages: - self.handled_messages.append(transient_id) - self.unhandled_messages.remove(transient_id) + self.add_handled_message(transient_id) + self.remove_unhandled_message(transient_id) if self.link != None: self.link.teardown() @@ -332,9 +351,72 @@ class LXMPeer: self.link = None self.state = LXMPeer.IDLE - def handle_message(self, transient_id): + def new_propagation_message(self, transient_id): if not transient_id in self.handled_messages and not transient_id in self.unhandled_messages: - self.unhandled_messages.append(transient_id) + self.add_unhandled_message(transient_id) + + @property + def handled_messages(self): + pes = self.router.propagation_entries.copy() + hm = list(filter(lambda tid: self.destination_hash in self.router.propagation_entries[tid][4], pes)) + self._hm_count = len(hm); del pes + return hm + + @property + def unhandled_messages(self): + pes = self.router.propagation_entries.copy() + um = list(filter(lambda tid: self.destination_hash in self.router.propagation_entries[tid][5], pes)) + self._um_count = len(um); del pes + return um + + @property + def handled_message_count(self): + if not self._hm_counts_synced: + self._update_counts() + + return self._hm_count + + @property + def unhandled_message_count(self): + if not self._um_counts_synced: + self._update_counts() + + return self._um_count + + def _update_counts(self): + if not self._hm_counts_synced: + RNS.log("UPDATE HM COUNTS") + hm = self.handled_messages; del hm + self._hm_counts_synced = True + + if not self._um_counts_synced: + RNS.log("UPDATE UM COUNTS") + um = self.unhandled_messages; del um + self._um_counts_synced = True + + def add_handled_message(self, transient_id): + if transient_id in self.router.propagation_entries: + if not self.destination_hash in self.router.propagation_entries[transient_id][4]: + self.router.propagation_entries[transient_id][4].append(self.destination_hash) + self._hm_counts_synced = False + + def add_unhandled_message(self, transient_id): + if transient_id in self.router.propagation_entries: + if not self.destination_hash in self.router.propagation_entries[transient_id][5]: + self.router.propagation_entries[transient_id][5].append(self.destination_hash) + self._um_count += 1 + + def remove_handled_message(self, transient_id): + if transient_id in self.router.propagation_entries: + if self.destination_hash in 
self.router.propagation_entries[transient_id][4]: + self.router.propagation_entries[transient_id][4].remove(self.destination_hash) + self._hm_counts_synced = False + + def remove_unhandled_message(self, transient_id): + if transient_id in self.router.propagation_entries: + if self.destination_hash in self.router.propagation_entries[transient_id][5]: + self.router.propagation_entries[transient_id][5].remove(self.destination_hash) + self._um_counts_synced = False def __str__(self): if self.destination_hash: diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index a19f401..9163824 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -1,9 +1,11 @@ import os +import sys import time import math import random import base64 import atexit +import signal import threading import RNS @@ -94,6 +96,9 @@ class LXMRouter: self.outbound_propagation_node = None self.outbound_propagation_link = None + if delivery_limit == None: + delivery_limit = LXMRouter.DELIVERY_LIMIT + self.message_storage_limit = None self.information_storage_limit = None self.propagation_per_transfer_limit = propagation_limit @@ -117,6 +122,7 @@ class LXMRouter: self.cost_file_lock = threading.Lock() self.ticket_file_lock = threading.Lock() self.stamp_gen_lock = threading.Lock() + self.exit_handler_running = False if identity == None: identity = RNS.Identity() @@ -221,6 +227,8 @@ class LXMRouter: RNS.log("Could not load outbound stamp costs from storage. The contained exception was: "+str(e), RNS.LOG_ERROR) atexit.register(self.exit_handler) + signal.signal(signal.SIGINT, self.sigint_handler) + signal.signal(signal.SIGTERM, self.sigterm_handler) job_thread = threading.Thread(target=self.jobloop) job_thread.setDaemon(True) @@ -446,17 +454,19 @@ class LXMRouter: file.close() self.propagation_entries[transient_id] = [ - destination_hash, - filepath, - received, - msg_size, + destination_hash, # 0: Destination hash + filepath, # 1: Storage location + received, # 2: Receive timestamp + msg_size, # 3: Message size + [], # 4: Handled peers + [], # 5: Unhandled peers ] except Exception as e: RNS.log("Could not read LXM from message store. 
The contained exception was: "+str(e), RNS.LOG_ERROR) et = time.time(); RNS.log(f"Indexed {len(self.propagation_entries)} messages in {RNS.prettytime(et-st)}, {math.floor(len(self.propagation_entries)/(et-st))} msgs/s", RNS.LOG_NOTICE) - st = time.time(); RNS.log("Loading propagation node peers...", RNS.LOG_NOTICE) + st = time.time(); RNS.log("Rebuilding peer synchronisation states...", RNS.LOG_NOTICE) if os.path.isfile(self.storagepath+"/peers"): peers_file = open(self.storagepath+"/peers", "rb") @@ -465,23 +475,25 @@ class LXMRouter: if len(peers_data) > 0: serialised_peers = msgpack.unpackb(peers_data) + del peers_data - for serialised_peer in serialised_peers: + while len(serialised_peers) > 0: + serialised_peer = serialised_peers.pop() peer = LXMPeer.from_bytes(serialised_peer, self) + del serialised_peer if peer.identity != None: self.peers[peer.destination_hash] = peer lim_str = ", no transfer limit" if peer.propagation_transfer_limit != None: lim_str = ", "+RNS.prettysize(peer.propagation_transfer_limit*1000)+" transfer limit" - RNS.log("Loaded peer "+RNS.prettyhexrep(peer.destination_hash)+" with "+str(len(peer.unhandled_messages))+" unhandled messages"+lim_str, RNS.LOG_DEBUG) + RNS.log("Rebuilt peer "+RNS.prettyhexrep(peer.destination_hash)+" with "+str(peer.unhandled_message_count)+" unhandled messages"+lim_str, RNS.LOG_DEBUG) else: - del peer RNS.log("Peer "+RNS.prettyhexrep(peer.destination_hash)+" could not be loaded, because its identity could not be recalled. Dropping peer.", RNS.LOG_DEBUG) + del peer del serialised_peers - del peers_data - RNS.log(f"Loaded {len(self.peers)} peers in {RNS.prettytime(time.time()-st)}", RNS.LOG_NOTICE) + RNS.log(f"Rebuilt synchronisation state for {len(self.peers)} peers in {RNS.prettytime(time.time()-st)}", RNS.LOG_NOTICE) self.propagation_node = True self.propagation_destination.set_link_established_callback(self.propagation_link_established) @@ -602,36 +614,37 @@ class LXMRouter: JOB_STORE_INTERVAL = 120 JOB_PEERSYNC_INTERVAL = 12 def jobs(self): - self.processing_count += 1 + if not self.exit_handler_running: + self.processing_count += 1 - if self.processing_count % LXMRouter.JOB_OUTBOUND_INTERVAL == 0: - self.process_outbound() + if self.processing_count % LXMRouter.JOB_OUTBOUND_INTERVAL == 0: + self.process_outbound() - if self.processing_count % LXMRouter.JOB_STAMPS_INTERVAL == 0: - threading.Thread(target=self.process_deferred_stamps, daemon=True).start() + if self.processing_count % LXMRouter.JOB_STAMPS_INTERVAL == 0: + threading.Thread(target=self.process_deferred_stamps, daemon=True).start() - if self.processing_count % LXMRouter.JOB_LINKS_INTERVAL == 0: - self.clean_links() + if self.processing_count % LXMRouter.JOB_LINKS_INTERVAL == 0: + self.clean_links() - if self.processing_count % LXMRouter.JOB_TRANSIENT_INTERVAL == 0: - self.clean_transient_id_caches() + if self.processing_count % LXMRouter.JOB_TRANSIENT_INTERVAL == 0: + self.clean_transient_id_caches() - if self.processing_count % LXMRouter.JOB_STORE_INTERVAL == 0: - self.clean_message_store() + if self.processing_count % LXMRouter.JOB_STORE_INTERVAL == 0: + self.clean_message_store() - if self.processing_count % LXMRouter.JOB_PEERSYNC_INTERVAL == 0: - self.sync_peers() + if self.processing_count % LXMRouter.JOB_PEERSYNC_INTERVAL == 0: + self.sync_peers() def jobloop(self): while (True): # TODO: Improve this to scheduling, so manual # triggers can delay next run - try: self.jobs() except Exception as e: RNS.log("An error ocurred while running LXMF Router jobs.", 
RNS.LOG_ERROR) RNS.log("The contained exception was: "+str(e), RNS.LOG_ERROR) + RNS.trace_exception(e) time.sleep(LXMRouter.PROCESSING_INTERVAL) def clean_links(self): @@ -888,22 +901,24 @@ class LXMRouter: def save_locally_delivered_transient_ids(self): try: - if not os.path.isdir(self.storagepath): - os.makedirs(self.storagepath) + if len(self.locally_delivered_transient_ids) > 0: + if not os.path.isdir(self.storagepath): + os.makedirs(self.storagepath) - with open(self.storagepath+"/local_deliveries", "wb") as locally_delivered_file: - locally_delivered_file.write(msgpack.packb(self.locally_delivered_transient_ids)) + with open(self.storagepath+"/local_deliveries", "wb") as locally_delivered_file: + locally_delivered_file.write(msgpack.packb(self.locally_delivered_transient_ids)) except Exception as e: RNS.log("Could not save locally delivered message ID cache to storage. The contained exception was: "+str(e), RNS.LOG_ERROR) def save_locally_processed_transient_ids(self): try: - if not os.path.isdir(self.storagepath): - os.makedirs(self.storagepath) + if len(self.locally_processed_transient_ids) > 0: + if not os.path.isdir(self.storagepath): + os.makedirs(self.storagepath) - with open(self.storagepath+"/locally_processed", "wb") as locally_processed_file: - locally_processed_file.write(msgpack.packb(self.locally_processed_transient_ids)) + with open(self.storagepath+"/locally_processed", "wb") as locally_processed_file: + locally_processed_file.write(msgpack.packb(self.locally_processed_transient_ids)) except Exception as e: RNS.log("Could not save locally processed transient ID cache to storage. The contained exception was: "+str(e), RNS.LOG_ERROR) @@ -1001,10 +1016,43 @@ class LXMRouter: RNS.log(f"An error occurred while reloading available tickets from storage: {e}", RNS.LOG_ERROR) def exit_handler(self): + if self.exit_handler_running: + return + + self.exit_handler_running = True + + RNS.log("Tearing down delivery destinations...", RNS.LOG_NOTICE) + for destination_hash in self.delivery_destinations: + delivery_destination = self.delivery_destinations[destination_hash] + delivery_destination.set_packet_callback(None) + delivery_destination.set_link_established_callback(None) + for link in delivery_destination.links: + try: + if link.status == RNS.Link.ACTIVE: + link.teardown() + except Exception as e: + RNS.log("Error while tearing down propagation link: {e}", RNS.LOG_ERROR) + + if self.propagation_node: + RNS.log("Tearing down propagation node destination...", RNS.LOG_NOTICE) + self.propagation_destination.set_link_established_callback(None) + self.propagation_destination.set_packet_callback(None) + self.propagation_destination.deregister_request_handler(LXMPeer.OFFER_REQUEST_PATH) + self.propagation_destination.deregister_request_handler(LXMPeer.MESSAGE_GET_PATH) + for link in self.active_propagation_links: + try: + if link.status == RNS.Link.ACTIVE: + link.teardown() + except Exception as e: + RNS.log("Error while tearing down propagation link: {e}", RNS.LOG_ERROR) + + RNS.log("Persisting LXMF state data to storage...", RNS.LOG_NOTICE) if self.propagation_node: try: + st = time.time(); RNS.log("Saving peer synchronisation states to storage...", RNS.LOG_NOTICE) serialised_peers = [] - for peer_id in self.peers: + peer_dict = self.peers.copy() + for peer_id in peer_dict: peer = self.peers[peer_id] serialised_peers.append(peer.to_bytes()) @@ -1012,7 +1060,7 @@ class LXMRouter: peers_file.write(msgpack.packb(serialised_peers)) peers_file.close() - RNS.log("Saved 
"+str(len(serialised_peers))+" peers to storage", RNS.LOG_DEBUG) + RNS.log(f"Saved {len(serialised_peers)} peers to storage in {RNS.prettytime(time.time()-st)}", RNS.LOG_NOTICE) except Exception as e: RNS.log("Could not save propagation node peers to storage. The contained exception was: "+str(e), RNS.LOG_ERROR) @@ -1020,6 +1068,20 @@ class LXMRouter: self.save_locally_delivered_transient_ids() self.save_locally_processed_transient_ids() + def sigint_handler(self, signal, frame): + if not self.exit_handler_running: + RNS.log("Received SIGINT, shutting down now!", RNS.LOG_WARNING) + sys.exit(0) + else: + RNS.log("Received SIGINT, but exit handler is running, keeping process alive until storage persist is complete", RNS.LOG_WARNING) + + def sigterm_handler(self, signal, frame): + if not self.exit_handler_running: + RNS.log("Received SIGTERM, shutting down now!", RNS.LOG_WARNING) + sys.exit(0) + else: + RNS.log("Received SIGTERM, but exit handler is running, keeping process alive until storage persist is complete", RNS.LOG_WARNING) + def __str__(self): return "" @@ -1685,19 +1747,23 @@ class LXMRouter: messages = data[1] for lxmf_data in messages: + peer = None + transient_id = RNS.Identity.full_hash(lxmf_data) if remote_hash != None and remote_hash in self.peers: - transient_id = RNS.Identity.full_hash(lxmf_data) peer = self.peers[remote_hash] - peer.handled_messages.append(transient_id) - self.lxmf_propagation(lxmf_data) + self.lxmf_propagation(lxmf_data, from_peer=peer) + if peer != None: + peer.add_handled_message(transient_id) + else: RNS.log("Invalid data structure received at propagation destination, ignoring", RNS.LOG_DEBUG) except Exception as e: RNS.log("Error while unpacking received propagation resource", RNS.LOG_DEBUG) + RNS.trace_exception(e) - def lxmf_propagation(self, lxmf_data, signal_local_delivery=None, signal_duplicate=None, is_paper_message=False): + def lxmf_propagation(self, lxmf_data, signal_local_delivery=None, signal_duplicate=None, is_paper_message=False, from_peer=None): no_stamp_enforcement = False if is_paper_message: no_stamp_enforcement = True @@ -1708,7 +1774,6 @@ class LXMRouter: if not transient_id in self.propagation_entries and not transient_id in self.locally_processed_transient_ids: received = time.time() - propagation_entry = [transient_id, received, lxmf_data] destination_hash = lxmf_data[:LXMessage.DESTINATION_LENGTH] self.locally_processed_transient_ids[transient_id] = received @@ -1732,12 +1797,13 @@ class LXMRouter: msg_file.write(lxmf_data) msg_file.close() - self.propagation_entries[transient_id] = [destination_hash, file_path, time.time(), len(lxmf_data)] + self.propagation_entries[transient_id] = [destination_hash, file_path, time.time(), len(lxmf_data), [], []] RNS.log("Received propagated LXMF message "+RNS.prettyhexrep(transient_id)+", adding to peer distribution queues...", RNS.LOG_DEBUG) for peer_id in self.peers: peer = self.peers[peer_id] - peer.handle_message(transient_id) + if peer != from_peer: + peer.new_propagation_message(transient_id) else: # TODO: Add message to sneakernet queues when implemented @@ -1757,6 +1823,7 @@ class LXMRouter: except Exception as e: RNS.log("Could not assemble propagated LXMF message from received data", RNS.LOG_DEBUG) RNS.log("The contained exception was: "+str(e), RNS.LOG_DEBUG) + RNS.trace_exception(e) return False def ingest_lxm_uri(self, uri, signal_local_delivery=None, signal_duplicate=None): From 44d1d992f8a9be4b81c5f6b302f6f48b1e46e161 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Tue, 21 Jan 
2025 16:34:00 +0100 Subject: [PATCH 03/54] Updated version --- LXMF/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LXMF/_version.py b/LXMF/_version.py index 906d362..43c4ab0 100644 --- a/LXMF/_version.py +++ b/LXMF/_version.py @@ -1 +1 @@ -__version__ = "0.6.0" +__version__ = "0.6.1" From bfed126a7c17fd90551204afd0bbab3fac1441f9 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Tue, 21 Jan 2025 16:44:24 +0100 Subject: [PATCH 04/54] Memory optimisations --- LXMF/LXMPeer.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/LXMF/LXMPeer.py b/LXMF/LXMPeer.py index f4c522c..d133027 100644 --- a/LXMF/LXMPeer.py +++ b/LXMF/LXMPeer.py @@ -358,14 +358,14 @@ class LXMPeer: @property def handled_messages(self): pes = self.router.propagation_entries.copy() - hm = list(filter(lambda tid: self.destination_hash in self.router.propagation_entries[tid][4], pes)) + hm = list(filter(lambda tid: self.destination_hash in pes[tid][4], pes)) self._hm_count = len(hm); del pes return hm @property def unhandled_messages(self): pes = self.router.propagation_entries.copy() - um = list(filter(lambda tid: self.destination_hash in self.router.propagation_entries[tid][5], pes)) + um = list(filter(lambda tid: self.destination_hash in pes[tid][5], pes)) self._um_count = len(um); del pes return um From 1c9c74410790188db976dbecdff3b994d33ac5d9 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Tue, 21 Jan 2025 16:51:25 +0100 Subject: [PATCH 05/54] Memory optimisations --- LXMF/LXMPeer.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/LXMF/LXMPeer.py b/LXMF/LXMPeer.py index d133027..add54da 100644 --- a/LXMF/LXMPeer.py +++ b/LXMF/LXMPeer.py @@ -360,6 +360,7 @@ class LXMPeer: pes = self.router.propagation_entries.copy() hm = list(filter(lambda tid: self.destination_hash in pes[tid][4], pes)) self._hm_count = len(hm); del pes + self._hm_counts_synced = True return hm @property @@ -367,6 +368,7 @@ class LXMPeer: pes = self.router.propagation_entries.copy() um = list(filter(lambda tid: self.destination_hash in pes[tid][5], pes)) self._um_count = len(um); del pes + self._um_counts_synced = True return um @property @@ -387,12 +389,10 @@ class LXMPeer: if not self._hm_counts_synced: RNS.log("UPDATE HM COUNTS") hm = self.handled_messages; del hm - self._hm_counts_synced = True if not self._um_counts_synced: RNS.log("UPDATE UM COUNTS") um = self.unhandled_messages; del um - self._um_counts_synced = True def add_handled_message(self, transient_id): if transient_id in self.router.propagation_entries: From 1430b1ce90b989e9627d07841b5634e6f3a1f8e1 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Tue, 21 Jan 2025 20:20:39 +0100 Subject: [PATCH 06/54] Enqueue and batch process distribution queue mappings --- LXMF/LXMPeer.py | 40 ++++++++++++++++++++++++++++++----- LXMF/LXMRouter.py | 53 +++++++++++++++++++++++++++++++++++++++-------- 2 files changed, 79 insertions(+), 14 deletions(-) diff --git a/LXMF/LXMPeer.py b/LXMF/LXMPeer.py index add54da..74a40c7 100644 --- a/LXMF/LXMPeer.py +++ b/LXMF/LXMPeer.py @@ -4,6 +4,7 @@ import time import RNS import RNS.vendor.umsgpack as msgpack +from collections import deque from .LXMF import APP_NAME class LXMPeer: @@ -122,6 +123,8 @@ class LXMPeer: self.link_establishment_rate = 0 self.sync_transfer_rate = 0 self.propagation_transfer_limit = None + self.handled_messages_queue = deque() + self.unhandled_messages_queue = deque() self._hm_count = 0 self._um_count = 0 @@ -351,9 +354,38 @@ class LXMPeer: self.link = None self.state = 
LXMPeer.IDLE - def new_propagation_message(self, transient_id): - if not transient_id in self.handled_messages and not transient_id in self.unhandled_messages: - self.add_unhandled_message(transient_id) + def queued_items(self): + return len(self.handled_messages_queue) > 0 or len(self.unhandled_messages_queue) > 0 + + def queue_unhandled_message(self, transient_id): + self.unhandled_messages_queue.append(transient_id) + + def queue_handled_message(self, transient_id): + self.handled_messages_queue.append(transient_id) + + def process_queues(self): + if len(self.unhandled_messages_queue) > 0 or len(self.handled_messages_queue) > 0: + # TODO: Remove debug + # st = time.time(); lu = len(self.unhandled_messages_queue); lh = len(self.handled_messages_queue) + + handled_messages = self.handled_messages + unhandled_messages = self.unhandled_messages + + while len(self.handled_messages_queue) > 0: + transient_id = self.handled_messages_queue.pop() + if not transient_id in handled_messages: + self.add_handled_message(transient_id) + if transient_id in unhandled_messages: + self.remove_unhandled_message(transient_id) + + while len(self.unhandled_messages_queue) > 0: + transient_id = self.unhandled_messages_queue.pop() + if not transient_id in handled_messages and not transient_id in unhandled_messages: + self.add_unhandled_message(transient_id) + + del handled_messages, unhandled_messages + # TODO: Remove debug + # RNS.log(f"{self} processed {lh}/{lu} in {RNS.prettytime(time.time()-st)}") @property def handled_messages(self): @@ -387,11 +419,9 @@ class LXMPeer: def _update_counts(self): if not self._hm_counts_synced: - RNS.log("UPDATE HM COUNTS") hm = self.handled_messages; del hm if not self._um_counts_synced: - RNS.log("UPDATE UM COUNTS") um = self.unhandled_messages; del um def add_handled_message(self, transient_id): diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 9163824..1e62914 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -8,6 +8,8 @@ import atexit import signal import threading +from collections import deque + import RNS import RNS.vendor.umsgpack as msgpack @@ -143,6 +145,8 @@ class LXMRouter: self.peers = {} self.propagation_entries = {} + self.peer_distribution_queue = deque() + RNS.Transport.register_announce_handler(LXMFDeliveryAnnounceHandler(self)) RNS.Transport.register_announce_handler(LXMFPropagationAnnounceHandler(self)) @@ -613,6 +617,7 @@ class LXMRouter: JOB_TRANSIENT_INTERVAL = 60 JOB_STORE_INTERVAL = 120 JOB_PEERSYNC_INTERVAL = 12 + JOB_PEERINGEST_INTERVAL= JOB_PEERSYNC_INTERVAL def jobs(self): if not self.exit_handler_running: self.processing_count += 1 @@ -632,6 +637,9 @@ class LXMRouter: if self.processing_count % LXMRouter.JOB_STORE_INTERVAL == 0: self.clean_message_store() + if self.processing_count % LXMRouter.JOB_PEERINGEST_INTERVAL == 0: + self.flush_queues() + if self.processing_count % LXMRouter.JOB_PEERSYNC_INTERVAL == 0: self.sync_peers() @@ -647,6 +655,17 @@ class LXMRouter: RNS.trace_exception(e) time.sleep(LXMRouter.PROCESSING_INTERVAL) + def flush_queues(self): + self.flush_peer_distribution_queue() + RNS.log("Calculating peer distribution queue mappings...", RNS.LOG_DEBUG); st = time.time() + for peer_id in self.peers.copy(): + if peer_id in self.peers: + peer = self.peers[peer_id] + if peer.queued_items(): + peer.process_queues() + + RNS.log(f"Distribution queue mapping completed in {RNS.prettytime(time.time()-st)}", RNS.LOG_DEBUG) + def clean_links(self): closed_links = [] for link_hash in self.direct_links: @@ -1047,6 +1066,7 @@ 
class LXMRouter: RNS.log("Error while tearing down propagation link: {e}", RNS.LOG_ERROR) RNS.log("Persisting LXMF state data to storage...", RNS.LOG_NOTICE) + self.flush_queues() if self.propagation_node: try: st = time.time(); RNS.log("Saving peer synchronisation states to storage...", RNS.LOG_NOTICE) @@ -1608,8 +1628,9 @@ class LXMRouter: culled_peers = [] waiting_peers = [] unresponsive_peers = [] - for peer_id in self.peers: - peer = self.peers[peer_id] + peers = self.peers.copy() + for peer_id in peers: + peer = peers[peer_id] if time.time() > peer.last_heard + LXMPeer.MAX_UNREACHABLE: culled_peers.append(peer_id) else: @@ -1754,7 +1775,7 @@ class LXMRouter: self.lxmf_propagation(lxmf_data, from_peer=peer) if peer != None: - peer.add_handled_message(transient_id) + peer.queue_handled_message(transient_id) else: RNS.log("Invalid data structure received at propagation destination, ignoring", RNS.LOG_DEBUG) @@ -1763,6 +1784,24 @@ class LXMRouter: RNS.log("Error while unpacking received propagation resource", RNS.LOG_DEBUG) RNS.trace_exception(e) + def enqueue_peer_distribution(self, transient_id, from_peer): + self.peer_distribution_queue.append([transient_id, from_peer]) + + def flush_peer_distribution_queue(self): + if len(self.peer_distribution_queue) > 0: + entries = [] + while len(self.peer_distribution_queue) > 0: + entries.append(self.peer_distribution_queue.pop()) + + for peer_id in self.peers.copy(): + if peer_id in self.peers: + peer = self.peers[peer_id] + for entry in entries: + transient_id = entry[0] + from_peer = entry[1] + if peer != from_peer: + peer.queue_unhandled_message(transient_id) + def lxmf_propagation(self, lxmf_data, signal_local_delivery=None, signal_duplicate=None, is_paper_message=False, from_peer=None): no_stamp_enforcement = False if is_paper_message: @@ -1797,13 +1836,9 @@ class LXMRouter: msg_file.write(lxmf_data) msg_file.close() - self.propagation_entries[transient_id] = [destination_hash, file_path, time.time(), len(lxmf_data), [], []] - RNS.log("Received propagated LXMF message "+RNS.prettyhexrep(transient_id)+", adding to peer distribution queues...", RNS.LOG_DEBUG) - for peer_id in self.peers: - peer = self.peers[peer_id] - if peer != from_peer: - peer.new_propagation_message(transient_id) + self.propagation_entries[transient_id] = [destination_hash, file_path, time.time(), len(lxmf_data), [], []] + self.enqueue_peer_distribution(transient_id, from_peer) else: # TODO: Add message to sneakernet queues when implemented From c2a08ef35588ccd512a7ea7c9898c83e5fd2864e Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Tue, 21 Jan 2025 20:44:11 +0100 Subject: [PATCH 07/54] Enqueue and batch process distribution queue mappings --- LXMF/LXMRouter.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 1e62914..ee1dca8 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -656,15 +656,16 @@ class LXMRouter: time.sleep(LXMRouter.PROCESSING_INTERVAL) def flush_queues(self): - self.flush_peer_distribution_queue() - RNS.log("Calculating peer distribution queue mappings...", RNS.LOG_DEBUG); st = time.time() - for peer_id in self.peers.copy(): - if peer_id in self.peers: - peer = self.peers[peer_id] - if peer.queued_items(): - peer.process_queues() + if len(self.peers) > 0: + self.flush_peer_distribution_queue() + RNS.log("Calculating peer distribution queue mappings...", RNS.LOG_DEBUG); st = time.time() + for peer_id in self.peers.copy(): + if peer_id in self.peers: + peer = 
self.peers[peer_id] + if peer.queued_items(): + peer.process_queues() - RNS.log(f"Distribution queue mapping completed in {RNS.prettytime(time.time()-st)}", RNS.LOG_DEBUG) + RNS.log(f"Distribution queue mapping completed in {RNS.prettytime(time.time()-st)}", RNS.LOG_DEBUG) def clean_links(self): closed_links = [] From e69da2ed2a29b33af0acda059aa9a624b475a6e7 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Wed, 22 Jan 2025 01:37:09 +0100 Subject: [PATCH 08/54] Added static peers and peering limit --- LXMF/Handlers.py | 14 ++++-- LXMF/LXMPeer.py | 59 ++++++++++++++++++++--- LXMF/LXMRouter.py | 107 +++++++++++++++++++++++++++++++++++------ LXMF/Utilities/lxmd.py | 29 +++++++++-- 4 files changed, 179 insertions(+), 30 deletions(-) diff --git a/LXMF/Handlers.py b/LXMF/Handlers.py index 7420ea5..22c6cd3 100644 --- a/LXMF/Handlers.py +++ b/LXMF/Handlers.py @@ -51,12 +51,16 @@ class LXMFPropagationAnnounceHandler: except: propagation_transfer_limit = None - if data[0] == True: - if RNS.Transport.hops_to(destination_hash) <= self.lxmrouter.autopeer_maxdepth: - self.lxmrouter.peer(destination_hash, node_timebase, propagation_transfer_limit) + if destination_hash in self.lxmrouter.static_peers: + self.lxmrouter.peer(destination_hash, node_timebase, propagation_transfer_limit) - elif data[0] == False: - self.lxmrouter.unpeer(destination_hash, node_timebase) + else: + if data[0] == True: + if RNS.Transport.hops_to(destination_hash) <= self.lxmrouter.autopeer_maxdepth: + self.lxmrouter.peer(destination_hash, node_timebase, propagation_transfer_limit) + + elif data[0] == False: + self.lxmrouter.unpeer(destination_hash, node_timebase) except Exception as e: RNS.log("Error while evaluating propagation node announce, ignoring announce.", RNS.LOG_DEBUG) diff --git a/LXMF/LXMPeer.py b/LXMF/LXMPeer.py index 74a40c7..ec0cfe2 100644 --- a/LXMF/LXMPeer.py +++ b/LXMF/LXMPeer.py @@ -66,6 +66,31 @@ class LXMPeer: peer.propagation_transfer_limit = None else: peer.propagation_transfer_limit = None + + if "offered" in dictionary: + peer.offered = dictionary["offered"] + else: + peer.offered = 0 + + if "outgoing" in dictionary: + peer.outgoing = dictionary["outgoing"] + else: + peer.outgoing = 0 + + if "incoming" in dictionary: + peer.incoming = dictionary["incoming"] + else: + peer.incoming = 0 + + if "rx_bytes" in dictionary: + peer.rx_bytes = dictionary["rx_bytes"] + else: + peer.rx_bytes = 0 + + if "tx_bytes" in dictionary: + peer.tx_bytes = dictionary["tx_bytes"] + else: + peer.tx_bytes = 0 hm_count = 0 for transient_id in dictionary["handled_ids"]: @@ -96,6 +121,11 @@ class LXMPeer: dictionary["link_establishment_rate"] = self.link_establishment_rate dictionary["sync_transfer_rate"] = self.sync_transfer_rate dictionary["propagation_transfer_limit"] = self.propagation_transfer_limit + dictionary["offered"] = self.offered + dictionary["outgoing"] = self.outgoing + dictionary["incoming"] = self.incoming + dictionary["rx_bytes"] = self.rx_bytes + dictionary["tx_bytes"] = self.tx_bytes handled_ids = [] for transient_id in self.handled_messages: @@ -126,6 +156,12 @@ class LXMPeer: self.handled_messages_queue = deque() self.unhandled_messages_queue = deque() + self.offered = 0 # Messages offered to this peer + self.outgoing = 0 # Messages transferred to this peer + self.incoming = 0 # Messages received from this peer + self.rx_bytes = 0 # Bytes received from this peer + self.tx_bytes = 0 # Bytes sent to this peer + self._hm_count = 0 self._um_count = 0 self._hm_counts_synced = False @@ -212,7 +248,7 @@ class LXMPeer: 
cumulative_size += (lxm_size+per_message_overhead) unhandled_ids.append(transient_id) - RNS.log("Sending sync request to peer "+str(self.destination), RNS.LOG_DEBUG) + RNS.log(f"Offering {len(unhandled_ids)} messages to peer {RNS.prettyhexrep(self.destination.hash)}", RNS.LOG_VERBOSE) self.last_offer = unhandled_ids self.link.request(LXMPeer.OFFER_REQUEST_PATH, unhandled_ids, response_callback=self.offer_response, failed_callback=self.request_failed) self.state = LXMPeer.REQUEST_SENT @@ -242,10 +278,16 @@ class LXMPeer: if response == LXMPeer.ERROR_NO_IDENTITY: if self.link != None: - RNS.log("Remote peer indicated that no identification was received, retrying...", RNS.LOG_DEBUG) + RNS.log("Remote peer indicated that no identification was received, retrying...", RNS.LOG_VERBOSE) self.link.identify() self.state = LXMPeer.LINK_READY self.sync() + return + + elif response == LXMPeer.ERROR_NO_ACCESS: + RNS.log("Remote indicated that access was denied, breaking peering", RNS.LOG_VERBOSE) + self.router.unpeer(self.destination_hash) + return elif response == False: # Peer already has all advertised messages @@ -275,10 +317,9 @@ class LXMPeer: wanted_message_ids.append(transient_id) if len(wanted_messages) > 0: - RNS.log("Peer wanted "+str(len(wanted_messages))+" of the available messages", RNS.LOG_DEBUG) + RNS.log("Peer wanted "+str(len(wanted_messages))+" of the available messages", RNS.LOG_VERBOSE) lxm_list = [] - for message_entry in wanted_messages: file_path = message_entry[1] if os.path.isfile(file_path): @@ -294,7 +335,8 @@ class LXMPeer: self.state = LXMPeer.RESOURCE_TRANSFERRING else: - RNS.log("Peer "+RNS.prettyhexrep(self.destination_hash)+" did not request any of the available messages, sync completed", RNS.LOG_DEBUG) + RNS.log("Peer "+RNS.prettyhexrep(self.destination_hash)+" did not request any of the available messages, sync completed", RNS.LOG_VERBOSE) + self.offered += len(self.last_offer) if self.link != None: self.link.teardown() @@ -328,12 +370,15 @@ class LXMPeer: self.sync_transfer_rate = (resource.get_transfer_size()*8)/(time.time()-resource.sync_transfer_started) rate_str = f" at {RNS.prettyspeed(self.sync_transfer_rate)}" - RNS.log("Sync to peer "+RNS.prettyhexrep(self.destination_hash)+" completed"+rate_str, RNS.LOG_DEBUG) + RNS.log(f"Syncing {len(resource.transferred_messages)} messages to peer {RNS.prettyhexrep(self.destination_hash)} completed{rate_str}", RNS.LOG_VERBOSE) self.alive = True self.last_heard = time.time() + self.offered += len(self.last_offer) + self.outgoing += len(resource.transferred_messages) + self.tx_bytes += resource.get_data_size() else: - RNS.log("Resource transfer for LXMF peer sync failed to "+str(self.destination), RNS.LOG_DEBUG) + RNS.log("Resource transfer for LXMF peer sync failed to "+str(self.destination), RNS.LOG_VERBOSE) if self.link != None: self.link.teardown() diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index ee1dca8..bfe863d 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -37,6 +37,7 @@ class LXMRouter: NODE_ANNOUNCE_DELAY = 20 + MAX_PEERS = 50 AUTOPEER = True AUTOPEER_MAXDEPTH = 4 FASTEST_N_RANDOM_POOL = 2 @@ -67,7 +68,10 @@ class LXMRouter: ### Developer-facing API ############################## ####################################################### - def __init__(self, identity = None, storagepath = None, autopeer = AUTOPEER, autopeer_maxdepth = None, propagation_limit = PROPAGATION_LIMIT, delivery_limit = DELIVERY_LIMIT, enforce_ratchets = False, enforce_stamps = False): + def __init__(self, identity=None, 
storagepath=None, autopeer=AUTOPEER, autopeer_maxdepth=None, + propagation_limit=PROPAGATION_LIMIT, delivery_limit=DELIVERY_LIMIT, enforce_ratchets=False, + enforce_stamps=False, static_peers = [], max_peers=None, from_static_only=False): + random.seed(os.urandom(10)) self.pending_inbound = [] @@ -142,6 +146,27 @@ class LXMRouter: else: self.autopeer_maxdepth = LXMRouter.AUTOPEER_MAXDEPTH + if max_peers == None: + self.max_peers = LXMRouter.MAX_PEERS + else: + if type(max_peers) == int and max_peers >= 0: + self.max_peers = max_peers + else: + raise ValueError(f"Invalid value for max_peers: {max_peers}") + + self.from_static_only = from_static_only + if type(static_peers) != list: + raise ValueError(f"Invalid type supplied for static peer list: {type(static_peers)}") + else: + for static_peer in static_peers: + if type(static_peer) != bytes: + raise ValueError(f"Invalid static peer destination hash: {static_peer}") + else: + if len(static_peer) != RNS.Reticulum.TRUNCATED_HASHLENGTH//8: + raise ValueError(f"Invalid static peer destination hash: {static_peer}") + + self.static_peers = static_peers + self.peers = {} self.propagation_entries = {} @@ -245,8 +270,9 @@ class LXMRouter: def announce_propagation_node(self): def delayed_announce(): time.sleep(LXMRouter.NODE_ANNOUNCE_DELAY) + node_state = self.propagation_node and not self.from_static_only announce_data = [ - self.propagation_node, # Boolean flag signalling propagation node state + node_state, # Boolean flag signalling propagation node state int(time.time()), # Current node timebase self.propagation_per_transfer_limit, # Per-transfer limit for message propagation in kilobytes ] @@ -485,6 +511,11 @@ class LXMRouter: serialised_peer = serialised_peers.pop() peer = LXMPeer.from_bytes(serialised_peer, self) del serialised_peer + if peer.destination_hash in self.static_peers and peer.last_heard == 0: + # TODO: Allow path request responses through announce handler + # momentarily here, so peering config can be updated even if + # the static peer is not available to directly send an announce. + RNS.Transport.request_path(peer.destination_hash) if peer.identity != None: self.peers[peer.destination_hash] = peer lim_str = ", no transfer limit" @@ -497,6 +528,17 @@ class LXMRouter: del serialised_peers + if len(self.static_peers) > 0: + for static_peer in self.static_peers: + if not static_peer in self.peers: + RNS.log(f"Activating static peering with {RNS.prettyhexrep(static_peer)}", RNS.LOG_NOTICE) + self.peers[static_peer] = LXMPeer(self, static_peer) + if self.peers[static_peer].last_heard == 0: + # TODO: Allow path request responses through announce handler + # momentarily here, so peering config can be updated even if + # the static peer is not available to directly send an announce. 
+ RNS.Transport.request_path(static_peer) + RNS.log(f"Rebuilt synchronisation state for {len(self.peers)} peers in {RNS.prettytime(time.time()-st)}", RNS.LOG_NOTICE) self.propagation_node = True @@ -643,6 +685,11 @@ class LXMRouter: if self.processing_count % LXMRouter.JOB_PEERSYNC_INTERVAL == 0: self.sync_peers() + # def syncstats(self): + # for peer_id in self.peers: + # p = self.peers[peer_id] + # RNS.log(f"{RNS.prettyhexrep(peer_id)} O={p.offered} S={p.outgoing} I={p.incoming} TX={RNS.prettysize(p.tx_bytes)} RX={RNS.prettysize(p.rx_bytes)}") + def jobloop(self): while (True): # TODO: Improve this to scheduling, so manual @@ -1070,7 +1117,7 @@ class LXMRouter: self.flush_queues() if self.propagation_node: try: - st = time.time(); RNS.log("Saving peer synchronisation states to storage...", RNS.LOG_NOTICE) + st = time.time(); RNS.log(f"Saving {len(self.peers)} peer synchronisation states to storage...", RNS.LOG_NOTICE) serialised_peers = [] peer_dict = self.peers.copy() for peer_id in peer_dict: @@ -1081,7 +1128,7 @@ class LXMRouter: peers_file.write(msgpack.packb(serialised_peers)) peers_file.close() - RNS.log(f"Saved {len(serialised_peers)} peers to storage in {RNS.prettytime(time.time()-st)}", RNS.LOG_NOTICE) + RNS.log(f"Saved {len(serialised_peers)} peers to storage in {RNS.prettyshorttime(time.time()-st)}", RNS.LOG_NOTICE) except Exception as e: RNS.log("Could not save propagation node peers to storage. The contained exception was: "+str(e), RNS.LOG_ERROR) @@ -1605,14 +1652,18 @@ class LXMRouter: peer.peering_timebase = timestamp peer.last_heard = time.time() peer.propagation_transfer_limit = propagation_transfer_limit + RNS.log(f"Peering config updated for {RNS.prettyhexrep(destination_hash)}", RNS.LOG_VERBOSE) else: - peer = LXMPeer(self, destination_hash) - peer.alive = True - peer.last_heard = time.time() - peer.propagation_transfer_limit = propagation_transfer_limit - self.peers[destination_hash] = peer - RNS.log("Peered with "+str(peer.destination)) + if len(self.peers) < self.max_peers: + peer = LXMPeer(self, destination_hash) + peer.alive = True + peer.last_heard = time.time() + peer.propagation_transfer_limit = propagation_transfer_limit + self.peers[destination_hash] = peer + RNS.log(f"Peered with {RNS.prettyhexrep(destination_hash)}", RNS.LOG_NOTICE) + else: + RNS.log(f"Max peers reached, not peering with {RNS.prettyhexrep(destination_hash)}", RNS.LOG_DEBUG) def unpeer(self, destination_hash, timestamp = None): if timestamp == None: @@ -1633,7 +1684,8 @@ class LXMRouter: for peer_id in peers: peer = peers[peer_id] if time.time() > peer.last_heard + LXMPeer.MAX_UNREACHABLE: - culled_peers.append(peer_id) + if not peer_id in self.static_peers: + culled_peers.append(peer_id) else: if peer.state == LXMPeer.IDLE and len(peer.unhandled_messages) > 0: if peer.alive: @@ -1693,10 +1745,23 @@ class LXMRouter: self.active_propagation_links.append(link) def propagation_resource_advertised(self, resource): + if self.from_static_only: + remote_identity = resource.link.get_remote_identity() + if remote_identity == None: + RNS.log(f"Rejecting propagation resource from unidentified peer", RNS.LOG_DEBUG) + return False + else: + remote_destination = RNS.Destination(remote_identity, RNS.Destination.OUT, RNS.Destination.SINGLE, APP_NAME, "propagation") + remote_hash = remote_destination.hash + remote_str = RNS.prettyhexrep(remote_hash) + if not remote_hash in self.static_peers: + RNS.log(f"Rejecting propagation resource from {remote_str} not in static peers list", RNS.LOG_DEBUG) + return 
False + size = resource.get_data_size() limit = self.propagation_per_transfer_limit*1000 if limit != None and size > limit: - RNS.log("Rejecting "+RNS.prettysize(size)+" incoming LXMF propagation resource, since it exceeds the limit of "+RNS.prettysize(limit), RNS.LOG_DEBUG) + RNS.log(f"Rejecting {RNS.prettysize(size)} incoming propagation resource, since it exceeds the limit of {RNS.prettysize(limit)}", RNS.LOG_DEBUG) return False else: return True @@ -1723,6 +1788,14 @@ class LXMRouter: if remote_identity == None: return LXMPeer.ERROR_NO_IDENTITY else: + if self.from_static_only: + remote_destination = RNS.Destination(remote_identity, RNS.Destination.OUT, RNS.Destination.SINGLE, APP_NAME, "propagation") + remote_hash = remote_destination.hash + remote_str = RNS.prettyhexrep(remote_hash) + if not remote_hash in self.static_peers: + RNS.log(f"Rejecting propagation request from {remote_str} not in static peers list", RNS.LOG_DEBUG) + return LXMPeer.ERROR_NO_ACCESS + try: transient_ids = data wanted_ids = [] @@ -1745,7 +1818,6 @@ class LXMRouter: return None def propagation_resource_concluded(self, resource): - RNS.log("Transfer concluded for incoming propagation resource "+str(resource), RNS.LOG_DEBUG) if resource.status == RNS.Resource.COMPLETE: # TODO: The peer this was received from should # have the transient id added to its list of @@ -1757,22 +1829,29 @@ class LXMRouter: # This is a series of propagation messages from a peer or originator remote_timebase = data[0] remote_hash = None + remote_str = "unknown peer" remote_identity = resource.link.get_remote_identity() if remote_identity != None: remote_destination = RNS.Destination(remote_identity, RNS.Destination.OUT, RNS.Destination.SINGLE, APP_NAME, "propagation") remote_hash = remote_destination.hash + remote_str = RNS.prettyhexrep(remote_hash) if not remote_hash in self.peers: if self.autopeer and RNS.Transport.hops_to(remote_hash) <= self.autopeer_maxdepth: self.peer(remote_hash, remote_timebase) + else: + remote_str = f"peer {remote_str}" messages = data[1] + RNS.log(f"Received {len(messages)} messages from {remote_str}", RNS.LOG_VERBOSE) for lxmf_data in messages: peer = None transient_id = RNS.Identity.full_hash(lxmf_data) if remote_hash != None and remote_hash in self.peers: peer = self.peers[remote_hash] + peer.incoming += 1 + peer.rx_bytes += len(lxmf_data) self.lxmf_propagation(lxmf_data, from_peer=peer) if peer != None: @@ -1837,7 +1916,7 @@ class LXMRouter: msg_file.write(lxmf_data) msg_file.close() - RNS.log("Received propagated LXMF message "+RNS.prettyhexrep(transient_id)+", adding to peer distribution queues...", RNS.LOG_DEBUG) + RNS.log("Received propagated LXMF message "+RNS.prettyhexrep(transient_id)+", adding to peer distribution queues...", RNS.LOG_EXTREME) self.propagation_entries[transient_id] = [destination_hash, file_path, time.time(), len(lxmf_data), [], []] self.enqueue_peer_distribution(transient_id, from_peer) diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index 38e71b1..0c87a73 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -140,6 +140,24 @@ def apply_config(): else: active_configuration["prioritised_lxmf_destinations"] = [] + if "propagation" in lxmd_config and "static_peers" in lxmd_config["propagation"]: + static_peers = lxmd_config["propagation"].as_list("static_peers") + active_configuration["static_peers"] = [] + for static_peer in static_peers: + active_configuration["static_peers"].append(bytes.fromhex(static_peer)) + else: + active_configuration["static_peers"] 
= [] + + if "propagation" in lxmd_config and "max_peers" in lxmd_config["propagation"]: + active_configuration["max_peers"] = lxmd_config["propagation"].as_int("max_peers") + else: + active_configuration["max_peers"] = None + + if "propagation" in lxmd_config and "from_static_only" in lxmd_config["propagation"]: + active_configuration["from_static_only"] = lxmd_config["propagation"].as_bool("from_static_only") + else: + active_configuration["from_static_only"] = False + # Load various settings if "logging" in lxmd_config and "loglevel" in lxmd_config["logging"]: targetloglevel = lxmd_config["logging"].as_int("loglevel") @@ -305,7 +323,10 @@ def program_setup(configdir = None, rnsconfigdir = None, run_pn = False, on_inbo autopeer_maxdepth = active_configuration["autopeer_maxdepth"], propagation_limit = active_configuration["propagation_transfer_max_accepted_size"], delivery_limit = active_configuration["delivery_transfer_max_accepted_size"], - ) + max_peers = active_configuration["max_peers"], + static_peers = active_configuration["static_peers"], + from_static_only = active_configuration["from_static_only"]) + message_router.register_delivery_callback(lxmf_delivery) for destination_hash in active_configuration["ignored_lxmf_destinations"]: @@ -362,13 +383,13 @@ def jobs(): try: if "peer_announce_interval" in active_configuration and active_configuration["peer_announce_interval"] != None: if time.time() > last_peer_announce + active_configuration["peer_announce_interval"]: - RNS.log("Sending announce for LXMF delivery destination", RNS.LOG_EXTREME) + RNS.log("Sending announce for LXMF delivery destination", RNS.LOG_VERBOSE) message_router.announce(lxmf_destination.hash) last_peer_announce = time.time() if "node_announce_interval" in active_configuration and active_configuration["node_announce_interval"] != None: if time.time() > last_node_announce + active_configuration["node_announce_interval"]: - RNS.log("Sending announce for LXMF Propagation Node", RNS.LOG_EXTREME) + RNS.log("Sending announce for LXMF Propagation Node", RNS.LOG_VERBOSE) message_router.announce_propagation_node() last_node_announce = time.time() @@ -381,7 +402,7 @@ def deferred_start_jobs(): global active_configuration, last_peer_announce, last_node_announce global message_router, lxmf_destination time.sleep(DEFFERED_JOBS_DELAY) - RNS.log("Running deferred start jobs") + RNS.log("Running deferred start jobs", RNS.LOG_DEBUG) if active_configuration["peer_announce_at_start"]: RNS.log("Sending announce for LXMF delivery destination", RNS.LOG_EXTREME) message_router.announce(lxmf_destination.hash) From 68257a441ff1029054378185b09f4b61020e9d3e Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Wed, 22 Jan 2025 09:44:03 +0100 Subject: [PATCH 09/54] Set transfer limit on reverse auto-peer --- LXMF/LXMRouter.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index bfe863d..5465356 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -1839,7 +1839,12 @@ class LXMRouter: if not remote_hash in self.peers: if self.autopeer and RNS.Transport.hops_to(remote_hash) <= self.autopeer_maxdepth: - self.peer(remote_hash, remote_timebase) + # TODO: Query cache for an announce and get propagation + # transfer limit from that. For now, initialise it to a + # sane default value, and wait for an announce to arrive + # that will update the peering config to the actual limit. 
+ propagation_transfer_limit = LXMRouter.PROPAGATION_LIMIT//4 + self.peer(remote_hash, remote_timebase, propagation_transfer_limit) else: remote_str = f"peer {remote_str}" From 61b1ecce276631a4ec2c1165c33b5195e46e946d Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Wed, 22 Jan 2025 10:10:57 +0100 Subject: [PATCH 10/54] Updated readme --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index faced95..ed7e4f0 100644 --- a/README.md +++ b/README.md @@ -12,6 +12,7 @@ User-facing clients built on LXMF include: Community-provided tools and utilities for LXMF include: +- [LXMFy](https://lxmfy.quad4.io/) - [LXMF-Bot](https://github.com/randogoth/lxmf-bot) - [LXMF Messageboard](https://github.com/chengtripp/lxmf_messageboard) - [LXMEvent](https://github.com/faragher/LXMEvent) From 2c71cea7a0d2fc0a3ab5bbd26883befb5a0dd9fc Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 23 Jan 2025 14:13:08 +0100 Subject: [PATCH 11/54] Added local node stats request handler --- LXMF/LXMRouter.py | 134 ++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 131 insertions(+), 3 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 5465356..22ef3ac 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -64,6 +64,8 @@ class LXMRouter: PR_ALL_MESSAGES = 0x00 + STATS_GET_PATH = "/pn/get/stats" + ### Developer-facing API ############################## ####################################################### @@ -92,6 +94,7 @@ class LXMRouter: self.processing_count = 0 self.propagation_node = False + self.propagation_node_start_time = None if storagepath == None: raise ValueError("LXMF cannot be initialised without a storage path") @@ -135,6 +138,11 @@ class LXMRouter: self.identity = identity self.propagation_destination = RNS.Destination(self.identity, RNS.Destination.IN, RNS.Destination.SINGLE, APP_NAME, "propagation") + self.control_destination = None + self.client_propagation_messages_received = 0 + self.client_propagation_messages_served = 0 + self.unpeered_propagation_incoming = 0 + self.unpeered_propagation_rx_bytes = 0 if autopeer != None: self.autopeer = autopeer @@ -541,13 +549,35 @@ class LXMRouter: RNS.log(f"Rebuilt synchronisation state for {len(self.peers)} peers in {RNS.prettytime(time.time()-st)}", RNS.LOG_NOTICE) + try: + if os.path.isfile(self.storagepath+"/node_stats"): + node_stats_file = open(self.storagepath+"/node_stats", "rb") + data = node_stats_file.read() + node_stats_file.close() + node_stats = msgpack.unpackb(data) + + if not type(node_stats) == dict: + RNS.log("Invalid data format for loaded local node stats, node stats will be reset", RNS.LOG_ERROR) + else: + self.client_propagation_messages_received = node_stats["client_propagation_messages_received"] + self.client_propagation_messages_served = node_stats["client_propagation_messages_served"] + self.unpeered_propagation_incoming = node_stats["unpeered_propagation_incoming"] + self.unpeered_propagation_rx_bytes = node_stats["unpeered_propagation_rx_bytes"] + + except Exception as e: + RNS.log("Could not load local node stats. 
The contained exception was: "+str(e), RNS.LOG_ERROR) + self.propagation_node = True + self.propagation_node_start_time = time.time() self.propagation_destination.set_link_established_callback(self.propagation_link_established) self.propagation_destination.set_packet_callback(self.propagation_packet) self.propagation_destination.register_request_handler(LXMPeer.OFFER_REQUEST_PATH, self.offer_request, allow = RNS.Destination.ALLOW_ALL) self.propagation_destination.register_request_handler(LXMPeer.MESSAGE_GET_PATH, self.message_get_request, allow = RNS.Destination.ALLOW_ALL) + self.control_destination = RNS.Destination(self.identity, RNS.Destination.IN, RNS.Destination.SINGLE, APP_NAME, "propagation", "control") + self.control_destination.register_request_handler(LXMRouter.STATS_GET_PATH, self.stats_get_request, allow = RNS.Destination.ALLOW_LIST, allowed_list=[self.identity.hash]) + if self.message_storage_limit != None: limit_str = ", limit is "+RNS.prettysize(self.message_storage_limit) else: @@ -650,6 +680,76 @@ class LXMRouter: return False + ### Propagation Node Control ########################## + ####################################################### + + def compile_stats(self): + if not self.propagation_node: + return None + else: + peer_stats = {} + for peer_id in self.peers.copy(): + peer = self.peers[peer_id] + peer_stats[peer_id] = { + "type": "static" if peer_id in self.static_peers else "discovered", + "state": peer.state, + "alive": peer.alive, + "last_heard": int(peer.last_heard), + "next_sync_attempt": peer.next_sync_attempt, + "last_sync_attempt": peer.last_sync_attempt, + "sync_backoff": peer.sync_backoff, + "peering_timebase": peer.peering_timebase, + "ler": int(peer.link_establishment_rate), + "str": int(peer.sync_transfer_rate), + "transfer_limit": peer.propagation_transfer_limit, + "network_distance": RNS.Transport.hops_to(peer_id), + "rx_bytes": peer.rx_bytes, + "tx_bytes": peer.tx_bytes, + "messages": { + "offered": peer.offered, + "outgoing": peer.outgoing, + "incoming": peer.incoming, + }, + } + + node_stats = { + "identity_hash": self.identity.hash, + "destination_hash": self.propagation_destination.hash, + "uptime": time.time()-self.propagation_node_start_time, + "delivery_limit": self.delivery_per_transfer_limit, + "propagation_limit": self.propagation_per_transfer_limit, + "autopeer_maxdepth": self.autopeer_maxdepth, + "from_static_only": self.from_static_only, + "messagestore": { + "count": len(self.propagation_entries), + "bytes": self.message_storage_size(), + "limit": self.message_storage_limit, + }, + "clients" : { + "client_propagation_messages_received": self.client_propagation_messages_received, + "client_propagation_messages_served": self.client_propagation_messages_served, + }, + "unpeered_propagation_incoming": self.unpeered_propagation_incoming, + "unpeered_propagation_rx_bytes": self.unpeered_propagation_rx_bytes, + "static_peers": len(self.static_peers), + "discovered_peers": len(self.peers)-len(self.static_peers), + "total_peers": len(self.peers), + "max_peers": self.max_peers, + "peers": peer_stats, + } + + return node_stats + + def stats_get_request(self, path, data, request_id, remote_identity, requested_at): + RNS.log("Stats request", RNS.LOG_DEBUG) # TODO: Remove debug + if remote_identity == None: + return LXMPeer.ERROR_NO_IDENTITY + elif remote_identity.hash != self.identity.hash: + return LXMPeer.ERROR_NO_ACCESS + else: + return self.compile_stats() + + ### Utility & Maintenance ############################# 
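# The response served by stats_get_request() above is the plain dictionary
# built by compile_stats(), so callers can derive simple health metrics
# from it. A minimal sketch, not part of this patch, using only field
# names defined by compile_stats():

def peer_acceptance_rates(node_stats):
    # Maps each peer's destination hash to the share of offered messages
    # that peer actually accepted; 0.0 if nothing has been offered yet.
    rates = {}
    for peer_id, peer in node_stats["peers"].items():
        offered = peer["messages"]["offered"]
        outgoing = peer["messages"]["outgoing"]
        rates[peer_id] = 0.0 if offered == 0 else outgoing/offered
    return rates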
####################################################### @@ -970,7 +1070,7 @@ class LXMRouter: try: if len(self.locally_delivered_transient_ids) > 0: if not os.path.isdir(self.storagepath): - os.makedirs(self.storagepath) + os.makedirs(self.storagepath) with open(self.storagepath+"/local_deliveries", "wb") as locally_delivered_file: locally_delivered_file.write(msgpack.packb(self.locally_delivered_transient_ids)) @@ -982,7 +1082,7 @@ class LXMRouter: try: if len(self.locally_processed_transient_ids) > 0: if not os.path.isdir(self.storagepath): - os.makedirs(self.storagepath) + os.makedirs(self.storagepath) with open(self.storagepath+"/locally_processed", "wb") as locally_processed_file: locally_processed_file.write(msgpack.packb(self.locally_processed_transient_ids)) @@ -990,6 +1090,24 @@ class LXMRouter: except Exception as e: RNS.log("Could not save locally processed transient ID cache to storage. The contained exception was: "+str(e), RNS.LOG_ERROR) + def save_node_stats(self): + try: + if not os.path.isdir(self.storagepath): + os.makedirs(self.storagepath) + + with open(self.storagepath+"/node_stats", "wb") as stats_file: + node_stats = { + "client_propagation_messages_received": self.client_propagation_messages_received, + "client_propagation_messages_served": self.client_propagation_messages_served, + "unpeered_propagation_incoming": self.unpeered_propagation_incoming, + "unpeered_propagation_rx_bytes": self.unpeered_propagation_rx_bytes, + } + stats_file.write(msgpack.packb(node_stats)) + + except Exception as e: + RNS.log("Could not save local node stats to storage. The contained exception was: "+str(e), RNS.LOG_ERROR) + + def clean_outbound_stamp_costs(self): try: expired = [] @@ -1106,6 +1224,7 @@ class LXMRouter: self.propagation_destination.set_packet_callback(None) self.propagation_destination.deregister_request_handler(LXMPeer.OFFER_REQUEST_PATH) self.propagation_destination.deregister_request_handler(LXMPeer.MESSAGE_GET_PATH) + self.propagation_destination.deregister_request_handler(LXMRouter.STATS_GET_PATH) for link in self.active_propagation_links: try: if link.status == RNS.Link.ACTIVE: @@ -1135,6 +1254,7 @@ class LXMRouter: self.save_locally_delivered_transient_ids() self.save_locally_processed_transient_ids() + self.save_node_stats() def sigint_handler(self, signal, frame): if not self.exit_handler_running: @@ -1263,6 +1383,7 @@ class LXMRouter: except Exception as e: RNS.log("Error while processing message download request from "+RNS.prettyhexrep(remote_destination.hash)+". 
The contained exception was: "+str(e), RNS.LOG_ERROR) + self.client_propagation_messages_served += len(response_messages) return response_messages @@ -1777,6 +1898,7 @@ class LXMRouter: messages = data[1] for lxmf_data in messages: self.lxmf_propagation(lxmf_data) + self.client_propagation_messages_received += 1 packet.prove() @@ -1849,7 +1971,7 @@ class LXMRouter: remote_str = f"peer {remote_str}" messages = data[1] - RNS.log(f"Received {len(messages)} messages from {remote_str}", RNS.LOG_VERBOSE) + RNS.log(f"Received {len(messages)} message{"" if len(messages) == 1 else "s"} from {remote_str}", RNS.LOG_VERBOSE) for lxmf_data in messages: peer = None transient_id = RNS.Identity.full_hash(lxmf_data) @@ -1857,6 +1979,12 @@ class LXMRouter: peer = self.peers[remote_hash] peer.incoming += 1 peer.rx_bytes += len(lxmf_data) + else: + if remote_identity != None: + self.unpeered_propagation_incoming += 1 + self.unpeered_propagation_rx_bytes += len(lxmf_data) + else: + self.client_propagation_messages_received += 1 self.lxmf_propagation(lxmf_data, from_peer=peer) if peer != None: From f683e038910e45cf9be83b4dc01465ce8c8877ff Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 23 Jan 2025 14:15:12 +0100 Subject: [PATCH 12/54] Added lxmd status getter --- LXMF/Utilities/lxmd.py | 96 ++++++++++++++++++++++++++++++++++++++---- 1 file changed, 87 insertions(+), 9 deletions(-) diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index 0c87a73..1bc1d12 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -35,6 +35,7 @@ import time import os from LXMF._version import __version__ +from LXMF import APP_NAME from RNS.vendor.configobj import ConfigObj @@ -415,6 +416,75 @@ def deferred_start_jobs(): last_node_announce = time.time() threading.Thread(target=jobs, daemon=True).start() +def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = 0, timeout=5): + global configpath, identitypath, storagedir, lxmdir + global lxmd_config, active_configuration, targetloglevel + targetlogdest = RNS.LOG_STDOUT + + if configdir == None: + if os.path.isdir("/etc/lxmd") and os.path.isfile("/etc/lxmd/config"): + configdir = "/etc/lxmd" + elif os.path.isdir(RNS.Reticulum.userdir+"/.config/lxmd") and os.path.isfile(Reticulum.userdir+"/.config/lxmd/config"): + configdir = RNS.Reticulum.userdir+"/.config/lxmd" + else: + configdir = RNS.Reticulum.userdir+"/.lxmd" + + configpath = configdir+"/config" + identitypath = configdir+"/identity" + identity = None + + if not os.path.isdir(configdir): + RNS.log("Specified configuration directory does not exist, exiting now", RNS.LOG_ERROR) + exit(201) + if not os.path.isfile(identitypath): + RNS.log("Identity file not found in specified configuration directory, exiting now", RNS.LOG_ERROR) + exit(202) + else: + identity = RNS.Identity.from_file(identitypath) + if identity == None: + RNS.log("Could not load the Primary Identity from "+identitypath, RNS.LOG_ERROR) + exit(4) + + if targetloglevel == None: + targetloglevel = 3 + if verbosity != 0 or quietness != 0: + targetloglevel = targetloglevel+verbosity-quietness + + reticulum = RNS.Reticulum(configdir=rnsconfigdir, loglevel=targetloglevel, logdest=targetlogdest) + control_destination = RNS.Destination(identity, RNS.Destination.OUT, RNS.Destination.SINGLE, APP_NAME, "propagation", "control") + + timeout = time.time()+timeout + def check_timeout(): + if time.time() > timeout: + RNS.log("Getting lxmd statistics timed out, exiting now", RNS.LOG_ERROR) + exit(200) + else: + time.sleep(0.1) + + if 
not RNS.Transport.has_path(control_destination.hash): + RNS.Transport.request_path(control_destination.hash) + while not RNS.Transport.has_path(control_destination.hash): + check_timeout() + + link = RNS.Link(control_destination) + while not link.status == RNS.Link.ACTIVE: + check_timeout() + + link.identify(identity) + request_receipt = link.request(LXMF.LXMRouter.STATS_GET_PATH, data=None, response_callback=None, failed_callback=None) + while not request_receipt.get_status() == RNS.RequestReceipt.READY: + check_timeout() + + response = request_receipt.get_response() + if response == LXMF.LXMPeer.LXMPeer.ERROR_NO_IDENTITY: + RNS.log("Remote received no identity") + exit(203) + if response == LXMF.LXMPeer.LXMPeer.ERROR_NO_ACCESS: + RNS.log("Access denied") + exit(204) + else: + # TODO: Output stats + def main(): try: parser = argparse.ArgumentParser(description="Lightweight Extensible Messaging Daemon") @@ -425,6 +495,8 @@ def main(): parser.add_argument("-v", "--verbose", action="count", default=0) parser.add_argument("-q", "--quiet", action="count", default=0) parser.add_argument("-s", "--service", action="store_true", default=False, help="lxmd is running as a service and should log to file") + parser.add_argument("--status", action="store_true", default=False, help="display node status") + parser.add_argument("--timeout", action="store", default=5, help="timeout in seconds for query operations", type=float) parser.add_argument("--exampleconfig", action="store_true", default=False, help="print verbose configuration example to stdout and exit") parser.add_argument("--version", action="version", version="lxmd {version}".format(version=__version__)) @@ -434,15 +506,21 @@ def main(): print(__default_lxmd_config__) exit() - program_setup( - configdir = args.config, - rnsconfigdir=args.rnsconfig, - run_pn=args.propagation_node, - on_inbound=args.on_inbound, - verbosity=args.verbose, - quietness=args.quiet, - service=args.service - ) + if args.status: + get_status(configdir = args.config, + rnsconfigdir=args.rnsconfig, + verbosity=args.verbose, + quietness=args.quiet, + timeout=args.timeout) + exit() + + program_setup(configdir = args.config, + rnsconfigdir=args.rnsconfig, + run_pn=args.propagation_node, + on_inbound=args.on_inbound, + verbosity=args.verbose, + quietness=args.quiet, + service=args.service) except KeyboardInterrupt: print("") From 460645cea2abc0a72b8f5d6444184286c4c676e8 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 23 Jan 2025 14:15:31 +0100 Subject: [PATCH 13/54] Added lxmd status getter --- LXMF/Utilities/lxmd.py | 1 + 1 file changed, 1 insertion(+) diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index 1bc1d12..d8b24d3 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -484,6 +484,7 @@ def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = exit(204) else: # TODO: Output stats + pass def main(): try: From e3be7e0cfdb529dece6e51165b67f697c70724b3 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 23 Jan 2025 16:27:01 +0100 Subject: [PATCH 14/54] Persist last sync attempt --- LXMF/LXMPeer.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/LXMF/LXMPeer.py b/LXMF/LXMPeer.py index ec0cfe2..61602c3 100644 --- a/LXMF/LXMPeer.py +++ b/LXMF/LXMPeer.py @@ -91,6 +91,11 @@ class LXMPeer: peer.tx_bytes = dictionary["tx_bytes"] else: peer.tx_bytes = 0 + + if "last_sync_attempt" in dictionary: + peer.last_sync_attempt = dictionary["last_sync_attempt"] + else: + peer.last_sync_attempt = 0 hm_count = 0 for transient_id in 
dictionary["handled_ids"]: @@ -121,6 +126,7 @@ class LXMPeer: dictionary["link_establishment_rate"] = self.link_establishment_rate dictionary["sync_transfer_rate"] = self.sync_transfer_rate dictionary["propagation_transfer_limit"] = self.propagation_transfer_limit + dictionary["last_sync_attempt"] = self.last_sync_attempt dictionary["offered"] = self.offered dictionary["outgoing"] = self.outgoing dictionary["incoming"] = self.incoming From a198e96064fa47af3f8e1dc8db225fbb39f77f80 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 23 Jan 2025 16:27:23 +0100 Subject: [PATCH 15/54] Include unhandled message count in stats --- LXMF/LXMRouter.py | 1 + 1 file changed, 1 insertion(+) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 22ef3ac..8e824e4 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -709,6 +709,7 @@ class LXMRouter: "offered": peer.offered, "outgoing": peer.outgoing, "incoming": peer.incoming, + "unhandled": peer.unhandled_message_count }, } From 35dd70c59e09a7fa093f7e24b60065317e7507c9 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 23 Jan 2025 16:27:48 +0100 Subject: [PATCH 16/54] Format status and peers output --- LXMF/Utilities/lxmd.py | 140 +++++++++++++++++++++++++++++++++-------- 1 file changed, 115 insertions(+), 25 deletions(-) diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index d8b24d3..dd070fc 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -416,34 +416,45 @@ def deferred_start_jobs(): last_node_announce = time.time() threading.Thread(target=jobs, daemon=True).start() -def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = 0, timeout=5): +def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = 0, timeout=5, show_status=False, show_peers=False, identity_path=None): global configpath, identitypath, storagedir, lxmdir global lxmd_config, active_configuration, targetloglevel targetlogdest = RNS.LOG_STDOUT - if configdir == None: - if os.path.isdir("/etc/lxmd") and os.path.isfile("/etc/lxmd/config"): - configdir = "/etc/lxmd" - elif os.path.isdir(RNS.Reticulum.userdir+"/.config/lxmd") and os.path.isfile(Reticulum.userdir+"/.config/lxmd/config"): - configdir = RNS.Reticulum.userdir+"/.config/lxmd" + if identity_path == None: + if configdir == None: + if os.path.isdir("/etc/lxmd") and os.path.isfile("/etc/lxmd/config"): + configdir = "/etc/lxmd" + elif os.path.isdir(RNS.Reticulum.userdir+"/.config/lxmd") and os.path.isfile(Reticulum.userdir+"/.config/lxmd/config"): + configdir = RNS.Reticulum.userdir+"/.config/lxmd" + else: + configdir = RNS.Reticulum.userdir+"/.lxmd" + + configpath = configdir+"/config" + identitypath = configdir+"/identity" + identity = None + + if not os.path.isdir(configdir): + RNS.log("Specified configuration directory does not exist, exiting now", RNS.LOG_ERROR) + exit(201) + if not os.path.isfile(identitypath): + RNS.log("Identity file not found in specified configuration directory, exiting now", RNS.LOG_ERROR) + exit(202) else: - configdir = RNS.Reticulum.userdir+"/.lxmd" + identity = RNS.Identity.from_file(identitypath) + if identity == None: + RNS.log("Could not load the Primary Identity from "+identitypath, RNS.LOG_ERROR) + exit(4) - configpath = configdir+"/config" - identitypath = configdir+"/identity" - identity = None - - if not os.path.isdir(configdir): - RNS.log("Specified configuration directory does not exist, exiting now", RNS.LOG_ERROR) - exit(201) - if not os.path.isfile(identitypath): - RNS.log("Identity file not found in specified 
configuration directory, exiting now", RNS.LOG_ERROR) - exit(202) else: - identity = RNS.Identity.from_file(identitypath) - if identity == None: - RNS.log("Could not load the Primary Identity from "+identitypath, RNS.LOG_ERROR) - exit(4) + if not os.path.isfile(identity_path): + RNS.log("Identity file not found in specified configuration directory, exiting now", RNS.LOG_ERROR) + exit(202) + else: + identity = RNS.Identity.from_file(identity_path) + if identity == None: + RNS.log("Could not load the Primary Identity from "+identity_path, RNS.LOG_ERROR) + exit(4) if targetloglevel == None: targetloglevel = 3 @@ -483,8 +494,82 @@ def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = RNS.log("Access denied") exit(204) else: - # TODO: Output stats - pass + s = response + ms_util = f"{round((s["messagestore"]["bytes"]/s["messagestore"]["limit"])*100, 2)}%" + if s["from_static_only"]: + who_str = "static peers only" + else: + who_str = "all nodes" + + available_peers = 0 + unreachable_peers = 0 + peered_incoming = 0 + peered_outgoing = 0 + peered_rx_bytes = 0 + peered_tx_bytes = 0 + for peer_id in s["peers"]: + p = s["peers"][peer_id] + pm = p["messages"] + peered_incoming += pm["incoming"] + peered_outgoing += pm["outgoing"] + peered_rx_bytes += p["rx_bytes"] + peered_tx_bytes += p["tx_bytes"] + if p["alive"]: + available_peers += 1 + else: + unreachable_peers += 1 + + total_incoming = peered_incoming+s["unpeered_propagation_incoming"]+s["clients"]["client_propagation_messages_received"] + total_rx_bytes = peered_rx_bytes+s["unpeered_propagation_rx_bytes"] + df = round(peered_outgoing/total_incoming, 2) + + print(f"\nLXMF Propagation Node running on {RNS.prettyhexrep(s["destination_hash"])}, uptime is {RNS.prettytime(s["uptime"])}") + + if show_status: + print(f"Messagestore contains {s["messagestore"]["count"]} messages, {RNS.prettysize(s["messagestore"]["bytes"])} of {RNS.prettysize(s["messagestore"]["limit"])} ({ms_util} utilised)") + print(f"Accepting propagated messages from {who_str}, {RNS.prettysize(s["propagation_limit"]*1000)} per-transfer limit") + print(f"") + print(f"Peers : {s["total_peers"]} total (peer limit is {s["max_peers"]})") + print(f" {s["discovered_peers"]} discovered, {s["static_peers"]} static") + print(f" {available_peers} available, {unreachable_peers} unreachable") + print(f"") + print(f"Traffic : {s["unpeered_propagation_incoming"]} messages received from unpeered nodes ({RNS.prettysize(s["unpeered_propagation_rx_bytes"])})") + print(f" {peered_incoming} messages received from peered nodes ({RNS.prettysize(peered_rx_bytes)})") + print(f" {total_incoming} messages received from peered nodes ({RNS.prettysize(total_rx_bytes)})") + print(f" {peered_outgoing} messages transferred to peered nodes ({RNS.prettysize(peered_tx_bytes)})") + print(f" {s["clients"]["client_propagation_messages_received"]} messages received from clients") + print(f" {s["clients"]["client_propagation_messages_served"]} messages served to clients") + print(f" Distribution factor is {df}") + print(f"") + + if show_peers: + for peer_id in s["peers"]: + ind = " " + p = s["peers"][peer_id] + if p["type"] == "static": + t = "Static peer " + elif p["type"] == "discovered": + t = "Discovered peer " + else: + t = "Unknown peer " + a = "Available" if p["alive"] == True else "Unreachable" + h = max(time.time()-p["last_heard"], 0) + hops = p["network_distance"] + hs = f"{hops} hop away" if hops == 1 else f"{hops} hops away" + pm = p["messages"] + if p["last_sync_attempt"] != 0: + ls 
= f"last synced {RNS.prettytime(max(time.time()-p["last_sync_attempt"], 0))} ago" + else: + ls = "never synced" + + print(f"{ind}{t}{RNS.prettyhexrep(peer_id)}") + print(f"{ind*2}Status : {a}, {hs}, last heard {RNS.prettytime(h)} ago") + print(f"{ind*2}Speeds : {RNS.prettyspeed(p["str"])} STR, {RNS.prettyspeed(p["ler"])} LER, {RNS.prettysize(p["transfer_limit"]*1000)} transfer limit") + print(f"{ind*2}Messages : {pm["offered"]} offered, {pm["outgoing"]} outgoing, {pm["incoming"]} incoming") + print(f"{ind*2}Traffic : {RNS.prettysize(p["rx_bytes"])} received, {RNS.prettysize(p["tx_bytes"])} sent") + print(f"{ind*2}Sync state : {pm["unhandled"]} unhandled message{"" if pm["unhandled"] == 1 else "s"}, {ls}") + print("") + def main(): try: @@ -497,7 +582,9 @@ def main(): parser.add_argument("-q", "--quiet", action="count", default=0) parser.add_argument("-s", "--service", action="store_true", default=False, help="lxmd is running as a service and should log to file") parser.add_argument("--status", action="store_true", default=False, help="display node status") + parser.add_argument("--peers", action="store_true", default=False, help="display peered nodes") parser.add_argument("--timeout", action="store", default=5, help="timeout in seconds for query operations", type=float) + parser.add_argument("--identity", action="store", default=None, help="path to identity used for query request", type=str) parser.add_argument("--exampleconfig", action="store_true", default=False, help="print verbose configuration example to stdout and exit") parser.add_argument("--version", action="version", version="lxmd {version}".format(version=__version__)) @@ -507,12 +594,15 @@ def main(): print(__default_lxmd_config__) exit() - if args.status: + if args.status or args.peers: get_status(configdir = args.config, rnsconfigdir=args.rnsconfig, verbosity=args.verbose, quietness=args.quiet, - timeout=args.timeout) + timeout=args.timeout, + show_status=args.status, + show_peers=args.peers, + identity_path=args.identity) exit() program_setup(configdir = args.config, From a87458d25f794d84c68f0c4212fedc91bcd7e7fb Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 23 Jan 2025 16:28:11 +0100 Subject: [PATCH 17/54] Updated version --- LXMF/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LXMF/_version.py b/LXMF/_version.py index 43c4ab0..22049ab 100644 --- a/LXMF/_version.py +++ b/LXMF/_version.py @@ -1 +1 @@ -__version__ = "0.6.1" +__version__ = "0.6.2" From fe59b265c50ce87ca33e183b2b154b3eaaf163c0 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 23 Jan 2025 16:54:12 +0100 Subject: [PATCH 18/54] Fixed fstrings not working on Python < 3.12 --- LXMF/LXMRouter.py | 3 ++- LXMF/Utilities/lxmd.py | 40 ++++++++++++++++++++++++++-------------- 2 files changed, 28 insertions(+), 15 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 8e824e4..0358428 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -1972,7 +1972,8 @@ class LXMRouter: remote_str = f"peer {remote_str}" messages = data[1] - RNS.log(f"Received {len(messages)} message{"" if len(messages) == 1 else "s"} from {remote_str}", RNS.LOG_VERBOSE) + ms = "" if len(messages) == 1 else "s" + RNS.log(f"Received {len(messages)} message{ms} from {remote_str}", RNS.LOG_VERBOSE) for lxmf_data in messages: peer = None transient_id = RNS.Identity.full_hash(lxmf_data) diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index dd070fc..54e0b54 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -495,7 +495,8 @@ 
def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = exit(204) else: s = response - ms_util = f"{round((s["messagestore"]["bytes"]/s["messagestore"]["limit"])*100, 2)}%" + mutil = round((s["messagestore"]["bytes"]/s["messagestore"]["limit"])*100, 2) + ms_util = f"{mutil}%" if s["from_static_only"]: who_str = "static peers only" else: @@ -523,22 +524,28 @@ def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = total_rx_bytes = peered_rx_bytes+s["unpeered_propagation_rx_bytes"] df = round(peered_outgoing/total_incoming, 2) - print(f"\nLXMF Propagation Node running on {RNS.prettyhexrep(s["destination_hash"])}, uptime is {RNS.prettytime(s["uptime"])}") + dhs = RNS.prettyhexrep(s["destination_hash"]); uts = RNS.prettytime(s["uptime"]) + print(f"\nLXMF Propagation Node running on {dhs}, uptime is {uts}") if show_status: - print(f"Messagestore contains {s["messagestore"]["count"]} messages, {RNS.prettysize(s["messagestore"]["bytes"])} of {RNS.prettysize(s["messagestore"]["limit"])} ({ms_util} utilised)") - print(f"Accepting propagated messages from {who_str}, {RNS.prettysize(s["propagation_limit"]*1000)} per-transfer limit") + msb = RNS.prettysize(s["messagestore"]["bytes"]); msl = RNS.prettysize(s["messagestore"]["limit"]) + ptl = RNS.prettysize(s["propagation_limit"]*1000); uprx = RNS.prettysize(s["unpeered_propagation_rx_bytes"]) + mscnt = s["messagestore"]["count"]; stp = s["total_peers"]; smp = s["max_peers"]; sdp = s["discovered_peers"] + ssp = s["static_peers"]; cprr = s["clients"]["client_propagation_messages_received"] + cprs = s["clients"]["client_propagation_messages_served"]; upi = s["unpeered_propagation_incoming"] + print(f"Messagestore contains {mscnt} messages, {msb} ({ms_util} utilised of {msl})") + print(f"Accepting propagated messages from {who_str}, {ptl} per-transfer limit") print(f"") - print(f"Peers : {s["total_peers"]} total (peer limit is {s["max_peers"]})") - print(f" {s["discovered_peers"]} discovered, {s["static_peers"]} static") + print(f"Peers : {stp} total (peer limit is {smp})") + print(f" {sdp} discovered, {ssp} static") print(f" {available_peers} available, {unreachable_peers} unreachable") print(f"") - print(f"Traffic : {s["unpeered_propagation_incoming"]} messages received from unpeered nodes ({RNS.prettysize(s["unpeered_propagation_rx_bytes"])})") + print(f"Traffic : {upi} messages received from unpeered nodes ({uprx})") print(f" {peered_incoming} messages received from peered nodes ({RNS.prettysize(peered_rx_bytes)})") print(f" {total_incoming} messages received from peered nodes ({RNS.prettysize(total_rx_bytes)})") print(f" {peered_outgoing} messages transferred to peered nodes ({RNS.prettysize(peered_tx_bytes)})") - print(f" {s["clients"]["client_propagation_messages_received"]} messages received from clients") - print(f" {s["clients"]["client_propagation_messages_served"]} messages served to clients") + print(f" {cprr} messages received from clients") + print(f" {cprs} messages served to clients") print(f" Distribution factor is {df}") print(f"") @@ -558,16 +565,21 @@ def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = hs = f"{hops} hop away" if hops == 1 else f"{hops} hops away" pm = p["messages"] if p["last_sync_attempt"] != 0: - ls = f"last synced {RNS.prettytime(max(time.time()-p["last_sync_attempt"], 0))} ago" + lsa = p["last_sync_attempt"] + ls = f"last synced {RNS.prettytime(max(time.time()-lsa, 0))} ago" else: ls = "never synced" + sstr = RNS.prettyspeed(p["str"]); 
sler = RNS.prettyspeed(p["ler"]); stl = RNS.prettysize(p["transfer_limit"]*1000) + srxb = RNS.prettysize(p["rx_bytes"]); stxb = RNS.prettysize(p["tx_bytes"]); pmo = pm["offered"]; pmout = pm["outgoing"] + pmi = pm["incoming"]; pmuh = pm["unhandled"] print(f"{ind}{t}{RNS.prettyhexrep(peer_id)}") print(f"{ind*2}Status : {a}, {hs}, last heard {RNS.prettytime(h)} ago") - print(f"{ind*2}Speeds : {RNS.prettyspeed(p["str"])} STR, {RNS.prettyspeed(p["ler"])} LER, {RNS.prettysize(p["transfer_limit"]*1000)} transfer limit") - print(f"{ind*2}Messages : {pm["offered"]} offered, {pm["outgoing"]} outgoing, {pm["incoming"]} incoming") - print(f"{ind*2}Traffic : {RNS.prettysize(p["rx_bytes"])} received, {RNS.prettysize(p["tx_bytes"])} sent") - print(f"{ind*2}Sync state : {pm["unhandled"]} unhandled message{"" if pm["unhandled"] == 1 else "s"}, {ls}") + print(f"{ind*2}Speeds : {sstr} STR, {sler} LER, {stl} transfer limit") + print(f"{ind*2}Messages : {pmo} offered, {pmout} outgoing, {pmi} incoming") + print(f"{ind*2}Traffic : {srxb} received, {stxb} sent") + ms = "" if pm["unhandled"] == 1 else "s" + print(f"{ind*2}Sync state : {pmuh} unhandled message{ms}, {ls}") print("") From 70186cf8d9fc780eba6ce39494964c31b2519a57 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 23 Jan 2025 17:07:20 +0100 Subject: [PATCH 19/54] Fixed typo --- LXMF/Utilities/lxmd.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index 54e0b54..bb29661 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -542,7 +542,7 @@ def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = print(f"") print(f"Traffic : {upi} messages received from unpeered nodes ({uprx})") print(f" {peered_incoming} messages received from peered nodes ({RNS.prettysize(peered_rx_bytes)})") - print(f" {total_incoming} messages received from peered nodes ({RNS.prettysize(total_rx_bytes)})") + print(f" {total_incoming} messages received from all nodes ({RNS.prettysize(total_rx_bytes)})") print(f" {peered_outgoing} messages transferred to peered nodes ({RNS.prettysize(peered_tx_bytes)})") print(f" {cprr} messages received from clients") print(f" {cprs} messages served to clients") From a3e3868f9258ed1f0b930e85a8993234440d448d Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 23 Jan 2025 17:09:40 +0100 Subject: [PATCH 20/54] Changed formatting --- LXMF/Utilities/lxmd.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index bb29661..415ecb6 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -542,7 +542,7 @@ def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = print(f"") print(f"Traffic : {upi} messages received from unpeered nodes ({uprx})") print(f" {peered_incoming} messages received from peered nodes ({RNS.prettysize(peered_rx_bytes)})") - print(f" {total_incoming} messages received from all nodes ({RNS.prettysize(total_rx_bytes)})") + print(f" {total_incoming} messages received in total ({RNS.prettysize(total_rx_bytes)})") print(f" {peered_outgoing} messages transferred to peered nodes ({RNS.prettysize(peered_tx_bytes)})") print(f" {cprr} messages received from clients") print(f" {cprs} messages served to clients") From fb4bf9b0b9307cb872e97619a685e8adf44a467e Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 23 Jan 2025 17:36:30 +0100 Subject: [PATCH 21/54] Cleanup --- LXMF/LXMRouter.py | 1 - 1 file changed, 1 deletion(-) diff --git 
a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 0358428..bd63e17 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -742,7 +742,6 @@ class LXMRouter: return node_stats def stats_get_request(self, path, data, request_id, remote_identity, requested_at): - RNS.log("Stats request", RNS.LOG_DEBUG) # TODO: Remove debug if remote_identity == None: return LXMPeer.ERROR_NO_IDENTITY elif remote_identity.hash != self.identity.hash: From cdea838a6c38f0b9a4f7d983b48361565bbc835f Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 23 Jan 2025 17:43:24 +0100 Subject: [PATCH 22/54] Updated status output --- LXMF/Utilities/lxmd.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index 415ecb6..b52d5ae 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -544,8 +544,8 @@ def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = print(f" {peered_incoming} messages received from peered nodes ({RNS.prettysize(peered_rx_bytes)})") print(f" {total_incoming} messages received in total ({RNS.prettysize(total_rx_bytes)})") print(f" {peered_outgoing} messages transferred to peered nodes ({RNS.prettysize(peered_tx_bytes)})") - print(f" {cprr} messages received from clients") - print(f" {cprs} messages served to clients") + print(f" {cprr} propagation messages received diretly from clients") + print(f" {cprs} propagation messages served to clients") print(f" Distribution factor is {df}") print(f"") From aa57b16cf562d8f9409e877f959d9751f8c5b300 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Fri, 24 Jan 2025 00:09:36 +0100 Subject: [PATCH 23/54] Fixed #23 --- LXMF/LXMRouter.py | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index bd63e17..b0a4cc8 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -777,13 +777,16 @@ class LXMRouter: self.clean_transient_id_caches() if self.processing_count % LXMRouter.JOB_STORE_INTERVAL == 0: - self.clean_message_store() + if self.propagation_node == True: + self.clean_message_store() if self.processing_count % LXMRouter.JOB_PEERINGEST_INTERVAL == 0: - self.flush_queues() + if self.propagation_node == True: + self.flush_queues() if self.processing_count % LXMRouter.JOB_PEERSYNC_INTERVAL == 0: - self.sync_peers() + if self.propagation_node == True: + self.sync_peers() # def syncstats(self): # for peer_id in self.peers: @@ -986,12 +989,12 @@ class LXMRouter: lxm_size = self.propagation_entries[transient_id][3] return lxm_size - def clean_message_store(self): + RNS.log("Cleaning message store", RNS.LOG_VERBOSE) # Check and remove expired messages now = time.time() removed_entries = {} - for transient_id in self.propagation_entries: + for transient_id in self.propagation_entries.copy(): entry = self.propagation_entries[transient_id] filepath = entry[1] components = filepath.split("_") @@ -999,7 +1002,7 @@ class LXMRouter: if len(components) == 2 and float(components[1]) > 0 and len(os.path.split(components[0])[1]) == (RNS.Identity.HASHLENGTH//8)*2: timestamp = float(components[1]) if now > timestamp+LXMRouter.MESSAGE_EXPIRY: - RNS.log("Purging message "+RNS.prettyhexrep(transient_id)+" due to expiry", RNS.LOG_DEBUG) + RNS.log("Purging message "+RNS.prettyhexrep(transient_id)+" due to expiry", RNS.LOG_EXTREME) removed_entries[transient_id] = filepath else: RNS.log("Purging message "+RNS.prettyhexrep(transient_id)+" due to invalid file path", RNS.LOG_WARNING) @@ -1017,7 +1020,7 @@ class LXMRouter: 
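# Background for the .copy() added above, not part of this patch: Python
# raises "RuntimeError: dictionary changed size during iteration" when a
# dict gains or loses keys while being iterated, which can happen here if
# new propagation messages are ingested while the store is being cleaned.
# Iterating a snapshot avoids the error; entries added mid-run are simply
# handled on the next cleanup pass.
entries = {"a": 3, "b": 1}
for key in entries.copy():    # snapshot of the keys at this instant
    if entries[key] > 2:
        del entries[key]      # safe: the dict being iterated is the copy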
RNS.log("Could not remove "+RNS.prettyhexrep(transient_id)+" from message store. The contained exception was: "+str(e), RNS.LOG_ERROR) if removed_count > 0: - RNS.log("Cleaned "+str(removed_count)+" entries from the message store", RNS.LOG_DEBUG) + RNS.log("Cleaned "+str(removed_count)+" entries from the message store", RNS.LOG_VERBOSE) # Check size of message store and cull if needed try: @@ -1029,7 +1032,7 @@ class LXMRouter: bytes_cleaned = 0 weighted_entries = [] - for transient_id in self.propagation_entries: + for transient_id in self.propagation_entries.copy(): weighted_entries.append([ self.propagation_entries[transient_id], self.get_weight(transient_id), From a8cc5f41cf92a7e35b80bc2f6b55292ad4cf170d Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Fri, 24 Jan 2025 00:21:37 +0100 Subject: [PATCH 24/54] Fixed typo --- LXMF/Utilities/lxmd.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index b52d5ae..2f21108 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -540,11 +540,11 @@ def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = print(f" {sdp} discovered, {ssp} static") print(f" {available_peers} available, {unreachable_peers} unreachable") print(f"") - print(f"Traffic : {upi} messages received from unpeered nodes ({uprx})") + print(f"Traffic : {total_incoming} messages received in total ({RNS.prettysize(total_rx_bytes)})") print(f" {peered_incoming} messages received from peered nodes ({RNS.prettysize(peered_rx_bytes)})") - print(f" {total_incoming} messages received in total ({RNS.prettysize(total_rx_bytes)})") + print(f" {upi} messages received from unpeered nodes ({uprx})") print(f" {peered_outgoing} messages transferred to peered nodes ({RNS.prettysize(peered_tx_bytes)})") - print(f" {cprr} propagation messages received diretly from clients") + print(f" {cprr} propagation messages received directly from clients") print(f" {cprs} propagation messages served to clients") print(f" Distribution factor is {df}") print(f"") From 6d2eb4f97375dc2c637dd531d94a11738d2cb7ed Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Fri, 24 Jan 2025 00:26:47 +0100 Subject: [PATCH 25/54] Updated default config --- LXMF/Utilities/lxmd.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index 2f21108..7f54835 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -127,7 +127,7 @@ def apply_config(): if active_configuration["message_storage_limit"] < 0.005: active_configuration["message_storage_limit"] = 0.005 else: - active_configuration["message_storage_limit"] = 2000 + active_configuration["message_storage_limit"] = 500 if "propagation" in lxmd_config and "propagation_transfer_max_accepted_size" in lxmd_config["propagation"]: active_configuration["propagation_transfer_max_accepted_size"] = lxmd_config["propagation"].as_float("propagation_transfer_max_accepted_size") @@ -679,9 +679,9 @@ propagation_transfer_max_accepted_size = 256 # LXMF prioritises keeping messages that are # new and small. Large and old messages will # be removed first. This setting is optional -# and defaults to 2 gigabytes. +# and defaults to 500 megabytes. 
-# message_storage_limit = 2000 +# message_storage_limit = 500 # You can tell the LXMF message router to # prioritise storage for one or more From 962d9c90d1c468e95589b15ccaf2384a379dea35 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Fri, 24 Jan 2025 13:50:56 +0100 Subject: [PATCH 26/54] Added wanted inbound peers to PN announce data --- LXMF/Handlers.py | 9 +++++++-- LXMF/LXMRouter.py | 10 ++++++++-- 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/LXMF/Handlers.py b/LXMF/Handlers.py index 22c6cd3..ea8960d 100644 --- a/LXMF/Handlers.py +++ b/LXMF/Handlers.py @@ -45,6 +45,11 @@ class LXMFPropagationAnnounceHandler: if pn_announce_data_is_valid(data): node_timebase = data[1] propagation_transfer_limit = None + if len(data) >= 4: + try: + wanted_inbound_peers = int(data[3]) + except: + wanted_inbound_peers = None if len(data) >= 3: try: propagation_transfer_limit = float(data[2]) @@ -52,12 +57,12 @@ class LXMFPropagationAnnounceHandler: propagation_transfer_limit = None if destination_hash in self.lxmrouter.static_peers: - self.lxmrouter.peer(destination_hash, node_timebase, propagation_transfer_limit) + self.lxmrouter.peer(destination_hash, node_timebase, propagation_transfer_limit, wanted_inbound_peers) else: if data[0] == True: if RNS.Transport.hops_to(destination_hash) <= self.lxmrouter.autopeer_maxdepth: - self.lxmrouter.peer(destination_hash, node_timebase, propagation_transfer_limit) + self.lxmrouter.peer(destination_hash, node_timebase, propagation_transfer_limit, wanted_inbound_peers) elif data[0] == False: self.lxmrouter.unpeer(destination_hash, node_timebase) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index b0a4cc8..9eccedc 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -283,6 +283,7 @@ class LXMRouter: node_state, # Boolean flag signalling propagation node state int(time.time()), # Current node timebase self.propagation_per_transfer_limit, # Per-transfer limit for message propagation in kilobytes + self.get_wanted_inbound_peers(), # How many more inbound peers this node wants ] data = msgpack.packb(announce_data) @@ -888,6 +889,10 @@ class LXMRouter: self.save_outbound_stamp_costs() threading.Thread(target=self.save_outbound_stamp_costs, daemon=True).start() + def get_wanted_inbound_peers(self): + # TODO: Implement + return None + def get_announce_app_data(self, destination_hash): if destination_hash in self.delivery_destinations: delivery_destination = self.delivery_destinations[destination_hash] @@ -1766,7 +1771,7 @@ class LXMRouter: ### Peer Sync & Propagation ########################### ####################################################### - def peer(self, destination_hash, timestamp, propagation_transfer_limit): + def peer(self, destination_hash, timestamp, propagation_transfer_limit, wanted_inbound_peers = None): if destination_hash in self.peers: peer = self.peers[destination_hash] if timestamp > peer.peering_timebase: @@ -1969,7 +1974,8 @@ class LXMRouter: # sane default value, and wait for an announce to arrive # that will update the peering config to the actual limit. 
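# A sketch of why the length guards in the announce handler above keep
# older nodes compatible; not part of this patch, and it assumes the
# vendored umsgpack used by LXMF. A node without this patch announces
# three fields, a node with it announces four, and both parse cleanly:
import RNS.vendor.umsgpack as msgpack

def parse_pn_announce_app_data(app_data):
    data = msgpack.unpackb(app_data)
    node_state = data[0]
    node_timebase = data[1]
    transfer_limit = float(data[2]) if len(data) >= 3 and data[2] != None else None
    wanted_inbound_peers = int(data[3]) if len(data) >= 4 and data[3] != None else None
    return node_state, node_timebase, transfer_limit, wanted_inbound_peers

old_style = msgpack.packb([True, 1737000000, 256])     # node without this patch
new_style = msgpack.packb([True, 1737000000, 256, 8])  # example value: wants 8 more inbound peers
assert parse_pn_announce_app_data(old_style)[3] == None
assert parse_pn_announce_app_data(new_style)[3] == 8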
propagation_transfer_limit = LXMRouter.PROPAGATION_LIMIT//4 - self.peer(remote_hash, remote_timebase, propagation_transfer_limit) + wanted_inbound_peers = None + self.peer(remote_hash, remote_timebase, propagation_transfer_limit, wanted_inbound_peers) else: remote_str = f"peer {remote_str}" From cec903a4dcc878f14f8cd8be6a9abc54868cbea6 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Fri, 24 Jan 2025 14:05:12 +0100 Subject: [PATCH 27/54] Added status query API function --- LXMF/Handlers.py | 1 + LXMF/LXMPeer.py | 1 + LXMF/Utilities/lxmd.py | 60 ++++++++++++++++++++++++------------------ 3 files changed, 37 insertions(+), 25 deletions(-) diff --git a/LXMF/Handlers.py b/LXMF/Handlers.py index ea8960d..26a5df6 100644 --- a/LXMF/Handlers.py +++ b/LXMF/Handlers.py @@ -45,6 +45,7 @@ class LXMFPropagationAnnounceHandler: if pn_announce_data_is_valid(data): node_timebase = data[1] propagation_transfer_limit = None + wanted_inbound_peers = None if len(data) >= 4: try: wanted_inbound_peers = int(data[3]) diff --git a/LXMF/LXMPeer.py b/LXMF/LXMPeer.py index 61602c3..5036528 100644 --- a/LXMF/LXMPeer.py +++ b/LXMF/LXMPeer.py @@ -20,6 +20,7 @@ class LXMPeer: ERROR_NO_IDENTITY = 0xf0 ERROR_NO_ACCESS = 0xf1 + ERROR_TIMEOUT = 0xfe # Maximum amount of time a peer can # be unreachable before it is removed diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index 7f54835..a06d60c 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -416,6 +416,36 @@ def deferred_start_jobs(): last_node_announce = time.time() threading.Thread(target=jobs, daemon=True).start() +def query_status(identity, timeout=5, exit_on_fail=False): + control_destination = RNS.Destination(identity, RNS.Destination.OUT, RNS.Destination.SINGLE, APP_NAME, "propagation", "control") + + timeout = time.time()+timeout + def check_timeout(): + if time.time() > timeout: + RNS.log("Getting lxmd statistics timed out, exiting now", RNS.LOG_ERROR) + if exit_on_fail: + exit(200) + else: + return LXMF.LXMPeer.LXMPeer.ERROR_TIMEOUT + else: + time.sleep(0.1) + + if not RNS.Transport.has_path(control_destination.hash): + RNS.Transport.request_path(control_destination.hash) + while not RNS.Transport.has_path(control_destination.hash): + check_timeout() + + link = RNS.Link(control_destination) + while not link.status == RNS.Link.ACTIVE: + check_timeout() + + link.identify(identity) + request_receipt = link.request(LXMF.LXMRouter.STATS_GET_PATH, data=None, response_callback=None, failed_callback=None) + while not request_receipt.get_status() == RNS.RequestReceipt.READY: + check_timeout() + + return request_receipt.get_response() + def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = 0, timeout=5, show_status=False, show_peers=False, identity_path=None): global configpath, identitypath, storagedir, lxmdir global lxmd_config, active_configuration, targetloglevel @@ -462,31 +492,8 @@ def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = targetloglevel = targetloglevel+verbosity-quietness reticulum = RNS.Reticulum(configdir=rnsconfigdir, loglevel=targetloglevel, logdest=targetlogdest) - control_destination = RNS.Destination(identity, RNS.Destination.OUT, RNS.Destination.SINGLE, APP_NAME, "propagation", "control") + response = query_status(identity, timeout=timeout, exit_on_fail=True) - timeout = time.time()+timeout - def check_timeout(): - if time.time() > timeout: - RNS.log("Getting lxmd statistics timed out, exiting now", RNS.LOG_ERROR) - exit(200) - else: - time.sleep(0.1) - - if not 
RNS.Transport.has_path(control_destination.hash): - RNS.Transport.request_path(control_destination.hash) - while not RNS.Transport.has_path(control_destination.hash): - check_timeout() - - link = RNS.Link(control_destination) - while not link.status == RNS.Link.ACTIVE: - check_timeout() - - link.identify(identity) - request_receipt = link.request(LXMF.LXMRouter.STATS_GET_PATH, data=None, response_callback=None, failed_callback=None) - while not request_receipt.get_status() == RNS.RequestReceipt.READY: - check_timeout() - - response = request_receipt.get_response() if response == LXMF.LXMPeer.LXMPeer.ERROR_NO_IDENTITY: RNS.log("Remote received no identity") exit(203) @@ -550,6 +557,9 @@ def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = print(f"") if show_peers: + if not show_status: + print("") + for peer_id in s["peers"]: ind = " " p = s["peers"][peer_id] @@ -562,7 +572,7 @@ def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = a = "Available" if p["alive"] == True else "Unreachable" h = max(time.time()-p["last_heard"], 0) hops = p["network_distance"] - hs = f"{hops} hop away" if hops == 1 else f"{hops} hops away" + hs = "hops unknown" if hops == RNS.Transport.PATHFINDER_M else f"{hops} hop away" if hops == 1 else f"{hops} hops away" pm = p["messages"] if p["last_sync_attempt"] != 0: lsa = p["last_sync_attempt"] From 26a10cce8f8a572553084c69603ca6605f2672fd Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sun, 26 Jan 2025 01:13:11 +0100 Subject: [PATCH 28/54] Status query return code --- LXMF/Utilities/lxmd.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index a06d60c..73d0eb0 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -422,8 +422,8 @@ def query_status(identity, timeout=5, exit_on_fail=False): timeout = time.time()+timeout def check_timeout(): if time.time() > timeout: - RNS.log("Getting lxmd statistics timed out, exiting now", RNS.LOG_ERROR) if exit_on_fail: + RNS.log("Getting lxmd statistics timed out, exiting now", RNS.LOG_ERROR) exit(200) else: return LXMF.LXMPeer.LXMPeer.ERROR_TIMEOUT @@ -433,16 +433,22 @@ def query_status(identity, timeout=5, exit_on_fail=False): if not RNS.Transport.has_path(control_destination.hash): RNS.Transport.request_path(control_destination.hash) while not RNS.Transport.has_path(control_destination.hash): - check_timeout() + tc = check_timeout() + if tc: + return tc link = RNS.Link(control_destination) while not link.status == RNS.Link.ACTIVE: - check_timeout() + tc = check_timeout() + if tc: + return tc link.identify(identity) request_receipt = link.request(LXMF.LXMRouter.STATS_GET_PATH, data=None, response_callback=None, failed_callback=None) while not request_receipt.get_status() == RNS.RequestReceipt.READY: - check_timeout() + tc = check_timeout() + if tc: + return tc return request_receipt.get_response() From e0163e100a5541ed9abf4c57bb38960739ca23ea Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Mon, 27 Jan 2025 10:26:11 +0100 Subject: [PATCH 29/54] Updated issue template --- .github/ISSUE_TEMPLATE/🐛-bug-report.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/🐛-bug-report.md b/.github/ISSUE_TEMPLATE/🐛-bug-report.md index 77ad6c2..65b492e 100644 --- a/.github/ISSUE_TEMPLATE/🐛-bug-report.md +++ b/.github/ISSUE_TEMPLATE/🐛-bug-report.md @@ -12,7 +12,7 @@ Before creating a bug report on this issue tracker, you **must** read the [Contr - The issue 
tracker is used by developers of this project. **Do not use it to ask general questions, or for support requests**. - Ideas and feature requests can be made on the [Discussions](https://github.com/markqvist/Reticulum/discussions). **Only** feature requests accepted by maintainers and developers are tracked and included on the issue tracker. **Do not post feature requests here**. -- After reading the [Contribution Guidelines](https://github.com/markqvist/Reticulum/blob/master/Contributing.md), delete this section from your bug report. +- After reading the [Contribution Guidelines](https://github.com/markqvist/Reticulum/blob/master/Contributing.md), **delete this section only** (*"Read the Contribution Guidelines"*) from your bug report, **and fill in all the other sections**. **Describe the Bug** A clear and concise description of what the bug is. From 886ac69a8284e8ca3c3c0a4e2106f7160e8c7f62 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Mon, 27 Jan 2025 12:04:05 +0100 Subject: [PATCH 30/54] Tear down control link after use --- LXMF/Utilities/lxmd.py | 1 + 1 file changed, 1 insertion(+) diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index 73d0eb0..f9a2ef6 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -450,6 +450,7 @@ def query_status(identity, timeout=5, exit_on_fail=False): if tc: return tc + link.teardown() return request_receipt.get_response() def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = 0, timeout=5, show_status=False, show_peers=False, identity_path=None): From e0e901291e20515d12abe105fef30010db7fb1f1 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Mon, 27 Jan 2025 12:04:16 +0100 Subject: [PATCH 31/54] Updated logging --- LXMF/LXMessage.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LXMF/LXMessage.py b/LXMF/LXMessage.py index 2342708..515ab11 100644 --- a/LXMF/LXMessage.py +++ b/LXMF/LXMessage.py @@ -380,7 +380,7 @@ class LXMessage: if self.desired_method == LXMessage.OPPORTUNISTIC: if self.__destination.type == RNS.Destination.SINGLE: if content_size > LXMessage.ENCRYPTED_PACKET_MAX_CONTENT: - RNS.log(f"Opportunistic delivery was requested for {self}, but content exceeds packet size limit. Falling back to link-based delivery.", RNS.LOG_DEBUG) + RNS.log(f"Opportunistic delivery was requested for {self}, but content of length {content_size} exceeds packet size limit. 
Falling back to link-based delivery.", RNS.LOG_DEBUG) self.desired_method = LXMessage.DIRECT # Set delivery parameters according to delivery method From f1d060a92ef9ea9b0a0f3402ff46fc9d91fddd5c Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Wed, 29 Jan 2025 01:26:36 +0100 Subject: [PATCH 32/54] Added peer rotation --- LXMF/LXMPeer.py | 4 +++ LXMF/LXMRouter.py | 68 +++++++++++++++++++++++++++++++++++++++++++---- 2 files changed, 67 insertions(+), 5 deletions(-) diff --git a/LXMF/LXMPeer.py b/LXMF/LXMPeer.py index 5036528..e2f951a 100644 --- a/LXMF/LXMPeer.py +++ b/LXMF/LXMPeer.py @@ -469,6 +469,10 @@ class LXMPeer: return self._um_count + @property + def acceptance_rate(self): + return 0 if self.offered == 0 else (self.outgoing/self.offered) + def _update_counts(self): if not self._hm_counts_synced: hm = self.handled_messages; del hm diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 9eccedc..4bbd24c 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -41,6 +41,7 @@ class LXMRouter: AUTOPEER = True AUTOPEER_MAXDEPTH = 4 FASTEST_N_RANDOM_POOL = 2 + ROTATION_HEADROOM_PCT = 10 PROPAGATION_LIMIT = 256 DELIVERY_LIMIT = 1000 @@ -122,6 +123,7 @@ class LXMRouter: self.propagation_transfer_progress = 0.0 self.propagation_transfer_last_result = None self.propagation_transfer_max_messages = None + self.prioritise_rotating_unreachable_peers = False self.active_propagation_links = [] self.locally_delivered_transient_ids = {} self.locally_processed_transient_ids = {} @@ -783,17 +785,13 @@ class LXMRouter: if self.processing_count % LXMRouter.JOB_PEERINGEST_INTERVAL == 0: if self.propagation_node == True: + self.rotate_peers() self.flush_queues() if self.processing_count % LXMRouter.JOB_PEERSYNC_INTERVAL == 0: if self.propagation_node == True: self.sync_peers() - # def syncstats(self): - # for peer_id in self.peers: - # p = self.peers[peer_id] - # RNS.log(f"{RNS.prettyhexrep(peer_id)} O={p.offered} S={p.outgoing} I={p.incoming} TX={RNS.prettysize(p.tx_bytes)} RX={RNS.prettysize(p.rx_bytes)}") - def jobloop(self): while (True): # TODO: Improve this to scheduling, so manual @@ -1805,6 +1803,66 @@ class LXMRouter: self.peers.pop(destination_hash) RNS.log("Broke peering with "+str(peer.destination)) + def rotate_peers(self): + try: + rotation_headroom = max(1, math.floor(self.max_peers*(LXMRouter.ROTATION_HEADROOM_PCT/100.0))) + required_drops = len(self.peers) - (self.max_peers - rotation_headroom) + if required_drops > 0 and len(self.peers) - required_drops > 1: + peers = self.peers.copy() + untested_peers = [] + for peer_id in self.peers: + peer = self.peers[peer_id] + if peer.last_sync_attempt == 0: + untested_peers.append(peer) + + if len(untested_peers) >= rotation_headroom: + RNS.log("Newly added peer threshold reached, postponing peer rotation", RNS.LOG_DEBUG) + return + + culled_peers = [] + waiting_peers = [] + unresponsive_peers = [] + for peer_id in peers: + peer = peers[peer_id] + if not peer_id in self.static_peers and peer.state == LXMPeer.IDLE: + if peer.alive: + if peer.offered == 0: + # Don't consider for unpeering until at + # least one message has been offered + pass + else: + waiting_peers.append(peer) + else: + unresponsive_peers.append(peer) + + drop_pool = [] + if len(unresponsive_peers) > 0: + drop_pool.extend(unresponsive_peers) + if not self.prioritise_rotating_unreachable_peers: + drop_pool.extend(waiting_peers) + + else: + drop_pool.extend(waiting_peers) + + if len(drop_pool) > 0: + drop_count = min(required_drops, len(drop_pool)) + low_acceptance_rate_peers = sorted( 
+ drop_pool, + key=lambda p: ( 0 if p.offered == 0 else (p.outgoing/p.offered) ), + reverse=False + )[0:drop_count] + + ms = "" if len(low_acceptance_rate_peers) == 1 else "s" + RNS.log(f"Dropping {len(low_acceptance_rate_peers)} lowest acceptance rate peer{ms} to increase peering headroom", RNS.LOG_DEBUG) + for peer in low_acceptance_rate_peers: + ar = 0 if peer.offered == 0 else round((peer.outgoing/peer.offered)*100, 2) + RNS.log(f"Acceptance rate for {RNS.prettyhexrep(peer.destination_hash)} was: {ar}% ({peer.outgoing} / {peer.offered})", RNS.LOG_DEBUG) + self.unpeer(peer.destination_hash) + + except Exception as e: + RNS.log(f"An error occurred during peer rotation: {e}", RNS.LOG_ERROR) + RNS.trace_exception(e) + def sync_peers(self): culled_peers = [] waiting_peers = [] From 40fc75f5593aee19563ed9403170c7b1c938e7fd Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Wed, 29 Jan 2025 14:24:09 +0100 Subject: [PATCH 33/54] Refined peer rotation algorithm --- LXMF/LXMRouter.py | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 4bbd24c..1d8f50e 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -763,6 +763,7 @@ class LXMRouter: JOB_STORE_INTERVAL = 120 JOB_PEERSYNC_INTERVAL = 12 JOB_PEERINGEST_INTERVAL= JOB_PEERSYNC_INTERVAL + JOB_ROTATE_INTERVAL = 675 def jobs(self): if not self.exit_handler_running: self.processing_count += 1 @@ -785,9 +786,12 @@ class LXMRouter: if self.processing_count % LXMRouter.JOB_PEERINGEST_INTERVAL == 0: if self.propagation_node == True: - self.rotate_peers() self.flush_queues() + if self.processing_count % LXMRouter.JOB_ROTATE_INTERVAL == 0: + if self.propagation_node == True: + self.rotate_peers() + if self.processing_count % LXMRouter.JOB_PEERSYNC_INTERVAL == 0: if self.propagation_node == True: self.sync_peers() @@ -1819,6 +1823,17 @@ class LXMRouter: RNS.log("Newly added peer threshold reached, postponing peer rotation", RNS.LOG_DEBUG) return + fully_synced_peers = {} + for peer_id in peers: + peer = peers[peer_id] + if peer.unhandled_message_count == 0: + fully_synced_peers[peer_id] = peer + + if len(fully_synced_peers) > 0: + peers = fully_synced_peers + ms = "" if len(fully_synced_peers) == 1 else "s" + RNS.log(f"Found {len(fully_synced_peers)} fully synced peer{ms}, using as peer rotation pool basis", RNS.LOG_DEBUG) + culled_peers = [] waiting_peers = [] unresponsive_peers = [] @@ -1856,7 +1871,8 @@ class LXMRouter: RNS.log(f"Dropping {len(low_acceptance_rate_peers)} lowest acceptance rate peer{ms} to increase peering headroom", RNS.LOG_DEBUG) for peer in low_acceptance_rate_peers: ar = 0 if peer.offered == 0 else round((peer.outgoing/peer.offered)*100, 2) - RNS.log(f"Acceptance rate for {RNS.prettyhexrep(peer.destination_hash)} was: {ar}% ({peer.outgoing} / {peer.offered})", RNS.LOG_DEBUG) + reachable_str = "reachable" if peer.alive else "unreachable" + RNS.log(f"Acceptance rate for {reachable_str} peer {RNS.prettyhexrep(peer.destination_hash)} was: {ar}% ({peer.outgoing}/{peer.offered}, {peer.unhandled_message_count} unhandled messages)", RNS.LOG_DEBUG) self.unpeer(peer.destination_hash) except Exception as e: From 40d0b9a5deca5fb054946dfcf37d2442bdac4469 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Wed, 29 Jan 2025 21:21:51 +0100 Subject: [PATCH 34/54] Added acceptance rate threshold to peer rotation --- LXMF/LXMRouter.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 1d8f50e..df340d2 
100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -42,6 +42,7 @@ class LXMRouter: AUTOPEER_MAXDEPTH = 4 FASTEST_N_RANDOM_POOL = 2 ROTATION_HEADROOM_PCT = 10 + ROTATION_AR_MAX = 0.5 PROPAGATION_LIMIT = 256 DELIVERY_LIMIT = 1000 @@ -1867,13 +1868,16 @@ class LXMRouter: reverse=False )[0:drop_count] - ms = "" if len(low_acceptance_rate_peers) == 1 else "s" - RNS.log(f"Dropping {len(low_acceptance_rate_peers)} lowest acceptance rate peer{ms} to increase peering headroom", RNS.LOG_DEBUG) + dropped_peers = 0 for peer in low_acceptance_rate_peers: ar = 0 if peer.offered == 0 else round((peer.outgoing/peer.offered)*100, 2) - reachable_str = "reachable" if peer.alive else "unreachable" - RNS.log(f"Acceptance rate for {reachable_str} peer {RNS.prettyhexrep(peer.destination_hash)} was: {ar}% ({peer.outgoing}/{peer.offered}, {peer.unhandled_message_count} unhandled messages)", RNS.LOG_DEBUG) - self.unpeer(peer.destination_hash) + if ar < LXMRouter.ROTATION_AR_MAX*100: + reachable_str = "reachable" if peer.alive else "unreachable" + RNS.log(f"Acceptance rate for {reachable_str} peer {RNS.prettyhexrep(peer.destination_hash)} was: {ar}% ({peer.outgoing}/{peer.offered}, {peer.unhandled_message_count} unhandled messages)", RNS.LOG_DEBUG) + self.unpeer(peer.destination_hash) + + ms = "" if dropped_peers == 1 else "s" + RNS.log(f"Dropped {dropped_peers} low acceptance rate peer{ms} to increase peering headroom", RNS.LOG_DEBUG) except Exception as e: RNS.log(f"An error occurred during peer rotation: {e}", RNS.LOG_ERROR) From b7b67536400e768658dcc9cf63406ccf9baba468 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 30 Jan 2025 00:37:50 +0100 Subject: [PATCH 35/54] Fixed potential division by zero. Fixes #25. --- LXMF/LXMRouter.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index df340d2..7bb44a5 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -507,8 +507,10 @@ class LXMRouter: except Exception as e: RNS.log("Could not read LXM from message store. 
The contained exception was: "+str(e), RNS.LOG_ERROR) - et = time.time(); RNS.log(f"Indexed {len(self.propagation_entries)} messages in {RNS.prettytime(et-st)}, {math.floor(len(self.propagation_entries)/(et-st))} msgs/s", RNS.LOG_NOTICE) - st = time.time(); RNS.log("Rebuilding peer synchronisation states...", RNS.LOG_NOTICE) + et = time.time(); mps = 0 if et-st == 0 else math.floor(len(self.propagation_entries)/(et-st)) + RNS.log(f"Indexed {len(self.propagation_entries)} messages in {RNS.prettytime(et-st)}, {mps} msgs/s", RNS.LOG_NOTICE) + RNS.log("Rebuilding peer synchronisation states...", RNS.LOG_NOTICE) + st = time.time(); if os.path.isfile(self.storagepath+"/peers"): peers_file = open(self.storagepath+"/peers", "rb") @@ -1875,6 +1877,7 @@ class LXMRouter: reachable_str = "reachable" if peer.alive else "unreachable" RNS.log(f"Acceptance rate for {reachable_str} peer {RNS.prettyhexrep(peer.destination_hash)} was: {ar}% ({peer.outgoing}/{peer.offered}, {peer.unhandled_message_count} unhandled messages)", RNS.LOG_DEBUG) self.unpeer(peer.destination_hash) + dropped_peers += 1 ms = "" if dropped_peers == 1 else "s" RNS.log(f"Dropped {dropped_peers} low acceptance rate peer{ms} to increase peering headroom", RNS.LOG_DEBUG) From 9eca747757933d283922923c3b598d68a32f7902 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 30 Jan 2025 10:46:31 +0100 Subject: [PATCH 36/54] Updated peer rotation timing to align with distribution queue mapping --- LXMF/LXMRouter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 7bb44a5..a364a12 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -766,7 +766,7 @@ class LXMRouter: JOB_STORE_INTERVAL = 120 JOB_PEERSYNC_INTERVAL = 12 JOB_PEERINGEST_INTERVAL= JOB_PEERSYNC_INTERVAL - JOB_ROTATE_INTERVAL = 675 + JOB_ROTATE_INTERVAL = 56*JOB_PEERINGEST_INTERVAL def jobs(self): if not self.exit_handler_running: self.processing_count += 1 From f42ccfc4e93b9a85ed32a6ebc3b5f3ed21a24b49 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 30 Jan 2025 11:23:18 +0100 Subject: [PATCH 37/54] Automatically clean messages exceeding propagation transfer limit for peer from unhandled message queues --- LXMF/LXMPeer.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/LXMF/LXMPeer.py b/LXMF/LXMPeer.py index e2f951a..a777b86 100644 --- a/LXMF/LXMPeer.py +++ b/LXMF/LXMPeer.py @@ -250,7 +250,10 @@ class LXMPeer: lxm_size = unhandled_entry[2] next_size = cumulative_size + (lxm_size+per_message_overhead) if self.propagation_transfer_limit != None and next_size > (self.propagation_transfer_limit*1000): - pass + if lxm_size+per_message_overhead > self.propagation_transfer_limit: + RNS.log(f"Message {RNS.prettyhexrep(transient_id)} exceeds transfer limit for {self}, considering handled", RNS.LOG_DEBUG) + self.remove_unhandled_message(transient_id) + self.add_handled_message(transient_id) else: cumulative_size += (lxm_size+per_message_overhead) unhandled_ids.append(transient_id) From b94a712bb626e83755fb54694a2e6a30690957f8 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 30 Jan 2025 11:30:45 +0100 Subject: [PATCH 38/54] Automatically clean messages exceeding propagation transfer limit for peer from unhandled message queues --- LXMF/LXMPeer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LXMF/LXMPeer.py b/LXMF/LXMPeer.py index a777b86..a2b6b18 100644 --- a/LXMF/LXMPeer.py +++ b/LXMF/LXMPeer.py @@ -250,7 +250,7 @@ class LXMPeer: lxm_size = unhandled_entry[2] next_size = 
cumulative_size + (lxm_size+per_message_overhead) if self.propagation_transfer_limit != None and next_size > (self.propagation_transfer_limit*1000): - if lxm_size+per_message_overhead > self.propagation_transfer_limit: + if lxm_size+per_message_overhead > (self.propagation_transfer_limit*1000): RNS.log(f"Message {RNS.prettyhexrep(transient_id)} exceeds transfer limit for {self}, considering handled", RNS.LOG_DEBUG) self.remove_unhandled_message(transient_id) self.add_handled_message(transient_id) From 7b4780cfb7537ccc114c9a0d99da7dc6928eb113 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 30 Jan 2025 11:36:11 +0100 Subject: [PATCH 39/54] Automatically clean messages exceeding propagation transfer limit for peer from unhandled message queues --- LXMF/LXMPeer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LXMF/LXMPeer.py b/LXMF/LXMPeer.py index a2b6b18..c1294bd 100644 --- a/LXMF/LXMPeer.py +++ b/LXMF/LXMPeer.py @@ -251,9 +251,9 @@ class LXMPeer: next_size = cumulative_size + (lxm_size+per_message_overhead) if self.propagation_transfer_limit != None and next_size > (self.propagation_transfer_limit*1000): if lxm_size+per_message_overhead > (self.propagation_transfer_limit*1000): - RNS.log(f"Message {RNS.prettyhexrep(transient_id)} exceeds transfer limit for {self}, considering handled", RNS.LOG_DEBUG) self.remove_unhandled_message(transient_id) self.add_handled_message(transient_id) + RNS.log(f"Message {RNS.prettyhexrep(transient_id)} exceeds transfer limit for {self}, considering handled", RNS.LOG_DEBUG) else: cumulative_size += (lxm_size+per_message_overhead) unhandled_ids.append(transient_id) From c0a8f3be498514611ccb7e44925bf78afd5d71ac Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 30 Jan 2025 15:04:21 +0100 Subject: [PATCH 40/54] Cleanup --- LXMF/Handlers.py | 10 ++++++---- LXMF/LXMRouter.py | 5 +++-- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/LXMF/Handlers.py b/LXMF/Handlers.py index 26a5df6..9876f4c 100644 --- a/LXMF/Handlers.py +++ b/LXMF/Handlers.py @@ -47,10 +47,12 @@ class LXMFPropagationAnnounceHandler: propagation_transfer_limit = None wanted_inbound_peers = None if len(data) >= 4: - try: - wanted_inbound_peers = int(data[3]) - except: - wanted_inbound_peers = None + # TODO: Rethink, probably not necessary anymore + # try: + # wanted_inbound_peers = int(data[3]) + # except: + # wanted_inbound_peers = None + if len(data) >= 3: try: propagation_transfer_limit = float(data[2]) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index a364a12..070dc71 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -286,7 +286,7 @@ class LXMRouter: node_state, # Boolean flag signalling propagation node state int(time.time()), # Current node timebase self.propagation_per_transfer_limit, # Per-transfer limit for message propagation in kilobytes - self.get_wanted_inbound_peers(), # How many more inbound peers this node wants + None, # How many more inbound peers this node wants ] data = msgpack.packb(announce_data) @@ -895,7 +895,8 @@ class LXMRouter: threading.Thread(target=self.save_outbound_stamp_costs, daemon=True).start() def get_wanted_inbound_peers(self): - # TODO: Implement + # TODO: Implement/rethink. + # Probably not necessary anymore. 
return None def get_announce_app_data(self, destination_hash): From a6cf585109a354554fb223394db3405ed0b9510c Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 30 Jan 2025 15:11:26 +0100 Subject: [PATCH 41/54] Cleanup --- LXMF/Handlers.py | 1 + 1 file changed, 1 insertion(+) diff --git a/LXMF/Handlers.py b/LXMF/Handlers.py index 9876f4c..0705074 100644 --- a/LXMF/Handlers.py +++ b/LXMF/Handlers.py @@ -52,6 +52,7 @@ class LXMFPropagationAnnounceHandler: # wanted_inbound_peers = int(data[3]) # except: # wanted_inbound_peers = None + pass if len(data) >= 3: try: From d5540b927fd96d171a096e85efac64a3de921d37 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Fri, 31 Jan 2025 13:38:56 +0100 Subject: [PATCH 42/54] Added allow_duplicate option to message ingest API --- LXMF/LXMRouter.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 070dc71..d631841 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -1618,7 +1618,7 @@ class LXMRouter: ### Message Routing & Delivery ######################## ####################################################### - def lxmf_delivery(self, lxmf_data, destination_type = None, phy_stats = None, ratchet_id = None, method = None, no_stamp_enforcement=False): + def lxmf_delivery(self, lxmf_data, destination_type = None, phy_stats = None, ratchet_id = None, method = None, no_stamp_enforcement=False, allow_duplicate=False): try: message = LXMessage.unpack_from_bytes(lxmf_data) if ratchet_id and not message.ratchet_id: @@ -1685,7 +1685,7 @@ class LXMRouter: RNS.log(str(self)+" ignored message from "+RNS.prettyhexrep(message.source_hash), RNS.LOG_DEBUG) return False - if self.has_message(message.hash): + if not allow_duplicate and self.has_message(message.hash): RNS.log(str(self)+" ignored already received message from "+RNS.prettyhexrep(message.source_hash), RNS.LOG_DEBUG) return False else: @@ -2107,7 +2107,7 @@ class LXMRouter: if peer != from_peer: peer.queue_unhandled_message(transient_id) - def lxmf_propagation(self, lxmf_data, signal_local_delivery=None, signal_duplicate=None, is_paper_message=False, from_peer=None): + def lxmf_propagation(self, lxmf_data, signal_local_delivery=None, signal_duplicate=None, allow_duplicate=False, is_paper_message=False, from_peer=None): no_stamp_enforcement = False if is_paper_message: no_stamp_enforcement = True @@ -2116,9 +2116,10 @@ class LXMRouter: if len(lxmf_data) >= LXMessage.LXMF_OVERHEAD: transient_id = RNS.Identity.full_hash(lxmf_data) - if not transient_id in self.propagation_entries and not transient_id in self.locally_processed_transient_ids: + if (not transient_id in self.propagation_entries and not transient_id in self.locally_processed_transient_ids) or allow_duplicate == True: received = time.time() destination_hash = lxmf_data[:LXMessage.DESTINATION_LENGTH] + RNS.log("GOT MESSAGE FOR "+RNS.prettyhexrep(destination_hash)) self.locally_processed_transient_ids[transient_id] = received @@ -2128,7 +2129,7 @@ class LXMRouter: decrypted_lxmf_data = delivery_destination.decrypt(encrypted_lxmf_data) if decrypted_lxmf_data != None: delivery_data = lxmf_data[:LXMessage.DESTINATION_LENGTH]+decrypted_lxmf_data - self.lxmf_delivery(delivery_data, delivery_destination.type, ratchet_id=delivery_destination.latest_ratchet_id, method=LXMessage.PROPAGATED, no_stamp_enforcement=no_stamp_enforcement) + self.lxmf_delivery(delivery_data, delivery_destination.type, ratchet_id=delivery_destination.latest_ratchet_id, method=LXMessage.PROPAGATED, 
no_stamp_enforcement=no_stamp_enforcement, allow_duplicate=allow_duplicate) self.locally_delivered_transient_ids[transient_id] = time.time() if signal_local_delivery != None: @@ -2166,7 +2167,7 @@ class LXMRouter: RNS.trace_exception(e) return False - def ingest_lxm_uri(self, uri, signal_local_delivery=None, signal_duplicate=None): + def ingest_lxm_uri(self, uri, signal_local_delivery=None, signal_duplicate=None, allow_duplicate=False): try: if not uri.lower().startswith(LXMessage.URI_SCHEMA+"://"): RNS.log("Cannot ingest LXM, invalid URI provided.", RNS.LOG_ERROR) @@ -2176,7 +2177,7 @@ class LXMRouter: lxmf_data = base64.urlsafe_b64decode(uri.replace(LXMessage.URI_SCHEMA+"://", "").replace("/", "")+"==") transient_id = RNS.Identity.full_hash(lxmf_data) - router_propagation_result = self.lxmf_propagation(lxmf_data, signal_local_delivery=signal_local_delivery, signal_duplicate=signal_duplicate, is_paper_message=True) + router_propagation_result = self.lxmf_propagation(lxmf_data, signal_local_delivery=signal_local_delivery, signal_duplicate=signal_duplicate, allow_duplicate=allow_duplicate, is_paper_message=True) if router_propagation_result != False: RNS.log("LXM with transient ID "+RNS.prettyhexrep(transient_id)+" was ingested.", RNS.LOG_DEBUG) return router_propagation_result From 1ef46650738f4ddc67579080d2eab60c9affcfa8 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Tue, 18 Feb 2025 20:05:19 +0100 Subject: [PATCH 43/54] Cleanup --- LXMF/LXMRouter.py | 1 - requirements.txt | 5 ++--- setup.py | 4 ++-- 3 files changed, 4 insertions(+), 6 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index d631841..d502ee6 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -2119,7 +2119,6 @@ class LXMRouter: if (not transient_id in self.propagation_entries and not transient_id in self.locally_processed_transient_ids) or allow_duplicate == True: received = time.time() destination_hash = lxmf_data[:LXMessage.DESTINATION_LENGTH] - RNS.log("GOT MESSAGE FOR "+RNS.prettyhexrep(destination_hash)) self.locally_processed_transient_ids[transient_id] = received diff --git a/requirements.txt b/requirements.txt index 6b7926a..2f4f642 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,3 +1,2 @@ -qrcode==7.4.2 -rns==0.7.8 -setuptools==70.0.0 +qrcode>=7.4.2 +rns>=0.9.1 diff --git a/setup.py b/setup.py index cabf20a..e01e9eb 100644 --- a/setup.py +++ b/setup.py @@ -25,6 +25,6 @@ setuptools.setup( 'lxmd=LXMF.Utilities.lxmd:main', ] }, - install_requires=['rns>=0.9.1'], - python_requires='>=3.7', + install_requires=["rns>=0.9.2"], + python_requires=">=3.7", ) From 570d2c68467e0614609df4dc89d295793e2a4878 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Fri, 7 Mar 2025 11:05:50 +0100 Subject: [PATCH 44/54] Added configuration options to default config file --- LXMF/Utilities/lxmd.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index f9a2ef6..48885b2 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -710,6 +710,25 @@ propagation_transfer_max_accepted_size = 256 # prioritise_destinations = 41d20c727598a3fbbdf9106133a3a0ed, d924b81822ca24e68e2effea99bcb8cf +# You can configure the maximum number of other +# propagation nodes that this node will peer +# with automatically. The default is 50. + +# max_peers = 25 + +# You can configure a list of static propagation +# node peers, that this node will always be +# peered with, by specifying a list of +# destination hashes. 
+ +# static_peers = e17f833c4ddf8890dd3a79a6fea8161d, 5a2d0029b6e5ec87020abaea0d746da4 + +# You can configure the propagation node to +# only accept incoming propagation messages +# from configured static peers. + +# from_static_only = True + # By default, any destination is allowed to # connect and download messages, but you can # optionally restrict this. If you enable From 336792c07a47ca60dcf2df1295b8065f0b458a22 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 13 Mar 2025 19:45:15 +0100 Subject: [PATCH 45/54] Updated dependencies --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index e01e9eb..19737d6 100644 --- a/setup.py +++ b/setup.py @@ -25,6 +25,6 @@ setuptools.setup( 'lxmd=LXMF.Utilities.lxmd:main', ] }, - install_requires=["rns>=0.9.2"], + install_requires=["rns>=0.9.3"], python_requires=">=3.7", ) From 326c0eed8f767e298eb3f09340a41314c254ddda Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 13 Mar 2025 19:46:11 +0100 Subject: [PATCH 46/54] Updated version --- LXMF/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LXMF/_version.py b/LXMF/_version.py index 22049ab..63af887 100644 --- a/LXMF/_version.py +++ b/LXMF/_version.py @@ -1 +1 @@ -__version__ = "0.6.2" +__version__ = "0.6.3" From e6021b8fed0ffcffcc6a7b9bbdb7d0d1a282ba8f Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Tue, 15 Apr 2025 20:21:16 +0200 Subject: [PATCH 47/54] Updated license --- LICENSE | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/LICENSE b/LICENSE index a25bd7a..eda392f 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ -MIT License +Reticulum License -Copyright (c) 2020 Mark Qvist / unsigned.io +Copyright (c) 2025 Mark Qvist / unsigned.io Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -9,8 +9,16 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. +- The Software shall not be used in any kind of system which includes amongst + its functions the ability to purposefully do harm to human beings. + +- The Software shall not be used, directly or indirectly, in the creation of + an artificial intelligence, machine learning or language model training + dataset, including but not limited to any use that contributes to the + training or development of such a model or algorithm. + +- The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, From 1bdcf6ad534c3d34e4966d23bb04a129fd5ebc4f Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Tue, 15 Apr 2025 20:21:54 +0200 Subject: [PATCH 48/54] Updated license --- LICENSE | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LICENSE b/LICENSE index eda392f..f5fb92d 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ Reticulum License -Copyright (c) 2025 Mark Qvist / unsigned.io +Copyright (c) 2020-2025 Mark Qvist Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal From 005d71707c1f99a721e78196e2ecfad1df1c11fa Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 17 Apr 2025 13:31:00 +0200 Subject: [PATCH 49/54] Cleanup --- LXMF/Handlers.py | 14 +++++++------- LXMF/LXMRouter.py | 3 +-- 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/LXMF/Handlers.py b/LXMF/Handlers.py index 0705074..5671170 100644 --- a/LXMF/Handlers.py +++ b/LXMF/Handlers.py @@ -1,4 +1,5 @@ import time +import threading import RNS import RNS.vendor.umsgpack as msgpack @@ -17,10 +18,11 @@ class LXMFDeliveryAnnounceHandler: if lxmessage.method == LXMessage.DIRECT or lxmessage.method == LXMessage.OPPORTUNISTIC: lxmessage.next_delivery_attempt = time.time() - while self.lxmrouter.processing_outbound: - time.sleep(0.1) + def outbound_trigger(): + while self.lxmrouter.processing_outbound: time.sleep(0.1) + self.lxmrouter.process_outbound() - self.lxmrouter.process_outbound() + threading.Thread(target=outbound_trigger, daemon=True).start() try: stamp_cost = stamp_cost_from_app_data(app_data) @@ -55,10 +57,8 @@ class LXMFPropagationAnnounceHandler: pass if len(data) >= 3: - try: - propagation_transfer_limit = float(data[2]) - except: - propagation_transfer_limit = None + try: propagation_transfer_limit = float(data[2]) + except: propagation_transfer_limit = None if destination_hash in self.lxmrouter.static_peers: self.lxmrouter.peer(destination_hash, node_timebase, propagation_transfer_limit, wanted_inbound_peers) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index d502ee6..8da1476 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -2301,8 +2301,7 @@ class LXMRouter: else: RNS.log("Outbound processing for "+str(lxmessage)+" to "+RNS.prettyhexrep(lxmessage.get_destination().hash), RNS.LOG_DEBUG) - if lxmessage.progress == None or lxmessage.progress < 0.01: - lxmessage.progress = 0.01 + if lxmessage.progress == None or lxmessage.progress < 0.01: lxmessage.progress = 0.01 # Outbound handling for opportunistic messages if lxmessage.method == LXMessage.OPPORTUNISTIC: From 37e99910ec8fb8f1b6df05567aa5ba2009ed9edc Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Mon, 12 May 2025 11:58:24 +0200 Subject: [PATCH 50/54] Updated version and RNS dependency version --- LXMF/_version.py | 2 +- setup.py | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/LXMF/_version.py b/LXMF/_version.py index 63af887..49e0fc1 100644 --- a/LXMF/_version.py +++ b/LXMF/_version.py @@ -1 +1 @@ -__version__ = "0.6.3" +__version__ = "0.7.0" diff --git a/setup.py b/setup.py index 19737d6..724705f 100644 --- a/setup.py +++ b/setup.py @@ -15,9 +15,10 @@ setuptools.setup( long_description_content_type="text/markdown", url="https://github.com/markqvist/lxmf", packages=["LXMF", "LXMF.Utilities"], + license="Reticulum License", + license_files = ("LICENSE"), 
classifiers=[ "Programming Language :: Python :: 3", - "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", ], entry_points= { @@ -25,6 +26,6 @@ setuptools.setup( 'lxmd=LXMF.Utilities.lxmd:main', ] }, - install_requires=["rns>=0.9.3"], + install_requires=["rns>=0.9.5"], python_requires=">=3.7", ) From 499fe4cc5381ffc641cbc04b7d27cf931cae3bb5 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 15 May 2025 20:27:19 +0200 Subject: [PATCH 51/54] Use no_data_for instead of inactive_for for cleaning links --- LXMF/LXMRouter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 8da1476..5b7a5c2 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -827,7 +827,7 @@ class LXMRouter: closed_links = [] for link_hash in self.direct_links: link = self.direct_links[link_hash] - inactive_time = link.inactive_for() + inactive_time = link.no_data_for() if inactive_time > LXMRouter.LINK_MAX_INACTIVITY: link.teardown() From a9622e3a332fd36ffca2b045e42d22c07779266c Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 15 May 2025 20:30:12 +0200 Subject: [PATCH 52/54] Updated version --- LXMF/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LXMF/_version.py b/LXMF/_version.py index 49e0fc1..a5f830a 100644 --- a/LXMF/_version.py +++ b/LXMF/_version.py @@ -1 +1 @@ -__version__ = "0.7.0" +__version__ = "0.7.1" From c2207d1eb79db474a618b2ffd40d6be3ffc2a00a Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sat, 17 May 2025 10:27:21 +0200 Subject: [PATCH 53/54] Added funding --- FUNDING.yml | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 FUNDING.yml diff --git a/FUNDING.yml b/FUNDING.yml new file mode 100644 index 0000000..d125d55 --- /dev/null +++ b/FUNDING.yml @@ -0,0 +1,3 @@ +liberapay: Reticulum +ko_fi: markqvist +custom: "https://unsigned.io/donate" From 787cd069dc98ebab80afdef726a841c45f38566f Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Mon, 26 May 2025 20:57:46 +0200 Subject: [PATCH 54/54] Fixed division by zero. Closes #30. --- LXMF/Utilities/lxmd.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index 48885b2..a4ccaf5 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -529,14 +529,14 @@ def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = peered_outgoing += pm["outgoing"] peered_rx_bytes += p["rx_bytes"] peered_tx_bytes += p["tx_bytes"] - if p["alive"]: - available_peers += 1 - else: - unreachable_peers += 1 + + if p["alive"]: available_peers += 1 + else: unreachable_peers += 1 total_incoming = peered_incoming+s["unpeered_propagation_incoming"]+s["clients"]["client_propagation_messages_received"] total_rx_bytes = peered_rx_bytes+s["unpeered_propagation_rx_bytes"] - df = round(peered_outgoing/total_incoming, 2) + if total_incoming != 0: df = round(peered_outgoing/total_incoming, 2) + else: df = 0 dhs = RNS.prettyhexrep(s["destination_hash"]); uts = RNS.prettytime(s["uptime"]) print(f"\nLXMF Propagation Node running on {dhs}, uptime is {uts}")