From d8b25e092f0ba8693964fa75a6a500ba220b3257 Mon Sep 17 00:00:00 2001
From: Mark Qvist
Date: Thu, 30 Oct 2025 16:43:26 +0100
Subject: [PATCH] Added metadata structure to propagation node announces

---
 LXMF/Handlers.py  | 23 +++++++++++++----------
 LXMF/LXMF.py      | 28 ++++++++++++++++++++--------
 LXMF/LXMRouter.py |  6 ++++--
 LXMF/LXStamper.py |  2 +-
 4 files changed, 38 insertions(+), 21 deletions(-)

diff --git a/LXMF/Handlers.py b/LXMF/Handlers.py
index b5f2659..40c416d 100644
--- a/LXMF/Handlers.py
+++ b/LXMF/Handlers.py
@@ -45,12 +45,13 @@ class LXMFPropagationAnnounceHandler:
             if self.lxmrouter.propagation_node:
                 data = msgpack.unpackb(app_data)
                 if pn_announce_data_is_valid(data):
-                    pn_active = data[0]
-                    node_timebase = int(data[1])
-                    propagation_transfer_limit = int(data[2])
-                    propagation_sync_limit = int(data[3])
-                    propagation_stamp_cost = int(data[4][0])
-                    propagation_stamp_cost_flexibility = int(data[4][1])
+                    metadata = data[0]
+                    propagation_enabled = data[1]
+                    node_timebase = int(data[2])
+                    propagation_transfer_limit = int(data[3])
+                    propagation_sync_limit = int(data[4])
+                    propagation_stamp_cost = int(data[5][0])
+                    propagation_stamp_cost_flexibility = int(data[5][1])
 
                     if destination_hash in self.lxmrouter.static_peers:
                         self.lxmrouter.peer(destination_hash=destination_hash,
@@ -58,20 +59,22 @@ class LXMFPropagationAnnounceHandler:
                                             propagation_transfer_limit=propagation_transfer_limit,
                                             propagation_sync_limit=propagation_sync_limit,
                                             propagation_stamp_cost=propagation_stamp_cost,
-                                            propagation_stamp_cost_flexibility=propagation_stamp_cost_flexibility)
+                                            propagation_stamp_cost_flexibility=propagation_stamp_cost_flexibility,
+                                            metadata=metadata)
 
                     else:
                         if self.lxmrouter.autopeer:
-                            if pn_active == True:
+                            if propagation_enabled == True:
                                 if RNS.Transport.hops_to(destination_hash) <= self.lxmrouter.autopeer_maxdepth:
                                     self.lxmrouter.peer(destination_hash=destination_hash,
                                                         timestamp=node_timebase,
                                                         propagation_transfer_limit=propagation_transfer_limit,
                                                         propagation_sync_limit=propagation_sync_limit,
                                                         propagation_stamp_cost=propagation_stamp_cost,
-                                                        propagation_stamp_cost_flexibility=propagation_stamp_cost_flexibility)
+                                                        propagation_stamp_cost_flexibility=propagation_stamp_cost_flexibility,
+                                                        metadata=metadata)
 
-                            elif pn_active == False:
+                            elif propagation_enabled == False:
                                 self.lxmrouter.unpeer(destination_hash, node_timebase)
 
         except Exception as e:
diff --git a/LXMF/LXMF.py b/LXMF/LXMF.py
index 8d1aaf7..8342d33 100644
--- a/LXMF/LXMF.py
+++ b/LXMF/LXMF.py
@@ -91,6 +91,17 @@ RENDERER_MICRON = 0x01
 RENDERER_MARKDOWN = 0x02
 RENDERER_BBCODE = 0x03
 
+# Optional propagation node metadata fields. These
+# fields may be highly unstable in allocation and
+# availability until the version 1.0.0 release, so use
+# at your own risk until then, and expect changes!
+PN_META_VERSION = 0x00
+PN_META_NAME = 0x01
+PN_META_SYNC_STRATUM = 0x02
+PN_META_SYNC_THROTTLE = 0x03
+PN_META_AUTH_BAND = 0x04
+PN_META_UTIL_PRESSURE = 0x05
+
 ##########################################################
 # The following helper functions makes it easier to      #
 # handle and operate on LXMF data in client programs     #
@@ -138,19 +149,20 @@ def stamp_cost_from_app_data(app_data=None):
 def pn_announce_data_is_valid(data):
     try:
         if type(data) == bytes: data = msgpack.unpackb(data)
-        if len(data) < 5: raise ValueError("Invalid announce data: Insufficient peer data")
+        if len(data) < 6: raise ValueError("Invalid announce data: Insufficient peer data")
         else:
-            if data[0] != True and data[0] != False: raise ValueError("Invalid announce data: Indeterminate propagation node status")
-            try: int(data[1])
-            except: raise ValueError("Invalid announce data: Could not decode peer timebase")
+            if type(data[0]) != dict: raise ValueError("Invalid announce data: Could not decode peer metadata")
+            if data[1] != True and data[1] != False: raise ValueError("Invalid announce data: Indeterminate propagation node status")
             try: int(data[2])
-            except: raise ValueError("Invalid announce data: Could not decode peer propagation transfer limit")
+            except: raise ValueError("Invalid announce data: Could not decode peer timebase")
             try: int(data[3])
+            except: raise ValueError("Invalid announce data: Could not decode peer propagation transfer limit")
+            try: int(data[4])
             except: raise ValueError("Invalid announce data: Could not decode peer propagation sync limit")
-            if type(data[4]) != list: raise ValueError("Invalid announce data: Could not decode peer stamp costs")
-            try: int(data[4][0])
+            if type(data[5]) != list: raise ValueError("Invalid announce data: Could not decode peer stamp costs")
+            try: int(data[5][0])
             except: raise ValueError("Invalid announce data: Could not decode peer target stamp cost")
-            try: int(data[4][1])
+            try: int(data[5][1])
             except: raise ValueError("Invalid announce data: Could not decode peer stamp cost flexibility")
 
     except Exception as e:
diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py
index eef9fee..4f31920 100644
--- a/LXMF/LXMRouter.py
+++ b/LXMF/LXMRouter.py
@@ -282,9 +282,11 @@ class LXMRouter:
     def announce_propagation_node(self):
         def delayed_announce():
             time.sleep(LXMRouter.NODE_ANNOUNCE_DELAY)
-            node_state = self.propagation_node and not self.from_static_only
-            stamp_cost = [self.propagation_stamp_cost, self.propagation_stamp_cost_flexibility]
+            node_state    = self.propagation_node and not self.from_static_only
+            stamp_cost    = [self.propagation_stamp_cost, self.propagation_stamp_cost_flexibility]
+            metadata      = {}
             announce_data = [
+                metadata,                              # Node metadata
                 node_state,                            # Boolean flag signalling propagation node state
                 int(time.time()),                      # Current node timebase
                 self.propagation_per_transfer_limit,   # Per-transfer limit for message propagation in kilobytes
diff --git a/LXMF/LXStamper.py b/LXMF/LXStamper.py
index a9ca7d6..2db0598 100644
--- a/LXMF/LXStamper.py
+++ b/LXMF/LXStamper.py
@@ -76,7 +76,7 @@ def validate_pn_stamps_job_multip(transient_stamps):
     cores = multiprocessing.cpu_count()
     pool_count = min(cores, math.ceil(len(transient_stamps) / PN_VALIDATION_POOL_MIN_SIZE))
 
-    RNS.log(f"Validating {len(transient_stamps)} stamps using {pool_count} processes...")
+    RNS.log(f"Validating {len(transient_stamps)} stamps using {pool_count} processes...", RNS.LOG_VERBOSE)
     with multiprocessing.Pool(pool_count) as p:
         validated_entries = p.map(_validate_single_pn_stamp_entry, transient_stamps)
         return validated_entries
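
The sketch below is for orientation only: it assembles an announce payload with the layout this patch introduces and runs it through the patched validator. It assumes RNS and an LXMF checkout containing this patch are importable; the metadata values, transfer/sync limits and stamp cost numbers are invented examples, and the PN_META field allocation is, as the patch itself notes, unstable until the 1.0.0 release.

    import time

    from RNS.vendor import umsgpack as msgpack   # serializer vendored with RNS
    from LXMF.LXMF import PN_META_VERSION, PN_META_NAME, pn_announce_data_is_valid

    # Illustrative metadata: the keys come from this patch, the values are made up.
    metadata = {
        PN_META_VERSION: "0.9.0",
        PN_META_NAME: "Example Propagation Node",
    }

    # Announce payload layout introduced by this patch (indices 0..5).
    announce_data = [
        metadata,          # 0: optional node metadata dict (new in this patch)
        True,              # 1: propagation node enabled flag
        int(time.time()),  # 2: node timebase
        256,               # 3: per-transfer limit in kilobytes (example value)
        4096,              # 4: per-sync limit in kilobytes (example value)
        [16, 2],           # 5: [target stamp cost, stamp cost flexibility] (example values)
    ]

    packed = msgpack.packb(announce_data)
    assert pn_announce_data_is_valid(packed)

Passing the packed bytes directly works because the validator unpacks bytes input itself before checking the six fields.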
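
A corresponding consumer-side sketch, under the same assumptions: summarize_pn_announce is a hypothetical helper, not part of LXMF, and it reads the metadata fields tolerantly with .get(), since every PN_META field is optional and a propagation node may announce an empty metadata dict (as LXMRouter does in this patch).

    from RNS.vendor import umsgpack as msgpack
    from LXMF.LXMF import PN_META_NAME, PN_META_SYNC_STRATUM, pn_announce_data_is_valid

    def summarize_pn_announce(app_data: bytes):
        # Returns a small summary dict for a propagation node announce, or None if invalid.
        data = msgpack.unpackb(app_data)
        if not pn_announce_data_is_valid(data): return None

        metadata = data[0]  # new in this patch; may be empty
        return {
            "enabled":      data[1],
            "timebase":     int(data[2]),
            "name":         metadata.get(PN_META_NAME),          # optional
            "sync_stratum": metadata.get(PN_META_SYNC_STRATUM),  # optional
            "stamp_cost":   int(data[5][0]),
        }

Running it on the packed bytes from the previous sketch would yield the enabled flag, timebase, stamp cost and example node name, with None for the unset sync stratum.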