diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 0000000..caf3250 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,11 @@ +blank_issues_enabled: false +contact_links: + - name: ✨ Feature Request or Idea + url: https://github.com/markqvist/Reticulum/discussions/new?category=ideas + about: Propose and discuss features and ideas + - name: 💬 Questions, Help & Discussion + about: Ask anything, or get help + url: https://github.com/markqvist/Reticulum/discussions/new/choose + - name: 📖 Read the Reticulum Manual + url: https://markqvist.github.io/Reticulum/manual/ + about: The complete documentation for Reticulum diff --git a/.github/ISSUE_TEMPLATE/🐛-bug-report.md b/.github/ISSUE_TEMPLATE/🐛-bug-report.md new file mode 100644 index 0000000..65b492e --- /dev/null +++ b/.github/ISSUE_TEMPLATE/🐛-bug-report.md @@ -0,0 +1,35 @@ +--- +name: "\U0001F41B Bug Report" +about: Report a reproducible bug +title: '' +labels: '' +assignees: '' + +--- + +**Read the Contribution Guidelines** +Before creating a bug report on this issue tracker, you **must** read the [Contribution Guidelines](https://github.com/markqvist/Reticulum/blob/master/Contributing.md). Issues that do not follow the contribution guidelines **will be deleted without comment**. + +- The issue tracker is used by developers of this project. **Do not use it to ask general questions, or for support requests**. +- Ideas and feature requests can be made on the [Discussions](https://github.com/markqvist/Reticulum/discussions). **Only** feature requests accepted by maintainers and developers are tracked and included on the issue tracker. **Do not post feature requests here**. +- After reading the [Contribution Guidelines](https://github.com/markqvist/Reticulum/blob/master/Contributing.md), **delete this section only** (*"Read the Contribution Guidelines"*) from your bug report, **and fill in all the other sections**. + +**Describe the Bug** +A clear and concise description of what the bug is. + +**To Reproduce** +Describe in detail how to reproduce the bug. + +**Expected Behavior** +A clear and concise description of what you expected to happen. + +**Logs & Screenshots** +Please include any relevant log output. If applicable, also add screenshots to help explain your problem. + +**System Information** +- OS and version +- Python version +- Program version + +**Additional context** +Add any other context about the problem here. diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..b7ed71d --- /dev/null +++ b/Dockerfile @@ -0,0 +1,31 @@ +FROM python:alpine +LABEL authors="Petr Blaha petr.blaha@cleverdata.cz" +USER root +RUN apk update +RUN apk add sdl2_ttf sdl2 build-base libc-dev pkgconfig gstreamer sdl2_mixer sdl2_image sdl2_pango linux-headers mesa-dev py3-virtualenv + +RUN addgroup -S myuser && adduser -S -G myuser myuser +USER myuser +WORKDIR /home/myuser + +RUN pip install --upgrade pip + + +ENV PATH="/home/myuser/.local/bin:${PATH}" + +################### BEGIN LXMF ########################################### + +COPY --chown=myuser:myuser requirements.txt requirements.txt + +RUN pip install --user -r requirements.txt + + +COPY --chown=myuser:myuser . . 
+ +#Python create virtual environment +RUN virtualenv /home/myuser/LXMF/venv +RUN source /home/myuser/LXMF/venv/bin/activate + +RUN make all + +################### END LXMF ########################################### diff --git a/Dockerfile.howto b/Dockerfile.howto new file mode 100644 index 0000000..bb20e5f --- /dev/null +++ b/Dockerfile.howto @@ -0,0 +1,6 @@ +# Run docker command one by one(all four), it will build LXMF artifact and copy to dist directory. +# No need to build locally and install dependencies +docker build -t lxmfdockerimage . +docker run -d -it --name lxmfdockercontainer lxmfdockerimage /bin/sh +docker cp lxmfdockercontainer:/home/myuser/dist . +docker rm -f lxmfdockercontainer \ No newline at end of file diff --git a/LICENSE b/LICENSE index a25bd7a..f5fb92d 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ -MIT License +Reticulum License -Copyright (c) 2020 Mark Qvist / unsigned.io +Copyright (c) 2020-2025 Mark Qvist Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -9,8 +9,16 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. +- The Software shall not be used in any kind of system which includes amongst + its functions the ability to purposefully do harm to human beings. + +- The Software shall not be used, directly or indirectly, in the creation of + an artificial intelligence, machine learning or language model training + dataset, including but not limited to any use that contributes to the + training or development of such a model or algorithm. + +- The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, diff --git a/LXMF/Handlers.py b/LXMF/Handlers.py index 32e045e..0705074 100644 --- a/LXMF/Handlers.py +++ b/LXMF/Handlers.py @@ -2,19 +2,19 @@ import time import RNS import RNS.vendor.umsgpack as msgpack -from .LXMF import APP_NAME - +from .LXMF import APP_NAME, stamp_cost_from_app_data, pn_announce_data_is_valid from .LXMessage import LXMessage class LXMFDeliveryAnnounceHandler: def __init__(self, lxmrouter): self.aspect_filter = APP_NAME+".delivery" + self.receive_path_responses = True self.lxmrouter = lxmrouter def received_announce(self, destination_hash, announced_identity, app_data): for lxmessage in self.lxmrouter.pending_outbound: if destination_hash == lxmessage.destination_hash: - if lxmessage.method == LXMessage.DIRECT: + if lxmessage.method == LXMessage.DIRECT or lxmessage.method == LXMessage.OPPORTUNISTIC: lxmessage.next_delivery_attempt = time.time() while self.lxmrouter.processing_outbound: @@ -22,23 +22,54 @@ class LXMFDeliveryAnnounceHandler: self.lxmrouter.process_outbound() + try: + stamp_cost = stamp_cost_from_app_data(app_data) + self.lxmrouter.update_stamp_cost(destination_hash, stamp_cost) + + except Exception as e: + RNS.log(f"An error occurred while trying to decode announced stamp cost. 
The contained exception was: {e}", RNS.LOG_ERROR) + class LXMFPropagationAnnounceHandler: def __init__(self, lxmrouter): self.aspect_filter = APP_NAME+".propagation" + self.receive_path_responses = False self.lxmrouter = lxmrouter def received_announce(self, destination_hash, announced_identity, app_data): try: if type(app_data) == bytes: - data = msgpack.unpackb(app_data) - if self.lxmrouter.propagation_node and self.lxmrouter.autopeer: - if data[0] == True: - if RNS.Transport.hops_to(destination_hash) <= self.lxmrouter.autopeer_maxdepth: - self.lxmrouter.peer(destination_hash, data[1]) - elif data[0] == False: - self.lxmrouter.unpeer(destination_hash, data[1]) + data = msgpack.unpackb(app_data) + + if pn_announce_data_is_valid(data): + node_timebase = data[1] + propagation_transfer_limit = None + wanted_inbound_peers = None + if len(data) >= 4: + # TODO: Rethink, probably not necessary anymore + # try: + # wanted_inbound_peers = int(data[3]) + # except: + # wanted_inbound_peers = None + pass + + if len(data) >= 3: + try: + propagation_transfer_limit = float(data[2]) + except: + propagation_transfer_limit = None + + if destination_hash in self.lxmrouter.static_peers: + self.lxmrouter.peer(destination_hash, node_timebase, propagation_transfer_limit, wanted_inbound_peers) + + else: + if data[0] == True: + if RNS.Transport.hops_to(destination_hash) <= self.lxmrouter.autopeer_maxdepth: + self.lxmrouter.peer(destination_hash, node_timebase, propagation_transfer_limit, wanted_inbound_peers) + + elif data[0] == False: + self.lxmrouter.unpeer(destination_hash, node_timebase) except Exception as e: RNS.log("Error while evaluating propagation node announce, ignoring announce.", RNS.LOG_DEBUG) diff --git a/LXMF/LXMF.py b/LXMF/LXMF.py index 5225d88..db0edb7 100644 --- a/LXMF/LXMF.py +++ b/LXMF/LXMF.py @@ -1,9 +1,10 @@ APP_NAME = "lxmf" -# WARNING! These field specifiers are floating and not -# yet final! Consider highly experiemental, and expect -# them to change in the future! You have been warned :) - +########################################################## +# The following core fields are provided to facilitate # +# interoperability in data exchange between various LXMF # +# clients and systems. # +########################################################## FIELD_EMBEDDED_LXMS = 0x01 FIELD_TELEMETRY = 0x02 FIELD_TELEMETRY_STREAM = 0x03 @@ -13,4 +14,152 @@ FIELD_IMAGE = 0x06 FIELD_AUDIO = 0x07 FIELD_THREAD = 0x08 FIELD_COMMANDS = 0x09 -FIELD_RESULTS = 0x0A \ No newline at end of file +FIELD_RESULTS = 0x0A +FIELD_GROUP = 0x0B +FIELD_TICKET = 0x0C +FIELD_EVENT = 0x0D +FIELD_RNR_REFS = 0x0E +FIELD_RENDERER = 0x0F + +# For usecases such as including custom data structures, +# embedding or encapsulating other data types or protocols +# that are not native to LXMF, or bridging/tunneling +# external protocols or services over LXMF, the following +# fields are available. A format/type/protocol (or other) +# identifier can be included in the CUSTOM_TYPE field, and +# the embedded payload can be included in the CUSTOM_DATA +# field. It is up to the client application to correctly +# discern and potentially utilise any data embedded using +# this mechanism. +FIELD_CUSTOM_TYPE = 0xFB +FIELD_CUSTOM_DATA = 0xFC +FIELD_CUSTOM_META = 0xFD + +# The non-specific and debug fields are intended for +# development, testing and debugging use. 
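# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch, not part of the patch above. The comment
# block introducing FIELD_CUSTOM_TYPE / FIELD_CUSTOM_DATA explains that a
# client-chosen format identifier goes in the type field and the opaque
# payload in the data field. A minimal, hedged example of how a client
# application might populate these fields when building a message; the
# "application/msgpack" identifier, the function name and the payload variable
# are hypothetical placeholders, and the LXMessage constructor arguments
# follow the signature used elsewhere in this diff.
from LXMF.LXMF import FIELD_CUSTOM_TYPE, FIELD_CUSTOM_DATA
from LXMF.LXMessage import LXMessage

def build_custom_payload_message(destination, source, payload_bytes):
    fields = {
        FIELD_CUSTOM_TYPE: "application/msgpack".encode("utf-8"),  # client-chosen identifier
        FIELD_CUSTOM_DATA: payload_bytes,                          # opaque embedded payload
    }
    # The receiving client must recognise the identifier and decode
    # FIELD_CUSTOM_DATA itself; LXMF does not interpret these fields.
    return LXMessage(destination, source, content="", title="", fields=fields)
# ---------------------------------------------------------------------------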
+FIELD_NON_SPECIFIC = 0xFE +FIELD_DEBUG = 0xFF + +########################################################## +# The following section lists field-specific specifiers, # +# modes and identifiers that are native to LXMF. It is # +# optional for any client or system to support any of # +# these, and they are provided as template for easing # +# interoperability without sacrificing expandability # +# and flexibility of the format. # +########################################################## + +# Audio modes for the data structure in FIELD_AUDIO + +# Codec2 Audio Modes +AM_CODEC2_450PWB = 0x01 +AM_CODEC2_450 = 0x02 +AM_CODEC2_700C = 0x03 +AM_CODEC2_1200 = 0x04 +AM_CODEC2_1300 = 0x05 +AM_CODEC2_1400 = 0x06 +AM_CODEC2_1600 = 0x07 +AM_CODEC2_2400 = 0x08 +AM_CODEC2_3200 = 0x09 + +# Opus Audio Modes +AM_OPUS_OGG = 0x10 +AM_OPUS_LBW = 0x11 +AM_OPUS_MBW = 0x12 +AM_OPUS_PTT = 0x13 +AM_OPUS_RT_HDX = 0x14 +AM_OPUS_RT_FDX = 0x15 +AM_OPUS_STANDARD = 0x16 +AM_OPUS_HQ = 0x17 +AM_OPUS_BROADCAST = 0x18 +AM_OPUS_LOSSLESS = 0x19 + +# Custom, unspecified audio mode, the client must +# determine it itself based on the included data. +AM_CUSTOM = 0xFF + +# Message renderer specifications for FIELD_RENDERER. +# The renderer specification is completely optional, +# and only serves as an indication to the receiving +# client on how to render the message contents. It is +# not mandatory to implement, either on sending or +# receiving sides, but is the recommended way to +# signal how to render a message, if non-plaintext +# formatting is used. +RENDERER_PLAIN = 0x00 +RENDERER_MICRON = 0x01 +RENDERER_MARKDOWN = 0x02 +RENDERER_BBCODE = 0x03 + +########################################################## +# The following helper functions makes it easier to # +# handle and operate on LXMF data in client programs # +########################################################## + +import RNS +import RNS.vendor.umsgpack as msgpack +def display_name_from_app_data(app_data=None): + if app_data == None: + return None + elif len(app_data) == 0: + return None + else: + # Version 0.5.0+ announce format + if (app_data[0] >= 0x90 and app_data[0] <= 0x9f) or app_data[0] == 0xdc: + peer_data = msgpack.unpackb(app_data) + if type(peer_data) == list: + if len(peer_data) < 1: + return None + else: + dn = peer_data[0] + if dn == None: + return None + else: + try: + decoded = dn.decode("utf-8") + return decoded + except Exception as e: + RNS.log(f"Could not decode display name in included announce data. 
The contained exception was: {e}", RNS.LOG_ERROR) + return None + + # Original announce format + else: + return app_data.decode("utf-8") + +def stamp_cost_from_app_data(app_data=None): + if app_data == None or app_data == b"": + return None + else: + # Version 0.5.0+ announce format + if (app_data[0] >= 0x90 and app_data[0] <= 0x9f) or app_data[0] == 0xdc: + peer_data = msgpack.unpackb(app_data) + if type(peer_data) == list: + if len(peer_data) < 2: + return None + else: + return peer_data[1] + + # Original announce format + else: + return None + +def pn_announce_data_is_valid(data): + try: + if type(data) == bytes: + data = msgpack.unpackb(data) + + if len(data) < 3: + raise ValueError("Invalid announce data: Insufficient peer data") + else: + if data[0] != True and data[0] != False: + raise ValueError("Invalid announce data: Indeterminate propagation node status") + try: + int(data[1]) + except: + raise ValueError("Invalid announce data: Could not decode peer timebase") + + except Exception as e: + RNS.log(f"Could not validate propagation node announce data: {e}", RNS.LOG_DEBUG) + return False + + return True \ No newline at end of file diff --git a/LXMF/LXMPeer.py b/LXMF/LXMPeer.py index 611a874..c1294bd 100644 --- a/LXMF/LXMPeer.py +++ b/LXMF/LXMPeer.py @@ -4,6 +4,7 @@ import time import RNS import RNS.vendor.umsgpack as msgpack +from collections import deque from .LXMF import APP_NAME class LXMPeer: @@ -19,6 +20,7 @@ class LXMPeer: ERROR_NO_IDENTITY = 0xf0 ERROR_NO_ACCESS = 0xf1 + ERROR_TIMEOUT = 0xfe # Maximum amount of time a peer can # be unreachable before it is removed @@ -38,24 +40,82 @@ class LXMPeer: @staticmethod def from_bytes(peer_bytes, router): dictionary = msgpack.unpackb(peer_bytes) + peer_destination_hash = dictionary["destination_hash"] + peer_peering_timebase = dictionary["peering_timebase"] + peer_alive = dictionary["alive"] + peer_last_heard = dictionary["last_heard"] + + peer = LXMPeer(router, peer_destination_hash) + peer.peering_timebase = peer_peering_timebase + peer.alive = peer_alive + peer.last_heard = peer_last_heard - peer = LXMPeer(router, dictionary["destination_hash"]) - peer.peering_timebase = dictionary["peering_timebase"] - peer.alive = dictionary["alive"] - peer.last_heard = dictionary["last_heard"] if "link_establishment_rate" in dictionary: peer.link_establishment_rate = dictionary["link_establishment_rate"] else: peer.link_establishment_rate = 0 + + if "sync_transfer_rate" in dictionary: + peer.sync_transfer_rate = dictionary["sync_transfer_rate"] + else: + peer.sync_transfer_rate = 0 + if "propagation_transfer_limit" in dictionary: + try: + peer.propagation_transfer_limit = float(dictionary["propagation_transfer_limit"]) + except Exception as e: + peer.propagation_transfer_limit = None + else: + peer.propagation_transfer_limit = None + + if "offered" in dictionary: + peer.offered = dictionary["offered"] + else: + peer.offered = 0 + + if "outgoing" in dictionary: + peer.outgoing = dictionary["outgoing"] + else: + peer.outgoing = 0 + + if "incoming" in dictionary: + peer.incoming = dictionary["incoming"] + else: + peer.incoming = 0 + + if "rx_bytes" in dictionary: + peer.rx_bytes = dictionary["rx_bytes"] + else: + peer.rx_bytes = 0 + + if "tx_bytes" in dictionary: + peer.tx_bytes = dictionary["tx_bytes"] + else: + peer.tx_bytes = 0 + + if "last_sync_attempt" in dictionary: + peer.last_sync_attempt = dictionary["last_sync_attempt"] + else: + peer.last_sync_attempt = 0 + + hm_count = 0 for transient_id in dictionary["handled_ids"]: if transient_id 
in router.propagation_entries: - peer.handled_messages[transient_id] = router.propagation_entries[transient_id] + peer.add_handled_message(transient_id) + hm_count += 1 + um_count = 0 for transient_id in dictionary["unhandled_ids"]: if transient_id in router.propagation_entries: - peer.unhandled_messages[transient_id] = router.propagation_entries[transient_id] + peer.add_unhandled_message(transient_id) + um_count += 1 + peer._hm_count = hm_count + peer._um_count = um_count + peer._hm_counts_synced = True + peer._um_counts_synced = True + + del dictionary return peer def to_bytes(self): @@ -65,6 +125,14 @@ class LXMPeer: dictionary["last_heard"] = self.last_heard dictionary["destination_hash"] = self.destination_hash dictionary["link_establishment_rate"] = self.link_establishment_rate + dictionary["sync_transfer_rate"] = self.sync_transfer_rate + dictionary["propagation_transfer_limit"] = self.propagation_transfer_limit + dictionary["last_sync_attempt"] = self.last_sync_attempt + dictionary["offered"] = self.offered + dictionary["outgoing"] = self.outgoing + dictionary["incoming"] = self.incoming + dictionary["rx_bytes"] = self.rx_bytes + dictionary["tx_bytes"] = self.tx_bytes handled_ids = [] for transient_id in self.handled_messages: @@ -77,7 +145,10 @@ class LXMPeer: dictionary["handled_ids"] = handled_ids dictionary["unhandled_ids"] = unhandled_ids - return msgpack.packb(dictionary) + peer_bytes = msgpack.packb(dictionary) + del dictionary + + return peer_bytes def __init__(self, router, destination_hash): self.alive = False @@ -87,17 +158,35 @@ class LXMPeer: self.sync_backoff = 0 self.peering_timebase = 0 self.link_establishment_rate = 0 + self.sync_transfer_rate = 0 + self.propagation_transfer_limit = None + self.handled_messages_queue = deque() + self.unhandled_messages_queue = deque() + + self.offered = 0 # Messages offered to this peer + self.outgoing = 0 # Messages transferred to this peer + self.incoming = 0 # Messages received from this peer + self.rx_bytes = 0 # Bytes received from this peer + self.tx_bytes = 0 # Bytes sent to this peer + + self._hm_count = 0 + self._um_count = 0 + self._hm_counts_synced = False + self._um_counts_synced = False self.link = None self.state = LXMPeer.IDLE - self.unhandled_messages = {} - self.handled_messages = {} + self.last_offer = [] self.router = router self.destination_hash = destination_hash self.identity = RNS.Identity.recall(destination_hash) - self.destination = RNS.Destination(self.identity, RNS.Destination.OUT, RNS.Destination.SINGLE, APP_NAME, "propagation") + if self.identity != None: + self.destination = RNS.Destination(self.identity, RNS.Destination.OUT, RNS.Destination.SINGLE, APP_NAME, "propagation") + else: + self.destination = None + RNS.log(f"Could not recall identity for LXMF propagation peer {RNS.prettyhexrep(self.destination_hash)}, will retry identity resolution on next sync", RNS.LOG_WARNING) def sync(self): RNS.log("Initiating LXMF Propagation Node sync with peer "+RNS.prettyhexrep(self.destination_hash), RNS.LOG_DEBUG) @@ -115,9 +204,10 @@ class LXMPeer: else: if self.identity == None: self.identity = RNS.Identity.recall(destination_hash) - self.destination = RNS.Destination(self.identity, RNS.Destination.OUT, RNS.Destination.SINGLE, APP_NAME, "propagation") + if self.identity != None: + self.destination = RNS.Destination(self.identity, RNS.Destination.OUT, RNS.Destination.SINGLE, APP_NAME, "propagation") - if self.identity != None: + if self.destination != None: if len(self.unhandled_messages) > 0: if self.state == 
LXMPeer.IDLE: RNS.log("Establishing link for sync to peer "+RNS.prettyhexrep(self.destination_hash)+"...", RNS.LOG_DEBUG) @@ -133,23 +223,49 @@ class LXMPeer: self.sync_backoff = 0 RNS.log("Synchronisation link to peer "+RNS.prettyhexrep(self.destination_hash)+" established, preparing request...", RNS.LOG_DEBUG) + unhandled_entries = [] unhandled_ids = [] purged_ids = [] for transient_id in self.unhandled_messages: if transient_id in self.router.propagation_entries: - unhandled_ids.append(transient_id) + unhandled_entry = [ + transient_id, + self.router.get_weight(transient_id), + self.router.get_size(transient_id), + ] + unhandled_entries.append(unhandled_entry) else: purged_ids.append(transient_id) for transient_id in purged_ids: RNS.log("Dropping unhandled message "+RNS.prettyhexrep(transient_id)+" for peer "+RNS.prettyhexrep(self.destination_hash)+" since it no longer exists in the message store.", RNS.LOG_DEBUG) - self.unhandled_messages.pop(transient_id) + self.remove_unhandled_message(transient_id) - RNS.log("Sending sync request to peer "+str(self.destination), RNS.LOG_DEBUG) + unhandled_entries.sort(key=lambda e: e[1], reverse=False) + per_message_overhead = 16 # Really only 2 bytes, but set a bit higher for now + cumulative_size = 24 # Initialised to highest reasonable binary structure overhead + for unhandled_entry in unhandled_entries: + transient_id = unhandled_entry[0] + weight = unhandled_entry[1] + lxm_size = unhandled_entry[2] + next_size = cumulative_size + (lxm_size+per_message_overhead) + if self.propagation_transfer_limit != None and next_size > (self.propagation_transfer_limit*1000): + if lxm_size+per_message_overhead > (self.propagation_transfer_limit*1000): + self.remove_unhandled_message(transient_id) + self.add_handled_message(transient_id) + RNS.log(f"Message {RNS.prettyhexrep(transient_id)} exceeds transfer limit for {self}, considering handled", RNS.LOG_DEBUG) + else: + cumulative_size += (lxm_size+per_message_overhead) + unhandled_ids.append(transient_id) + + RNS.log(f"Offering {len(unhandled_ids)} messages to peer {RNS.prettyhexrep(self.destination.hash)}", RNS.LOG_VERBOSE) + self.last_offer = unhandled_ids self.link.request(LXMPeer.OFFER_REQUEST_PATH, unhandled_ids, response_callback=self.offer_response, failed_callback=self.request_failed) self.state = LXMPeer.REQUEST_SENT + else: RNS.log("Could not request sync to peer "+RNS.prettyhexrep(self.destination_hash)+" since its identity could not be recalled.", RNS.LOG_ERROR) + else: RNS.log("Postponing sync with peer "+RNS.prettyhexrep(self.destination_hash)+" for "+RNS.prettytime(self.next_sync_attempt-time.time())+" due to previous failures", RNS.LOG_DEBUG) if self.last_sync_attempt > self.last_heard: @@ -159,8 +275,8 @@ class LXMPeer: RNS.log("Sync request to peer "+str(self.destination)+" failed", RNS.LOG_DEBUG) if self.link != None: self.link.teardown() - else: - self.state = LXMPeer.IDLE + + self.state = LXMPeer.IDLE def offer_response(self, request_receipt): try: @@ -172,44 +288,48 @@ class LXMPeer: if response == LXMPeer.ERROR_NO_IDENTITY: if self.link != None: - RNS.log("Remote peer indicated that no identification was received, retrying...", RNS.LOG_DEBUG) - self.link.indentify() + RNS.log("Remote peer indicated that no identification was received, retrying...", RNS.LOG_VERBOSE) + self.link.identify() self.state = LXMPeer.LINK_READY self.sync() + return + + elif response == LXMPeer.ERROR_NO_ACCESS: + RNS.log("Remote indicated that access was denied, breaking peering", RNS.LOG_VERBOSE) + 
self.router.unpeer(self.destination_hash) + return elif response == False: # Peer already has all advertised messages - for transient_id in self.unhandled_messages: - message_entry = self.unhandled_messages[transient_id] - self.handled_messages[transient_id] = message_entry - - self.unhandled_messages = {} + for transient_id in self.last_offer: + if transient_id in self.unhandled_messages: + self.add_handled_message(transient_id) + self.remove_unhandled_message(transient_id) + elif response == True: # Peer wants all advertised messages - for transient_id in self.unhandled_messages: - wanted_messages.append(self.unhandled_messages[transient_id]) + for transient_id in self.last_offer: + wanted_messages.append(self.router.propagation_entries[transient_id]) wanted_message_ids.append(transient_id) else: # Peer wants some advertised messages - peer_had_messages = [] - for transient_id in self.unhandled_messages.copy(): + for transient_id in self.last_offer.copy(): # If the peer did not want the message, it has # already received it from another peer. if not transient_id in response: - message_entry = self.unhandled_messages.pop(transient_id) - self.handled_messages[transient_id] = message_entry + self.add_handled_message(transient_id) + self.remove_unhandled_message(transient_id) for transient_id in response: - wanted_messages.append(self.unhandled_messages[transient_id]) + wanted_messages.append(self.router.propagation_entries[transient_id]) wanted_message_ids.append(transient_id) if len(wanted_messages) > 0: - RNS.log("Peer wanted "+str(len(wanted_messages))+" of the available messages", RNS.LOG_DEBUG) + RNS.log("Peer wanted "+str(len(wanted_messages))+" of the available messages", RNS.LOG_VERBOSE) lxm_list = [] - for message_entry in wanted_messages: file_path = message_entry[1] if os.path.isfile(file_path): @@ -221,9 +341,16 @@ class LXMPeer: data = msgpack.packb([time.time(), lxm_list]) resource = RNS.Resource(data, self.link, callback = self.resource_concluded) resource.transferred_messages = wanted_message_ids + resource.sync_transfer_started = time.time() self.state = LXMPeer.RESOURCE_TRANSFERRING + else: - RNS.log("Peer "+RNS.prettyhexrep(self.destination_hash)+" did not request any of the available messages, sync completed", RNS.LOG_DEBUG) + RNS.log("Peer "+RNS.prettyhexrep(self.destination_hash)+" did not request any of the available messages, sync completed", RNS.LOG_VERBOSE) + self.offered += len(self.last_offer) + if self.link != None: + self.link.teardown() + + self.link = None self.state = LXMPeer.IDLE except Exception as e: @@ -236,25 +363,37 @@ class LXMPeer: self.link = None self.state = LXMPeer.IDLE - def resource_concluded(self, resource): if resource.status == RNS.Resource.COMPLETE: for transient_id in resource.transferred_messages: - message = self.unhandled_messages.pop(transient_id) - self.handled_messages[transient_id] = message - self.state = LXMPeer.IDLE - self.link.teardown() - RNS.log("Sync to peer "+RNS.prettyhexrep(self.destination_hash)+" completed", RNS.LOG_DEBUG) - self.alive = True - self.last_heard = time.time() - else: - RNS.log("Resource transfer for LXMF peer sync failed to "+str(self.destination), RNS.LOG_DEBUG) + self.add_handled_message(transient_id) + self.remove_unhandled_message(transient_id) + if self.link != None: self.link.teardown() - else: - self.state = LXMPeer.IDLE + self.link = None + self.state = LXMPeer.IDLE + rate_str = "" + if hasattr(resource, "sync_transfer_started") and resource.sync_transfer_started: + self.sync_transfer_rate = 
(resource.get_transfer_size()*8)/(time.time()-resource.sync_transfer_started) + rate_str = f" at {RNS.prettyspeed(self.sync_transfer_rate)}" + + RNS.log(f"Syncing {len(resource.transferred_messages)} messages to peer {RNS.prettyhexrep(self.destination_hash)} completed{rate_str}", RNS.LOG_VERBOSE) + self.alive = True + self.last_heard = time.time() + self.offered += len(self.last_offer) + self.outgoing += len(resource.transferred_messages) + self.tx_bytes += resource.get_data_size() + + else: + RNS.log("Resource transfer for LXMF peer sync failed to "+str(self.destination), RNS.LOG_VERBOSE) + if self.link != None: + self.link.teardown() + + self.link = None + self.state = LXMPeer.IDLE def link_established(self, link): self.link.identify(self.router.identity) @@ -270,11 +409,103 @@ class LXMPeer: self.link = None self.state = LXMPeer.IDLE - def handle_message(self, transient_id): - if not transient_id in self.handled_messages and not transient_id in self.unhandled_messages: - # TODO: Remove at some point - RNS.log("The message "+RNS.prettyhexrep(transient_id)+" was added to distribution queue for "+RNS.prettyhexrep(self.destination_hash), RNS.LOG_EXTREME) - self.unhandled_messages[transient_id] = self.router.propagation_entries[transient_id] + def queued_items(self): + return len(self.handled_messages_queue) > 0 or len(self.unhandled_messages_queue) > 0 + + def queue_unhandled_message(self, transient_id): + self.unhandled_messages_queue.append(transient_id) + + def queue_handled_message(self, transient_id): + self.handled_messages_queue.append(transient_id) + + def process_queues(self): + if len(self.unhandled_messages_queue) > 0 or len(self.handled_messages_queue) > 0: + # TODO: Remove debug + # st = time.time(); lu = len(self.unhandled_messages_queue); lh = len(self.handled_messages_queue) + + handled_messages = self.handled_messages + unhandled_messages = self.unhandled_messages + + while len(self.handled_messages_queue) > 0: + transient_id = self.handled_messages_queue.pop() + if not transient_id in handled_messages: + self.add_handled_message(transient_id) + if transient_id in unhandled_messages: + self.remove_unhandled_message(transient_id) + + while len(self.unhandled_messages_queue) > 0: + transient_id = self.unhandled_messages_queue.pop() + if not transient_id in handled_messages and not transient_id in unhandled_messages: + self.add_unhandled_message(transient_id) + + del handled_messages, unhandled_messages + # TODO: Remove debug + # RNS.log(f"{self} processed {lh}/{lu} in {RNS.prettytime(time.time()-st)}") + + @property + def handled_messages(self): + pes = self.router.propagation_entries.copy() + hm = list(filter(lambda tid: self.destination_hash in pes[tid][4], pes)) + self._hm_count = len(hm); del pes + self._hm_counts_synced = True + return hm + + @property + def unhandled_messages(self): + pes = self.router.propagation_entries.copy() + um = list(filter(lambda tid: self.destination_hash in pes[tid][5], pes)) + self._um_count = len(um); del pes + self._um_counts_synced = True + return um + + @property + def handled_message_count(self): + if not self._hm_counts_synced: + self._update_counts() + + return self._hm_count + + @property + def unhandled_message_count(self): + if not self._um_counts_synced: + self._update_counts() + + return self._um_count + + @property + def acceptance_rate(self): + return 0 if self.offered == 0 else (self.outgoing/self.offered) + + def _update_counts(self): + if not self._hm_counts_synced: + hm = self.handled_messages; del hm + + if not 
self._um_counts_synced: + um = self.unhandled_messages; del um + + def add_handled_message(self, transient_id): + if transient_id in self.router.propagation_entries: + if not self.destination_hash in self.router.propagation_entries[transient_id][4]: + self.router.propagation_entries[transient_id][4].append(self.destination_hash) + self._hm_counts_synced = False + + def add_unhandled_message(self, transient_id): + if transient_id in self.router.propagation_entries: + if not self.destination_hash in self.router.propagation_entries[transient_id][5]: + self.router.propagation_entries[transient_id][5].append(self.destination_hash) + self._um_count += 1 + + def remove_handled_message(self, transient_id): + if transient_id in self.router.propagation_entries: + if self.destination_hash in self.router.propagation_entries[transient_id][4]: + self.router.propagation_entries[transient_id][4].remove(self.destination_hash) + self._hm_counts_synced = False + + def remove_unhandled_message(self, transient_id): + if transient_id in self.router.propagation_entries: + if self.destination_hash in self.router.propagation_entries[transient_id][5]: + self.router.propagation_entries[transient_id][5].remove(self.destination_hash) + self._um_counts_synced = False def __str__(self): if self.destination_hash: diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index fb7f3b0..d502ee6 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -1,88 +1,122 @@ import os +import sys import time +import math import random import base64 import atexit +import signal import threading +from collections import deque + import RNS import RNS.vendor.umsgpack as msgpack from .LXMF import APP_NAME +from .LXMF import FIELD_TICKET from .LXMPeer import LXMPeer from .LXMessage import LXMessage from .Handlers import LXMFDeliveryAnnounceHandler from .Handlers import LXMFPropagationAnnounceHandler +import LXMF.LXStamper as LXStamper + class LXMRouter: MAX_DELIVERY_ATTEMPTS = 5 PROCESSING_INTERVAL = 4 - DELIVERY_RETRY_WAIT = 7 - PATH_REQUEST_WAIT = 5 + DELIVERY_RETRY_WAIT = 10 + PATH_REQUEST_WAIT = 7 + MAX_PATHLESS_TRIES = 1 LINK_MAX_INACTIVITY = 10*60 + P_LINK_MAX_INACTIVITY = 3*60 MESSAGE_EXPIRY = 30*24*60*60 + STAMP_COST_EXPIRY = 45*24*60*60 NODE_ANNOUNCE_DELAY = 20 + MAX_PEERS = 50 AUTOPEER = True AUTOPEER_MAXDEPTH = 4 FASTEST_N_RANDOM_POOL = 2 + ROTATION_HEADROOM_PCT = 10 + ROTATION_AR_MAX = 0.5 - PR_PATH_TIMEOUT = 10 + PROPAGATION_LIMIT = 256 + DELIVERY_LIMIT = 1000 - PR_IDLE = 0x00 - PR_PATH_REQUESTED = 0x01 - PR_LINK_ESTABLISHING = 0x02 - PR_LINK_ESTABLISHED = 0x03 - PR_REQUEST_SENT = 0x04 - PR_RECEIVING = 0x05 - PR_RESPONSE_RECEIVED = 0x06 - PR_COMPLETE = 0x07 - PR_NO_PATH = 0xf0 - PR_LINK_FAILED = 0xf1 - PR_TRANSFER_FAILED = 0xf2 - PR_NO_IDENTITY_RCVD = 0xf3 - PR_NO_ACCESS = 0xf4 - PR_FAILED = 0xfe + PR_PATH_TIMEOUT = 10 - PR_ALL_MESSAGES = 0x00 + PR_IDLE = 0x00 + PR_PATH_REQUESTED = 0x01 + PR_LINK_ESTABLISHING = 0x02 + PR_LINK_ESTABLISHED = 0x03 + PR_REQUEST_SENT = 0x04 + PR_RECEIVING = 0x05 + PR_RESPONSE_RECEIVED = 0x06 + PR_COMPLETE = 0x07 + PR_NO_PATH = 0xf0 + PR_LINK_FAILED = 0xf1 + PR_TRANSFER_FAILED = 0xf2 + PR_NO_IDENTITY_RCVD = 0xf3 + PR_NO_ACCESS = 0xf4 + PR_FAILED = 0xfe + + PR_ALL_MESSAGES = 0x00 + + STATS_GET_PATH = "/pn/get/stats" ### Developer-facing API ############################## ####################################################### - def __init__(self, identity = None, storagepath = None, autopeer = AUTOPEER, autopeer_maxdepth = None): + def __init__(self, identity=None, storagepath=None, autopeer=AUTOPEER, 
autopeer_maxdepth=None, + propagation_limit=PROPAGATION_LIMIT, delivery_limit=DELIVERY_LIMIT, enforce_ratchets=False, + enforce_stamps=False, static_peers = [], max_peers=None, from_static_only=False): + random.seed(os.urandom(10)) self.pending_inbound = [] self.pending_outbound = [] self.failed_outbound = [] self.direct_links = {} + self.backchannel_links = {} self.delivery_destinations = {} self.prioritised_list = [] self.ignored_list = [] self.allowed_list = [] self.auth_required = False + self.retain_synced_on_node = False self.processing_outbound = False self.processing_inbound = False self.processing_count = 0 self.propagation_node = False + self.propagation_node_start_time = None if storagepath == None: raise ValueError("LXMF cannot be initialised without a storage path") else: self.storagepath = storagepath+"/lxmf" + self.ratchetpath = self.storagepath+"/ratchets" self.outbound_propagation_node = None self.outbound_propagation_link = None + if delivery_limit == None: + delivery_limit = LXMRouter.DELIVERY_LIMIT + self.message_storage_limit = None self.information_storage_limit = None + self.propagation_per_transfer_limit = propagation_limit + self.delivery_per_transfer_limit = delivery_limit + self.enforce_ratchets = enforce_ratchets + self._enforce_stamps = enforce_stamps + self.pending_deferred_stamps = {} self.wants_download_on_path_available_from = None self.wants_download_on_path_available_to = None @@ -90,15 +124,28 @@ class LXMRouter: self.propagation_transfer_progress = 0.0 self.propagation_transfer_last_result = None self.propagation_transfer_max_messages = None + self.prioritise_rotating_unreachable_peers = False + self.active_propagation_links = [] self.locally_delivered_transient_ids = {} self.locally_processed_transient_ids = {} + self.outbound_stamp_costs = {} + self.available_tickets = {"outbound": {}, "inbound": {}, "last_deliveries": {}} + + self.cost_file_lock = threading.Lock() + self.ticket_file_lock = threading.Lock() + self.stamp_gen_lock = threading.Lock() + self.exit_handler_running = False if identity == None: identity = RNS.Identity() self.identity = identity - self.lxmf_query_destination = RNS.Destination(None, RNS.Destination.IN, RNS.Destination.PLAIN, APP_NAME, "query") self.propagation_destination = RNS.Destination(self.identity, RNS.Destination.IN, RNS.Destination.SINGLE, APP_NAME, "propagation") + self.control_destination = None + self.client_propagation_messages_received = 0 + self.client_propagation_messages_served = 0 + self.unpeered_propagation_incoming = 0 + self.unpeered_propagation_rx_bytes = 0 if autopeer != None: self.autopeer = autopeer @@ -110,9 +157,32 @@ class LXMRouter: else: self.autopeer_maxdepth = LXMRouter.AUTOPEER_MAXDEPTH + if max_peers == None: + self.max_peers = LXMRouter.MAX_PEERS + else: + if type(max_peers) == int and max_peers >= 0: + self.max_peers = max_peers + else: + raise ValueError(f"Invalid value for max_peers: {max_peers}") + + self.from_static_only = from_static_only + if type(static_peers) != list: + raise ValueError(f"Invalid type supplied for static peer list: {type(static_peers)}") + else: + for static_peer in static_peers: + if type(static_peer) != bytes: + raise ValueError(f"Invalid static peer destination hash: {static_peer}") + else: + if len(static_peer) != RNS.Reticulum.TRUNCATED_HASHLENGTH//8: + raise ValueError(f"Invalid static peer destination hash: {static_peer}") + + self.static_peers = static_peers + self.peers = {} self.propagation_entries = {} + self.peer_distribution_queue = deque() + 
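# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch, not part of the patch above. The
# constructor extended in this hunk accepts static_peers (a list of 16-byte
# destination hashes), max_peers and from_static_only. A hedged example of
# standing up a propagation node that only ingests messages from its static
# peers; the storage path and the peer hash are hypothetical placeholders,
# and enable_propagation() is assumed to be the upstream entry point for
# activating the node, as in released LXMF versions.
import RNS
from LXMF.LXMRouter import LXMRouter

reticulum = RNS.Reticulum()
node_identity = RNS.Identity()
static_peer = bytes.fromhex("76b05e4d2eae5e82cdf3e5ebbd0c35c9")  # hypothetical peer hash

router = LXMRouter(identity=node_identity,
                   storagepath="./lxmf-node",   # hypothetical storage directory
                   static_peers=[static_peer],
                   max_peers=25,
                   from_static_only=True)
router.enable_propagation()
# ---------------------------------------------------------------------------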
RNS.Transport.register_announce_handler(LXMFDeliveryAnnounceHandler(self)) RNS.Transport.register_announce_handler(LXMFPropagationAnnounceHandler(self)) @@ -122,69 +192,196 @@ class LXMRouter: if os.path.isfile(self.storagepath+"/local_deliveries"): locally_delivered_file = open(self.storagepath+"/local_deliveries", "rb") data = locally_delivered_file.read() - self.locally_delivered_transient_ids = msgpack.unpackb(data) locally_delivered_file.close() - - if os.path.isfile(self.storagepath+"/locally_processed"): - locally_processed_file = open(self.storagepath+"/locally_processed", "rb") - data = locally_processed_file.read() - self.locally_processed_transient_ids = msgpack.unpackb(data) - locally_processed_file.close() - - self.clean_transient_id_caches() + self.locally_delivered_transient_ids = msgpack.unpackb(data) + if not type(self.locally_delivered_transient_ids) == dict: + RNS.log("Invalid data format for loaded locally delivered transient IDs, recreating...", RNS.LOG_ERROR) + self.locally_delivered_transient_ids = {} except Exception as e: RNS.log("Could not load locally delivered message ID cache from storage. The contained exception was: "+str(e), RNS.LOG_ERROR) + self.locally_delivered_transient_ids = {} + + try: + if os.path.isfile(self.storagepath+"/locally_processed"): + locally_processed_file = open(self.storagepath+"/locally_processed", "rb") + data = locally_processed_file.read() + locally_processed_file.close() + self.locally_processed_transient_ids = msgpack.unpackb(data) + if not type(self.locally_processed_transient_ids) == dict: + RNS.log("Invalid data format for loaded locally processed transient IDs, recreating...", RNS.LOG_ERROR) + self.locally_processed_transient_ids = {} + + + except Exception as e: + RNS.log("Could not load locally processed message ID cache from storage. The contained exception was: "+str(e), RNS.LOG_ERROR) + self.locally_processed_transient_ids = {} + + try: + self.clean_transient_id_caches() + + except Exception as e: + RNS.log("Could not clean transient ID caches. The contained exception was : "+str(e), RNS.LOG_ERROR) + self.locally_delivered_transient_ids = {} + self.locally_processed_transient_ids = {} + + try: + if os.path.isfile(self.storagepath+"/outbound_stamp_costs"): + with self.cost_file_lock: + with open(self.storagepath+"/outbound_stamp_costs", "rb") as outbound_stamp_cost_file: + data = outbound_stamp_cost_file.read() + self.outbound_stamp_costs = msgpack.unpackb(data) + if not type(self.outbound_stamp_costs) == dict: + RNS.log("Invalid data format for loaded outbound stamp costs, recreating...", RNS.LOG_ERROR) + self.outbound_stamp_costs = {} + + self.clean_outbound_stamp_costs() + self.save_outbound_stamp_costs() + + except Exception as e: + RNS.log("Could not load outbound stamp costs from storage. 
The contained exception was: "+str(e), RNS.LOG_ERROR) + + try: + if os.path.isfile(self.storagepath+"/available_tickets"): + with self.ticket_file_lock: + with open(self.storagepath+"/available_tickets", "rb") as available_tickets_file: + data = available_tickets_file.read() + self.available_tickets = msgpack.unpackb(data) + if not type(self.available_tickets) == dict: + RNS.log("Invalid data format for loaded available tickets, recreating...", RNS.LOG_ERROR) + self.available_tickets = {"outbound": {}, "inbound": {}, "last_deliveries": {}} + if not "outbound" in self.available_tickets: + RNS.log("Missing outbound entry in loaded available tickets, recreating...", RNS.LOG_ERROR) + self.available_tickets["outbound"] = {} + if not "inbound" in self.available_tickets: + RNS.log("Missing inbound entry in loaded available tickets, recreating...", RNS.LOG_ERROR) + self.available_tickets["inbound"] = {} + if not "last_deliveries" in self.available_tickets: + RNS.log("Missing local_deliveries entry in loaded available tickets, recreating...", RNS.LOG_ERROR) + self.available_tickets["last_deliveries"] = {} + + self.clean_available_tickets() + self.save_available_tickets() + + except Exception as e: + RNS.log("Could not load outbound stamp costs from storage. The contained exception was: "+str(e), RNS.LOG_ERROR) atexit.register(self.exit_handler) + signal.signal(signal.SIGINT, self.sigint_handler) + signal.signal(signal.SIGTERM, self.sigterm_handler) job_thread = threading.Thread(target=self.jobloop) job_thread.setDaemon(True) job_thread.start() - def announce(self, destination_hash): + def announce(self, destination_hash, attached_interface=None): if destination_hash in self.delivery_destinations: - delivery_destination = self.delivery_destinations[destination_hash] - delivery_destination.announce(delivery_destination.display_name.encode("utf-8")) + self.delivery_destinations[destination_hash].announce(app_data=self.get_announce_app_data(destination_hash), attached_interface=attached_interface) def announce_propagation_node(self): def delayed_announce(): time.sleep(LXMRouter.NODE_ANNOUNCE_DELAY) - data = msgpack.packb([self.propagation_node, int(time.time())]) + node_state = self.propagation_node and not self.from_static_only + announce_data = [ + node_state, # Boolean flag signalling propagation node state + int(time.time()), # Current node timebase + self.propagation_per_transfer_limit, # Per-transfer limit for message propagation in kilobytes + None, # How many more inbound peers this node wants + ] + + data = msgpack.packb(announce_data) self.propagation_destination.announce(app_data=data) da_thread = threading.Thread(target=delayed_announce) da_thread.setDaemon(True) da_thread.start() - def register_delivery_identity(self, identity, display_name = None): + def register_delivery_identity(self, identity, display_name = None, stamp_cost = None): + if len(self.delivery_destinations) != 0: + RNS.log("Currently only one delivery identity is supported per LXMF router instance", RNS.LOG_ERROR) + return None + + if not os.path.isdir(self.ratchetpath): + os.makedirs(self.ratchetpath) + delivery_destination = RNS.Destination(identity, RNS.Destination.IN, RNS.Destination.SINGLE, APP_NAME, "delivery") + delivery_destination.enable_ratchets(f"{self.ratchetpath}/{RNS.hexrep(delivery_destination.hash, delimit=False)}.ratchets") delivery_destination.set_packet_callback(self.delivery_packet) delivery_destination.set_link_established_callback(self.delivery_link_established) delivery_destination.display_name = 
display_name + if self.enforce_ratchets: + delivery_destination.enforce_ratchets() + if display_name != None: - delivery_destination.set_default_app_data(display_name.encode("utf-8")) + def get_app_data(): + return self.get_announce_app_data(delivery_destination.hash) + delivery_destination.set_default_app_data(get_app_data) self.delivery_destinations[delivery_destination.hash] = delivery_destination + self.set_inbound_stamp_cost(delivery_destination.hash, stamp_cost) + return delivery_destination def register_delivery_callback(self, callback): self.__delivery_callback = callback + def set_inbound_stamp_cost(self, destination_hash, stamp_cost): + if destination_hash in self.delivery_destinations: + delivery_destination = self.delivery_destinations[destination_hash] + if stamp_cost == None: + delivery_destination.stamp_cost = None + return True + elif type(stamp_cost) == int: + if stamp_cost < 1: + delivery_destination.stamp_cost = None + elif stamp_cost < 255: + delivery_destination.stamp_cost = stamp_cost + else: + return False + + return True + + return False + + def get_outbound_stamp_cost(self, destination_hash): + if destination_hash in self.outbound_stamp_costs: + stamp_cost = self.outbound_stamp_costs[destination_hash][1] + return stamp_cost + else: + return None + + def set_active_propagation_node(self, destination_hash): + self.set_outbound_propagation_node(destination_hash) + # self.set_inbound_propagation_node(destination_hash) + def set_outbound_propagation_node(self, destination_hash): if len(destination_hash) != RNS.Identity.TRUNCATED_HASHLENGTH//8 or type(destination_hash) != bytes: raise ValueError("Invalid destination hash for outbound propagation node") else: - self.outbound_propagation_node = destination_hash - if self.outbound_propagation_link != None: - if self.outbound_propagation_link.destination.hash != destination_hash: - self.outbound_propagation_link.teardown() - self.outbound_propagation_link = None + if self.outbound_propagation_node != destination_hash: + self.outbound_propagation_node = destination_hash + if self.outbound_propagation_link != None: + if self.outbound_propagation_link.destination.hash != destination_hash: + self.outbound_propagation_link.teardown() + self.outbound_propagation_link = None def get_outbound_propagation_node(self): return self.outbound_propagation_node + def set_inbound_propagation_node(self, destination_hash): + # TODO: Implement + raise NotImplementedError("Inbound/outbound propagation node differentiation is currently not implemented") + + def get_inbound_propagation_node(self): + return self.get_outbound_propagation_node() + + def set_retain_node_lxms(self, retain): + if retain == True: + self.retain_synced_on_node = True + else: + self.retain_synced_on_node = False + def set_authentication(self, required=None): if required != None: self.auth_required = required @@ -229,10 +426,11 @@ class LXMRouter: if self.outbound_propagation_node != None: if self.outbound_propagation_link != None and self.outbound_propagation_link.status == RNS.Link.ACTIVE: self.propagation_transfer_state = LXMRouter.PR_LINK_ESTABLISHED + RNS.log("Requesting message list from propagation node", RNS.LOG_DEBUG) self.outbound_propagation_link.identify(identity) self.outbound_propagation_link.request( LXMPeer.MESSAGE_GET_PATH, - [None, None], + [None, None], # Set both want and have fields to None to get message list response_callback=self.message_list_response, failed_callback=self.message_get_failed ) @@ -258,7 +456,6 @@ class LXMRouter: 
self.propagation_transfer_state = LXMRouter.PR_PATH_REQUESTED self.request_messages_path_job() else: - # TODO: Remove at some point RNS.log("Waiting for propagation node link to become active", RNS.LOG_EXTREME) else: RNS.log("Cannot request LXMF propagation node sync, no default propagation node configured", RNS.LOG_WARNING) @@ -281,6 +478,8 @@ class LXMRouter: os.makedirs(self.messagepath) self.propagation_entries = {} + + st = time.time(); RNS.log("Indexing messagestore...", RNS.LOG_NOTICE) for filename in os.listdir(self.messagepath): components = filename.split("_") if len(components) == 2: @@ -297,38 +496,94 @@ class LXMRouter: file.close() self.propagation_entries[transient_id] = [ - destination_hash, - filepath, - received, - msg_size, + destination_hash, # 0: Destination hash + filepath, # 1: Storage location + received, # 2: Receive timestamp + msg_size, # 3: Message size + [], # 4: Handled peers + [], # 5: Unhandled peers ] except Exception as e: RNS.log("Could not read LXM from message store. The contained exception was: "+str(e), RNS.LOG_ERROR) + et = time.time(); mps = 0 if et-st == 0 else math.floor(len(self.propagation_entries)/(et-st)) + RNS.log(f"Indexed {len(self.propagation_entries)} messages in {RNS.prettytime(et-st)}, {mps} msgs/s", RNS.LOG_NOTICE) + RNS.log("Rebuilding peer synchronisation states...", RNS.LOG_NOTICE) + st = time.time(); + if os.path.isfile(self.storagepath+"/peers"): peers_file = open(self.storagepath+"/peers", "rb") peers_data = peers_file.read() + peers_file.close() if len(peers_data) > 0: serialised_peers = msgpack.unpackb(peers_data) + del peers_data - for serialised_peer in serialised_peers: + while len(serialised_peers) > 0: + serialised_peer = serialised_peers.pop() peer = LXMPeer.from_bytes(serialised_peer, self) + del serialised_peer + if peer.destination_hash in self.static_peers and peer.last_heard == 0: + # TODO: Allow path request responses through announce handler + # momentarily here, so peering config can be updated even if + # the static peer is not available to directly send an announce. + RNS.Transport.request_path(peer.destination_hash) if peer.identity != None: self.peers[peer.destination_hash] = peer - RNS.log("Loaded peer "+RNS.prettyhexrep(peer.destination_hash)+" with "+str(len(peer.unhandled_messages))+" unhandled messages", RNS.LOG_DEBUG) + lim_str = ", no transfer limit" + if peer.propagation_transfer_limit != None: + lim_str = ", "+RNS.prettysize(peer.propagation_transfer_limit*1000)+" transfer limit" + RNS.log("Rebuilt peer "+RNS.prettyhexrep(peer.destination_hash)+" with "+str(peer.unhandled_message_count)+" unhandled messages"+lim_str, RNS.LOG_DEBUG) else: RNS.log("Peer "+RNS.prettyhexrep(peer.destination_hash)+" could not be loaded, because its identity could not be recalled. Dropping peer.", RNS.LOG_DEBUG) + del peer + del serialised_peers + + if len(self.static_peers) > 0: + for static_peer in self.static_peers: + if not static_peer in self.peers: + RNS.log(f"Activating static peering with {RNS.prettyhexrep(static_peer)}", RNS.LOG_NOTICE) + self.peers[static_peer] = LXMPeer(self, static_peer) + if self.peers[static_peer].last_heard == 0: + # TODO: Allow path request responses through announce handler + # momentarily here, so peering config can be updated even if + # the static peer is not available to directly send an announce. 
+ RNS.Transport.request_path(static_peer) + + RNS.log(f"Rebuilt synchronisation state for {len(self.peers)} peers in {RNS.prettytime(time.time()-st)}", RNS.LOG_NOTICE) + + try: + if os.path.isfile(self.storagepath+"/node_stats"): + node_stats_file = open(self.storagepath+"/node_stats", "rb") + data = node_stats_file.read() + node_stats_file.close() + node_stats = msgpack.unpackb(data) + + if not type(node_stats) == dict: + RNS.log("Invalid data format for loaded local node stats, node stats will be reset", RNS.LOG_ERROR) + else: + self.client_propagation_messages_received = node_stats["client_propagation_messages_received"] + self.client_propagation_messages_served = node_stats["client_propagation_messages_served"] + self.unpeered_propagation_incoming = node_stats["unpeered_propagation_incoming"] + self.unpeered_propagation_rx_bytes = node_stats["unpeered_propagation_rx_bytes"] + + except Exception as e: + RNS.log("Could not load local node stats. The contained exception was: "+str(e), RNS.LOG_ERROR) self.propagation_node = True + self.propagation_node_start_time = time.time() self.propagation_destination.set_link_established_callback(self.propagation_link_established) self.propagation_destination.set_packet_callback(self.propagation_packet) self.propagation_destination.register_request_handler(LXMPeer.OFFER_REQUEST_PATH, self.offer_request, allow = RNS.Destination.ALLOW_ALL) self.propagation_destination.register_request_handler(LXMPeer.MESSAGE_GET_PATH, self.message_get_request, allow = RNS.Destination.ALLOW_ALL) + self.control_destination = RNS.Destination(self.identity, RNS.Destination.IN, RNS.Destination.SINGLE, APP_NAME, "propagation", "control") + self.control_destination.register_request_handler(LXMRouter.STATS_GET_PATH, self.stats_get_request, allow = RNS.Destination.ALLOW_LIST, allowed_list=[self.identity.hash]) + if self.message_storage_limit != None: limit_str = ", limit is "+RNS.prettysize(self.message_storage_limit) else: @@ -347,6 +602,12 @@ class LXMRouter: self.propagation_node = False self.announce_propagation_node() + def enforce_stamps(self): + self._enforce_stamps = True + + def ignore_stamps(self): + self._enforce_stamps = False + def ignore_destination(self, destination_hash): if not destination_hash in self.ignored_list: self.ignored_list.append(destination_hash) @@ -418,45 +679,150 @@ class LXMRouter: def information_storage_size(self): pass + def delivery_link_available(self, destination_hash): + if destination_hash in self.direct_links or destination_hash in self.backchannel_links: + return True + else: + return False + + + ### Propagation Node Control ########################## + ####################################################### + + def compile_stats(self): + if not self.propagation_node: + return None + else: + peer_stats = {} + for peer_id in self.peers.copy(): + peer = self.peers[peer_id] + peer_stats[peer_id] = { + "type": "static" if peer_id in self.static_peers else "discovered", + "state": peer.state, + "alive": peer.alive, + "last_heard": int(peer.last_heard), + "next_sync_attempt": peer.next_sync_attempt, + "last_sync_attempt": peer.last_sync_attempt, + "sync_backoff": peer.sync_backoff, + "peering_timebase": peer.peering_timebase, + "ler": int(peer.link_establishment_rate), + "str": int(peer.sync_transfer_rate), + "transfer_limit": peer.propagation_transfer_limit, + "network_distance": RNS.Transport.hops_to(peer_id), + "rx_bytes": peer.rx_bytes, + "tx_bytes": peer.tx_bytes, + "messages": { + "offered": peer.offered, + "outgoing": peer.outgoing, + 
"incoming": peer.incoming, + "unhandled": peer.unhandled_message_count + }, + } + + node_stats = { + "identity_hash": self.identity.hash, + "destination_hash": self.propagation_destination.hash, + "uptime": time.time()-self.propagation_node_start_time, + "delivery_limit": self.delivery_per_transfer_limit, + "propagation_limit": self.propagation_per_transfer_limit, + "autopeer_maxdepth": self.autopeer_maxdepth, + "from_static_only": self.from_static_only, + "messagestore": { + "count": len(self.propagation_entries), + "bytes": self.message_storage_size(), + "limit": self.message_storage_limit, + }, + "clients" : { + "client_propagation_messages_received": self.client_propagation_messages_received, + "client_propagation_messages_served": self.client_propagation_messages_served, + }, + "unpeered_propagation_incoming": self.unpeered_propagation_incoming, + "unpeered_propagation_rx_bytes": self.unpeered_propagation_rx_bytes, + "static_peers": len(self.static_peers), + "discovered_peers": len(self.peers)-len(self.static_peers), + "total_peers": len(self.peers), + "max_peers": self.max_peers, + "peers": peer_stats, + } + + return node_stats + + def stats_get_request(self, path, data, request_id, remote_identity, requested_at): + if remote_identity == None: + return LXMPeer.ERROR_NO_IDENTITY + elif remote_identity.hash != self.identity.hash: + return LXMPeer.ERROR_NO_ACCESS + else: + return self.compile_stats() + ### Utility & Maintenance ############################# ####################################################### JOB_OUTBOUND_INTERVAL = 1 + JOB_STAMPS_INTERVAL = 1 JOB_LINKS_INTERVAL = 1 JOB_TRANSIENT_INTERVAL = 60 JOB_STORE_INTERVAL = 120 JOB_PEERSYNC_INTERVAL = 12 + JOB_PEERINGEST_INTERVAL= JOB_PEERSYNC_INTERVAL + JOB_ROTATE_INTERVAL = 56*JOB_PEERINGEST_INTERVAL def jobs(self): - self.processing_count += 1 + if not self.exit_handler_running: + self.processing_count += 1 - if self.processing_count % LXMRouter.JOB_OUTBOUND_INTERVAL == 0: - self.process_outbound() + if self.processing_count % LXMRouter.JOB_OUTBOUND_INTERVAL == 0: + self.process_outbound() - if self.processing_count % LXMRouter.JOB_LINKS_INTERVAL == 0: - self.clean_links() + if self.processing_count % LXMRouter.JOB_STAMPS_INTERVAL == 0: + threading.Thread(target=self.process_deferred_stamps, daemon=True).start() - if self.processing_count % LXMRouter.JOB_TRANSIENT_INTERVAL == 0: - self.clean_transient_id_caches() + if self.processing_count % LXMRouter.JOB_LINKS_INTERVAL == 0: + self.clean_links() - if self.processing_count % LXMRouter.JOB_STORE_INTERVAL == 0: - self.clean_message_store() + if self.processing_count % LXMRouter.JOB_TRANSIENT_INTERVAL == 0: + self.clean_transient_id_caches() - if self.processing_count % LXMRouter.JOB_PEERSYNC_INTERVAL == 0: - self.sync_peers() + if self.processing_count % LXMRouter.JOB_STORE_INTERVAL == 0: + if self.propagation_node == True: + self.clean_message_store() + + if self.processing_count % LXMRouter.JOB_PEERINGEST_INTERVAL == 0: + if self.propagation_node == True: + self.flush_queues() + + if self.processing_count % LXMRouter.JOB_ROTATE_INTERVAL == 0: + if self.propagation_node == True: + self.rotate_peers() + + if self.processing_count % LXMRouter.JOB_PEERSYNC_INTERVAL == 0: + if self.propagation_node == True: + self.sync_peers() def jobloop(self): while (True): # TODO: Improve this to scheduling, so manual # triggers can delay next run - try: self.jobs() except Exception as e: RNS.log("An error ocurred while running LXMF Router jobs.", RNS.LOG_ERROR) RNS.log("The contained 
exception was: "+str(e), RNS.LOG_ERROR) + RNS.trace_exception(e) time.sleep(LXMRouter.PROCESSING_INTERVAL) + def flush_queues(self): + if len(self.peers) > 0: + self.flush_peer_distribution_queue() + RNS.log("Calculating peer distribution queue mappings...", RNS.LOG_DEBUG); st = time.time() + for peer_id in self.peers.copy(): + if peer_id in self.peers: + peer = self.peers[peer_id] + if peer.queued_items(): + peer.process_queues() + + RNS.log(f"Distribution queue mapping completed in {RNS.prettytime(time.time()-st)}", RNS.LOG_DEBUG) + def clean_links(self): closed_links = [] for link_hash in self.direct_links: @@ -471,6 +837,19 @@ class LXMRouter: cleaned_link = self.direct_links.pop(link_hash) RNS.log("Cleaned link "+str(cleaned_link), RNS.LOG_DEBUG) + try: + inactive_links = [] + for link in self.active_propagation_links: + if link.no_data_for() > LXMRouter.P_LINK_MAX_INACTIVITY: + inactive_links.append(link) + + for link in inactive_links: + self.active_propagation_links.remove(link) + link.teardown() + + except Exception as e: + RNS.log("An error occurred while cleaning inbound propagation links. The contained exception was: "+str(e), RNS.LOG_ERROR) + if self.outbound_propagation_link != None and self.outbound_propagation_link.status == RNS.Link.CLOSED: self.outbound_propagation_link = None if self.propagation_transfer_state == LXMRouter.PR_COMPLETE: @@ -490,7 +869,7 @@ class LXMRouter: removed_entries = [] for transient_id in self.locally_delivered_transient_ids: timestamp = self.locally_delivered_transient_ids[transient_id] - if now > timestamp+LXMRouter.MESSAGE_EXPIRY*1.25: + if now > timestamp+LXMRouter.MESSAGE_EXPIRY*6.0: removed_entries.append(transient_id) for transient_id in removed_entries: @@ -500,18 +879,133 @@ class LXMRouter: removed_entries = [] for transient_id in self.locally_processed_transient_ids: timestamp = self.locally_processed_transient_ids[transient_id] - if now > timestamp+LXMRouter.MESSAGE_EXPIRY*1.25: + if now > timestamp+LXMRouter.MESSAGE_EXPIRY*6.0: removed_entries.append(transient_id) for transient_id in removed_entries: self.locally_processed_transient_ids.pop(transient_id) RNS.log("Cleaned "+RNS.prettyhexrep(transient_id)+" from locally processed cache", RNS.LOG_DEBUG) + def update_stamp_cost(self, destination_hash, stamp_cost): + RNS.log(f"Updating outbound stamp cost for {RNS.prettyhexrep(destination_hash)} to {stamp_cost}", RNS.LOG_DEBUG) + self.outbound_stamp_costs[destination_hash] = [time.time(), stamp_cost] + + def job(): + self.save_outbound_stamp_costs() + threading.Thread(target=self.save_outbound_stamp_costs, daemon=True).start() + + def get_wanted_inbound_peers(self): + # TODO: Implement/rethink. + # Probably not necessary anymore. 
+ return None + + def get_announce_app_data(self, destination_hash): + if destination_hash in self.delivery_destinations: + delivery_destination = self.delivery_destinations[destination_hash] + + display_name = None + if delivery_destination.display_name != None: + display_name = delivery_destination.display_name.encode("utf-8") + + stamp_cost = None + if delivery_destination.stamp_cost != None and type(delivery_destination.stamp_cost) == int: + if delivery_destination.stamp_cost > 0 and delivery_destination.stamp_cost < 255: + stamp_cost = delivery_destination.stamp_cost + + peer_data = [display_name, stamp_cost] + + return msgpack.packb(peer_data) + + def get_weight(self, transient_id): + dst_hash = self.propagation_entries[transient_id][0] + lxm_rcvd = self.propagation_entries[transient_id][2] + lxm_size = self.propagation_entries[transient_id][3] + + now = time.time() + age_weight = max(1, (now - lxm_rcvd)/60/60/24/4) + + if dst_hash in self.prioritised_list: + priority_weight = 0.1 + else: + priority_weight = 1.0 + + weight = priority_weight * age_weight * lxm_size + + return weight + + def generate_ticket(self, destination_hash, expiry=LXMessage.TICKET_EXPIRY): + now = time.time() + ticket = None + if destination_hash in self.available_tickets["last_deliveries"]: + last_delivery = self.available_tickets["last_deliveries"][destination_hash] + elapsed = now - last_delivery + if elapsed < LXMessage.TICKET_INTERVAL: + RNS.log(f"A ticket for {RNS.prettyhexrep(destination_hash)} was already delivered {RNS.prettytime(elapsed)} ago, not including another ticket yet", RNS.LOG_DEBUG) + return None + + if destination_hash in self.available_tickets["inbound"]: + for ticket in self.available_tickets["inbound"][destination_hash]: + ticket_entry = self.available_tickets["inbound"][destination_hash][ticket] + expires = ticket_entry[0]; validity_left = expires - now + if validity_left > LXMessage.TICKET_RENEW: + RNS.log(f"Found generated ticket for {RNS.prettyhexrep(destination_hash)} with {RNS.prettytime(validity_left)} of validity left, re-using this one", RNS.LOG_DEBUG) + return [expires, ticket] + + else: + self.available_tickets["inbound"][destination_hash] = {} + + RNS.log(f"No generated tickets for {RNS.prettyhexrep(destination_hash)} with enough validity found, generating a new one", RNS.LOG_DEBUG) + expires = now+expiry + ticket = os.urandom(LXMessage.TICKET_LENGTH) + self.available_tickets["inbound"][destination_hash][ticket] = [expires] + self.save_available_tickets() + + return [expires, ticket] + + def remember_ticket(self, destination_hash, ticket_entry): + expires = ticket_entry[0]-time.time() + RNS.log(f"Remembering ticket for {RNS.prettyhexrep(destination_hash)}, expires in {RNS.prettytime(expires)}", RNS.LOG_DEBUG) + self.available_tickets["outbound"][destination_hash] = [ticket_entry[0], ticket_entry[1]] + + def get_outbound_ticket(self, destination_hash): + if destination_hash in self.available_tickets["outbound"]: + entry = self.available_tickets["outbound"][destination_hash] + if entry[0] > time.time(): + return entry[1] + + return None + + def get_outbound_ticket_expiry(self, destination_hash): + if destination_hash in self.available_tickets["outbound"]: + entry = self.available_tickets["outbound"][destination_hash] + if entry[0] > time.time(): + return entry[0] + + return None + + def get_inbound_tickets(self, destination_hash): + now = time.time() + available_tickets = [] + if destination_hash in self.available_tickets["inbound"]: + for inbound_ticket in 
self.available_tickets["inbound"][destination_hash]: + if now < self.available_tickets["inbound"][destination_hash][inbound_ticket][0]: + available_tickets.append(inbound_ticket) + + if len(available_tickets) == 0: + return None + else: + return available_tickets + + def get_size(self, transient_id): + lxm_size = self.propagation_entries[transient_id][3] + return lxm_size + def clean_message_store(self): + RNS.log("Cleaning message store", RNS.LOG_VERBOSE) # Check and remove expired messages now = time.time() removed_entries = {} - for transient_id in self.propagation_entries: + for transient_id in self.propagation_entries.copy(): entry = self.propagation_entries[transient_id] filepath = entry[1] components = filepath.split("_") @@ -519,7 +1013,7 @@ class LXMRouter: if len(components) == 2 and float(components[1]) > 0 and len(os.path.split(components[0])[1]) == (RNS.Identity.HASHLENGTH//8)*2: timestamp = float(components[1]) if now > timestamp+LXMRouter.MESSAGE_EXPIRY: - RNS.log("Purging message "+RNS.prettyhexrep(transient_id)+" due to expiry", RNS.LOG_DEBUG) + RNS.log("Purging message "+RNS.prettyhexrep(transient_id)+" due to expiry", RNS.LOG_EXTREME) removed_entries[transient_id] = filepath else: RNS.log("Purging message "+RNS.prettyhexrep(transient_id)+" due to invalid file path", RNS.LOG_WARNING) @@ -537,7 +1031,7 @@ class LXMRouter: RNS.log("Could not remove "+RNS.prettyhexrep(transient_id)+" from message store. The contained exception was: "+str(e), RNS.LOG_ERROR) if removed_count > 0: - RNS.log("Cleaned "+str(removed_count)+" entries from the message store", RNS.LOG_DEBUG) + RNS.log("Cleaned "+str(removed_count)+" entries from the message store", RNS.LOG_VERBOSE) # Check size of message store and cull if needed try: @@ -548,22 +1042,13 @@ class LXMRouter: bytes_needed = message_storage_size - self.message_storage_limit bytes_cleaned = 0 - now = time.time() weighted_entries = [] - for transient_id in self.propagation_entries: - entry = self.propagation_entries[transient_id] - - dst_hash = entry[0] - lxm_rcvd = entry[2] - lxm_size = entry[3] - age_weight = max(1, (now - lxm_rcvd)/60/60/24/4) - if dst_hash in self.prioritised_list: - priority_weight = 0.1 - else: - priority_weight = 1.0 - - weight = priority_weight * age_weight * lxm_size - weighted_entries.append([entry, weight, transient_id]) + for transient_id in self.propagation_entries.copy(): + weighted_entries.append([ + self.propagation_entries[transient_id], + self.get_weight(transient_id), + transient_id + ]) weighted_entries.sort(key=lambda we: we[1], reverse=True) @@ -589,8 +1074,7 @@ class LXMRouter: finally: i += 1 - RNS.log("LXMF message store size is now "+RNS.prettysize(self.message_storage_size()), RNS.LOG_EXTREME) - RNS.log("PE len "+str(len(self.propagation_entries))) + RNS.log("LXMF message store size is now "+RNS.prettysize(self.message_storage_size())+" for "+str(len(self.propagation_entries))+" items", RNS.LOG_EXTREME) except Exception as e: @@ -598,33 +1082,178 @@ class LXMRouter: def save_locally_delivered_transient_ids(self): try: - if not os.path.isdir(self.storagepath): + if len(self.locally_delivered_transient_ids) > 0: + if not os.path.isdir(self.storagepath): os.makedirs(self.storagepath) - locally_delivered_file = open(self.storagepath+"/local_deliveries", "wb") - locally_delivered_file.write(msgpack.packb(self.locally_delivered_transient_ids)) - locally_delivered_file.close() + with open(self.storagepath+"/local_deliveries", "wb") as locally_delivered_file: + 
locally_delivered_file.write(msgpack.packb(self.locally_delivered_transient_ids)) except Exception as e: RNS.log("Could not save locally delivered message ID cache to storage. The contained exception was: "+str(e), RNS.LOG_ERROR) def save_locally_processed_transient_ids(self): try: - if not os.path.isdir(self.storagepath): + if len(self.locally_processed_transient_ids) > 0: + if not os.path.isdir(self.storagepath): os.makedirs(self.storagepath) - locally_processed_file = open(self.storagepath+"/locally_processed", "wb") - locally_processed_file.write(msgpack.packb(self.locally_processed_transient_ids)) - locally_processed_file.close() + with open(self.storagepath+"/locally_processed", "wb") as locally_processed_file: + locally_processed_file.write(msgpack.packb(self.locally_processed_transient_ids)) except Exception as e: - RNS.log("Could not save locally processed message ID cache to storage. The contained exception was: "+str(e), RNS.LOG_ERROR) + RNS.log("Could not save locally processed transient ID cache to storage. The contained exception was: "+str(e), RNS.LOG_ERROR) + + def save_node_stats(self): + try: + if not os.path.isdir(self.storagepath): + os.makedirs(self.storagepath) + + with open(self.storagepath+"/node_stats", "wb") as stats_file: + node_stats = { + "client_propagation_messages_received": self.client_propagation_messages_received, + "client_propagation_messages_served": self.client_propagation_messages_served, + "unpeered_propagation_incoming": self.unpeered_propagation_incoming, + "unpeered_propagation_rx_bytes": self.unpeered_propagation_rx_bytes, + } + stats_file.write(msgpack.packb(node_stats)) + + except Exception as e: + RNS.log("Could not save local node stats to storage. The contained exception was: "+str(e), RNS.LOG_ERROR) + + + def clean_outbound_stamp_costs(self): + try: + expired = [] + for destination_hash in self.outbound_stamp_costs: + entry = self.outbound_stamp_costs[destination_hash] + if time.time() > entry[0] + LXMRouter.STAMP_COST_EXPIRY: + expired.append(destination_hash) + + for destination_hash in expired: + self.outbound_stamp_costs.pop(destination_hash) + + except Exception as e: + RNS.log(f"Error while cleaning outbound stamp costs. The contained exception was: {e}", RNS.LOG_ERROR) + RNS.trace_exception(e) + + def save_outbound_stamp_costs(self): + with self.cost_file_lock: + try: + if not os.path.isdir(self.storagepath): + os.makedirs(self.storagepath) + + outbound_stamp_costs_file = open(self.storagepath+"/outbound_stamp_costs", "wb") + outbound_stamp_costs_file.write(msgpack.packb(self.outbound_stamp_costs)) + outbound_stamp_costs_file.close() + + except Exception as e: + RNS.log("Could not save outbound stamp costs to storage. 
The contained exception was: "+str(e), RNS.LOG_ERROR) + + def clean_available_tickets(self): + try: + # Clean outbound tickets + expired_outbound = [] + for destination_hash in self.available_tickets["outbound"]: + entry = self.available_tickets["outbound"][destination_hash] + if time.time() > entry[0]: + expired_outbound.append(destination_hash) + + for destination_hash in expired_outbound: + self.available_tickets["outbound"].pop(destination_hash) + + # Clean inbound tickets + for destination_hash in self.available_tickets["inbound"]: + expired_inbound = [] + for inbound_ticket in self.available_tickets["inbound"][destination_hash]: + entry = self.available_tickets["inbound"][destination_hash][inbound_ticket] + ticket_expiry = entry[0] + if time.time() > ticket_expiry+LXMessage.TICKET_GRACE: + expired_inbound.append(inbound_ticket) + + for inbound_ticket in expired_inbound: + self.available_tickets["inbound"][destination_hash].pop(inbound_ticket) + + except Exception as e: + RNS.log(f"Error while cleaning available tickets. The contained exception was: {e}", RNS.LOG_ERROR) + RNS.trace_exception(e) + + def save_available_tickets(self): + with self.ticket_file_lock: + try: + if not os.path.isdir(self.storagepath): + os.makedirs(self.storagepath) + + available_tickets_file = open(self.storagepath+"/available_tickets", "wb") + available_tickets_file.write(msgpack.packb(self.available_tickets)) + available_tickets_file.close() + + except Exception as e: + RNS.log("Could not save available tickets to storage. The contained exception was: "+str(e), RNS.LOG_ERROR) + + def reload_available_tickets(self): + RNS.log("Reloading available tickets from storage", RNS.LOG_DEBUG) + try: + with self.ticket_file_lock: + with open(self.storagepath+"/available_tickets", "rb") as available_tickets_file: + data = available_tickets_file.read() + self.available_tickets = msgpack.unpackb(data) + if not type(self.available_tickets) == dict: + RNS.log("Invalid data format for loaded available tickets, recreating...", RNS.LOG_ERROR) + self.available_tickets = {"outbound": {}, "inbound": {}, "last_deliveries": {}} + if not "outbound" in self.available_tickets: + RNS.log("Missing outbound entry in loaded available tickets, recreating...", RNS.LOG_ERROR) + self.available_tickets["outbound"] = {} + if not "inbound" in self.available_tickets: + RNS.log("Missing inbound entry in loaded available tickets, recreating...", RNS.LOG_ERROR) + self.available_tickets["inbound"] = {} + if not "last_deliveries" in self.available_tickets: + RNS.log("Missing local_deliveries entry in loaded available tickets, recreating...", RNS.LOG_ERROR) + self.available_tickets["last_deliveries"] = {} + + except Exception as e: + RNS.log(f"An error occurred while reloading available tickets from storage: {e}", RNS.LOG_ERROR) def exit_handler(self): + if self.exit_handler_running: + return + + self.exit_handler_running = True + + RNS.log("Tearing down delivery destinations...", RNS.LOG_NOTICE) + for destination_hash in self.delivery_destinations: + delivery_destination = self.delivery_destinations[destination_hash] + delivery_destination.set_packet_callback(None) + delivery_destination.set_link_established_callback(None) + for link in delivery_destination.links: + try: + if link.status == RNS.Link.ACTIVE: + link.teardown() + except Exception as e: + RNS.log("Error while tearing down propagation link: {e}", RNS.LOG_ERROR) + + if self.propagation_node: + RNS.log("Tearing down propagation node destination...", RNS.LOG_NOTICE) + 
self.propagation_destination.set_link_established_callback(None) + self.propagation_destination.set_packet_callback(None) + self.propagation_destination.deregister_request_handler(LXMPeer.OFFER_REQUEST_PATH) + self.propagation_destination.deregister_request_handler(LXMPeer.MESSAGE_GET_PATH) + self.propagation_destination.deregister_request_handler(LXMRouter.STATS_GET_PATH) + for link in self.active_propagation_links: + try: + if link.status == RNS.Link.ACTIVE: + link.teardown() + except Exception as e: + RNS.log("Error while tearing down propagation link: {e}", RNS.LOG_ERROR) + + RNS.log("Persisting LXMF state data to storage...", RNS.LOG_NOTICE) + self.flush_queues() if self.propagation_node: try: + st = time.time(); RNS.log(f"Saving {len(self.peers)} peer synchronisation states to storage...", RNS.LOG_NOTICE) serialised_peers = [] - for peer_id in self.peers: + peer_dict = self.peers.copy() + for peer_id in peer_dict: peer = self.peers[peer_id] serialised_peers.append(peer.to_bytes()) @@ -632,13 +1261,28 @@ class LXMRouter: peers_file.write(msgpack.packb(serialised_peers)) peers_file.close() - RNS.log("Saved "+str(len(serialised_peers))+" peers to storage", RNS.LOG_DEBUG) + RNS.log(f"Saved {len(serialised_peers)} peers to storage in {RNS.prettyshorttime(time.time()-st)}", RNS.LOG_NOTICE) except Exception as e: RNS.log("Could not save propagation node peers to storage. The contained exception was: "+str(e), RNS.LOG_ERROR) self.save_locally_delivered_transient_ids() self.save_locally_processed_transient_ids() + self.save_node_stats() + + def sigint_handler(self, signal, frame): + if not self.exit_handler_running: + RNS.log("Received SIGINT, shutting down now!", RNS.LOG_WARNING) + sys.exit(0) + else: + RNS.log("Received SIGINT, but exit handler is running, keeping process alive until storage persist is complete", RNS.LOG_WARNING) + + def sigterm_handler(self, signal, frame): + if not self.exit_handler_running: + RNS.log("Received SIGTERM, shutting down now!", RNS.LOG_WARNING) + sys.exit(0) + else: + RNS.log("Received SIGTERM, but exit handler is running, keeping process alive until storage persist is complete", RNS.LOG_WARNING) def __str__(self): return "" @@ -653,7 +1297,8 @@ class LXMRouter: job_thread.start() def __request_messages_path_job(self): - while not RNS.Transport.has_path(self.wants_download_on_path_available_from) and time.time() < self.wants_download_on_path_available_timeout: + path_timeout = self.wants_download_on_path_available_timeout + while not RNS.Transport.has_path(self.wants_download_on_path_available_from) and time.time() < path_timeout: time.sleep(0.1) if RNS.Transport.has_path(self.wants_download_on_path_available_from): @@ -720,6 +1365,16 @@ class LXMRouter: # Process wanted messages response_messages = [] if data[0] != None and len(data[0]) > 0: + client_transfer_limit = None + if len(data) >= 3: + try: + client_transfer_limit = float(data[2])*1000 + RNS.log("Client indicates transfer limit of "+RNS.prettysize(client_transfer_limit), RNS.LOG_DEBUG) + except: + pass + + per_message_overhead = 16 # Really only 2 bytes, but set a bit higher for now + cumulative_size = 24 # Initialised to highest reasonable binary structure overhead for transient_id in data[0]: if transient_id in self.propagation_entries and self.propagation_entries[transient_id][0] == remote_destination.hash: try: @@ -728,12 +1383,21 @@ class LXMRouter: message_file = open(filepath, "rb") lxmf_data = message_file.read() - response_messages.append(lxmf_data) message_file.close() + lxm_size = 
len(lxmf_data) + next_size = cumulative_size + (lxm_size+per_message_overhead) + + if client_transfer_limit != None and next_size > client_transfer_limit: + pass + else: + response_messages.append(lxmf_data) + cumulative_size += (lxm_size+per_message_overhead) + except Exception as e: RNS.log("Error while processing message download request from "+RNS.prettyhexrep(remote_destination.hash)+". The contained exception was: "+str(e), RNS.LOG_ERROR) + self.client_propagation_messages_served += len(response_messages) return response_messages @@ -761,23 +1425,31 @@ class LXMRouter: if len(request_receipt.response) > 0: for transient_id in request_receipt.response: if self.has_message(transient_id): - haves.append(transient_id) + if not self.retain_synced_on_node: + haves.append(transient_id) else: if self.propagation_transfer_max_messages == LXMRouter.PR_ALL_MESSAGES or len(wants) < self.propagation_transfer_max_messages: wants.append(transient_id) + ms = "" if len(wants) == 1 else "s" + RNS.log(f"Requesting {len(wants)} message{ms} from propagation node", RNS.LOG_DEBUG) request_receipt.link.request( LXMPeer.MESSAGE_GET_PATH, - [wants, haves], + [wants, haves, self.delivery_per_transfer_limit], response_callback=self.message_get_response, failed_callback=self.message_get_failed, - progress_callback=self.message_get_progress - ) + progress_callback=self.message_get_progress) + else: self.propagation_transfer_state = LXMRouter.PR_COMPLETE self.propagation_transfer_progress = 1.0 self.propagation_transfer_last_result = 0 + else: + RNS.log("Invalid message list data received from propagation node", RNS.LOG_DEBUG) + if self.outbound_propagation_link != None: + self.outbound_propagation_link.teardown() + def message_get_response(self, request_receipt): if request_receipt.response == LXMPeer.ERROR_NO_IDENTITY: RNS.log("Propagation node indicated missing identification on get request, tearing down link.", RNS.LOG_DEBUG) @@ -840,33 +1512,162 @@ class LXMRouter: return True else: return False + + def cancel_outbound(self, message_id): + try: + if message_id in self.pending_deferred_stamps: + lxm = self.pending_deferred_stamps[message_id] + RNS.log(f"Cancelling deferred stamp generation for {lxm}", RNS.LOG_DEBUG) + lxm.state = LXMessage.CANCELLED + LXStamper.cancel_work(message_id) + + lxmessage = None + for lxm in self.pending_outbound: + if lxm.message_id == message_id: + lxmessage = lxm + + if lxmessage != None: + lxmessage.state = LXMessage.CANCELLED + if lxmessage in self.pending_outbound: + RNS.log(f"Cancelling {lxmessage} in outbound queue", RNS.LOG_DEBUG) + if lxmessage.representation == LXMessage.RESOURCE: + if lxmessage.resource_representation != None: + lxmessage.resource_representation.cancel() + + self.process_outbound() + + except Exception as e: + RNS.log(f"An error occurred while cancelling {lxmessage}: {e}", RNS.LOG_ERROR) + RNS.trace_exception(e) + + def handle_outbound(self, lxmessage): + destination_hash = lxmessage.get_destination().hash + + if lxmessage.stamp_cost == None: + if destination_hash in self.outbound_stamp_costs: + stamp_cost = self.outbound_stamp_costs[destination_hash][1] + lxmessage.stamp_cost = stamp_cost + RNS.log(f"No stamp cost set on LXM to {RNS.prettyhexrep(destination_hash)}, autoconfigured to {stamp_cost}, as required by latest announce", RNS.LOG_DEBUG) + + lxmessage.state = LXMessage.OUTBOUND + + # If an outbound ticket is available for this + # destination, attach it to the message. 
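+                # A still-valid ticket previously received from this destination takes
+                # the place of a stamp, so any deferred stamp generation can be skipped
+                # whenever one can be attached below.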
+ lxmessage.outbound_ticket = self.get_outbound_ticket(destination_hash) + if lxmessage.outbound_ticket != None and lxmessage.defer_stamp: + RNS.log(f"Deferred stamp generation was requested for {lxmessage}, but outbound ticket was applied, processing immediately", RNS.LOG_DEBUG) + lxmessage.defer_stamp = False + + # If requested, include a ticket to allow the + # destination to reply without generating a stamp. + if lxmessage.include_ticket: + ticket = self.generate_ticket(lxmessage.destination_hash) + if ticket: + lxmessage.fields[FIELD_TICKET] = ticket + + if not lxmessage.packed: + lxmessage.pack() + + unknown_path_requested = False + if not RNS.Transport.has_path(destination_hash) and lxmessage.method == LXMessage.OPPORTUNISTIC: + RNS.log(f"Pre-emptively requesting unknown path for opportunistic {lxmessage}", RNS.LOG_DEBUG) + RNS.Transport.request_path(destination_hash) + lxmessage.next_delivery_attempt = time.time() + LXMRouter.PATH_REQUEST_WAIT + unknown_path_requested = True + + lxmessage.determine_transport_encryption() + + if lxmessage.defer_stamp and lxmessage.stamp_cost == None: + RNS.log(f"Deferred stamp generation was requested for {lxmessage}, but no stamp is required, processing immediately", RNS.LOG_DEBUG) + lxmessage.defer_stamp = False + + if not lxmessage.defer_stamp: + while not unknown_path_requested and self.processing_outbound: + time.sleep(0.05) + + self.pending_outbound.append(lxmessage) + if not unknown_path_requested: + self.process_outbound() + + else: + self.pending_deferred_stamps[lxmessage.message_id] = lxmessage + + def get_outbound_progress(self, lxm_hash): + for lxm in self.pending_outbound: + if lxm.hash == lxm_hash: + return lxm.progress + + for lxm_id in self.pending_deferred_stamps: + if self.pending_deferred_stamps[lxm_id].hash == lxm_hash: + return self.pending_deferred_stamps[lxm_id].progress + + return None + + def get_outbound_lxm_stamp_cost(self, lxm_hash): + for lxm in self.pending_outbound: + if lxm.hash == lxm_hash: + return lxm.stamp_cost + + for lxm_id in self.pending_deferred_stamps: + if self.pending_deferred_stamps[lxm_id].hash == lxm_hash: + return self.pending_deferred_stamps[lxm_id].stamp_cost + + return None ### Message Routing & Delivery ######################## ####################################################### - - def handle_outbound(self, lxmessage): - lxmessage.state = LXMessage.OUTBOUND - if not lxmessage.packed: - lxmessage.pack() - lxmessage.determine_transport_encryption() - - while self.processing_outbound: - time.sleep(0.1) - - self.pending_outbound.append(lxmessage) - self.process_outbound() - - def lxmf_delivery(self, lxmf_data, destination_type = None, phy_stats = None): + def lxmf_delivery(self, lxmf_data, destination_type = None, phy_stats = None, ratchet_id = None, method = None, no_stamp_enforcement=False, allow_duplicate=False): try: message = LXMessage.unpack_from_bytes(lxmf_data) + if ratchet_id and not message.ratchet_id: + message.ratchet_id = ratchet_id + + if method: + message.method = method + + if message.signature_validated and FIELD_TICKET in message.fields: + ticket_entry = message.fields[FIELD_TICKET] + if type(ticket_entry) == list and len(ticket_entry) > 1: + expires = ticket_entry[0] + ticket = ticket_entry[1] + + if time.time() < expires: + if type(ticket) == bytes and len(ticket) == LXMessage.TICKET_LENGTH: + self.remember_ticket(message.source_hash, ticket_entry) + def save_job(): + self.save_available_tickets() + threading.Thread(target=save_job, daemon=True).start() + + required_stamp_cost 
= self.delivery_destinations[message.destination_hash].stamp_cost + if required_stamp_cost != None: + destination_tickets = self.get_inbound_tickets(message.source_hash) + if message.validate_stamp(required_stamp_cost, tickets=destination_tickets): + message.stamp_valid = True + message.stamp_checked = True + else: + message.stamp_valid = False + message.stamp_checked = True + + if not message.stamp_valid: + if no_stamp_enforcement: + RNS.log(f"Received {message} with invalid stamp, but allowing anyway, since stamp enforcement was temporarily disabled", RNS.LOG_NOTICE) + else: + if self._enforce_stamps: + RNS.log(f"Dropping {message} with invalid stamp", RNS.LOG_NOTICE) + return False + else: + RNS.log(f"Received {message} with invalid stamp, but allowing anyway, since stamp enforcement is disabled", RNS.LOG_NOTICE) + else: + RNS.log(f"Received {message} with valid stamp", RNS.LOG_DEBUG) if phy_stats != None: if "rssi" in phy_stats: message.rssi = phy_stats["rssi"] if "snr" in phy_stats: message.snr = phy_stats["snr"] if "q" in phy_stats: message.q = phy_stats["q"] + # TODO: Update these descriptions to account for ratchets if destination_type == RNS.Destination.SINGLE: message.transport_encrypted = True message.transport_encryption = LXMessage.ENCRYPTION_DESCRIPTION_EC @@ -884,11 +1685,18 @@ class LXMRouter: RNS.log(str(self)+" ignored message from "+RNS.prettyhexrep(message.source_hash), RNS.LOG_DEBUG) return False + if not allow_duplicate and self.has_message(message.hash): + RNS.log(str(self)+" ignored already received message from "+RNS.prettyhexrep(message.source_hash), RNS.LOG_DEBUG) + return False + else: + self.locally_delivered_transient_ids[message.hash] = time.time() + if self.__delivery_callback != None and callable(self.__delivery_callback): try: self.__delivery_callback(message) except Exception as e: RNS.log("An error occurred in the external delivery callback for "+str(message), RNS.LOG_ERROR) + RNS.trace_exception(e) return True @@ -898,12 +1706,16 @@ class LXMRouter: return False def delivery_packet(self, data, packet): + packet.prove() try: + method = None if packet.destination_type != RNS.Destination.LINK: + method = LXMessage.OPPORTUNISTIC lxmf_data = b"" lxmf_data += packet.destination.hash lxmf_data += data else: + method = LXMessage.DIRECT lxmf_data = data try: @@ -915,8 +1727,8 @@ class LXMRouter: RNS.log("Error while retrieving physical link stats for LXMF delivery packet: "+str(e), RNS.LOG_ERROR) phy_stats = {"rssi": packet.rssi, "snr": packet.snr, "q": packet.q} - if self.lxmf_delivery(lxmf_data, packet.destination_type, phy_stats=phy_stats): - packet.prove() + + self.lxmf_delivery(lxmf_data, packet.destination_type, phy_stats=phy_stats, ratchet_id=packet.ratchet_id, method=method) except Exception as e: RNS.log("Exception occurred while parsing incoming LXMF data.", RNS.LOG_ERROR) @@ -925,9 +1737,11 @@ class LXMRouter: def delivery_link_established(self, link): link.track_phy_stats(True) link.set_packet_callback(self.delivery_packet) - link.set_resource_strategy(RNS.Link.ACCEPT_ALL) + link.set_resource_strategy(RNS.Link.ACCEPT_APP) + link.set_resource_callback(self.delivery_resource_advertised) link.set_resource_started_callback(self.resource_transfer_began) link.set_resource_concluded_callback(self.delivery_resource_concluded) + link.set_remote_identified_callback(self.delivery_remote_identified) def delivery_link_closed(self, link): pass @@ -935,33 +1749,56 @@ class LXMRouter: def resource_transfer_began(self, resource): RNS.log("Transfer began for LXMF 
delivery resource "+str(resource), RNS.LOG_DEBUG) + def delivery_resource_advertised(self, resource): + size = resource.get_data_size() + limit = self.delivery_per_transfer_limit*1000 + if limit != None and size > limit: + RNS.log("Rejecting "+RNS.prettysize(size)+" incoming LXMF delivery resource, since it exceeds the limit of "+RNS.prettysize(limit), RNS.LOG_DEBUG) + return False + else: + return True + def delivery_resource_concluded(self, resource): RNS.log("Transfer concluded for LXMF delivery resource "+str(resource), RNS.LOG_DEBUG) if resource.status == RNS.Resource.COMPLETE: + ratchet_id = None + # Set ratchet ID to link ID if available + if resource.link and hasattr(resource.link, "link_id"): + ratchet_id = resource.link.link_id phy_stats = {"rssi": resource.link.rssi, "snr": resource.link.snr, "q": resource.link.q} - self.lxmf_delivery(resource.data.read(), resource.link.type, phy_stats=phy_stats) + self.lxmf_delivery(resource.data.read(), resource.link.type, phy_stats=phy_stats, ratchet_id=ratchet_id, method=LXMessage.DIRECT) + + def delivery_remote_identified(self, link, identity): + destination_hash = RNS.Destination.hash_from_name_and_identity("lxmf.delivery", identity) + self.backchannel_links[destination_hash] = link + RNS.log(f"Backchannel became available for {RNS.prettyhexrep(destination_hash)} on delivery link {link}", RNS.LOG_DEBUG) ### Peer Sync & Propagation ########################### ####################################################### - def peer(self, destination_hash, timestamp): + def peer(self, destination_hash, timestamp, propagation_transfer_limit, wanted_inbound_peers = None): if destination_hash in self.peers: peer = self.peers[destination_hash] if timestamp > peer.peering_timebase: peer.alive = True peer.sync_backoff = 0 peer.next_sync_attempt = 0 - - peer.peering_timebase = timestamp - peer.last_heard = time.time() + peer.peering_timebase = timestamp + peer.last_heard = time.time() + peer.propagation_transfer_limit = propagation_transfer_limit + RNS.log(f"Peering config updated for {RNS.prettyhexrep(destination_hash)}", RNS.LOG_VERBOSE) else: - peer = LXMPeer(self, destination_hash) - peer.alive = True - peer.last_heard = time.time() - self.peers[destination_hash] = peer - RNS.log("Peered with "+str(peer.destination)) + if len(self.peers) < self.max_peers: + peer = LXMPeer(self, destination_hash) + peer.alive = True + peer.last_heard = time.time() + peer.propagation_transfer_limit = propagation_transfer_limit + self.peers[destination_hash] = peer + RNS.log(f"Peered with {RNS.prettyhexrep(destination_hash)}", RNS.LOG_NOTICE) + else: + RNS.log(f"Max peers reached, not peering with {RNS.prettyhexrep(destination_hash)}", RNS.LOG_DEBUG) def unpeer(self, destination_hash, timestamp = None): if timestamp == None: @@ -974,14 +1811,92 @@ class LXMRouter: self.peers.pop(destination_hash) RNS.log("Broke peering with "+str(peer.destination)) + def rotate_peers(self): + try: + rotation_headroom = max(1, math.floor(self.max_peers*(LXMRouter.ROTATION_HEADROOM_PCT/100.0))) + required_drops = len(self.peers) - (self.max_peers - rotation_headroom) + if required_drops > 0 and len(self.peers) - required_drops > 1: + peers = self.peers.copy() + untested_peers = [] + for peer_id in self.peers: + peer = self.peers[peer_id] + if peer.last_sync_attempt == 0: + untested_peers.append(peer) + + if len(untested_peers) >= rotation_headroom: + RNS.log("Newly added peer threshold reached, postponing peer rotation", RNS.LOG_DEBUG) + return + + fully_synced_peers = {} + for peer_id in 
peers: + peer = peers[peer_id] + if peer.unhandled_message_count == 0: + fully_synced_peers[peer_id] = peer + + if len(fully_synced_peers) > 0: + peers = fully_synced_peers + ms = "" if len(fully_synced_peers) == 1 else "s" + RNS.log(f"Found {len(fully_synced_peers)} fully synced peer{ms}, using as peer rotation pool basis", RNS.LOG_DEBUG) + + culled_peers = [] + waiting_peers = [] + unresponsive_peers = [] + for peer_id in peers: + peer = peers[peer_id] + if not peer_id in self.static_peers and peer.state == LXMPeer.IDLE: + if peer.alive: + if peer.offered == 0: + # Don't consider for unpeering until at + # least one message has been offered + pass + else: + waiting_peers.append(peer) + else: + unresponsive_peers.append(peer) + + drop_pool = [] + if len(unresponsive_peers) > 0: + drop_pool.extend(unresponsive_peers) + if not self.prioritise_rotating_unreachable_peers: + drop_pool.extend(waiting_peers) + + else: + drop_pool.extend(waiting_peers) + + if len(drop_pool) > 0: + drop_count = min(required_drops, len(drop_pool)) + low_acceptance_rate_peers = sorted( + drop_pool, + key=lambda p: ( 0 if p.offered == 0 else (p.outgoing/p.offered) ), + reverse=False + )[0:drop_count] + + dropped_peers = 0 + for peer in low_acceptance_rate_peers: + ar = 0 if peer.offered == 0 else round((peer.outgoing/peer.offered)*100, 2) + if ar < LXMRouter.ROTATION_AR_MAX*100: + reachable_str = "reachable" if peer.alive else "unreachable" + RNS.log(f"Acceptance rate for {reachable_str} peer {RNS.prettyhexrep(peer.destination_hash)} was: {ar}% ({peer.outgoing}/{peer.offered}, {peer.unhandled_message_count} unhandled messages)", RNS.LOG_DEBUG) + self.unpeer(peer.destination_hash) + dropped_peers += 1 + + ms = "" if dropped_peers == 1 else "s" + RNS.log(f"Dropped {dropped_peers} low acceptance rate peer{ms} to increase peering headroom", RNS.LOG_DEBUG) + + except Exception as e: + RNS.log(f"An error occurred during peer rotation: {e}", RNS.LOG_ERROR) + RNS.trace_exception(e) + def sync_peers(self): culled_peers = [] waiting_peers = [] unresponsive_peers = [] - for peer_id in self.peers: - peer = self.peers[peer_id] + peers = self.peers.copy() + for peer_id in peers: + peer = peers[peer_id] if time.time() > peer.last_heard + LXMPeer.MAX_UNREACHABLE: - culled_peers.append(peer_id) + if not peer_id in self.static_peers: + culled_peers.append(peer_id) else: if peer.state == LXMPeer.IDLE and len(peer.unhandled_messages) > 0: if peer.alive: @@ -997,12 +1912,12 @@ class LXMRouter: if len(waiting_peers) > 0: fastest_peers = sorted( waiting_peers, - key=lambda p: p.link_establishment_rate, + key=lambda p: p.sync_transfer_rate, reverse=True )[0:min(LXMRouter.FASTEST_N_RANDOM_POOL, len(waiting_peers))] peer_pool.extend(fastest_peers) - unknown_speed_peers = [p for p in waiting_peers if p.link_establishment_rate == 0] + unknown_speed_peers = [p for p in waiting_peers if p.sync_transfer_rate == 0] if len(unknown_speed_peers) > 0: peer_pool.extend( unknown_speed_peers[ @@ -1034,9 +1949,33 @@ class LXMRouter: def propagation_link_established(self, link): link.set_packet_callback(self.propagation_packet) - link.set_resource_strategy(RNS.Link.ACCEPT_ALL) + link.set_resource_strategy(RNS.Link.ACCEPT_APP) + link.set_resource_callback(self.propagation_resource_advertised) link.set_resource_started_callback(self.resource_transfer_began) link.set_resource_concluded_callback(self.propagation_resource_concluded) + self.active_propagation_links.append(link) + + def propagation_resource_advertised(self, resource): + if self.from_static_only: + 
remote_identity = resource.link.get_remote_identity() + if remote_identity == None: + RNS.log(f"Rejecting propagation resource from unidentified peer", RNS.LOG_DEBUG) + return False + else: + remote_destination = RNS.Destination(remote_identity, RNS.Destination.OUT, RNS.Destination.SINGLE, APP_NAME, "propagation") + remote_hash = remote_destination.hash + remote_str = RNS.prettyhexrep(remote_hash) + if not remote_hash in self.static_peers: + RNS.log(f"Rejecting propagation resource from {remote_str} not in static peers list", RNS.LOG_DEBUG) + return False + + size = resource.get_data_size() + limit = self.propagation_per_transfer_limit*1000 + if limit != None and size > limit: + RNS.log(f"Rejecting {RNS.prettysize(size)} incoming propagation resource, since it exceeds the limit of {RNS.prettysize(limit)}", RNS.LOG_DEBUG) + return False + else: + return True def propagation_packet(self, data, packet): try: @@ -1049,6 +1988,7 @@ class LXMRouter: messages = data[1] for lxmf_data in messages: self.lxmf_propagation(lxmf_data) + self.client_propagation_messages_received += 1 packet.prove() @@ -1056,10 +1996,18 @@ class LXMRouter: RNS.log("Exception occurred while parsing incoming LXMF propagation data.", RNS.LOG_ERROR) RNS.log("The contained exception was: "+str(e), RNS.LOG_ERROR) - def offer_request(self, path, data, request_id, remote_identity, requested_at): + def offer_request(self, path, data, request_id, link_id, remote_identity, requested_at): if remote_identity == None: return LXMPeer.ERROR_NO_IDENTITY else: + if self.from_static_only: + remote_destination = RNS.Destination(remote_identity, RNS.Destination.OUT, RNS.Destination.SINGLE, APP_NAME, "propagation") + remote_hash = remote_destination.hash + remote_str = RNS.prettyhexrep(remote_hash) + if not remote_hash in self.static_peers: + RNS.log(f"Rejecting propagation request from {remote_str} not in static peers list", RNS.LOG_DEBUG) + return LXMPeer.ERROR_NO_ACCESS + try: transient_ids = data wanted_ids = [] @@ -1082,10 +2030,9 @@ class LXMRouter: return None def propagation_resource_concluded(self, resource): - RNS.log("Transfer concluded for incoming propagation resource "+str(resource), RNS.LOG_DEBUG) if resource.status == RNS.Resource.COMPLETE: # TODO: The peer this was received from should - # have the transient id added to it's list of + # have the transient id added to its list of # already handled messages. try: data = msgpack.unpackb(resource.data.read()) @@ -1094,38 +2041,83 @@ class LXMRouter: # This is a series of propagation messages from a peer or originator remote_timebase = data[0] remote_hash = None + remote_str = "unknown peer" remote_identity = resource.link.get_remote_identity() if remote_identity != None: remote_destination = RNS.Destination(remote_identity, RNS.Destination.OUT, RNS.Destination.SINGLE, APP_NAME, "propagation") remote_hash = remote_destination.hash + remote_str = RNS.prettyhexrep(remote_hash) if not remote_hash in self.peers: if self.autopeer and RNS.Transport.hops_to(remote_hash) <= self.autopeer_maxdepth: - self.peer(remote_hash, remote_timebase) + # TODO: Query cache for an announce and get propagation + # transfer limit from that. For now, initialise it to a + # sane default value, and wait for an announce to arrive + # that will update the peering config to the actual limit. 
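+                        # Until then, a quarter of this node's own propagation limit serves
+                        # as that conservative starting value.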
+ propagation_transfer_limit = LXMRouter.PROPAGATION_LIMIT//4 + wanted_inbound_peers = None + self.peer(remote_hash, remote_timebase, propagation_transfer_limit, wanted_inbound_peers) + else: + remote_str = f"peer {remote_str}" messages = data[1] + ms = "" if len(messages) == 1 else "s" + RNS.log(f"Received {len(messages)} message{ms} from {remote_str}", RNS.LOG_VERBOSE) for lxmf_data in messages: + peer = None + transient_id = RNS.Identity.full_hash(lxmf_data) if remote_hash != None and remote_hash in self.peers: - transient_id = RNS.Identity.full_hash(lxmf_data) peer = self.peers[remote_hash] - peer.handled_messages[transient_id] = [transient_id, remote_timebase, lxmf_data] + peer.incoming += 1 + peer.rx_bytes += len(lxmf_data) + else: + if remote_identity != None: + self.unpeered_propagation_incoming += 1 + self.unpeered_propagation_rx_bytes += len(lxmf_data) + else: + self.client_propagation_messages_received += 1 + + self.lxmf_propagation(lxmf_data, from_peer=peer) + if peer != None: + peer.queue_handled_message(transient_id) - self.lxmf_propagation(lxmf_data) else: RNS.log("Invalid data structure received at propagation destination, ignoring", RNS.LOG_DEBUG) except Exception as e: RNS.log("Error while unpacking received propagation resource", RNS.LOG_DEBUG) + RNS.trace_exception(e) + + def enqueue_peer_distribution(self, transient_id, from_peer): + self.peer_distribution_queue.append([transient_id, from_peer]) + + def flush_peer_distribution_queue(self): + if len(self.peer_distribution_queue) > 0: + entries = [] + while len(self.peer_distribution_queue) > 0: + entries.append(self.peer_distribution_queue.pop()) + + for peer_id in self.peers.copy(): + if peer_id in self.peers: + peer = self.peers[peer_id] + for entry in entries: + transient_id = entry[0] + from_peer = entry[1] + if peer != from_peer: + peer.queue_unhandled_message(transient_id) + + def lxmf_propagation(self, lxmf_data, signal_local_delivery=None, signal_duplicate=None, allow_duplicate=False, is_paper_message=False, from_peer=None): + no_stamp_enforcement = False + if is_paper_message: + no_stamp_enforcement = True - def lxmf_propagation(self, lxmf_data, signal_local_delivery=None, signal_duplicate=None): try: if len(lxmf_data) >= LXMessage.LXMF_OVERHEAD: transient_id = RNS.Identity.full_hash(lxmf_data) - if not transient_id in self.propagation_entries and not transient_id in self.locally_processed_transient_ids: + if (not transient_id in self.propagation_entries and not transient_id in self.locally_processed_transient_ids) or allow_duplicate == True: received = time.time() - propagation_entry = [transient_id, received, lxmf_data] destination_hash = lxmf_data[:LXMessage.DESTINATION_LENGTH] self.locally_processed_transient_ids[transient_id] = received @@ -1134,12 +2126,13 @@ class LXMRouter: delivery_destination = self.delivery_destinations[destination_hash] encrypted_lxmf_data = lxmf_data[LXMessage.DESTINATION_LENGTH:] decrypted_lxmf_data = delivery_destination.decrypt(encrypted_lxmf_data) - delivery_data = lxmf_data[:LXMessage.DESTINATION_LENGTH]+decrypted_lxmf_data - self.lxmf_delivery(delivery_data, delivery_destination.type) - self.locally_delivered_transient_ids[transient_id] = time.time() + if decrypted_lxmf_data != None: + delivery_data = lxmf_data[:LXMessage.DESTINATION_LENGTH]+decrypted_lxmf_data + self.lxmf_delivery(delivery_data, delivery_destination.type, ratchet_id=delivery_destination.latest_ratchet_id, method=LXMessage.PROPAGATED, no_stamp_enforcement=no_stamp_enforcement, allow_duplicate=allow_duplicate) 
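+                        # Record the transient ID as locally delivered, so it is reported as
+                        # already held when message lists are synchronised from propagation nodes.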
+ self.locally_delivered_transient_ids[transient_id] = time.time() - if signal_local_delivery != None: - return signal_local_delivery + if signal_local_delivery != None: + return signal_local_delivery else: if self.propagation_node: @@ -1148,12 +2141,9 @@ class LXMRouter: msg_file.write(lxmf_data) msg_file.close() - self.propagation_entries[transient_id] = [destination_hash, file_path, time.time(), len(lxmf_data)] - - RNS.log("Received propagated LXMF message "+RNS.prettyhexrep(transient_id)+", adding to peer distribution queues...", RNS.LOG_DEBUG) - for peer_id in self.peers: - peer = self.peers[peer_id] - peer.handle_message(transient_id) + RNS.log("Received propagated LXMF message "+RNS.prettyhexrep(transient_id)+", adding to peer distribution queues...", RNS.LOG_EXTREME) + self.propagation_entries[transient_id] = [destination_hash, file_path, time.time(), len(lxmf_data), [], []] + self.enqueue_peer_distribution(transient_id, from_peer) else: # TODO: Add message to sneakernet queues when implemented @@ -1173,9 +2163,10 @@ class LXMRouter: except Exception as e: RNS.log("Could not assemble propagated LXMF message from received data", RNS.LOG_DEBUG) RNS.log("The contained exception was: "+str(e), RNS.LOG_DEBUG) + RNS.trace_exception(e) return False - def ingest_lxm_uri(self, uri, signal_local_delivery=None, signal_duplicate=None): + def ingest_lxm_uri(self, uri, signal_local_delivery=None, signal_duplicate=None, allow_duplicate=False): try: if not uri.lower().startswith(LXMessage.URI_SCHEMA+"://"): RNS.log("Cannot ingest LXM, invalid URI provided.", RNS.LOG_ERROR) @@ -1185,7 +2176,7 @@ class LXMRouter: lxmf_data = base64.urlsafe_b64decode(uri.replace(LXMessage.URI_SCHEMA+"://", "").replace("/", "")+"==") transient_id = RNS.Identity.full_hash(lxmf_data) - router_propagation_result = self.lxmf_propagation(lxmf_data, signal_local_delivery=signal_local_delivery, signal_duplicate=signal_duplicate) + router_propagation_result = self.lxmf_propagation(lxmf_data, signal_local_delivery=signal_local_delivery, signal_duplicate=signal_duplicate, allow_duplicate=allow_duplicate, is_paper_message=True) if router_propagation_result != False: RNS.log("LXM with transient ID "+RNS.prettyhexrep(transient_id)+" was ingested.", RNS.LOG_DEBUG) return router_propagation_result @@ -1200,13 +2191,67 @@ class LXMRouter: def fail_message(self, lxmessage): RNS.log(str(lxmessage)+" failed to send", RNS.LOG_DEBUG) - self.pending_outbound.remove(lxmessage) + if lxmessage in self.pending_outbound: + self.pending_outbound.remove(lxmessage) + self.failed_outbound.append(lxmessage) - lxmessage.state = LXMessage.FAILED + if lxmessage.state != LXMessage.REJECTED: + lxmessage.state = LXMessage.FAILED + if lxmessage.failed_callback != None and callable(lxmessage.failed_callback): lxmessage.failed_callback(lxmessage) + def process_deferred_stamps(self): + if len(self.pending_deferred_stamps) > 0: + + if self.stamp_gen_lock.locked(): + return + + else: + with self.stamp_gen_lock: + selected_lxm = None + selected_message_id = None + for message_id in self.pending_deferred_stamps: + lxmessage = self.pending_deferred_stamps[message_id] + if selected_lxm == None: + selected_lxm = lxmessage + selected_message_id = message_id + + if selected_lxm != None: + if selected_lxm.state == LXMessage.CANCELLED: + RNS.log(f"Message cancelled during deferred stamp generation for {selected_lxm}.", RNS.LOG_DEBUG) + selected_lxm.stamp_generation_failed = True + self.pending_deferred_stamps.pop(selected_message_id) + if selected_lxm.failed_callback != 
None and callable(selected_lxm.failed_callback): + selected_lxm.failed_callback(selected_lxm) + + return + + RNS.log(f"Starting stamp generation for {selected_lxm}...", RNS.LOG_DEBUG) + generated_stamp = selected_lxm.get_stamp() + if generated_stamp: + selected_lxm.stamp = generated_stamp + selected_lxm.defer_stamp = False + selected_lxm.packed = None + selected_lxm.pack() + self.pending_deferred_stamps.pop(selected_message_id) + self.pending_outbound.append(selected_lxm) + RNS.log(f"Stamp generation completed for {selected_lxm}", RNS.LOG_DEBUG) + else: + if selected_lxm.state == LXMessage.CANCELLED: + RNS.log(f"Message cancelled during deferred stamp generation for {selected_lxm}.", RNS.LOG_DEBUG) + selected_lxm.stamp_generation_failed = True + self.pending_deferred_stamps.pop(selected_message_id) + if selected_lxm.failed_callback != None and callable(selected_lxm.failed_callback): + selected_lxm.failed_callback(selected_lxm) + else: + RNS.log(f"Deferred stamp generation did not succeed. Failing {selected_lxm}.", RNS.LOG_ERROR) + selected_lxm.stamp_generation_failed = True + self.pending_deferred_stamps.pop(selected_message_id) + self.fail_message(selected_lxm) + + def process_outbound(self, sender = None): if self.processing_outbound: return @@ -1215,19 +2260,75 @@ for lxmessage in self.pending_outbound: if lxmessage.state == LXMessage.DELIVERED: RNS.log("Delivery has occurred for "+str(lxmessage)+", removing from outbound queue", RNS.LOG_DEBUG) self.pending_outbound.remove(lxmessage) - elif lxmessage.state == LXMessage.SENT: + + # Update ticket delivery stats + if lxmessage.include_ticket and FIELD_TICKET in lxmessage.fields: + RNS.log(f"Updating latest ticket delivery for {RNS.prettyhexrep(lxmessage.destination_hash)}", RNS.LOG_DEBUG) + self.available_tickets["last_deliveries"][lxmessage.destination_hash] = time.time() + self.save_available_tickets() + + # Prepare link for backchannel communications + delivery_destination_hash = lxmessage.get_destination().hash + if lxmessage.method == LXMessage.DIRECT and delivery_destination_hash in self.direct_links: + direct_link = self.direct_links[delivery_destination_hash] + if not hasattr(direct_link, "backchannel_identified") or direct_link.backchannel_identified == False: + if direct_link.initiator == True: + source_destination_hash = lxmessage.get_source().hash + if source_destination_hash in self.delivery_destinations: + backchannel_identity = self.delivery_destinations[source_destination_hash].identity + backchannel_desthash = RNS.Destination.hash_from_name_and_identity("lxmf.delivery", backchannel_identity) + direct_link.identify(backchannel_identity) + direct_link.backchannel_identified = True + self.delivery_link_established(direct_link) + RNS.log(f"Performed backchannel identification as {RNS.prettyhexrep(backchannel_desthash)} on {direct_link}", RNS.LOG_DEBUG) + + elif lxmessage.method == LXMessage.PROPAGATED and lxmessage.state == LXMessage.SENT: RNS.log("Propagation has occurred for "+str(lxmessage)+", removing from outbound queue", RNS.LOG_DEBUG) self.pending_outbound.remove(lxmessage) + + elif lxmessage.state == LXMessage.CANCELLED: + RNS.log("Cancellation requested for "+str(lxmessage)+", removing from outbound queue", RNS.LOG_DEBUG) + self.pending_outbound.remove(lxmessage) + if lxmessage.failed_callback != None and callable(lxmessage.failed_callback): + lxmessage.failed_callback(lxmessage) + + elif lxmessage.state == LXMessage.REJECTED: + RNS.log("Receiver rejected "+str(lxmessage)+", removing from outbound queue", RNS.LOG_DEBUG) + 
self.pending_outbound.remove(lxmessage) + if lxmessage.failed_callback != None and callable(lxmessage.failed_callback): + lxmessage.failed_callback(lxmessage) + else: - RNS.log("Starting outbound processing for "+str(lxmessage)+" to "+RNS.prettyhexrep(lxmessage.get_destination().hash), RNS.LOG_DEBUG) + RNS.log("Outbound processing for "+str(lxmessage)+" to "+RNS.prettyhexrep(lxmessage.get_destination().hash), RNS.LOG_DEBUG) + + if lxmessage.progress == None or lxmessage.progress < 0.01: + lxmessage.progress = 0.01 + # Outbound handling for opportunistic messages - if lxmessage.method == LXMessage.OPPORTUNISTIC: + if lxmessage.method == LXMessage.OPPORTUNISTIC: if lxmessage.delivery_attempts <= LXMRouter.MAX_DELIVERY_ATTEMPTS: - if not hasattr(lxmessage, "next_delivery_attempt") or time.time() > lxmessage.next_delivery_attempt: + if lxmessage.delivery_attempts >= LXMRouter.MAX_PATHLESS_TRIES and not RNS.Transport.has_path(lxmessage.get_destination().hash): + RNS.log(f"Requesting path to {RNS.prettyhexrep(lxmessage.get_destination().hash)} after {lxmessage.delivery_attempts} pathless tries for {lxmessage}", RNS.LOG_DEBUG) lxmessage.delivery_attempts += 1 - lxmessage.next_delivery_attempt = time.time() + LXMRouter.DELIVERY_RETRY_WAIT - RNS.log("Opportunistic delivery attempt "+str(lxmessage.delivery_attempts)+" for "+str(lxmessage)+" to "+RNS.prettyhexrep(lxmessage.get_destination().hash), RNS.LOG_DEBUG) - lxmessage.send() + RNS.Transport.request_path(lxmessage.get_destination().hash) + lxmessage.next_delivery_attempt = time.time() + LXMRouter.PATH_REQUEST_WAIT + lxmessage.progress = 0.01 + elif lxmessage.delivery_attempts == LXMRouter.MAX_PATHLESS_TRIES+1 and RNS.Transport.has_path(lxmessage.get_destination().hash): + RNS.log(f"Opportunistic delivery for {lxmessage} still unsuccessful after {lxmessage.delivery_attempts} attempts, trying to rediscover path to {RNS.prettyhexrep(lxmessage.get_destination().hash)}", RNS.LOG_DEBUG) + lxmessage.delivery_attempts += 1 + RNS.Reticulum.get_instance().drop_path(lxmessage.get_destination().hash) + def rediscover_job(): + time.sleep(0.5) + RNS.Transport.request_path(lxmessage.get_destination().hash) + threading.Thread(target=rediscover_job, daemon=True).start() + lxmessage.next_delivery_attempt = time.time() + LXMRouter.PATH_REQUEST_WAIT + lxmessage.progress = 0.01 + else: + if not hasattr(lxmessage, "next_delivery_attempt") or time.time() > lxmessage.next_delivery_attempt: + lxmessage.delivery_attempts += 1 + lxmessage.next_delivery_attempt = time.time() + LXMRouter.DELIVERY_RETRY_WAIT + RNS.log("Opportunistic delivery attempt "+str(lxmessage.delivery_attempts)+" for "+str(lxmessage)+" to "+RNS.prettyhexrep(lxmessage.get_destination().hash), RNS.LOG_DEBUG) + lxmessage.send() else: RNS.log("Max delivery attempts reached for oppertunistic "+str(lxmessage)+" to "+RNS.prettyhexrep(lxmessage.get_destination().hash), RNS.LOG_DEBUG) self.fail_message(lxmessage) @@ -1237,14 +2338,28 @@ class LXMRouter: elif lxmessage.method == LXMessage.DIRECT: if lxmessage.delivery_attempts <= LXMRouter.MAX_DELIVERY_ATTEMPTS: delivery_destination_hash = lxmessage.get_destination().hash - + direct_link = None + if delivery_destination_hash in self.direct_links: - # A link already exists, so we'll try to use it - # to deliver the message + # An established direct link already exists to + # the destination, so we'll try to use it for + # delivering the message direct_link = self.direct_links[delivery_destination_hash] + RNS.log(f"Using available direct link {direct_link} to 
{RNS.prettyhexrep(delivery_destination_hash)}", RNS.LOG_DEBUG) + + elif delivery_destination_hash in self.backchannel_links: + # An established backchannel link exists to + # the destination, so we'll try to use it for + # delivering the message + direct_link = self.backchannel_links[delivery_destination_hash] + RNS.log(f"Using available backchannel link {direct_link} to {RNS.prettyhexrep(delivery_destination_hash)}", RNS.LOG_DEBUG) + + if direct_link != None: if direct_link.status == RNS.Link.ACTIVE: + if lxmessage.progress == None or lxmessage.progress < 0.05: + lxmessage.progress = 0.05 if lxmessage.state != LXMessage.SENDING: - RNS.log("Starting transfer of "+str(lxmessage)+" to "+RNS.prettyhexrep(lxmessage.get_destination().hash), RNS.LOG_DEBUG) + RNS.log("Starting transfer of "+str(lxmessage)+" to "+RNS.prettyhexrep(lxmessage.get_destination().hash)+" on link "+str(direct_link), RNS.LOG_DEBUG) lxmessage.set_delivery_destination(direct_link) lxmessage.send() else: @@ -1266,11 +2381,13 @@ class LXMRouter: lxmessage.next_delivery_attempt = time.time() + LXMRouter.PATH_REQUEST_WAIT lxmessage.set_delivery_destination(None) - self.direct_links.pop(delivery_destination_hash) + if delivery_destination_hash in self.direct_links: + self.direct_links.pop(delivery_destination_hash) + if delivery_destination_hash in self.backchannel_links: + self.backchannel_links.pop(delivery_destination_hash) lxmessage.next_delivery_attempt = time.time() + LXMRouter.DELIVERY_RETRY_WAIT else: - # Simply wait for the link to become - # active or close + # Simply wait for the link to become active or close RNS.log("The link to "+RNS.prettyhexrep(lxmessage.get_destination().hash)+" is pending, waiting for link to become active", RNS.LOG_DEBUG) else: # No link exists, so we'll try to establish one, but @@ -1286,10 +2403,12 @@ class LXMRouter: delivery_link = RNS.Link(lxmessage.get_destination()) delivery_link.set_link_established_callback(self.process_outbound) self.direct_links[delivery_destination_hash] = delivery_link + lxmessage.progress = 0.03 else: RNS.log("No path known for delivery attempt "+str(lxmessage.delivery_attempts)+" to "+RNS.prettyhexrep(lxmessage.get_destination().hash)+". 
Requesting path...", RNS.LOG_DEBUG) RNS.Transport.request_path(lxmessage.get_destination().hash) lxmessage.next_delivery_attempt = time.time() + LXMRouter.PATH_REQUEST_WAIT + lxmessage.progress = 0.01 else: RNS.log("Max delivery attempts reached for direct "+str(lxmessage)+" to "+RNS.prettyhexrep(lxmessage.get_destination().hash), RNS.LOG_DEBUG) self.fail_message(lxmessage) @@ -1325,7 +2444,7 @@ class LXMRouter: else: # Simply wait for the link to become # active or close - RNS.log("The propagation link to "+RNS.prettyhexrep(self.outbound_propagation_node)+" is pending, waiting for link to become active: "+str(self.outbound_propagation_link.status), RNS.LOG_DEBUG) + RNS.log("The propagation link to "+RNS.prettyhexrep(self.outbound_propagation_node)+" is pending, waiting for link to become active", RNS.LOG_DEBUG) else: # No link exists, so we'll try to establish one, but # only if we've never tried before, or the retry wait diff --git a/LXMF/LXMessage.py b/LXMF/LXMessage.py index 5c59202..515ab11 100644 --- a/LXMF/LXMessage.py +++ b/LXMF/LXMessage.py @@ -1,19 +1,25 @@ import RNS import RNS.vendor.umsgpack as msgpack +import os import time import base64 +import multiprocessing +import LXMF.LXStamper as LXStamper from .LXMF import APP_NAME + class LXMessage: - DRAFT = 0x00 + GENERATING = 0x00 OUTBOUND = 0x01 SENDING = 0x02 SENT = 0x04 DELIVERED = 0x08 + REJECTED = 0xFD + CANCELLED = 0xFE FAILED = 0xFF - states = [DRAFT, OUTBOUND, SENDING, SENT, DELIVERED, FAILED] + states = [GENERATING, OUTBOUND, SENDING, SENT, DELIVERED, REJECTED, CANCELLED, FAILED] UNKNOWN = 0x00 PACKET = 0x01 @@ -32,19 +38,33 @@ class LXMessage: DESTINATION_LENGTH = RNS.Identity.TRUNCATED_HASHLENGTH//8 SIGNATURE_LENGTH = RNS.Identity.SIGLENGTH//8 + TICKET_LENGTH = RNS.Identity.TRUNCATED_HASHLENGTH//8 - # LXMF overhead is 111 bytes per message: + # Default ticket expiry is 3 weeks, with an + # additional grace period of 5 days, allowing + # for timekeeping inaccuracies. Tickets will + # automatically renew when there is less than + # 14 days to expiry. + TICKET_EXPIRY = 21*24*60*60 + TICKET_GRACE = 5*24*60*60 + TICKET_RENEW = 14*24*60*60 + TICKET_INTERVAL = 1*24*60*60 + COST_TICKET = 0x100 + + # LXMF overhead is 112 bytes per message: # 16 bytes for destination hash # 16 bytes for source hash # 64 bytes for Ed25519 signature # 8 bytes for timestamp - # 7 bytes for msgpack structure - LXMF_OVERHEAD = 2*DESTINATION_LENGTH + SIGNATURE_LENGTH + 8 + 7 + # 8 bytes for msgpack structure + TIMESTAMP_SIZE = 8 + STRUCT_OVERHEAD = 8 + LXMF_OVERHEAD = 2*DESTINATION_LENGTH + SIGNATURE_LENGTH + TIMESTAMP_SIZE + STRUCT_OVERHEAD # With an MTU of 500, the maximum amount of data # we can send in a single encrypted packet is - # 383 bytes. - ENCRYPTED_PACKET_MDU = RNS.Packet.ENCRYPTED_MDU + # 391 bytes. + ENCRYPTED_PACKET_MDU = RNS.Packet.ENCRYPTED_MDU + TIMESTAMP_SIZE # The max content length we can fit in LXMF message # inside a single RNS packet is the encrypted MDU, minus @@ -53,7 +73,7 @@ class LXMessage: # field of the packet, therefore we also add the length # of a destination hash to the calculation. With default # RNS and LXMF parameters, the largest single-packet - # LXMF message we can send is 288 bytes. If a message + # LXMF message we can send is 295 bytes. If a message # is larger than that, a Reticulum link will be used. 
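
As a quick numeric illustration of the size budget described in the comments above — a sketch only, where the literal values stand in for the RNS constants (RNS.Packet.ENCRYPTED_MDU is assumed to be 383 bytes at the default MTU of 500):

# Sketch only: the authoritative figures are the LXMessage class constants.
DESTINATION_LENGTH = 16                      # truncated destination hash
SIGNATURE_LENGTH   = 64                      # Ed25519 signature
TIMESTAMP_SIZE     = 8
STRUCT_OVERHEAD    = 8                       # msgpack framing

LXMF_OVERHEAD = 2*DESTINATION_LENGTH + SIGNATURE_LENGTH + TIMESTAMP_SIZE + STRUCT_OVERHEAD
ENCRYPTED_PACKET_MDU = 383 + TIMESTAMP_SIZE  # assumed RNS.Packet.ENCRYPTED_MDU + timestamp

# The destination hash travels in the packet's addressing field, so it is
# added back when computing the usable content size.
single_packet_content = ENCRYPTED_PACKET_MDU - LXMF_OVERHEAD + DESTINATION_LENGTH

print(LXMF_OVERHEAD)           # 112
print(single_packet_content)   # 295, matching the comment above
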
ENCRYPTED_PACKET_MAX_CONTENT = ENCRYPTED_PACKET_MDU - LXMF_OVERHEAD + DESTINATION_LENGTH @@ -63,13 +83,13 @@ class LXMessage: LINK_PACKET_MDU = RNS.Link.MDU # Which means that we can deliver single-packet LXMF - # messages with content of up to 320 bytes over a link. + # messages with content of up to 319 bytes over a link. # If a message is larger than that, LXMF will sequence # and transfer it as a RNS resource over the link instead. LINK_PACKET_MAX_CONTENT = LINK_PACKET_MDU - LXMF_OVERHEAD # For plain packets without encryption, we can - # fit up to 369 bytes of content. + # fit up to 368 bytes of content. PLAIN_PACKET_MDU = RNS.Packet.PLAIN_MDU PLAIN_PACKET_MAX_CONTENT = PLAIN_PACKET_MDU - LXMF_OVERHEAD + DESTINATION_LENGTH @@ -90,7 +110,8 @@ class LXMessage: else: return "" - def __init__(self, destination, source, content = "", title = "", fields = None, desired_method = None, destination_hash = None, source_hash = None): + def __init__(self, destination, source, content = "", title = "", fields = None, desired_method = None, destination_hash = None, source_hash = None, stamp_cost=None, include_ticket=False): + if isinstance(destination, RNS.Destination) or destination == None: self.__destination = destination if destination != None: @@ -109,21 +130,41 @@ class LXMessage: else: raise ValueError("LXMessage initialised with invalid source") - self.set_title_from_string(title) - self.set_content_from_string(content) + if title == None: + title = "" + + if type(title) == bytes: + self.set_title_from_bytes(title) + else: + self.set_title_from_string(title) + + if type(content) == bytes: + self.set_content_from_bytes(content) + else: + self.set_content_from_string(content) + self.set_fields(fields) - self.payload = None - self.timestamp = None - self.signature = None - self.hash = None - self.packed = None - self.progress = None - self.state = LXMessage.DRAFT - self.method = LXMessage.UNKNOWN - self.rssi = None - self.snr = None - self.q = None + self.payload = None + self.timestamp = None + self.signature = None + self.hash = None + self.packed = None + self.state = LXMessage.GENERATING + self.method = LXMessage.UNKNOWN + self.progress = 0.0 + self.rssi = None + self.snr = None + self.q = None + + self.stamp = None + self.stamp_cost = stamp_cost + self.stamp_value = None + self.stamp_valid = False + self.stamp_checked = False + self.defer_stamp = True + self.outbound_ticket = None + self.include_ticket = include_ticket self.propagation_packed = None self.paper_packed = None @@ -131,17 +172,21 @@ class LXMessage: self.incoming = False self.signature_validated = False self.unverified_reason = None + self.ratchet_id = None self.representation = LXMessage.UNKNOWN self.desired_method = desired_method self.delivery_attempts = 0 self.transport_encrypted = False self.transport_encryption = None + self.ratchet_id = None self.packet_representation = None self.resource_representation = None self.__delivery_destination = None self.__delivery_callback = None - self.failed_callback = None + self.failed_callback = None + + self.deferred_stamp_generating = False def set_title_from_string(self, title_string): self.title = title_string.encode("utf-8") @@ -159,7 +204,11 @@ class LXMessage: self.content = content_bytes def content_as_string(self): - return self.content.decode("utf-8") + try: + return self.content.decode("utf-8") + except Exception as e: + RNS.log(f"{self} could not decode message content as string: {e}") + return None def set_fields(self, fields): if isinstance(fields, dict) or fields == 
None: @@ -168,7 +217,18 @@ class LXMessage: raise ValueError("LXMessage property \"fields\" can only be dict or None") def get_fields(self): - return self.__fields + return self.fields + + @property + def destination(self): + return self.__destination + + @destination.setter + def destination(self, destination): + self.set_destination(destination) + + def get_destination(self): + return self.destination def set_destination(self, destination): if self.destination == None: @@ -179,8 +239,16 @@ class LXMessage: else: raise ValueError("Cannot reassign destination on LXMessage") - def get_destination(self): - return self.__destination + @property + def source(self): + return self.__source + + @source.setter + def source(self, source): + self.set_source(source) + + def get_source(self): + return self.source def set_source(self, source): if self.source == None: @@ -191,9 +259,6 @@ class LXMessage: else: raise ValueError("Cannot reassign source on LXMessage") - def get_source(self): - return self.__source - def set_delivery_destination(self, delivery_destination): self.__delivery_destination = delivery_destination @@ -203,6 +268,71 @@ class LXMessage: def register_failed_callback(self, callback): self.failed_callback = callback + @staticmethod + def stamp_valid(stamp, target_cost, workblock): + target = 0b1 << 256-target_cost + result = RNS.Identity.full_hash(workblock+stamp) + if int.from_bytes(result, byteorder="big") > target: + return False + else: + return True + + def validate_stamp(self, target_cost, tickets=None): + if tickets != None: + for ticket in tickets: + try: + if self.stamp == RNS.Identity.truncated_hash(ticket+self.message_id): + RNS.log(f"Stamp on {self} validated by inbound ticket", RNS.LOG_DEBUG) # TODO: Remove at some point + self.stamp_value = LXMessage.COST_TICKET + return True + except Exception as e: + RNS.log(f"Error while validating ticket: {e}", RNS.LOG_ERROR) + RNS.trace_exception(e) + + if self.stamp == None: + return False + else: + workblock = LXStamper.stamp_workblock(self.message_id) + if LXMessage.stamp_valid(self.stamp, target_cost, workblock): + RNS.log(f"Stamp on {self} validated", RNS.LOG_DEBUG) # TODO: Remove at some point + self.stamp_value = LXStamper.stamp_value(workblock, self.stamp) + return True + else: + return False + + def get_stamp(self, timeout=None): + # If an outbound ticket exists, use this for + # generating a valid stamp. + if self.outbound_ticket != None and type(self.outbound_ticket) == bytes and len(self.outbound_ticket) == LXMessage.TICKET_LENGTH: + generated_stamp = RNS.Identity.truncated_hash(self.outbound_ticket+self.message_id) + self.stamp_value = LXMessage.COST_TICKET + RNS.log(f"Generated stamp with outbound ticket {RNS.hexrep(self.outbound_ticket)} for {self}", RNS.LOG_DEBUG) # TODO: Remove at some point + return generated_stamp + + # If no stamp cost is required, we can just + # return immediately. + elif self.stamp_cost == None: + self.stamp_value = None + return None + + # If a stamp was already generated, return + # it immediately. + elif self.stamp != None: + return self.stamp + + # Otherwise, we will need to generate a + # valid stamp according to the cost that + # the receiver has specified. 
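
The stamp logic above is a hash-based proof of work. Below is a minimal standalone sketch of the validity check, under the assumption that RNS.Identity.full_hash is plain SHA-256 (hashlib stands in for it), using a throwaway workblock and a deliberately low cost:

# Minimal proof-of-work sketch mirroring LXMessage.stamp_valid(): a stamp is
# accepted when the hash of workblock+stamp, read as a 256-bit integer, does
# not exceed 2**(256 - cost), i.e. it has at least `cost` leading zero bits.
import os
import hashlib

def stamp_is_valid(stamp: bytes, cost: int, workblock: bytes) -> bool:
    target = 1 << (256 - cost)
    digest = hashlib.sha256(workblock + stamp).digest()
    return int.from_bytes(digest, byteorder="big") <= target

workblock = os.urandom(256)    # stand-in for the expanded workblock
cost = 8                       # deliberately low so the search ends quickly
stamp = os.urandom(32)
rounds = 1
while not stamp_is_valid(stamp, cost, workblock):
    stamp = os.urandom(32)
    rounds += 1
print(f"found a cost-{cost} stamp after {rounds} rounds")
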
+ else: + generated_stamp, value = LXStamper.generate_stamp(self.message_id, self.stamp_cost) + if generated_stamp: + self.stamp_value = value + self.stamp_valid = True + return generated_stamp + + else: + return None + def pack(self): if not self.packed: if self.timestamp == None: @@ -219,6 +349,11 @@ class LXMessage: hashed_part += msgpack.packb(self.payload) self.hash = RNS.Identity.full_hash(hashed_part) self.message_id = self.hash + + if not self.defer_stamp: + self.stamp = self.get_stamp() + if self.stamp != None: + self.payload.append(self.stamp) signed_part = b"" signed_part += hashed_part @@ -226,21 +361,29 @@ class LXMessage: self.signature = self.__source.sign(signed_part) self.signature_validated = True + packed_payload = msgpack.packb(self.payload) self.packed = b"" self.packed += self.__destination.hash self.packed += self.__source.hash self.packed += self.signature - packed_payload = msgpack.packb(self.payload) self.packed += packed_payload self.packed_size = len(self.packed) - content_size = len(packed_payload) + content_size = len(packed_payload)-LXMessage.TIMESTAMP_SIZE-LXMessage.STRUCT_OVERHEAD # If no desired delivery method has been defined, # one will be chosen according to these rules: if self.desired_method == None: self.desired_method = LXMessage.DIRECT - # TODO: Expand rules to something more intelligent + + # If opportunistic delivery was requested, check + # that message will fit within packet size limits + if self.desired_method == LXMessage.OPPORTUNISTIC: + if self.__destination.type == RNS.Destination.SINGLE: + if content_size > LXMessage.ENCRYPTED_PACKET_MAX_CONTENT: + RNS.log(f"Opportunistic delivery was requested for {self}, but content of length {content_size} exceeds packet size limit. Falling back to link-based delivery.", RNS.LOG_DEBUG) + self.desired_method = LXMessage.DIRECT + # Set delivery parameters according to delivery method if self.desired_method == LXMessage.OPPORTUNISTIC: if self.__destination.type == RNS.Destination.SINGLE: single_packet_content_limit = LXMessage.ENCRYPTED_PACKET_MAX_CONTENT @@ -248,7 +391,7 @@ class LXMessage: single_packet_content_limit = LXMessage.PLAIN_PACKET_MAX_CONTENT if content_size > single_packet_content_limit: - raise TypeError("LXMessage desired opportunistic delivery method, but content exceeds single-packet size.") + raise TypeError(f"LXMessage desired opportunistic delivery method, but content of length {content_size} exceeds single-packet content limit of {single_packet_content_limit}.") else: self.method = LXMessage.OPPORTUNISTIC self.representation = LXMessage.PACKET @@ -267,6 +410,7 @@ class LXMessage: single_packet_content_limit = LXMessage.LINK_PACKET_MAX_CONTENT encrypted_data = self.__destination.encrypt(self.packed[LXMessage.DESTINATION_LENGTH:]) + self.ratchet_id = self.__destination.latest_ratchet_id self.propagation_packed = msgpack.packb([time.time(), [self.packed[:LXMessage.DESTINATION_LENGTH]+encrypted_data]]) content_size = len(self.propagation_packed) @@ -281,6 +425,7 @@ class LXMessage: paper_content_limit = LXMessage.PAPER_MDU encrypted_data = self.__destination.encrypt(self.packed[LXMessage.DESTINATION_LENGTH:]) + self.ratchet_id = self.__destination.latest_ratchet_id self.paper_packed = self.packed[:LXMessage.DESTINATION_LENGTH]+encrypted_data content_size = len(self.paper_packed) @@ -297,23 +442,31 @@ class LXMessage: self.determine_transport_encryption() if self.method == LXMessage.OPPORTUNISTIC: - self.__as_packet().send().set_delivery_callback(self.__mark_delivered) + lxm_packet = 
self.__as_packet() + lxm_packet.send().set_delivery_callback(self.__mark_delivered) + self.progress = 0.50 + self.ratchet_id = lxm_packet.ratchet_id self.state = LXMessage.SENT elif self.method == LXMessage.DIRECT: self.state = LXMessage.SENDING if self.representation == LXMessage.PACKET: - receipt = self.__as_packet().send() + lxm_packet = self.__as_packet() + receipt = lxm_packet.send() + self.ratchet_id = self.__delivery_destination.link_id if receipt: receipt.set_delivery_callback(self.__mark_delivered) receipt.set_timeout_callback(self.__link_packet_timed_out) + self.progress = 0.50 else: if self.__delivery_destination: self.__delivery_destination.teardown() elif self.representation == LXMessage.RESOURCE: self.resource_representation = self.__as_resource() + self.ratchet_id = self.__delivery_destination.link_id + self.progress = 0.10 elif self.method == LXMessage.PROPAGATED: self.state = LXMessage.SENDING @@ -323,14 +476,19 @@ class LXMessage: if receipt: receipt.set_delivery_callback(self.__mark_propagated) receipt.set_timeout_callback(self.__link_packet_timed_out) + self.progress = 0.50 else: self.__delivery_destination.teardown() elif self.representation == LXMessage.RESOURCE: self.resource_representation = self.__as_resource() + self.progress = 0.10 def determine_transport_encryption(self): + # TODO: These descriptions are old and outdated. + # Update the transport encryption descriptions to + # account for ratchets and other changes. if self.method == LXMessage.OPPORTUNISTIC: if self.__destination.type == RNS.Destination.SINGLE: self.transport_encrypted = True @@ -371,56 +529,67 @@ class LXMessage: def __mark_delivered(self, receipt = None): RNS.log("Received delivery notification for "+str(self), RNS.LOG_DEBUG) self.state = LXMessage.DELIVERED + self.progress = 1.0 if self.__delivery_callback != None and callable(self.__delivery_callback): try: self.__delivery_callback(self) except Exception as e: - RNS.log("An error occurred in the external delivery callback for "+str(message), RNS.LOG_ERROR) + RNS.log("An error occurred in the external delivery callback for "+str(self), RNS.LOG_ERROR) + RNS.trace_exception(e) def __mark_propagated(self, receipt = None): RNS.log("Received propagation success notification for "+str(self), RNS.LOG_DEBUG) self.state = LXMessage.SENT + self.progress = 1.0 if self.__delivery_callback != None and callable(self.__delivery_callback): try: self.__delivery_callback(self) except Exception as e: - RNS.log("An error occurred in the external delivery callback for "+str(message), RNS.LOG_ERROR) + RNS.log("An error occurred in the external delivery callback for "+str(self), RNS.LOG_ERROR) + RNS.trace_exception(e) def __mark_paper_generated(self, receipt = None): RNS.log("Paper message generation succeeded for "+str(self), RNS.LOG_DEBUG) self.state = LXMessage.PAPER + self.progress = 1.0 if self.__delivery_callback != None and callable(self.__delivery_callback): try: self.__delivery_callback(self) except Exception as e: - RNS.log("An error occurred in the external delivery callback for "+str(message), RNS.LOG_ERROR) + RNS.log("An error occurred in the external delivery callback for "+str(self), RNS.LOG_ERROR) + RNS.trace_exception(e) def __resource_concluded(self, resource): if resource.status == RNS.Resource.COMPLETE: self.__mark_delivered() else: - resource.link.teardown() - self.state = LXMessage.OUTBOUND + if resource.status == RNS.Resource.REJECTED: + self.state = LXMessage.REJECTED + + elif self.state != LXMessage.CANCELLED: + resource.link.teardown() + 
self.state = LXMessage.OUTBOUND def __propagation_resource_concluded(self, resource): if resource.status == RNS.Resource.COMPLETE: self.__mark_propagated() else: - resource.link.teardown() - self.state = LXMessage.OUTBOUND + if self.state != LXMessage.CANCELLED: + resource.link.teardown() + self.state = LXMessage.OUTBOUND def __link_packet_timed_out(self, packet_receipt): - if packet_receipt: - packet_receipt.destination.teardown() - - self.state = LXMessage.OUTBOUND - + if self.state != LXMessage.CANCELLED: + if packet_receipt: + packet_receipt.destination.teardown() + + self.state = LXMessage.OUTBOUND def __update_transfer_progress(self, resource): - self.progress = resource.get_progress() + self.progress = 0.10 + (resource.get_progress()*0.90) def __as_packet(self): if not self.packed: @@ -449,8 +618,6 @@ class LXMessage: if not self.__delivery_destination.status == RNS.Link.ACTIVE: raise ConnectionError("Tried to synthesize resource for LXMF message on a link that was not active") - self.progress = 0.0 - if self.method == LXMessage.DIRECT: return RNS.Resource(self.packed, self.__delivery_destination, callback = self.__resource_concluded, progress_callback = self.__update_transfer_progress) elif self.method == LXMessage.PROPAGATED: @@ -542,10 +709,19 @@ class LXMessage: source_hash = lxmf_bytes[LXMessage.DESTINATION_LENGTH:2*LXMessage.DESTINATION_LENGTH] signature = lxmf_bytes[2*LXMessage.DESTINATION_LENGTH:2*LXMessage.DESTINATION_LENGTH+LXMessage.SIGNATURE_LENGTH] packed_payload = lxmf_bytes[2*LXMessage.DESTINATION_LENGTH+LXMessage.SIGNATURE_LENGTH:] + unpacked_payload = msgpack.unpackb(packed_payload) + + # Extract stamp from payload if included + if len(unpacked_payload) > 4: + stamp = unpacked_payload[4] + unpacked_payload = unpacked_payload[:4] + packed_payload = msgpack.packb(unpacked_payload) + else: + stamp = None + hashed_part = b"" + destination_hash + source_hash + packed_payload message_hash = RNS.Identity.full_hash(hashed_part) signed_part = b"" + hashed_part + message_hash - unpacked_payload = msgpack.unpackb(packed_payload) timestamp = unpacked_payload[0] title_bytes = unpacked_payload[1] content_bytes = unpacked_payload[2] @@ -574,7 +750,9 @@ class LXMessage: desired_method = original_method) message.hash = message_hash + message.message_id = message.hash message.signature = signature + message.stamp = stamp message.incoming = True message.timestamp = timestamp message.packed = lxmf_bytes diff --git a/LXMF/LXStamper.py b/LXMF/LXStamper.py new file mode 100644 index 0000000..bcfa95b --- /dev/null +++ b/LXMF/LXStamper.py @@ -0,0 +1,328 @@ +import RNS +import RNS.vendor.umsgpack as msgpack + +import os +import time +import multiprocessing + +WORKBLOCK_EXPAND_ROUNDS = 3000 + +active_jobs = {} + +def stamp_workblock(message_id): + wb_st = time.time() + expand_rounds = WORKBLOCK_EXPAND_ROUNDS + workblock = b"" + for n in range(expand_rounds): + workblock += RNS.Cryptography.hkdf( + length=256, + derive_from=message_id, + salt=RNS.Identity.full_hash(message_id+msgpack.packb(n)), + context=None, + ) + wb_time = time.time() - wb_st + RNS.log(f"Stamp workblock size {RNS.prettysize(len(workblock))}, generated in {round(wb_time*1000,2)}ms", RNS.LOG_DEBUG) + + return workblock + +def stamp_value(workblock, stamp): + value = 0 + bits = 256 + material = RNS.Identity.full_hash(workblock+stamp) + i = int.from_bytes(material, byteorder="big") + while ((i & (1 << (bits - 1))) == 0): + i = (i << 1) + value += 1 + + return value + +def generate_stamp(message_id, stamp_cost): + 
RNS.log(f"Generating stamp with cost {stamp_cost} for {RNS.prettyhexrep(message_id)}...", RNS.LOG_DEBUG) + workblock = stamp_workblock(message_id) + + start_time = time.time() + stamp = None + rounds = 0 + value = 0 + + if RNS.vendor.platformutils.is_windows() or RNS.vendor.platformutils.is_darwin(): + stamp, rounds = job_simple(stamp_cost, workblock, message_id) + + elif RNS.vendor.platformutils.is_android(): + stamp, rounds = job_android(stamp_cost, workblock, message_id) + + else: + stamp, rounds = job_linux(stamp_cost, workblock, message_id) + + duration = time.time() - start_time + speed = rounds/duration + if stamp != None: + value = stamp_value(workblock, stamp) + + RNS.log(f"Stamp with value {value} generated in {RNS.prettytime(duration)}, {rounds} rounds, {int(speed)} rounds per second", RNS.LOG_DEBUG) + + return stamp, value + +def cancel_work(message_id): + if RNS.vendor.platformutils.is_windows() or RNS.vendor.platformutils.is_darwin(): + try: + if message_id in active_jobs: + active_jobs[message_id] = True + + except Exception as e: + RNS.log("Error while terminating stamp generation workers: {e}", RNS.LOG_ERROR) + RNS.trace_exception(e) + + elif RNS.vendor.platformutils.is_android(): + try: + if message_id in active_jobs: + active_jobs[message_id] = True + + except Exception as e: + RNS.log("Error while terminating stamp generation workers: {e}", RNS.LOG_ERROR) + RNS.trace_exception(e) + + else: + try: + if message_id in active_jobs: + stop_event = active_jobs[message_id][0] + result_queue = active_jobs[message_id][1] + stop_event.set() + result_queue.put(None) + active_jobs.pop(message_id) + + except Exception as e: + RNS.log("Error while terminating stamp generation workers: {e}", RNS.LOG_ERROR) + RNS.trace_exception(e) + +def job_simple(stamp_cost, workblock, message_id): + # A simple, single-process stamp generator. + # should work on any platform, and is used + # as a fall-back, in case of limited multi- + # processing and/or acceleration support. + + platform = RNS.vendor.platformutils.get_platform() + RNS.log(f"Running stamp generation on {platform}, work limited to single CPU core. This will be slower than ideal.", RNS.LOG_WARNING) + + rounds = 0 + pstamp = os.urandom(256//8) + st = time.time() + + active_jobs[message_id] = False; + + def sv(s, c, w): + target = 0b1<<256-c; m = w+s + result = RNS.Identity.full_hash(m) + if int.from_bytes(result, byteorder="big") > target: + return False + else: + return True + + while not sv(pstamp, stamp_cost, workblock) and not active_jobs[message_id]: + pstamp = os.urandom(256//8); rounds += 1 + if rounds % 2500 == 0: + speed = rounds / (time.time()-st) + RNS.log(f"Stamp generation running. 
{rounds} rounds completed so far, {int(speed)} rounds per second", RNS.LOG_DEBUG) + + if active_jobs[message_id] == True: + pstamp = None + + active_jobs.pop(message_id) + + return pstamp, rounds + +def job_linux(stamp_cost, workblock, message_id): + allow_kill = True + stamp = None + total_rounds = 0 + jobs = multiprocessing.cpu_count() + stop_event = multiprocessing.Event() + result_queue = multiprocessing.Queue(1) + rounds_queue = multiprocessing.Queue() + + def job(stop_event, pn, sc, wb): + terminated = False + rounds = 0 + pstamp = os.urandom(256//8) + + def sv(s, c, w): + target = 0b1<<256-c; m = w+s + result = RNS.Identity.full_hash(m) + if int.from_bytes(result, byteorder="big") > target: + return False + else: + return True + + while not stop_event.is_set() and not sv(pstamp, sc, wb): + pstamp = os.urandom(256//8); rounds += 1 + + if not stop_event.is_set(): + stop_event.set() + result_queue.put(pstamp) + rounds_queue.put(rounds) + + job_procs = [] + RNS.log(f"Starting {jobs} stamp generation workers", RNS.LOG_DEBUG) + for jpn in range(jobs): + process = multiprocessing.Process(target=job, kwargs={"stop_event": stop_event, "pn": jpn, "sc": stamp_cost, "wb": workblock}, daemon=True) + job_procs.append(process) + process.start() + + active_jobs[message_id] = [stop_event, result_queue] + + stamp = result_queue.get() + RNS.log("Got stamp result from worker", RNS.LOG_DEBUG) # TODO: Remove + + # Collect any potential spurious + # results from worker queue. + try: + while True: + result_queue.get_nowait() + except: + pass + + for j in range(jobs): + nrounds = 0 + try: + nrounds = rounds_queue.get(timeout=2) + except Exception as e: + RNS.log(f"Failed to get round stats part {j}: {e}", RNS.LOG_ERROR) + total_rounds += nrounds + + all_exited = False + exit_timeout = time.time() + 5 + while time.time() < exit_timeout: + if not any(p.is_alive() for p in job_procs): + all_exited = True + break + time.sleep(0.1) + + if not all_exited: + RNS.log("Stamp generation IPC timeout, possible worker deadlock. Terminating remaining processes.", RNS.LOG_ERROR) + if allow_kill: + for j in range(jobs): + process = job_procs[j] + process.kill() + else: + return None + + else: + for j in range(jobs): + process = job_procs[j] + process.join() + # RNS.log(f"Joined {j} / {process}", RNS.LOG_DEBUG) # TODO: Remove + + return stamp, total_rounds + +def job_android(stamp_cost, workblock, message_id): + # Semaphore support is flaky to non-existent on + # Android, so we need to manually dispatch and + # manage workloads here, while periodically + # checking in on the progress. 
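
A condensed sketch of that batch-dispatch pattern, with illustrative names only: each worker searches for a bounded number of rounds and reports through a Manager dict, so the parent can log progress or honour cancellation between batches.

# Sketch of the bounded-batch dispatch idea; not the implementation below.
import os
import hashlib
import multiprocessing

def _batch(results, idx, workblock, cost, rounds):
    target = 1 << (256 - cost)
    for _ in range(rounds):
        candidate = os.urandom(32)
        if int.from_bytes(hashlib.sha256(workblock + candidate).digest(), "big") <= target:
            results[idx] = candidate
            return
    results[idx] = None

def batched_search(workblock, cost, rounds_per_batch=1000):
    manager = multiprocessing.Manager()
    workers = multiprocessing.cpu_count()
    while True:
        results = manager.dict()
        procs = [multiprocessing.Process(target=_batch, args=(results, n, workblock, cost, rounds_per_batch))
                 for n in range(workers)]
        for p in procs: p.start()
        for p in procs: p.join()
        hits = [s for s in results.values() if s is not None]
        if hits:
            return hits[0]
        # No stamp yet: log progress / check for cancellation here, then loop.
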
+ + stamp = None + start_time = time.time() + total_rounds = 0 + rounds_per_worker = 1000 + + use_nacl = False + try: + import nacl.encoding + import nacl.hash + use_nacl = True + except: + pass + + if use_nacl: + def full_hash(m): + return nacl.hash.sha256(m, encoder=nacl.encoding.RawEncoder) + else: + def full_hash(m): + return RNS.Identity.full_hash(m) + + def sv(s, c, w): + target = 0b1<<256-c + m = w+s + result = full_hash(m) + if int.from_bytes(result, byteorder="big") > target: + return False + else: + return True + + wm = multiprocessing.Manager() + jobs = multiprocessing.cpu_count() + + def job(procnum=None, results_dict=None, wb=None, sc=None, jr=None): + # RNS.log(f"Worker {procnum} starting for {jr} rounds...") # TODO: Remove + try: + rounds = 0 + found_stamp = None + + while True: + pstamp = os.urandom(256//8) + rounds += 1 + if sv(pstamp, sc, wb): + found_stamp = pstamp + break + + if rounds >= jr: + # RNS.log(f"Worker {procnum} found no result in {rounds} rounds") # TODO: Remove + break + + results_dict[procnum] = [found_stamp, rounds] + except Exception as e: + RNS.log(f"Stamp generation worker error: {e}", RNS.LOG_ERROR) + RNS.trace_exception(e) + + active_jobs[message_id] = False; + + RNS.log(f"Dispatching {jobs} workers for stamp generation...", RNS.LOG_DEBUG) # TODO: Remove + + results_dict = wm.dict() + while stamp == None and active_jobs[message_id] == False: + job_procs = [] + try: + for pnum in range(jobs): + pargs = {"procnum":pnum, "results_dict": results_dict, "wb": workblock, "sc":stamp_cost, "jr":rounds_per_worker} + process = multiprocessing.Process(target=job, kwargs=pargs) + job_procs.append(process) + process.start() + + for process in job_procs: + process.join() + + for j in results_dict: + r = results_dict[j] + total_rounds += r[1] + if r[0] != None: + stamp = r[0] + + if stamp == None: + elapsed = time.time() - start_time + speed = total_rounds/elapsed + RNS.log(f"Stamp generation running. 
{total_rounds} rounds completed so far, {int(speed)} rounds per second", RNS.LOG_DEBUG) + + except Exception as e: + RNS.log(f"Stamp generation job error: {e}") + RNS.trace_exception(e) + + active_jobs.pop(message_id) + + return stamp, total_rounds + +if __name__ == "__main__": + import sys + if len(sys.argv) < 2: + RNS.log("No cost argument provided", RNS.LOG_ERROR) + exit(1) + else: + try: + cost = int(sys.argv[1]) + except Exception as e: + RNS.log(f"Invalid cost argument provided: {e}", RNS.LOG_ERROR) + exit(1) + + RNS.loglevel = RNS.LOG_DEBUG + RNS.log("Testing LXMF stamp generation", RNS.LOG_DEBUG) + message_id = os.urandom(32) + generate_stamp(message_id, cost) \ No newline at end of file diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index 3dc34df..48885b2 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -35,6 +35,7 @@ import time import os from LXMF._version import __version__ +from LXMF import APP_NAME from RNS.vendor.configobj import ConfigObj @@ -77,6 +78,13 @@ def apply_config(): active_configuration["peer_announce_interval"] = lxmd_config["lxmf"].as_int("announce_interval")*60 else: active_configuration["peer_announce_interval"] = None + + if "lxmf" in lxmd_config and "delivery_transfer_max_accepted_size" in lxmd_config["lxmf"]: + active_configuration["delivery_transfer_max_accepted_size"] = lxmd_config["lxmf"].as_float("delivery_transfer_max_accepted_size") + if active_configuration["delivery_transfer_max_accepted_size"] < 0.38: + active_configuration["delivery_transfer_max_accepted_size"] = 0.38 + else: + active_configuration["delivery_transfer_max_accepted_size"] = 1000 if "lxmf" in lxmd_config and "on_inbound" in lxmd_config["lxmf"]: active_configuration["on_inbound"] = lxmd_config["lxmf"]["on_inbound"] @@ -119,13 +127,38 @@ def apply_config(): if active_configuration["message_storage_limit"] < 0.005: active_configuration["message_storage_limit"] = 0.005 else: - active_configuration["message_storage_limit"] = 2000 + active_configuration["message_storage_limit"] = 500 + + if "propagation" in lxmd_config and "propagation_transfer_max_accepted_size" in lxmd_config["propagation"]: + active_configuration["propagation_transfer_max_accepted_size"] = lxmd_config["propagation"].as_float("propagation_transfer_max_accepted_size") + if active_configuration["propagation_transfer_max_accepted_size"] < 0.38: + active_configuration["propagation_transfer_max_accepted_size"] = 0.38 + else: + active_configuration["propagation_transfer_max_accepted_size"] = 256 if "propagation" in lxmd_config and "prioritise_destinations" in lxmd_config["propagation"]: active_configuration["prioritised_lxmf_destinations"] = lxmd_config["propagation"].as_list("prioritise_destinations") else: active_configuration["prioritised_lxmf_destinations"] = [] + if "propagation" in lxmd_config and "static_peers" in lxmd_config["propagation"]: + static_peers = lxmd_config["propagation"].as_list("static_peers") + active_configuration["static_peers"] = [] + for static_peer in static_peers: + active_configuration["static_peers"].append(bytes.fromhex(static_peer)) + else: + active_configuration["static_peers"] = [] + + if "propagation" in lxmd_config and "max_peers" in lxmd_config["propagation"]: + active_configuration["max_peers"] = lxmd_config["propagation"].as_int("max_peers") + else: + active_configuration["max_peers"] = None + + if "propagation" in lxmd_config and "from_static_only" in lxmd_config["propagation"]: + active_configuration["from_static_only"] = 
lxmd_config["propagation"].as_bool("from_static_only") + else: + active_configuration["from_static_only"] = False + # Load various settings if "logging" in lxmd_config and "loglevel" in lxmd_config["logging"]: targetloglevel = lxmd_config["logging"].as_int("loglevel") @@ -289,7 +322,12 @@ def program_setup(configdir = None, rnsconfigdir = None, run_pn = False, on_inbo storagepath = storagedir, autopeer = active_configuration["autopeer"], autopeer_maxdepth = active_configuration["autopeer_maxdepth"], - ) + propagation_limit = active_configuration["propagation_transfer_max_accepted_size"], + delivery_limit = active_configuration["delivery_transfer_max_accepted_size"], + max_peers = active_configuration["max_peers"], + static_peers = active_configuration["static_peers"], + from_static_only = active_configuration["from_static_only"]) + message_router.register_delivery_callback(lxmf_delivery) for destination_hash in active_configuration["ignored_lxmf_destinations"]: @@ -346,13 +384,13 @@ def jobs(): try: if "peer_announce_interval" in active_configuration and active_configuration["peer_announce_interval"] != None: if time.time() > last_peer_announce + active_configuration["peer_announce_interval"]: - RNS.log("Sending announce for LXMF delivery destination", RNS.LOG_EXTREME) + RNS.log("Sending announce for LXMF delivery destination", RNS.LOG_VERBOSE) message_router.announce(lxmf_destination.hash) last_peer_announce = time.time() if "node_announce_interval" in active_configuration and active_configuration["node_announce_interval"] != None: if time.time() > last_node_announce + active_configuration["node_announce_interval"]: - RNS.log("Sending announce for LXMF Propagation Node", RNS.LOG_EXTREME) + RNS.log("Sending announce for LXMF Propagation Node", RNS.LOG_VERBOSE) message_router.announce_propagation_node() last_node_announce = time.time() @@ -365,7 +403,7 @@ def deferred_start_jobs(): global active_configuration, last_peer_announce, last_node_announce global message_router, lxmf_destination time.sleep(DEFFERED_JOBS_DELAY) - RNS.log("Running deferred start jobs") + RNS.log("Running deferred start jobs", RNS.LOG_DEBUG) if active_configuration["peer_announce_at_start"]: RNS.log("Sending announce for LXMF delivery destination", RNS.LOG_EXTREME) message_router.announce(lxmf_destination.hash) @@ -378,6 +416,190 @@ def deferred_start_jobs(): last_node_announce = time.time() threading.Thread(target=jobs, daemon=True).start() +def query_status(identity, timeout=5, exit_on_fail=False): + control_destination = RNS.Destination(identity, RNS.Destination.OUT, RNS.Destination.SINGLE, APP_NAME, "propagation", "control") + + timeout = time.time()+timeout + def check_timeout(): + if time.time() > timeout: + if exit_on_fail: + RNS.log("Getting lxmd statistics timed out, exiting now", RNS.LOG_ERROR) + exit(200) + else: + return LXMF.LXMPeer.LXMPeer.ERROR_TIMEOUT + else: + time.sleep(0.1) + + if not RNS.Transport.has_path(control_destination.hash): + RNS.Transport.request_path(control_destination.hash) + while not RNS.Transport.has_path(control_destination.hash): + tc = check_timeout() + if tc: + return tc + + link = RNS.Link(control_destination) + while not link.status == RNS.Link.ACTIVE: + tc = check_timeout() + if tc: + return tc + + link.identify(identity) + request_receipt = link.request(LXMF.LXMRouter.STATS_GET_PATH, data=None, response_callback=None, failed_callback=None) + while not request_receipt.get_status() == RNS.RequestReceipt.READY: + tc = check_timeout() + if tc: + return tc + + link.teardown() + 
return request_receipt.get_response() + +def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = 0, timeout=5, show_status=False, show_peers=False, identity_path=None): + global configpath, identitypath, storagedir, lxmdir + global lxmd_config, active_configuration, targetloglevel + targetlogdest = RNS.LOG_STDOUT + + if identity_path == None: + if configdir == None: + if os.path.isdir("/etc/lxmd") and os.path.isfile("/etc/lxmd/config"): + configdir = "/etc/lxmd" + elif os.path.isdir(RNS.Reticulum.userdir+"/.config/lxmd") and os.path.isfile(Reticulum.userdir+"/.config/lxmd/config"): + configdir = RNS.Reticulum.userdir+"/.config/lxmd" + else: + configdir = RNS.Reticulum.userdir+"/.lxmd" + + configpath = configdir+"/config" + identitypath = configdir+"/identity" + identity = None + + if not os.path.isdir(configdir): + RNS.log("Specified configuration directory does not exist, exiting now", RNS.LOG_ERROR) + exit(201) + if not os.path.isfile(identitypath): + RNS.log("Identity file not found in specified configuration directory, exiting now", RNS.LOG_ERROR) + exit(202) + else: + identity = RNS.Identity.from_file(identitypath) + if identity == None: + RNS.log("Could not load the Primary Identity from "+identitypath, RNS.LOG_ERROR) + exit(4) + + else: + if not os.path.isfile(identity_path): + RNS.log("Identity file not found in specified configuration directory, exiting now", RNS.LOG_ERROR) + exit(202) + else: + identity = RNS.Identity.from_file(identity_path) + if identity == None: + RNS.log("Could not load the Primary Identity from "+identity_path, RNS.LOG_ERROR) + exit(4) + + if targetloglevel == None: + targetloglevel = 3 + if verbosity != 0 or quietness != 0: + targetloglevel = targetloglevel+verbosity-quietness + + reticulum = RNS.Reticulum(configdir=rnsconfigdir, loglevel=targetloglevel, logdest=targetlogdest) + response = query_status(identity, timeout=timeout, exit_on_fail=True) + + if response == LXMF.LXMPeer.LXMPeer.ERROR_NO_IDENTITY: + RNS.log("Remote received no identity") + exit(203) + if response == LXMF.LXMPeer.LXMPeer.ERROR_NO_ACCESS: + RNS.log("Access denied") + exit(204) + else: + s = response + mutil = round((s["messagestore"]["bytes"]/s["messagestore"]["limit"])*100, 2) + ms_util = f"{mutil}%" + if s["from_static_only"]: + who_str = "static peers only" + else: + who_str = "all nodes" + + available_peers = 0 + unreachable_peers = 0 + peered_incoming = 0 + peered_outgoing = 0 + peered_rx_bytes = 0 + peered_tx_bytes = 0 + for peer_id in s["peers"]: + p = s["peers"][peer_id] + pm = p["messages"] + peered_incoming += pm["incoming"] + peered_outgoing += pm["outgoing"] + peered_rx_bytes += p["rx_bytes"] + peered_tx_bytes += p["tx_bytes"] + if p["alive"]: + available_peers += 1 + else: + unreachable_peers += 1 + + total_incoming = peered_incoming+s["unpeered_propagation_incoming"]+s["clients"]["client_propagation_messages_received"] + total_rx_bytes = peered_rx_bytes+s["unpeered_propagation_rx_bytes"] + df = round(peered_outgoing/total_incoming, 2) + + dhs = RNS.prettyhexrep(s["destination_hash"]); uts = RNS.prettytime(s["uptime"]) + print(f"\nLXMF Propagation Node running on {dhs}, uptime is {uts}") + + if show_status: + msb = RNS.prettysize(s["messagestore"]["bytes"]); msl = RNS.prettysize(s["messagestore"]["limit"]) + ptl = RNS.prettysize(s["propagation_limit"]*1000); uprx = RNS.prettysize(s["unpeered_propagation_rx_bytes"]) + mscnt = s["messagestore"]["count"]; stp = s["total_peers"]; smp = s["max_peers"]; sdp = s["discovered_peers"] + ssp = 
s["static_peers"]; cprr = s["clients"]["client_propagation_messages_received"] + cprs = s["clients"]["client_propagation_messages_served"]; upi = s["unpeered_propagation_incoming"] + print(f"Messagestore contains {mscnt} messages, {msb} ({ms_util} utilised of {msl})") + print(f"Accepting propagated messages from {who_str}, {ptl} per-transfer limit") + print(f"") + print(f"Peers : {stp} total (peer limit is {smp})") + print(f" {sdp} discovered, {ssp} static") + print(f" {available_peers} available, {unreachable_peers} unreachable") + print(f"") + print(f"Traffic : {total_incoming} messages received in total ({RNS.prettysize(total_rx_bytes)})") + print(f" {peered_incoming} messages received from peered nodes ({RNS.prettysize(peered_rx_bytes)})") + print(f" {upi} messages received from unpeered nodes ({uprx})") + print(f" {peered_outgoing} messages transferred to peered nodes ({RNS.prettysize(peered_tx_bytes)})") + print(f" {cprr} propagation messages received directly from clients") + print(f" {cprs} propagation messages served to clients") + print(f" Distribution factor is {df}") + print(f"") + + if show_peers: + if not show_status: + print("") + + for peer_id in s["peers"]: + ind = " " + p = s["peers"][peer_id] + if p["type"] == "static": + t = "Static peer " + elif p["type"] == "discovered": + t = "Discovered peer " + else: + t = "Unknown peer " + a = "Available" if p["alive"] == True else "Unreachable" + h = max(time.time()-p["last_heard"], 0) + hops = p["network_distance"] + hs = "hops unknown" if hops == RNS.Transport.PATHFINDER_M else f"{hops} hop away" if hops == 1 else f"{hops} hops away" + pm = p["messages"] + if p["last_sync_attempt"] != 0: + lsa = p["last_sync_attempt"] + ls = f"last synced {RNS.prettytime(max(time.time()-lsa, 0))} ago" + else: + ls = "never synced" + + sstr = RNS.prettyspeed(p["str"]); sler = RNS.prettyspeed(p["ler"]); stl = RNS.prettysize(p["transfer_limit"]*1000) + srxb = RNS.prettysize(p["rx_bytes"]); stxb = RNS.prettysize(p["tx_bytes"]); pmo = pm["offered"]; pmout = pm["outgoing"] + pmi = pm["incoming"]; pmuh = pm["unhandled"] + print(f"{ind}{t}{RNS.prettyhexrep(peer_id)}") + print(f"{ind*2}Status : {a}, {hs}, last heard {RNS.prettytime(h)} ago") + print(f"{ind*2}Speeds : {sstr} STR, {sler} LER, {stl} transfer limit") + print(f"{ind*2}Messages : {pmo} offered, {pmout} outgoing, {pmi} incoming") + print(f"{ind*2}Traffic : {srxb} received, {stxb} sent") + ms = "" if pm["unhandled"] == 1 else "s" + print(f"{ind*2}Sync state : {pmuh} unhandled message{ms}, {ls}") + print("") + + def main(): try: parser = argparse.ArgumentParser(description="Lightweight Extensible Messaging Daemon") @@ -388,6 +610,10 @@ def main(): parser.add_argument("-v", "--verbose", action="count", default=0) parser.add_argument("-q", "--quiet", action="count", default=0) parser.add_argument("-s", "--service", action="store_true", default=False, help="lxmd is running as a service and should log to file") + parser.add_argument("--status", action="store_true", default=False, help="display node status") + parser.add_argument("--peers", action="store_true", default=False, help="display peered nodes") + parser.add_argument("--timeout", action="store", default=5, help="timeout in seconds for query operations", type=float) + parser.add_argument("--identity", action="store", default=None, help="path to identity used for query request", type=str) parser.add_argument("--exampleconfig", action="store_true", default=False, help="print verbose configuration example to stdout and exit") 
parser.add_argument("--version", action="version", version="lxmd {version}".format(version=__version__)) @@ -397,15 +623,24 @@ def main(): print(__default_lxmd_config__) exit() - program_setup( - configdir = args.config, - rnsconfigdir=args.rnsconfig, - run_pn=args.propagation_node, - on_inbound=args.on_inbound, - verbosity=args.verbose, - quietness=args.quiet, - service=args.service - ) + if args.status or args.peers: + get_status(configdir = args.config, + rnsconfigdir=args.rnsconfig, + verbosity=args.verbose, + quietness=args.quiet, + timeout=args.timeout, + show_status=args.status, + show_peers=args.peers, + identity_path=args.identity) + exit() + + program_setup(configdir = args.config, + rnsconfigdir=args.rnsconfig, + run_pn=args.propagation_node, + on_inbound=args.on_inbound, + verbosity=args.verbose, + quietness=args.quiet, + service=args.service) except KeyboardInterrupt: print("") @@ -418,23 +653,41 @@ __default_lxmd_config__ = """# This is an example LXM Daemon config file. [propagation] # Whether to enable propagation node + enable_node = no # Automatic announce interval in minutes. # 6 hours by default. + announce_interval = 360 # Whether to announce when the node starts. + announce_at_start = yes # Wheter to automatically peer with other # propagation nodes on the network. + autopeer = yes # The maximum peering depth (in hops) for # automatically peered nodes. + autopeer_maxdepth = 4 +# The maximum accepted transfer size per in- +# coming propagation transfer, in kilobytes. +# This also sets the upper limit for the size +# of single messages accepted onto this node. +# +# If a node wants to propagate a larger number +# of messages to this node, than what can fit +# within this limit, it will prioritise sending +# the smallest messages first, and try again +# with any remaining messages at a later point. + +propagation_transfer_max_accepted_size = 256 + # The maximum amount of storage to use for # the LXMF Propagation Node message store, # specified in megabytes. When this limit @@ -443,8 +696,9 @@ autopeer_maxdepth = 4 # LXMF prioritises keeping messages that are # new and small. Large and old messages will # be removed first. This setting is optional -# and defaults to 2 gigabytes. -# message_storage_limit = 2000 +# and defaults to 500 megabytes. + +# message_storage_limit = 500 # You can tell the LXMF message router to # prioritise storage for one or more @@ -453,14 +707,35 @@ autopeer_maxdepth = 4 # keeping messages for destinations specified # with this option. This setting is optional, # and generally you do not need to use it. + # prioritise_destinations = 41d20c727598a3fbbdf9106133a3a0ed, d924b81822ca24e68e2effea99bcb8cf +# You can configure the maximum number of other +# propagation nodes that this node will peer +# with automatically. The default is 50. + +# max_peers = 25 + +# You can configure a list of static propagation +# node peers, that this node will always be +# peered with, by specifying a list of +# destination hashes. + +# static_peers = e17f833c4ddf8890dd3a79a6fea8161d, 5a2d0029b6e5ec87020abaea0d746da4 + +# You can configure the propagation node to +# only accept incoming propagation messages +# from configured static peers. + +# from_static_only = True + # By default, any destination is allowed to # connect and download messages, but you can # optionally restrict this. If you enable # authentication, you must provide a list of # allowed identity hashes in the a file named # "allowed" in the lxmd config directory. 
+ auth_required = no @@ -469,23 +744,35 @@ auth_required = no # The LXM Daemon will create an LXMF destination # that it can receive messages on. This option sets # the announced display name for this destination. + display_name = Anonymous Peer # It is possible to announce the internal LXMF # destination when the LXM Daemon starts up. + announce_at_start = no # You can also announce the delivery destination # at a specified interval. This is not enabled by # default. + # announce_interval = 360 +# The maximum accepted unpacked size for mes- +# sages received directly from other peers, +# specified in kilobytes. Messages larger than +# this will be rejected before the transfer +# begins. + +delivery_transfer_max_accepted_size = 1000 + # You can configure an external program to be run # every time a message is received. The program # will receive as an argument the full path to the # message saved as a file. The example below will # simply result in the message getting deleted as # soon as it has been received. + # on_inbound = rm @@ -499,6 +786,7 @@ announce_at_start = no # 5: Verbose logging # 6: Debug logging # 7: Extreme logging + loglevel = 4 """ diff --git a/LXMF/_version.py b/LXMF/_version.py index 8879c6c..63af887 100644 --- a/LXMF/_version.py +++ b/LXMF/_version.py @@ -1 +1 @@ -__version__ = "0.3.7" +__version__ = "0.6.3" diff --git a/README.md b/README.md index bd62246..ed7e4f0 100644 --- a/README.md +++ b/README.md @@ -7,8 +7,17 @@ LXMF is efficient enough that it can deliver messages over extremely low-bandwid User-facing clients built on LXMF include: - [Sideband](https://unsigned.io/sideband) +- [MeshChat](https://github.com/liamcottle/reticulum-meshchat) - [Nomad Network](https://unsigned.io/nomadnet) -- [Nexus Messenger](https://github.com/HarlekinSimplex/nexus_messenger) + +Community-provided tools and utilities for LXMF include: + +- [LXMFy](https://lxmfy.quad4.io/) +- [LXMF-Bot](https://github.com/randogoth/lxmf-bot) +- [LXMF Messageboard](https://github.com/chengtripp/lxmf_messageboard) +- [LXMEvent](https://github.com/faragher/LXMEvent) +- [RangeMap](https://github.com/faragher/RangeMap) +- [LXMF Tools](https://github.com/SebastianObi/LXMF-Tools) ## Structure @@ -102,6 +111,10 @@ Assuming the default Reticulum configuration, the binary wire-format is as follo The complete message overhead for LXMF is only 111 bytes, which in return gives you timestamped, digitally signed, infinitely extensible, end-to-end encrypted, zero-conf routed, minimal-infrastructure messaging that's easy to use and build applications with. +## Code Examples + +Before writing your own programs using LXMF, you need to have a basic understanding of how the [Reticulum](https://reticulum.network) protocol and API works. Please see the [Reticulum Manual](https://reticulum.network/manual/). For a few simple examples of how to send and receive messages with LXMF, please see the [receiver example](./docs/example_receiver.py) and the [sender example](./docs/example_sender.py) included in this repository. + ## Example Paper Message You can try out the paper messaging functionality by using the following QR code. It is a paper message sent to the LXMF address `6b3362bd2c1dbf87b66a85f79a8d8c75`. 
To be able to decrypt and read the message, you will need to import the following Reticulum Identity to an LXMF messaging app: @@ -118,10 +131,6 @@ You can also find the entire message in =7.4.2 +rns>=0.9.1 diff --git a/setup.py b/setup.py index 183736c..19737d6 100644 --- a/setup.py +++ b/setup.py @@ -25,6 +25,6 @@ setuptools.setup( 'lxmd=LXMF.Utilities.lxmd:main', ] }, - install_requires=['rns>=0.6.3'], - python_requires='>=3.7', + install_requires=["rns>=0.9.3"], + python_requires=">=3.7", )
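
To complement the sender and receiver examples referenced in the README above, here is a rough, hedged sketch of how a message can be constructed and handed to an LXMRouter, including the include_ticket parameter introduced in this changeset; all values are placeholders, and docs/example_sender.py remains the maintained reference.

# Rough sender sketch with placeholder values. It assumes the recipient's
# identity has already been learned from a received announce.
import RNS
import LXMF

reticulum = RNS.Reticulum()
router = LXMF.LXMRouter(storagepath="./lxmf_example_storage")
source = router.register_delivery_identity(RNS.Identity(), display_name="Example Sender")

recipient_hash = bytes.fromhex("6b3362bd2c1dbf87b66a85f79a8d8c75")   # address reused from the paper-message example
recipient_identity = RNS.Identity.recall(recipient_hash)             # requires a previously received announce
destination = RNS.Destination(recipient_identity, RNS.Destination.OUT,
                              RNS.Destination.SINGLE, "lxmf", "delivery")

message = LXMF.LXMessage(destination, source, "Hello from the sketch",
                         desired_method=LXMF.LXMessage.DIRECT,
                         include_ticket=True)  # parameter introduced in this changeset
router.handle_outbound(message)

A real sender would typically also register delivery and failure callbacks on the message and keep the process running until the message reaches a final state.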