From 9f051aea17f2d48c0c5b673e20da4f8463231756 Mon Sep 17 00:00:00 2001 From: Petr Blaha Date: Thu, 7 Sep 2023 08:39:57 +0200 Subject: [PATCH 001/161] dockerize LXMF --- Dockerfile | 31 +++++++++++++++++++++++++++++++ Dockerfile.howto | 6 ++++++ requirements.txt | 3 +++ 3 files changed, 40 insertions(+) create mode 100644 Dockerfile create mode 100644 Dockerfile.howto create mode 100644 requirements.txt diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..818f3b3 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,31 @@ +FROM python:alpine +LABEL authors="Petr Blaha petr.blaha@cleverdata.cz" +USER root +RUN apk update +RUN apk add sdl2_ttf sdl2 build-base libc-dev pkgconfig gstreamer sdl2_mixer sdl2_image sdl2_pango linux-headers mesa-dev py3-virtualenv + +RUN addgroup -S myuser && adduser -S -G myuser myuser +USER myuser +WORKDIR /home/myuser + +RUN pip install --upgrade pip + + +ENV PATH="/home/myuser/.local/bin:${PATH}" + +################### BEGIN Sideband ########################################### + +COPY --chown=myuser:myuser requirements.txt requirements.txt + +RUN pip install --user -r requirements.txt + + +COPY --chown=myuser:myuser . . + +#Python create virtual environment +RUN virtualenv /home/myuser/LXMF/venv +RUN source /home/myuser/LXMF/venv/bin/activate + +RUN make all + +################### END Sideband ########################################### diff --git a/Dockerfile.howto b/Dockerfile.howto new file mode 100644 index 0000000..bb20e5f --- /dev/null +++ b/Dockerfile.howto @@ -0,0 +1,6 @@ +# Run docker command one by one(all four), it will build LXMF artifact and copy to dist directory. +# No need to build locally and install dependencies +docker build -t lxmfdockerimage . +docker run -d -it --name lxmfdockercontainer lxmfdockerimage /bin/sh +docker cp lxmfdockercontainer:/home/myuser/dist . 
+docker rm -f lxmfdockercontainer \ No newline at end of file diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..48086e6 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,3 @@ +qrcode==7.4.2 +rns==0.5.7 +setuptools==68.0.0 From 820c92d38b8c690ab2336e872793c0746a8cfc40 Mon Sep 17 00:00:00 2001 From: Petr Blaha Date: Thu, 7 Sep 2023 08:51:49 +0200 Subject: [PATCH 002/161] dockerize LXMF --- Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index 818f3b3..b7ed71d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -13,7 +13,7 @@ RUN pip install --upgrade pip ENV PATH="/home/myuser/.local/bin:${PATH}" -################### BEGIN Sideband ########################################### +################### BEGIN LXMF ########################################### COPY --chown=myuser:myuser requirements.txt requirements.txt @@ -28,4 +28,4 @@ RUN source /home/myuser/LXMF/venv/bin/activate RUN make all -################### END Sideband ########################################### +################### END LXMF ########################################### From 4d356bcda897c10b76c9d4d8e5df2133402415b3 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 2 Nov 2023 04:36:51 +0100 Subject: [PATCH 003/161] Tweaked timing --- LXMF/LXMRouter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index fb7f3b0..3d3e936 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -19,7 +19,7 @@ class LXMRouter: MAX_DELIVERY_ATTEMPTS = 5 PROCESSING_INTERVAL = 4 DELIVERY_RETRY_WAIT = 7 - PATH_REQUEST_WAIT = 5 + PATH_REQUEST_WAIT = 7 LINK_MAX_INACTIVITY = 10*60 MESSAGE_EXPIRY = 30*24*60*60 From 10be1383e5cb6fac565c5f679310536e5c326115 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 2 Nov 2023 12:48:24 +0100 Subject: [PATCH 004/161] Updated version and dependencies --- LXMF/_version.py | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/LXMF/_version.py b/LXMF/_version.py index 8879c6c..4ad67eb 100644 --- a/LXMF/_version.py +++ b/LXMF/_version.py @@ -1 +1 @@ -__version__ = "0.3.7" +__version__ = "0.3.8" diff --git a/setup.py b/setup.py index 183736c..ccd2c25 100644 --- a/setup.py +++ b/setup.py @@ -25,6 +25,6 @@ setuptools.setup( 'lxmd=LXMF.Utilities.lxmd:main', ] }, - install_requires=['rns>=0.6.3'], + install_requires=['rns>=0.6.4'], python_requires='>=3.7', ) From 2812a07165239931a1ad7ded56901625de32534d Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 2 Nov 2023 18:29:46 +0100 Subject: [PATCH 005/161] Cleanup --- LXMF/LXMRouter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 3d3e936..06e9abf 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -1221,7 +1221,7 @@ class LXMRouter: else: RNS.log("Starting outbound processing for "+str(lxmessage)+" to "+RNS.prettyhexrep(lxmessage.get_destination().hash), RNS.LOG_DEBUG) # Outbound handling for opportunistic messages - if lxmessage.method == LXMessage.OPPORTUNISTIC: + if lxmessage.method == LXMessage.OPPORTUNISTIC: if lxmessage.delivery_attempts <= LXMRouter.MAX_DELIVERY_ATTEMPTS: if not hasattr(lxmessage, "next_delivery_attempt") or time.time() > lxmessage.next_delivery_attempt: lxmessage.delivery_attempts += 1 From 1ee0c83168a050265e06ddae60e63c28edb75d9d Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Wed, 8 Nov 2023 20:41:51 +0100 Subject: [PATCH 006/161] Updated version and dependencies --- LXMF/_version.py | 2 +- setup.py | 2 +- 2 
files changed, 2 insertions(+), 2 deletions(-) diff --git a/LXMF/_version.py b/LXMF/_version.py index 4ad67eb..771bc6e 100644 --- a/LXMF/_version.py +++ b/LXMF/_version.py @@ -1 +1 @@ -__version__ = "0.3.8" +__version__ = "0.3.9" diff --git a/setup.py b/setup.py index ccd2c25..5c17e3b 100644 --- a/setup.py +++ b/setup.py @@ -25,6 +25,6 @@ setuptools.setup( 'lxmd=LXMF.Utilities.lxmd:main', ] }, - install_requires=['rns>=0.6.4'], + install_requires=['rns>=0.6.7'], python_requires='>=3.7', ) From 754ae969e10d6ee1002dd31bf130573ef30b1e4d Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Wed, 3 Jan 2024 12:45:18 +0100 Subject: [PATCH 007/161] Added code examples. Closes #8. --- README.md | 4 ++++ docs/example_receiver.py | 37 +++++++++++++++++++++++++++++++++++++ docs/example_sender.py | 39 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 80 insertions(+) create mode 100644 docs/example_receiver.py create mode 100644 docs/example_sender.py diff --git a/README.md b/README.md index bd62246..11451a8 100644 --- a/README.md +++ b/README.md @@ -102,6 +102,10 @@ Assuming the default Reticulum configuration, the binary wire-format is as follo The complete message overhead for LXMF is only 111 bytes, which in return gives you timestamped, digitally signed, infinitely extensible, end-to-end encrypted, zero-conf routed, minimal-infrastructure messaging that's easy to use and build applications with. +## Code Examples + +Before writing your own programs using LXMF, you need to have a basic understanding of how the [Reticulum](https://reticulum.network) protocol and API works. Please see the [Reticulum Manual](https://reticulum.network/manual/). For a few simple examples of how to send and receive messages with LXMF, please see the [receiver example](./docs/example_receiver.py) and the [sender example](./docs/example_sender.py) included in this repository. + ## Example Paper Message You can try out the paper messaging functionality by using the following QR code. It is a paper message sent to the LXMF address `6b3362bd2c1dbf87b66a85f79a8d8c75`. 
To be able to decrypt and read the message, you will need to import the following Reticulum Identity to an LXMF messaging app: diff --git a/docs/example_receiver.py b/docs/example_receiver.py new file mode 100644 index 0000000..a637cd4 --- /dev/null +++ b/docs/example_receiver.py @@ -0,0 +1,37 @@ +import RNS +import LXMF +import time + +def delivery_callback(message): + time_string = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(message.timestamp)) + signature_string = "Signature is invalid, reason undetermined" + if message.signature_validated: + signature_string = "Validated" + else: + if message.unverified_reason == LXMF.LXMessage.SIGNATURE_INVALID: + signature_string = "Invalid signature" + if message.unverified_reason == LXMF.LXMessage.SOURCE_UNKNOWN: + signature_string = "Cannot verify, source is unknown" + + RNS.log("\t+--- LXMF Delivery ---------------------------------------------") + RNS.log("\t| Source hash : "+RNS.prettyhexrep(message.source_hash)) + RNS.log("\t| Source instance : "+str(message.get_source())) + RNS.log("\t| Destination hash : "+RNS.prettyhexrep(message.destination_hash)) + RNS.log("\t| Destination instance : "+str(message.get_destination())) + RNS.log("\t| Transport Encryption : "+str(message.transport_encryption)) + RNS.log("\t| Timestamp : "+time_string) + RNS.log("\t| Title : "+message.title_as_string()) + RNS.log("\t| Content : "+message.content_as_string()) + RNS.log("\t| Fields : "+str(message.fields)) + RNS.log("\t| Message signature : "+signature_string) + RNS.log("\t+---------------------------------------------------------------") + +r = RNS.Reticulum() + +router = LXMF.LXMRouter(storagepath="./tmp1") +identity = RNS.Identity() +my_lxmf_destination = router.register_delivery_identity(identity) +router.register_delivery_callback(delivery_callback) + +RNS.log("Ready to receive on: "+RNS.prettyhexrep(my_lxmf_destination.hash)) +input() \ No newline at end of file diff --git a/docs/example_sender.py b/docs/example_sender.py new file mode 100644 index 0000000..ab15a16 --- /dev/null +++ b/docs/example_sender.py @@ -0,0 +1,39 @@ +import LXMF +import RNS +import time +import random + +random_names = ["Tom", "Delilah", "Nancey", "Williams", "Neomi", "Curtis", "Alexa", "Theodora", "Ted", "Dinorah", "Nicol", "Drusilla", "Annalisa", "Verlene", "Latesha", "Tina", "Mia", "Brock", "Timothy", "Philip", "Willian", "Reyna", "Simona", "Mimi", "Stanford", "Ferne", "Catalina", "Lucie", "Jaye", "Natasha", "Willetta", "Isabel", "Esperanza", "Ciara", "Eusebio", "William", "Elma", "Angelica", "Coreen", "Melani", "Jonathan", "Maryland", "Caroline", "Gregg", "Ora", "Jacqui", "Letty", "Roselle", "Oralee", "Angla"] +random_titles = ["Long time", "Hi again", "Re: Hi there", "Test message", "", "", "Something different"] +random_msgs = ["If wishes were horses then beggars might fly. Stuff like that. It's enough to drive you crazy.", "'My ident cards were stolen,' Jason said. 'That fivehundred-dollar bill is yours if you can get me to someone who can replace them. If you're going to do it, do it right now; I'm not going to wait.' Wait to be picked up by a pol or a nat, he thought. Caught here in this rundown dingy hotel.", "A six, no matter what the external circumstances, will always prevail. Because that's the way they genetically defined us.", "'Should be there in an hour,' he called back over his shoulder to Chuck. Then he added, in an afterthought, 'Wonder if the computer’s finished its run. It was due about now.'. Chuck didn’t reply, so George swung round in his saddle. 
He could just see Chuck’s face, a white oval turned toward the sky."] + +def delivery_callback(message): + pass + +r = RNS.Reticulum() +router = LXMF.LXMRouter(storagepath="./tmp2") +router.register_delivery_callback(delivery_callback) +ident = RNS.Identity() +source = router.register_delivery_identity(ident, display_name=random_names[random.randint(0,len(random_names)-1)]) +router.announce(source.hash) +RNS.log("Source announced") + +print("Recipient: ", end=" ") +recipient_hexhash = input() +recipient_hash = bytes.fromhex(recipient_hexhash) + +if not RNS.Transport.has_path(recipient_hash): + RNS.log("Destination is not yet known. Requesting path and waiting for announce to arrive...") + RNS.Transport.request_path(recipient_hash) + while not RNS.Transport.has_path(recipient_hash): + time.sleep(0.1) + +# Recall the server identity +recipient_identity = RNS.Identity.recall(recipient_hash) + +dest = RNS.Destination(recipient_identity, RNS.Destination.OUT, RNS.Destination.SINGLE, "lxmf", "delivery") + +while True: + lxm = LXMF.LXMessage(dest, source, random_msgs[random.randint(0,len(random_msgs)-1)], random_titles[random.randint(0,len(random_titles)-1)], desired_method=LXMF.LXMessage.DIRECT) + router.handle_outbound(lxm) + input() \ No newline at end of file From 6ed16916d645a84e7aa623c904fcfbeccf1940ef Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sun, 7 Jan 2024 16:51:30 +0100 Subject: [PATCH 008/161] Fixed get fields method --- LXMF/LXMessage.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LXMF/LXMessage.py b/LXMF/LXMessage.py index 5c59202..0fe1ccf 100644 --- a/LXMF/LXMessage.py +++ b/LXMF/LXMessage.py @@ -168,7 +168,7 @@ class LXMessage: raise ValueError("LXMessage property \"fields\" can only be dict or None") def get_fields(self): - return self.__fields + return self.fields def set_destination(self, destination): if self.destination == None: From 87413b93555624084a40e1eb97449870486a2595 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Mon, 15 Jan 2024 20:29:29 +0100 Subject: [PATCH 009/161] Updated readme --- README.md | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 11451a8..2f5c888 100644 --- a/README.md +++ b/README.md @@ -8,7 +8,14 @@ User-facing clients built on LXMF include: - [Sideband](https://unsigned.io/sideband) - [Nomad Network](https://unsigned.io/nomadnet) -- [Nexus Messenger](https://github.com/HarlekinSimplex/nexus_messenger) + +Community-provided tools and utilities for LXMF include: + +- [LXMF-Bot](https://github.com/randogoth/lxmf-bot) +- [LXMF Messageboard](https://github.com/chengtripp/lxmf_messageboard) +- [LXMEvent](https://github.com/faragher/LXMEvent) +- [RangeMap](https://github.com/faragher/RangeMap) +- [LXMF Tools](https://github.com/SebastianObi/LXMF-Tools) ## Structure From 7a1bcc134bfe09e7b4ceac3f401c13fee7505354 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Mon, 15 Jan 2024 20:29:39 +0100 Subject: [PATCH 010/161] Updated dependencies --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 5c17e3b..4f5654e 100644 --- a/setup.py +++ b/setup.py @@ -25,6 +25,6 @@ setuptools.setup( 'lxmd=LXMF.Utilities.lxmd:main', ] }, - install_requires=['rns>=0.6.7'], + install_requires=['rns>=0.7.0'], python_requires='>=3.7', ) From f0e9fa66598b31e3ae4bcc9f188bb63bc298ba5e Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Tue, 16 Jan 2024 13:26:33 +0100 Subject: [PATCH 011/161] Corrected inconsistent property, getter and setter use. Fixed #16. 
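The patch below converts direct attribute access on LXMessage into Python properties whose setters delegate to the existing set_destination() and set_source() methods, so attribute-style assignment and the older getter/setter calls go through the same validation. A standalone sketch of the pattern follows; the names are generic illustrations, not the repository's code:

class Message:
    def __init__(self):
        self.__destination = None

    @property
    def destination(self):
        return self.__destination

    @destination.setter
    def destination(self, destination):
        # Attribute assignment is routed through the validating setter below
        self.set_destination(destination)

    def get_destination(self):
        return self.destination

    def set_destination(self, destination):
        if self.destination == None:
            self.__destination = destination
        else:
            raise ValueError("Cannot reassign destination")

msg = Message()
msg.destination = "some destination"   # equivalent to msg.set_destination(...)
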
--- LXMF/LXMessage.py | 26 +++++++++++++++++++++----- 1 file changed, 21 insertions(+), 5 deletions(-) diff --git a/LXMF/LXMessage.py b/LXMF/LXMessage.py index 0fe1ccf..3cf8122 100644 --- a/LXMF/LXMessage.py +++ b/LXMF/LXMessage.py @@ -170,6 +170,17 @@ class LXMessage: def get_fields(self): return self.fields + @property + def destination(self): + return self.__destination + + @destination.setter + def destination(self, destination): + self.set_destination(destination) + + def get_destination(self): + return self.destination + def set_destination(self, destination): if self.destination == None: if isinstance(destination, RNS.Destination): @@ -179,8 +190,16 @@ class LXMessage: else: raise ValueError("Cannot reassign destination on LXMessage") - def get_destination(self): - return self.__destination + @property + def source(self): + return self.__source + + @source.setter + def source(self, source): + self.set_source(source) + + def get_source(self): + return self.source def set_source(self, source): if self.source == None: @@ -191,9 +210,6 @@ class LXMessage: else: raise ValueError("Cannot reassign source on LXMessage") - def get_source(self): - return self.__source - def set_delivery_destination(self, delivery_destination): self.__delivery_destination = delivery_destination From af2dd5670778e99cb13ba8346288378d0ca044f3 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 29 Feb 2024 18:27:19 +0100 Subject: [PATCH 012/161] Added issue templates --- .github/ISSUE_TEMPLATE/config.yml | 11 ++++++++ .github/ISSUE_TEMPLATE/πŸ›-bug-report.md | 35 +++++++++++++++++++++++++ 2 files changed, 46 insertions(+) create mode 100644 .github/ISSUE_TEMPLATE/config.yml create mode 100644 .github/ISSUE_TEMPLATE/πŸ›-bug-report.md diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 0000000..caf3250 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,11 @@ +blank_issues_enabled: false +contact_links: + - name: ✨ Feature Request or Idea + url: https://github.com/markqvist/Reticulum/discussions/new?category=ideas + about: Propose and discuss features and ideas + - name: πŸ’¬ Questions, Help & Discussion + about: Ask anything, or get help + url: https://github.com/markqvist/Reticulum/discussions/new/choose + - name: πŸ“– Read the Reticulum Manual + url: https://markqvist.github.io/Reticulum/manual/ + about: The complete documentation for Reticulum diff --git a/.github/ISSUE_TEMPLATE/πŸ›-bug-report.md b/.github/ISSUE_TEMPLATE/πŸ›-bug-report.md new file mode 100644 index 0000000..77ad6c2 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/πŸ›-bug-report.md @@ -0,0 +1,35 @@ +--- +name: "\U0001F41B Bug Report" +about: Report a reproducible bug +title: '' +labels: '' +assignees: '' + +--- + +**Read the Contribution Guidelines** +Before creating a bug report on this issue tracker, you **must** read the [Contribution Guidelines](https://github.com/markqvist/Reticulum/blob/master/Contributing.md). Issues that do not follow the contribution guidelines **will be deleted without comment**. + +- The issue tracker is used by developers of this project. **Do not use it to ask general questions, or for support requests**. +- Ideas and feature requests can be made on the [Discussions](https://github.com/markqvist/Reticulum/discussions). **Only** feature requests accepted by maintainers and developers are tracked and included on the issue tracker. **Do not post feature requests here**. 
+- After reading the [Contribution Guidelines](https://github.com/markqvist/Reticulum/blob/master/Contributing.md), delete this section from your bug report. + +**Describe the Bug** +A clear and concise description of what the bug is. + +**To Reproduce** +Describe in detail how to reproduce the bug. + +**Expected Behavior** +A clear and concise description of what you expected to happen. + +**Logs & Screenshots** +Please include any relevant log output. If applicable, also add screenshots to help explain your problem. + +**System Information** +- OS and version +- Python version +- Program version + +**Additional context** +Add any other context about the problem here. From 35dc77152828cc537f3a9484617ebc6df32b519a Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 29 Feb 2024 20:12:54 +0100 Subject: [PATCH 013/161] Don't keep links alive after sync --- LXMF/LXMPeer.py | 19 ++++++++++++------- LXMF/LXMRouter.py | 3 ++- 2 files changed, 14 insertions(+), 8 deletions(-) diff --git a/LXMF/LXMPeer.py b/LXMF/LXMPeer.py index 611a874..17c0344 100644 --- a/LXMF/LXMPeer.py +++ b/LXMF/LXMPeer.py @@ -224,6 +224,10 @@ class LXMPeer: self.state = LXMPeer.RESOURCE_TRANSFERRING else: RNS.log("Peer "+RNS.prettyhexrep(self.destination_hash)+" did not request any of the available messages, sync completed", RNS.LOG_DEBUG) + if self.link != None: + self.link.teardown() + + self.link = None self.state = LXMPeer.IDLE except Exception as e: @@ -236,25 +240,28 @@ class LXMPeer: self.link = None self.state = LXMPeer.IDLE - def resource_concluded(self, resource): if resource.status == RNS.Resource.COMPLETE: for transient_id in resource.transferred_messages: message = self.unhandled_messages.pop(transient_id) self.handled_messages[transient_id] = message + + if self.link != None: + self.link.teardown() + + self.link = None self.state = LXMPeer.IDLE - self.link.teardown() + RNS.log("Sync to peer "+RNS.prettyhexrep(self.destination_hash)+" completed", RNS.LOG_DEBUG) self.alive = True self.last_heard = time.time() + else: RNS.log("Resource transfer for LXMF peer sync failed to "+str(self.destination), RNS.LOG_DEBUG) if self.link != None: self.link.teardown() - else: - self.state = LXMPeer.IDLE - + self.state = LXMPeer.IDLE def link_established(self, link): self.link.identify(self.router.identity) @@ -272,8 +279,6 @@ class LXMPeer: def handle_message(self, transient_id): if not transient_id in self.handled_messages and not transient_id in self.unhandled_messages: - # TODO: Remove at some point - RNS.log("The message "+RNS.prettyhexrep(transient_id)+" was added to distribution queue for "+RNS.prettyhexrep(self.destination_hash), RNS.LOG_EXTREME) self.unhandled_messages[transient_id] = self.router.propagation_entries[transient_id] def __str__(self): diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 06e9abf..e9a4836 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -653,7 +653,8 @@ class LXMRouter: job_thread.start() def __request_messages_path_job(self): - while not RNS.Transport.has_path(self.wants_download_on_path_available_from) and time.time() < self.wants_download_on_path_available_timeout: + path_timeout = self.wants_download_on_path_available_timeout + while not RNS.Transport.has_path(self.wants_download_on_path_available_from) and time.time() < path_timeout: time.sleep(0.1) if RNS.Transport.has_path(self.wants_download_on_path_available_from): From 7aea4ea209911641c476e9f9903af6a6df7fee86 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 29 Feb 2024 20:15:03 +0100 Subject: [PATCH 014/161] 
Updated version --- LXMF/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LXMF/_version.py b/LXMF/_version.py index 771bc6e..6a9beea 100644 --- a/LXMF/_version.py +++ b/LXMF/_version.py @@ -1 +1 @@ -__version__ = "0.3.9" +__version__ = "0.4.0" From 696c78ecea794d1f5728b49c00b97c6d89117667 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 29 Feb 2024 23:02:16 +0100 Subject: [PATCH 015/161] Improved propagation node sync and memory consumption --- LXMF/LXMPeer.py | 8 ++++++-- LXMF/LXMRouter.py | 27 +++++++++++++++++++++------ 2 files changed, 27 insertions(+), 8 deletions(-) diff --git a/LXMF/LXMPeer.py b/LXMF/LXMPeer.py index 17c0344..97fb747 100644 --- a/LXMF/LXMPeer.py +++ b/LXMF/LXMPeer.py @@ -148,8 +148,10 @@ class LXMPeer: RNS.log("Sending sync request to peer "+str(self.destination), RNS.LOG_DEBUG) self.link.request(LXMPeer.OFFER_REQUEST_PATH, unhandled_ids, response_callback=self.offer_response, failed_callback=self.request_failed) self.state = LXMPeer.REQUEST_SENT + else: RNS.log("Could not request sync to peer "+RNS.prettyhexrep(self.destination_hash)+" since its identity could not be recalled.", RNS.LOG_ERROR) + else: RNS.log("Postponing sync with peer "+RNS.prettyhexrep(self.destination_hash)+" for "+RNS.prettytime(self.next_sync_attempt-time.time())+" due to previous failures", RNS.LOG_DEBUG) if self.last_sync_attempt > self.last_heard: @@ -159,8 +161,8 @@ class LXMPeer: RNS.log("Sync request to peer "+str(self.destination)+" failed", RNS.LOG_DEBUG) if self.link != None: self.link.teardown() - else: - self.state = LXMPeer.IDLE + + self.state = LXMPeer.IDLE def offer_response(self, request_receipt): try: @@ -222,6 +224,7 @@ class LXMPeer: resource = RNS.Resource(data, self.link, callback = self.resource_concluded) resource.transferred_messages = wanted_message_ids self.state = LXMPeer.RESOURCE_TRANSFERRING + else: RNS.log("Peer "+RNS.prettyhexrep(self.destination_hash)+" did not request any of the available messages, sync completed", RNS.LOG_DEBUG) if self.link != None: @@ -261,6 +264,7 @@ class LXMPeer: if self.link != None: self.link.teardown() + self.link = None self.state = LXMPeer.IDLE def link_established(self, link): diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index e9a4836..04263a0 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -21,6 +21,7 @@ class LXMRouter: DELIVERY_RETRY_WAIT = 7 PATH_REQUEST_WAIT = 7 LINK_MAX_INACTIVITY = 10*60 + P_LINK_MAX_INACTIVITY = 3*60 MESSAGE_EXPIRY = 30*24*60*60 @@ -90,6 +91,7 @@ class LXMRouter: self.propagation_transfer_progress = 0.0 self.propagation_transfer_last_result = None self.propagation_transfer_max_messages = None + self.active_propagation_links = [] self.locally_delivered_transient_ids = {} self.locally_processed_transient_ids = {} @@ -471,6 +473,19 @@ class LXMRouter: cleaned_link = self.direct_links.pop(link_hash) RNS.log("Cleaned link "+str(cleaned_link), RNS.LOG_DEBUG) + try: + inactive_links = [] + for link in self.active_propagation_links: + if link.no_data_for() > LXMRouter.P_LINK_MAX_INACTIVITY: + inactive_links.append(link) + + for link in inactive_links: + self.active_propagation_links.remove(link) + link.teardown() + + except Exception as e: + RNS.log("An error occurred while cleaning inbound propagation links. 
The contained exception was: "+str(e), RNS.LOG_ERROR) + if self.outbound_propagation_link != None and self.outbound_propagation_link.status == RNS.Link.CLOSED: self.outbound_propagation_link = None if self.propagation_transfer_state == LXMRouter.PR_COMPLETE: @@ -953,9 +968,8 @@ class LXMRouter: peer.alive = True peer.sync_backoff = 0 peer.next_sync_attempt = 0 - - peer.peering_timebase = timestamp - peer.last_heard = time.time() + peer.peering_timebase = timestamp + peer.last_heard = time.time() else: peer = LXMPeer(self, destination_hash) @@ -1038,6 +1052,7 @@ class LXMRouter: link.set_resource_strategy(RNS.Link.ACCEPT_ALL) link.set_resource_started_callback(self.resource_transfer_began) link.set_resource_concluded_callback(self.propagation_resource_concluded) + self.active_propagation_links.append(link) def propagation_packet(self, data, packet): try: @@ -1057,7 +1072,7 @@ class LXMRouter: RNS.log("Exception occurred while parsing incoming LXMF propagation data.", RNS.LOG_ERROR) RNS.log("The contained exception was: "+str(e), RNS.LOG_ERROR) - def offer_request(self, path, data, request_id, remote_identity, requested_at): + def offer_request(self, path, data, request_id, link_id, remote_identity, requested_at): if remote_identity == None: return LXMPeer.ERROR_NO_IDENTITY else: @@ -1086,7 +1101,7 @@ class LXMRouter: RNS.log("Transfer concluded for incoming propagation resource "+str(resource), RNS.LOG_DEBUG) if resource.status == RNS.Resource.COMPLETE: # TODO: The peer this was received from should - # have the transient id added to it's list of + # have the transient id added to its list of # already handled messages. try: data = msgpack.unpackb(resource.data.read()) @@ -1326,7 +1341,7 @@ class LXMRouter: else: # Simply wait for the link to become # active or close - RNS.log("The propagation link to "+RNS.prettyhexrep(self.outbound_propagation_node)+" is pending, waiting for link to become active: "+str(self.outbound_propagation_link.status), RNS.LOG_DEBUG) + RNS.log("The propagation link to "+RNS.prettyhexrep(self.outbound_propagation_node)+" is pending, waiting for link to become active", RNS.LOG_DEBUG) else: # No link exists, so we'll try to establish one, but # only if we've never tried before, or the retry wait From b3bc8e684eeab86b795f6697ae513119eb0a112c Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 29 Feb 2024 23:03:02 +0100 Subject: [PATCH 016/161] Updated dependency --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 4f5654e..b66db9d 100644 --- a/setup.py +++ b/setup.py @@ -25,6 +25,6 @@ setuptools.setup( 'lxmd=LXMF.Utilities.lxmd:main', ] }, - install_requires=['rns>=0.7.0'], + install_requires=['rns>=0.7.1'], python_requires='>=3.7', ) From bb1b9e4163f72af95f197a5b98bf0760752120c7 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Fri, 1 Mar 2024 00:33:42 +0100 Subject: [PATCH 017/161] Updated dependency version --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index b66db9d..26ce0d7 100644 --- a/setup.py +++ b/setup.py @@ -25,6 +25,6 @@ setuptools.setup( 'lxmd=LXMF.Utilities.lxmd:main', ] }, - install_requires=['rns>=0.7.1'], + install_requires=['rns>=0.7.2'], python_requires='>=3.7', ) From c7489dc0fa74b91e9b0cd4f2c6a2183ba8f8e031 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Fri, 1 Mar 2024 22:33:53 +0100 Subject: [PATCH 018/161] Added transfer limit handling to announce handler --- LXMF/Handlers.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff 
--git a/LXMF/Handlers.py b/LXMF/Handlers.py index 32e045e..571a59f 100644 --- a/LXMF/Handlers.py +++ b/LXMF/Handlers.py @@ -34,11 +34,17 @@ class LXMFPropagationAnnounceHandler: data = msgpack.unpackb(app_data) if self.lxmrouter.propagation_node and self.lxmrouter.autopeer: + node_timebase = data[1] + propagation_transfer_limit = None + if len(data) >= 3: + propagation_transfer_limit = data[2] + if data[0] == True: if RNS.Transport.hops_to(destination_hash) <= self.lxmrouter.autopeer_maxdepth: - self.lxmrouter.peer(destination_hash, data[1]) + self.lxmrouter.peer(destination_hash, node_timebase, propagation_transfer_limit) + elif data[0] == False: - self.lxmrouter.unpeer(destination_hash, data[1]) + self.lxmrouter.unpeer(destination_hash, node_timebase) except Exception as e: RNS.log("Error while evaluating propagation node announce, ignoring announce.", RNS.LOG_DEBUG) From 64050d39bfa6c0dafe15c6b0fd2bd2f0bbd40549 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Fri, 1 Mar 2024 22:37:54 +0100 Subject: [PATCH 019/161] Added propagation transfer limit options --- LXMF/Handlers.py | 5 ++- LXMF/LXMPeer.py | 54 +++++++++++++++++------ LXMF/LXMRouter.py | 99 +++++++++++++++++++++++++++--------------- LXMF/Utilities/lxmd.py | 50 +++++++++++++++++++++ 4 files changed, 160 insertions(+), 48 deletions(-) diff --git a/LXMF/Handlers.py b/LXMF/Handlers.py index 571a59f..c09bf1c 100644 --- a/LXMF/Handlers.py +++ b/LXMF/Handlers.py @@ -37,7 +37,10 @@ class LXMFPropagationAnnounceHandler: node_timebase = data[1] propagation_transfer_limit = None if len(data) >= 3: - propagation_transfer_limit = data[2] + try: + propagation_transfer_limit = float(data[2]) + except: + propagation_transfer_limit = None if data[0] == True: if RNS.Transport.hops_to(destination_hash) <= self.lxmrouter.autopeer_maxdepth: diff --git a/LXMF/LXMPeer.py b/LXMF/LXMPeer.py index 97fb747..5ee8986 100644 --- a/LXMF/LXMPeer.py +++ b/LXMF/LXMPeer.py @@ -48,6 +48,14 @@ class LXMPeer: else: peer.link_establishment_rate = 0 + if "propagation_transfer_limit" in dictionary: + try: + peer.propagation_transfer_limit = float(dictionary["propagation_transfer_limit"]) + except Exception as e: + peer.propagation_transfer_limit = None + else: + peer.propagation_transfer_limit = None + for transient_id in dictionary["handled_ids"]: if transient_id in router.propagation_entries: peer.handled_messages[transient_id] = router.propagation_entries[transient_id] @@ -65,6 +73,7 @@ class LXMPeer: dictionary["last_heard"] = self.last_heard dictionary["destination_hash"] = self.destination_hash dictionary["link_establishment_rate"] = self.link_establishment_rate + dictionary["propagation_transfer_limit"] = self.propagation_transfer_limit handled_ids = [] for transient_id in self.handled_messages: @@ -87,12 +96,14 @@ class LXMPeer: self.sync_backoff = 0 self.peering_timebase = 0 self.link_establishment_rate = 0 + self.propagation_transfer_limit = None self.link = None self.state = LXMPeer.IDLE self.unhandled_messages = {} self.handled_messages = {} + self.last_offer = [] self.router = router self.destination_hash = destination_hash @@ -133,11 +144,17 @@ class LXMPeer: self.sync_backoff = 0 RNS.log("Synchronisation link to peer "+RNS.prettyhexrep(self.destination_hash)+" established, preparing request...", RNS.LOG_DEBUG) + unhandled_entries = [] unhandled_ids = [] purged_ids = [] for transient_id in self.unhandled_messages: if transient_id in self.router.propagation_entries: - unhandled_ids.append(transient_id) + unhandled_entry = [ + transient_id, + 
self.router.get_weight(transient_id), + self.router.get_size(transient_id), + ] + unhandled_entries.append(unhandled_entry) else: purged_ids.append(transient_id) @@ -145,8 +162,21 @@ class LXMPeer: RNS.log("Dropping unhandled message "+RNS.prettyhexrep(transient_id)+" for peer "+RNS.prettyhexrep(self.destination_hash)+" since it no longer exists in the message store.", RNS.LOG_DEBUG) self.unhandled_messages.pop(transient_id) + unhandled_entries.sort(key=lambda e: e[1], reverse=False) + cumulative_size = 0 + for unhandled_entry in unhandled_entries: + transient_id = unhandled_entry[0] + weight = unhandled_entry[1] + lxm_size = unhandled_entry[2] + if self.propagation_transfer_limit != None and cumulative_size + lxm_size > (self.propagation_transfer_limit*1000): + pass + else: + cumulative_size += lxm_size + unhandled_ids.append(transient_id) + RNS.log("Sending sync request to peer "+str(self.destination), RNS.LOG_DEBUG) - self.link.request(LXMPeer.OFFER_REQUEST_PATH, unhandled_ids, response_callback=self.offer_response, failed_callback=self.request_failed) + self.last_offer = unhandled_ids + self.link.request(LXMPeer.OFFER_REQUEST_PATH, self.last_offer, response_callback=self.offer_response, failed_callback=self.request_failed) self.state = LXMPeer.REQUEST_SENT else: @@ -175,33 +205,31 @@ class LXMPeer: if response == LXMPeer.ERROR_NO_IDENTITY: if self.link != None: RNS.log("Remote peer indicated that no identification was received, retrying...", RNS.LOG_DEBUG) - self.link.indentify() + self.link.identify() self.state = LXMPeer.LINK_READY self.sync() elif response == False: # Peer already has all advertised messages - for transient_id in self.unhandled_messages: - message_entry = self.unhandled_messages[transient_id] - self.handled_messages[transient_id] = message_entry - - self.unhandled_messages = {} + for transient_id in self.last_offer: + if transient_id in self.unhandled_messages: + self.handled_messages[transient_id] = self.unhandled_messages.pop(transient_id) + elif response == True: # Peer wants all advertised messages - for transient_id in self.unhandled_messages: + for transient_id in self.last_offer: wanted_messages.append(self.unhandled_messages[transient_id]) wanted_message_ids.append(transient_id) else: # Peer wants some advertised messages - peer_had_messages = [] - for transient_id in self.unhandled_messages.copy(): + for transient_id in self.last_offer.copy(): # If the peer did not want the message, it has # already received it from another peer. 
if not transient_id in response: - message_entry = self.unhandled_messages.pop(transient_id) - self.handled_messages[transient_id] = message_entry + if transient_id in self.unhandled_messages: + self.handled_messages[transient_id] = self.unhandled_messages.pop(transient_id) for transient_id in response: wanted_messages.append(self.unhandled_messages[transient_id]) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 04263a0..5141bc6 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -31,30 +31,35 @@ class LXMRouter: AUTOPEER_MAXDEPTH = 4 FASTEST_N_RANDOM_POOL = 2 - PR_PATH_TIMEOUT = 10 + PROPAGATION_LIMIT = 256 + DELIVERY_LIMIT = 1024 - PR_IDLE = 0x00 - PR_PATH_REQUESTED = 0x01 - PR_LINK_ESTABLISHING = 0x02 - PR_LINK_ESTABLISHED = 0x03 - PR_REQUEST_SENT = 0x04 - PR_RECEIVING = 0x05 - PR_RESPONSE_RECEIVED = 0x06 - PR_COMPLETE = 0x07 - PR_NO_PATH = 0xf0 - PR_LINK_FAILED = 0xf1 - PR_TRANSFER_FAILED = 0xf2 - PR_NO_IDENTITY_RCVD = 0xf3 - PR_NO_ACCESS = 0xf4 - PR_FAILED = 0xfe + PR_PATH_TIMEOUT = 10 - PR_ALL_MESSAGES = 0x00 + PR_IDLE = 0x00 + PR_PATH_REQUESTED = 0x01 + PR_LINK_ESTABLISHING = 0x02 + PR_LINK_ESTABLISHED = 0x03 + PR_REQUEST_SENT = 0x04 + PR_RECEIVING = 0x05 + PR_RESPONSE_RECEIVED = 0x06 + PR_COMPLETE = 0x07 + PR_NO_PATH = 0xf0 + PR_LINK_FAILED = 0xf1 + PR_TRANSFER_FAILED = 0xf2 + PR_NO_IDENTITY_RCVD = 0xf3 + PR_NO_ACCESS = 0xf4 + PR_FAILED = 0xfe + + PR_ALL_MESSAGES = 0x00 ### Developer-facing API ############################## ####################################################### - def __init__(self, identity = None, storagepath = None, autopeer = AUTOPEER, autopeer_maxdepth = None): + def __init__(self, identity = None, storagepath = None, autopeer = AUTOPEER, autopeer_maxdepth = None, + propagation_limit = PROPAGATION_LIMIT, delivery_limit = DELIVERY_LIMIT): + random.seed(os.urandom(10)) self.pending_inbound = [] @@ -84,6 +89,8 @@ class LXMRouter: self.message_storage_limit = None self.information_storage_limit = None + self.propagation_per_transfer_limit = propagation_limit + self.delivery_per_transfer_limit = delivery_limit self.wants_download_on_path_available_from = None self.wants_download_on_path_available_to = None @@ -152,7 +159,13 @@ class LXMRouter: def announce_propagation_node(self): def delayed_announce(): time.sleep(LXMRouter.NODE_ANNOUNCE_DELAY) - data = msgpack.packb([self.propagation_node, int(time.time())]) + announce_data = [ + self.propagation_node, # Boolean flag signalling propagation node state + int(time.time()), # Current node timebase + self.propagation_per_transfer_limit, # Per-transfer limit for message propagation in kilobytes + ] + + data = msgpack.packb(announce_data) self.propagation_destination.announce(app_data=data) da_thread = threading.Thread(target=delayed_announce) @@ -319,7 +332,10 @@ class LXMRouter: peer = LXMPeer.from_bytes(serialised_peer, self) if peer.identity != None: self.peers[peer.destination_hash] = peer - RNS.log("Loaded peer "+RNS.prettyhexrep(peer.destination_hash)+" with "+str(len(peer.unhandled_messages))+" unhandled messages", RNS.LOG_DEBUG) + lim_str = ", no transfer limit" + if peer.propagation_transfer_limit != None: + lim_str = ", "+RNS.prettysize(peer.propagation_transfer_limit*1000)+" transfer limit" + RNS.log("Loaded peer "+RNS.prettyhexrep(peer.destination_hash)+" with "+str(len(peer.unhandled_messages))+" unhandled messages"+lim_str, RNS.LOG_DEBUG) else: RNS.log("Peer "+RNS.prettyhexrep(peer.destination_hash)+" could not be loaded, because its identity could not be recalled. 
Dropping peer.", RNS.LOG_DEBUG) @@ -522,6 +538,28 @@ class LXMRouter: self.locally_processed_transient_ids.pop(transient_id) RNS.log("Cleaned "+RNS.prettyhexrep(transient_id)+" from locally processed cache", RNS.LOG_DEBUG) + def get_weight(self, transient_id): + dst_hash = self.propagation_entries[transient_id][0] + lxm_rcvd = self.propagation_entries[transient_id][2] + lxm_size = self.propagation_entries[transient_id][3] + + now = time.time() + age_weight = max(1, (now - lxm_rcvd)/60/60/24/4) + + if dst_hash in self.prioritised_list: + priority_weight = 0.1 + else: + priority_weight = 1.0 + + weight = priority_weight * age_weight * lxm_size + + return weight + + def get_size(self, transient_id): + lxm_size = self.propagation_entries[transient_id][3] + return lxm_size + + def clean_message_store(self): # Check and remove expired messages now = time.time() @@ -563,22 +601,13 @@ class LXMRouter: bytes_needed = message_storage_size - self.message_storage_limit bytes_cleaned = 0 - now = time.time() weighted_entries = [] for transient_id in self.propagation_entries: - entry = self.propagation_entries[transient_id] - - dst_hash = entry[0] - lxm_rcvd = entry[2] - lxm_size = entry[3] - age_weight = max(1, (now - lxm_rcvd)/60/60/24/4) - if dst_hash in self.prioritised_list: - priority_weight = 0.1 - else: - priority_weight = 1.0 - - weight = priority_weight * age_weight * lxm_size - weighted_entries.append([entry, weight, transient_id]) + weighted_entries.append([ + self.propagation_entries[transient_id], + self.get_weight(transient_id), + transient_id + ]) weighted_entries.sort(key=lambda we: we[1], reverse=True) @@ -961,7 +990,7 @@ class LXMRouter: ### Peer Sync & Propagation ########################### ####################################################### - def peer(self, destination_hash, timestamp): + def peer(self, destination_hash, timestamp, propagation_transfer_limit): if destination_hash in self.peers: peer = self.peers[destination_hash] if timestamp > peer.peering_timebase: @@ -970,11 +999,13 @@ class LXMRouter: peer.next_sync_attempt = 0 peer.peering_timebase = timestamp peer.last_heard = time.time() + peer.propagation_transfer_limit = propagation_transfer_limit else: peer = LXMPeer(self, destination_hash) peer.alive = True peer.last_heard = time.time() + peer.propagation_transfer_limit = propagation_transfer_limit self.peers[destination_hash] = peer RNS.log("Peered with "+str(peer.destination)) diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index 3dc34df..3a76dbb 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -77,6 +77,13 @@ def apply_config(): active_configuration["peer_announce_interval"] = lxmd_config["lxmf"].as_int("announce_interval")*60 else: active_configuration["peer_announce_interval"] = None + + if "lxmf" in lxmd_config and "delivery_transfer_max_accepted_size" in lxmd_config["lxmf"]: + active_configuration["delivery_transfer_max_accepted_size"] = lxmd_config["lxmf"].as_float("delivery_transfer_max_accepted_size") + if active_configuration["delivery_transfer_max_accepted_size"] < 0.38: + active_configuration["delivery_transfer_max_accepted_size"] = 0.38 + else: + active_configuration["delivery_transfer_max_accepted_size"] = 1024 if "lxmf" in lxmd_config and "on_inbound" in lxmd_config["lxmf"]: active_configuration["on_inbound"] = lxmd_config["lxmf"]["on_inbound"] @@ -121,6 +128,13 @@ def apply_config(): else: active_configuration["message_storage_limit"] = 2000 + if "propagation" in lxmd_config and "propagation_transfer_max_accepted_size" 
in lxmd_config["propagation"]: + active_configuration["propagation_transfer_max_accepted_size"] = lxmd_config["propagation"].as_float("propagation_transfer_max_accepted_size") + if active_configuration["propagation_transfer_max_accepted_size"] < 0.38: + active_configuration["propagation_transfer_max_accepted_size"] = 0.38 + else: + active_configuration["propagation_transfer_max_accepted_size"] = 256 + if "propagation" in lxmd_config and "prioritise_destinations" in lxmd_config["propagation"]: active_configuration["prioritised_lxmf_destinations"] = lxmd_config["propagation"].as_list("prioritise_destinations") else: @@ -289,6 +303,8 @@ def program_setup(configdir = None, rnsconfigdir = None, run_pn = False, on_inbo storagepath = storagedir, autopeer = active_configuration["autopeer"], autopeer_maxdepth = active_configuration["autopeer_maxdepth"], + propagation_limit = active_configuration["propagation_transfer_max_accepted_size"], + delivery_limit = active_configuration["delivery_transfer_max_accepted_size"], ) message_router.register_delivery_callback(lxmf_delivery) @@ -418,23 +434,41 @@ __default_lxmd_config__ = """# This is an example LXM Daemon config file. [propagation] # Whether to enable propagation node + enable_node = no # Automatic announce interval in minutes. # 6 hours by default. + announce_interval = 360 # Whether to announce when the node starts. + announce_at_start = yes # Wheter to automatically peer with other # propagation nodes on the network. + autopeer = yes # The maximum peering depth (in hops) for # automatically peered nodes. + autopeer_maxdepth = 4 +# The maximum accepted transfer size per in- +# coming propagation transfer, in kilobytes. +# This also sets the upper limit for the size +# of single messages accepted onto this node. +# +# If a node wants to propagate a larger number +# of messages to this node, than what can fit +# within this limit, it will prioritise sending +# the smallest messages first, and try again +# with any remaining messages at a later point. + +propagation_transfer_max_accepted_size = 256 + # The maximum amount of storage to use for # the LXMF Propagation Node message store, # specified in megabytes. When this limit @@ -444,6 +478,7 @@ autopeer_maxdepth = 4 # new and small. Large and old messages will # be removed first. This setting is optional # and defaults to 2 gigabytes. + # message_storage_limit = 2000 # You can tell the LXMF message router to @@ -453,6 +488,7 @@ autopeer_maxdepth = 4 # keeping messages for destinations specified # with this option. This setting is optional, # and generally you do not need to use it. + # prioritise_destinations = 41d20c727598a3fbbdf9106133a3a0ed, d924b81822ca24e68e2effea99bcb8cf # By default, any destination is allowed to @@ -461,6 +497,7 @@ autopeer_maxdepth = 4 # authentication, you must provide a list of # allowed identity hashes in the a file named # "allowed" in the lxmd config directory. + auth_required = no @@ -469,23 +506,35 @@ auth_required = no # The LXM Daemon will create an LXMF destination # that it can receive messages on. This option sets # the announced display name for this destination. + display_name = Anonymous Peer # It is possible to announce the internal LXMF # destination when the LXM Daemon starts up. + announce_at_start = no # You can also announce the delivery destination # at a specified interval. This is not enabled by # default. 
+ # announce_interval = 360 +# The maximum accepted unpacked size for mes- +# sages received directly from other peers, +# specified in kilobytes. Messages larger than +# this will be rejected before the transfer +# begins. + +delivery_transfer_max_accepted_size = 1024 + # You can configure an external program to be run # every time a message is received. The program # will receive as an argument the full path to the # message saved as a file. The example below will # simply result in the message getting deleted as # soon as it has been received. + # on_inbound = rm @@ -499,6 +548,7 @@ announce_at_start = no # 5: Verbose logging # 6: Debug logging # 7: Extreme logging + loglevel = 4 """ From 7ecd3c0d5e70e98f8202db77d0da22ebd99b7482 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Fri, 1 Mar 2024 23:26:27 +0100 Subject: [PATCH 020/161] Added incoming resource transfer size limits --- LXMF/LXMRouter.py | 26 +++++++++++++++++++++++--- LXMF/Utilities/lxmd.py | 4 ++-- 2 files changed, 25 insertions(+), 5 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 5141bc6..cedc2ea 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -32,7 +32,7 @@ class LXMRouter: FASTEST_N_RANDOM_POOL = 2 PROPAGATION_LIMIT = 256 - DELIVERY_LIMIT = 1024 + DELIVERY_LIMIT = 1000 PR_PATH_TIMEOUT = 10 @@ -970,7 +970,8 @@ class LXMRouter: def delivery_link_established(self, link): link.track_phy_stats(True) link.set_packet_callback(self.delivery_packet) - link.set_resource_strategy(RNS.Link.ACCEPT_ALL) + link.set_resource_strategy(RNS.Link.ACCEPT_APP) + link.set_resource_callback(self.delivery_resource_advertised) link.set_resource_started_callback(self.resource_transfer_began) link.set_resource_concluded_callback(self.delivery_resource_concluded) @@ -980,6 +981,15 @@ class LXMRouter: def resource_transfer_began(self, resource): RNS.log("Transfer began for LXMF delivery resource "+str(resource), RNS.LOG_DEBUG) + def delivery_resource_advertised(self, resource): + size = resource.get_data_size() + limit = self.delivery_per_transfer_limit*1000 + if limit != None and size > limit: + RNS.log("Rejecting "+RNS.prettysize(size)+" incoming LXMF delivery resource, since it exceeds the limit of "+RNS.prettysize(limit), RNS.LOG_DEBUG) + return False + else: + return True + def delivery_resource_concluded(self, resource): RNS.log("Transfer concluded for LXMF delivery resource "+str(resource), RNS.LOG_DEBUG) if resource.status == RNS.Resource.COMPLETE: @@ -1080,11 +1090,21 @@ class LXMRouter: def propagation_link_established(self, link): link.set_packet_callback(self.propagation_packet) - link.set_resource_strategy(RNS.Link.ACCEPT_ALL) + link.set_resource_strategy(RNS.Link.ACCEPT_APP) + link.set_resource_callback(self.propagation_resource_advertised) link.set_resource_started_callback(self.resource_transfer_began) link.set_resource_concluded_callback(self.propagation_resource_concluded) self.active_propagation_links.append(link) + def propagation_resource_advertised(self, resource): + size = resource.get_data_size() + limit = self.propagation_per_transfer_limit*1000 + if limit != None and size > limit: + RNS.log("Rejecting "+RNS.prettysize(size)+" incoming LXMF propagation resource, since it exceeds the limit of "+RNS.prettysize(limit), RNS.LOG_DEBUG) + return False + else: + return True + def propagation_packet(self, data, packet): try: if packet.destination_type != RNS.Destination.LINK: diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index 3a76dbb..38e71b1 100644 --- a/LXMF/Utilities/lxmd.py +++ 
b/LXMF/Utilities/lxmd.py @@ -83,7 +83,7 @@ def apply_config(): if active_configuration["delivery_transfer_max_accepted_size"] < 0.38: active_configuration["delivery_transfer_max_accepted_size"] = 0.38 else: - active_configuration["delivery_transfer_max_accepted_size"] = 1024 + active_configuration["delivery_transfer_max_accepted_size"] = 1000 if "lxmf" in lxmd_config and "on_inbound" in lxmd_config["lxmf"]: active_configuration["on_inbound"] = lxmd_config["lxmf"]["on_inbound"] @@ -526,7 +526,7 @@ announce_at_start = no # this will be rejected before the transfer # begins. -delivery_transfer_max_accepted_size = 1024 +delivery_transfer_max_accepted_size = 1000 # You can configure an external program to be run # every time a message is received. The program From 22493005dc33531d1df0ff93de6dfddb0ebb5144 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Fri, 1 Mar 2024 23:48:12 +0100 Subject: [PATCH 021/161] Overhead calculation for propagation transfer limits --- LXMF/LXMPeer.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/LXMF/LXMPeer.py b/LXMF/LXMPeer.py index 5ee8986..b1a49af 100644 --- a/LXMF/LXMPeer.py +++ b/LXMF/LXMPeer.py @@ -163,7 +163,8 @@ class LXMPeer: self.unhandled_messages.pop(transient_id) unhandled_entries.sort(key=lambda e: e[1], reverse=False) - cumulative_size = 0 + per_message_overhead = 16 # Really only 2 bytes, but set a bit higher for now + cumulative_size = 24 # Initialised to highest reasonable binary structure overhead for unhandled_entry in unhandled_entries: transient_id = unhandled_entry[0] weight = unhandled_entry[1] @@ -171,7 +172,7 @@ class LXMPeer: if self.propagation_transfer_limit != None and cumulative_size + lxm_size > (self.propagation_transfer_limit*1000): pass else: - cumulative_size += lxm_size + cumulative_size += (lxm_size+per_message_overhead) unhandled_ids.append(transient_id) RNS.log("Sending sync request to peer "+str(self.destination), RNS.LOG_DEBUG) From 1d651a9b532bacfc11efe51d002aa8f39ce7e80d Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sat, 2 Mar 2024 09:09:51 +0100 Subject: [PATCH 022/161] Added transfer limit awareness to message sync. Added ability to retain messages on node. 
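The patch below makes peer synchronisation respect per-transfer size limits and adds a client-side option to retain already-synced messages on a propagation node. A minimal sketch of the client-facing pieces, using the constructor keywords added earlier in this series; the storage path and limit values (in kilobytes) are illustrative, not part of the patch itself:

import RNS
import LXMF

r = RNS.Reticulum()

# Per-transfer limits in kilobytes for direct delivery and propagation transfers.
# The delivery limit is also sent along with sync requests, so a propagation
# node will not return more than this amount of message data per transfer.
router = LXMF.LXMRouter(storagepath="./tmp_client", delivery_limit=1000, propagation_limit=256)

# When enabled, messages this client already holds are no longer reported back
# to the propagation node during a sync, so they are retained on the node.
router.set_retain_node_lxms(True)
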
--- LXMF/LXMPeer.py | 3 ++- LXMF/LXMRouter.py | 36 ++++++++++++++++++++++++++++++------ 2 files changed, 32 insertions(+), 7 deletions(-) diff --git a/LXMF/LXMPeer.py b/LXMF/LXMPeer.py index b1a49af..a0b61a5 100644 --- a/LXMF/LXMPeer.py +++ b/LXMF/LXMPeer.py @@ -169,7 +169,8 @@ class LXMPeer: transient_id = unhandled_entry[0] weight = unhandled_entry[1] lxm_size = unhandled_entry[2] - if self.propagation_transfer_limit != None and cumulative_size + lxm_size > (self.propagation_transfer_limit*1000): + next_size = cumulative_size + (lxm_size+per_message_overhead) + if self.propagation_transfer_limit != None and next_size > (self.propagation_transfer_limit*1000): pass else: cumulative_size += (lxm_size+per_message_overhead) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index cedc2ea..d3a4b7d 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -72,6 +72,7 @@ class LXMRouter: self.ignored_list = [] self.allowed_list = [] self.auth_required = False + self.retain_synced_on_node = False self.processing_outbound = False self.processing_inbound = False @@ -200,6 +201,12 @@ class LXMRouter: def get_outbound_propagation_node(self): return self.outbound_propagation_node + def set_retain_node_lxms(self, retain): + if retain == True: + self.retain_synced_on_node = True + else: + self.retain_synced_on_node = False + def set_authentication(self, required=None): if required != None: self.auth_required = required @@ -247,7 +254,7 @@ class LXMRouter: self.outbound_propagation_link.identify(identity) self.outbound_propagation_link.request( LXMPeer.MESSAGE_GET_PATH, - [None, None], + [None, None], # Set both want and have fields to None to get message list response_callback=self.message_list_response, failed_callback=self.message_get_failed ) @@ -633,8 +640,7 @@ class LXMRouter: finally: i += 1 - RNS.log("LXMF message store size is now "+RNS.prettysize(self.message_storage_size()), RNS.LOG_EXTREME) - RNS.log("PE len "+str(len(self.propagation_entries))) + RNS.log("LXMF message store size is now "+RNS.prettysize(self.message_storage_size())+" for "+str(len(self.propagation_entries))+" items", RNS.LOG_EXTREME) except Exception as e: @@ -765,6 +771,16 @@ class LXMRouter: # Process wanted messages response_messages = [] if data[0] != None and len(data[0]) > 0: + client_transfer_limit = None + if len(data) >= 3: + try: + client_transfer_limit = float(data[2])*1000 + RNS.log("Client indicates transfer limit of "+RNS.prettysize(client_transfer_limit), RNS.LOG_DEBUG) + except: + pass + + per_message_overhead = 16 # Really only 2 bytes, but set a bit higher for now + cumulative_size = 24 # Initialised to highest reasonable binary structure overhead for transient_id in data[0]: if transient_id in self.propagation_entries and self.propagation_entries[transient_id][0] == remote_destination.hash: try: @@ -773,9 +789,17 @@ class LXMRouter: message_file = open(filepath, "rb") lxmf_data = message_file.read() - response_messages.append(lxmf_data) message_file.close() + lxm_size = len(lxmf_data) + next_size = cumulative_size + (lxm_size+per_message_overhead) + + if client_transfer_limit != None and next_size > client_transfer_limit: + pass + else: + response_messages.append(lxmf_data) + cumulative_size += (lxm_size+per_message_overhead) + except Exception as e: RNS.log("Error while processing message download request from "+RNS.prettyhexrep(remote_destination.hash)+". 
The contained exception was: "+str(e), RNS.LOG_ERROR) @@ -805,7 +829,7 @@ class LXMRouter: wants = [] if len(request_receipt.response) > 0: for transient_id in request_receipt.response: - if self.has_message(transient_id): + if not self.retain_synced_on_node and self.has_message(transient_id): haves.append(transient_id) else: if self.propagation_transfer_max_messages == LXMRouter.PR_ALL_MESSAGES or len(wants) < self.propagation_transfer_max_messages: @@ -813,7 +837,7 @@ class LXMRouter: request_receipt.link.request( LXMPeer.MESSAGE_GET_PATH, - [wants, haves], + [wants, haves, self.delivery_per_transfer_limit], response_callback=self.message_get_response, failed_callback=self.message_get_failed, progress_callback=self.message_get_progress From 1e9fe969fda85fb313f848403309b873c2ce9c09 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sat, 2 Mar 2024 09:17:34 +0100 Subject: [PATCH 023/161] Updated version --- LXMF/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LXMF/_version.py b/LXMF/_version.py index 6a9beea..3d26edf 100644 --- a/LXMF/_version.py +++ b/LXMF/_version.py @@ -1 +1 @@ -__version__ = "0.4.0" +__version__ = "0.4.1" From 994bb07efdf9d53e75700177431474d3d41e3c3a Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sun, 17 Mar 2024 00:35:45 +0100 Subject: [PATCH 024/161] Updated timing --- LXMF/LXMRouter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index d3a4b7d..991a893 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -18,7 +18,7 @@ from .Handlers import LXMFPropagationAnnounceHandler class LXMRouter: MAX_DELIVERY_ATTEMPTS = 5 PROCESSING_INTERVAL = 4 - DELIVERY_RETRY_WAIT = 7 + DELIVERY_RETRY_WAIT = 10 PATH_REQUEST_WAIT = 7 LINK_MAX_INACTIVITY = 10*60 P_LINK_MAX_INACTIVITY = 3*60 From 739349c2a13e7a254d038781b045dda7f8fb9d49 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sun, 17 Mar 2024 00:35:54 +0100 Subject: [PATCH 025/161] Updated field codes --- LXMF/LXMF.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/LXMF/LXMF.py b/LXMF/LXMF.py index 5225d88..8febabe 100644 --- a/LXMF/LXMF.py +++ b/LXMF/LXMF.py @@ -13,4 +13,5 @@ FIELD_IMAGE = 0x06 FIELD_AUDIO = 0x07 FIELD_THREAD = 0x08 FIELD_COMMANDS = 0x09 -FIELD_RESULTS = 0x0A \ No newline at end of file +FIELD_RESULTS = 0x0A +FIELD_GROUP = 0x0B \ No newline at end of file From 21af6a4e5c8b0469bdcdda34dec224add46d4f91 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sun, 17 Mar 2024 00:40:15 +0100 Subject: [PATCH 026/161] Updated version --- LXMF/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LXMF/_version.py b/LXMF/_version.py index 3d26edf..df12433 100644 --- a/LXMF/_version.py +++ b/LXMF/_version.py @@ -1 +1 @@ -__version__ = "0.4.1" +__version__ = "0.4.2" From 3fbe2e94da2c348b2cc9e44803f72275d891397d Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Tue, 19 Mar 2024 09:47:37 +0100 Subject: [PATCH 027/161] Append incoming to locally delivered transient IDs and check on arrival --- LXMF/LXMRouter.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 991a893..f249b0c 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -953,6 +953,12 @@ class LXMRouter: RNS.log(str(self)+" ignored message from "+RNS.prettyhexrep(message.source_hash), RNS.LOG_DEBUG) return False + if self.has_message(message.hash): + RNS.log(str(self)+" ignored already received message from "+RNS.prettyhexrep(message.source_hash), RNS.LOG_DEBUG) + return False + else: + 
self.locally_delivered_transient_ids[message.hash] = time.time() + if self.__delivery_callback != None and callable(self.__delivery_callback): try: self.__delivery_callback(message) From d2b2ef54e89dd0df36d89f84c9d7732ca65796d7 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Tue, 19 Mar 2024 11:03:12 +0100 Subject: [PATCH 028/161] Added outbound transfer progress monitoring --- LXMF/LXMRouter.py | 22 ++++++++++++++++------ LXMF/LXMessage.py | 13 +++++++++---- 2 files changed, 25 insertions(+), 10 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index f249b0c..c89c4c8 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -909,10 +909,6 @@ class LXMRouter: return True else: return False - - - ### Message Routing & Delivery ######################## - ####################################################### def handle_outbound(self, lxmessage): lxmessage.state = LXMessage.OUTBOUND @@ -927,6 +923,17 @@ class LXMRouter: self.pending_outbound.append(lxmessage) self.process_outbound() + def get_outbound_progress(self, lxm_hash): + for lxm in self.pending_outbound: + if lxm.hash == lxm_hash: + return lxm.progress + + return None + + + ### Message Routing & Delivery ######################## + ####################################################### + def lxmf_delivery(self, lxmf_data, destination_type = None, phy_stats = None): try: message = LXMessage.unpack_from_bytes(lxmf_data) @@ -1340,6 +1347,8 @@ class LXMRouter: # to deliver the message direct_link = self.direct_links[delivery_destination_hash] if direct_link.status == RNS.Link.ACTIVE: + if lxmessage.progress == None or lxmessage.progress < 0.05: + lxmessage.progress = 0.05 if lxmessage.state != LXMessage.SENDING: RNS.log("Starting transfer of "+str(lxmessage)+" to "+RNS.prettyhexrep(lxmessage.get_destination().hash), RNS.LOG_DEBUG) lxmessage.set_delivery_destination(direct_link) @@ -1366,8 +1375,7 @@ class LXMRouter: self.direct_links.pop(delivery_destination_hash) lxmessage.next_delivery_attempt = time.time() + LXMRouter.DELIVERY_RETRY_WAIT else: - # Simply wait for the link to become - # active or close + # Simply wait for the link to become active or close RNS.log("The link to "+RNS.prettyhexrep(lxmessage.get_destination().hash)+" is pending, waiting for link to become active", RNS.LOG_DEBUG) else: # No link exists, so we'll try to establish one, but @@ -1383,10 +1391,12 @@ class LXMRouter: delivery_link = RNS.Link(lxmessage.get_destination()) delivery_link.set_link_established_callback(self.process_outbound) self.direct_links[delivery_destination_hash] = delivery_link + lxmessage.progress = 0.03 else: RNS.log("No path known for delivery attempt "+str(lxmessage.delivery_attempts)+" to "+RNS.prettyhexrep(lxmessage.get_destination().hash)+". 
Requesting path...", RNS.LOG_DEBUG) RNS.Transport.request_path(lxmessage.get_destination().hash) lxmessage.next_delivery_attempt = time.time() + LXMRouter.PATH_REQUEST_WAIT + lxmessage.progress = 0.01 else: RNS.log("Max delivery attempts reached for direct "+str(lxmessage)+" to "+RNS.prettyhexrep(lxmessage.get_destination().hash), RNS.LOG_DEBUG) self.fail_message(lxmessage) diff --git a/LXMF/LXMessage.py b/LXMF/LXMessage.py index 3cf8122..5b7721c 100644 --- a/LXMF/LXMessage.py +++ b/LXMF/LXMessage.py @@ -118,7 +118,7 @@ class LXMessage: self.signature = None self.hash = None self.packed = None - self.progress = None + self.progress = 0.0 self.state = LXMessage.DRAFT self.method = LXMessage.UNKNOWN self.rssi = None @@ -324,12 +324,14 @@ class LXMessage: if receipt: receipt.set_delivery_callback(self.__mark_delivered) receipt.set_timeout_callback(self.__link_packet_timed_out) + self.progress = 0.50 else: if self.__delivery_destination: self.__delivery_destination.teardown() elif self.representation == LXMessage.RESOURCE: self.resource_representation = self.__as_resource() + self.progress = 0.10 elif self.method == LXMessage.PROPAGATED: self.state = LXMessage.SENDING @@ -339,11 +341,13 @@ class LXMessage: if receipt: receipt.set_delivery_callback(self.__mark_propagated) receipt.set_timeout_callback(self.__link_packet_timed_out) + self.progress = 0.50 else: self.__delivery_destination.teardown() elif self.representation == LXMessage.RESOURCE: self.resource_representation = self.__as_resource() + self.progress = 0.10 def determine_transport_encryption(self): @@ -387,6 +391,7 @@ class LXMessage: def __mark_delivered(self, receipt = None): RNS.log("Received delivery notification for "+str(self), RNS.LOG_DEBUG) self.state = LXMessage.DELIVERED + self.progress = 1.0 if self.__delivery_callback != None and callable(self.__delivery_callback): try: @@ -397,6 +402,7 @@ class LXMessage: def __mark_propagated(self, receipt = None): RNS.log("Received propagation success notification for "+str(self), RNS.LOG_DEBUG) self.state = LXMessage.SENT + self.progress = 1.0 if self.__delivery_callback != None and callable(self.__delivery_callback): try: @@ -407,6 +413,7 @@ class LXMessage: def __mark_paper_generated(self, receipt = None): RNS.log("Paper message generation succeeded for "+str(self), RNS.LOG_DEBUG) self.state = LXMessage.PAPER + self.progress = 1.0 if self.__delivery_callback != None and callable(self.__delivery_callback): try: @@ -436,7 +443,7 @@ class LXMessage: def __update_transfer_progress(self, resource): - self.progress = resource.get_progress() + self.progress = 0.10 + (resource.get_progress()*0.90) def __as_packet(self): if not self.packed: @@ -465,8 +472,6 @@ class LXMessage: if not self.__delivery_destination.status == RNS.Link.ACTIVE: raise ConnectionError("Tried to synthesize resource for LXMF message on a link that was not active") - self.progress = 0.0 - if self.method == LXMessage.DIRECT: return RNS.Resource(self.packed, self.__delivery_destination, callback = self.__resource_concluded, progress_callback = self.__update_transfer_progress) elif self.method == LXMessage.PROPAGATED: From 27ffea3ea9dafee7fa2cc2f0ddb674eddde8a79c Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Tue, 19 Mar 2024 13:45:55 +0100 Subject: [PATCH 029/161] Improved message transfer feedback --- LXMF/LXMRouter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index c89c4c8..f7701bf 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -1396,7 +1396,7 @@ 
class LXMRouter: RNS.log("No path known for delivery attempt "+str(lxmessage.delivery_attempts)+" to "+RNS.prettyhexrep(lxmessage.get_destination().hash)+". Requesting path...", RNS.LOG_DEBUG) RNS.Transport.request_path(lxmessage.get_destination().hash) lxmessage.next_delivery_attempt = time.time() + LXMRouter.PATH_REQUEST_WAIT - lxmessage.progress = 0.01 + lxmessage.progress = 0.00 else: RNS.log("Max delivery attempts reached for direct "+str(lxmessage)+" to "+RNS.prettyhexrep(lxmessage.get_destination().hash), RNS.LOG_DEBUG) self.fail_message(lxmessage) From 45e39917b5374b8667605fb6a2dae450297f9fbc Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Tue, 19 Mar 2024 13:54:09 +0100 Subject: [PATCH 030/161] Updated dependencies --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 26ce0d7..43817c8 100644 --- a/setup.py +++ b/setup.py @@ -25,6 +25,6 @@ setuptools.setup( 'lxmd=LXMF.Utilities.lxmd:main', ] }, - install_requires=['rns>=0.7.2'], + install_requires=['rns>=0.7.3'], python_requires='>=3.7', ) From 4b13d7188c537c00e623531ef6b980dc2dfc8811 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Fri, 22 Mar 2024 21:11:32 +0100 Subject: [PATCH 031/161] Send packet proof before delivery processing --- LXMF/LXMRouter.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index f7701bf..db19916 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -997,8 +997,9 @@ class LXMRouter: RNS.log("Error while retrieving physical link stats for LXMF delivery packet: "+str(e), RNS.LOG_ERROR) phy_stats = {"rssi": packet.rssi, "snr": packet.snr, "q": packet.q} - if self.lxmf_delivery(lxmf_data, packet.destination_type, phy_stats=phy_stats): - packet.prove() + + packet.prove() + self.lxmf_delivery(lxmf_data, packet.destination_type, phy_stats=phy_stats) except Exception as e: RNS.log("Exception occurred while parsing incoming LXMF data.", RNS.LOG_ERROR) From f6cc9fd1cf108c8cf0c7d3ee2dab96896805bd56 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sat, 23 Mar 2024 19:53:05 +0100 Subject: [PATCH 032/161] Updated version --- LXMF/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LXMF/_version.py b/LXMF/_version.py index df12433..f6b7e26 100644 --- a/LXMF/_version.py +++ b/LXMF/_version.py @@ -1 +1 @@ -__version__ = "0.4.2" +__version__ = "0.4.3" From 2b3fa796a1574785df784cc57e6ae9af4f2f6077 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Tue, 26 Mar 2024 14:55:59 +0100 Subject: [PATCH 033/161] Updated packet proof logic --- LXMF/LXMRouter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index db19916..30788c9 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -980,6 +980,7 @@ class LXMRouter: return False def delivery_packet(self, data, packet): + packet.prove() try: if packet.destination_type != RNS.Destination.LINK: lxmf_data = b"" @@ -998,7 +999,6 @@ class LXMRouter: phy_stats = {"rssi": packet.rssi, "snr": packet.snr, "q": packet.q} - packet.prove() self.lxmf_delivery(lxmf_data, packet.destination_type, phy_stats=phy_stats) except Exception as e: From d767d40c5fdf3a9fa6b0738d20c9eeb4bf0ffa2d Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sun, 5 May 2024 20:09:05 +0200 Subject: [PATCH 034/161] Updated dependency version --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 43817c8..da4639d 100644 --- a/setup.py +++ b/setup.py @@ -25,6 +25,6 @@ setuptools.setup( 
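The progress monitoring added in PATCH 028 gives senders a single normalized value between 0.0 and 1.0 per queued message, exposed through LXMRouter.get_outbound_progress(). A minimal polling sketch, assuming a router and an already-dispatched LXMessage bound to the illustrative names router and lxm (these names are not part of the patches):

    import time
    import RNS

    # After router.handle_outbound(lxm), the message sits in the pending
    # outbound queue and its progress can be polled by message hash.
    while True:
        progress = router.get_outbound_progress(lxm.hash)
        if progress is None:
            # The message has left the queue (delivered or failed).
            break
        RNS.log(f"Outbound transfer at {progress*100:.0f}%")
        time.sleep(1)

The patch seeds coarse fixed fractions while paths and links are being set up, and only switches to fine-grained reporting once the underlying RNS resource transfer is underway (0.10 plus 90% of the resource progress).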
'lxmd=LXMF.Utilities.lxmd:main', ] }, - install_requires=['rns>=0.7.3'], + install_requires=['rns>=0.7.4'], python_requires='>=3.7', ) From 741b66aab203b3dfbc1454ee869d97999db61cfc Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sun, 5 May 2024 20:11:30 +0200 Subject: [PATCH 035/161] Updated version --- LXMF/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LXMF/_version.py b/LXMF/_version.py index f6b7e26..cd1ee63 100644 --- a/LXMF/_version.py +++ b/LXMF/_version.py @@ -1 +1 @@ -__version__ = "0.4.3" +__version__ = "0.4.4" From a810be316fc53bbbca6e0085e32381bdca465141 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sun, 2 Jun 2024 09:19:47 +0200 Subject: [PATCH 036/161] Added audio modes --- LXMF/LXMF.py | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/LXMF/LXMF.py b/LXMF/LXMF.py index 8febabe..fa3e2d2 100644 --- a/LXMF/LXMF.py +++ b/LXMF/LXMF.py @@ -14,4 +14,28 @@ FIELD_AUDIO = 0x07 FIELD_THREAD = 0x08 FIELD_COMMANDS = 0x09 FIELD_RESULTS = 0x0A -FIELD_GROUP = 0x0B \ No newline at end of file +FIELD_GROUP = 0x0B + +# Audio modes for the data structure in FIELD_AUDIO + +# Codec2 Audio Modes +AM_CODEC2_450PWB = 0x01 +AM_CODEC2_450 = 0x02 +AM_CODEC2_700C = 0x03 +AM_CODEC2_1200 = 0x04 +AM_CODEC2_1300 = 0x05 +AM_CODEC2_1400 = 0x06 +AM_CODEC2_1600 = 0x07 +AM_CODEC2_2400 = 0x08 +AM_CODEC2_3200 = 0x09 + +# Opus Audio Modes +AM_OPUS_LBW = 0x10 +AM_OPUS_MBW = 0x11 +AM_OPUS_PTT = 0x12 +AM_OPUS_RT_HDX = 0x13 +AM_OPUS_RT_FDX = 0x14 +AM_OPUS_STANDARD = 0x15 +AM_OPUS_HQ = 0x16 +AM_OPUS_BROADCAST = 0x17 +AM_OPUS_LOSSLESS = 0x18 From a35e522e75e6afca4b185a56d989773cafaec52d Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sun, 2 Jun 2024 09:25:33 +0200 Subject: [PATCH 037/161] Added custom audio mode to spec --- LXMF/LXMF.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/LXMF/LXMF.py b/LXMF/LXMF.py index fa3e2d2..21567a5 100644 --- a/LXMF/LXMF.py +++ b/LXMF/LXMF.py @@ -39,3 +39,7 @@ AM_OPUS_STANDARD = 0x15 AM_OPUS_HQ = 0x16 AM_OPUS_BROADCAST = 0x17 AM_OPUS_LOSSLESS = 0x18 + +# Custom, unspecified audio mode, the +# client must determined it itself +AM_CUSTOM = 0xFF \ No newline at end of file From e5a960b2df023eb0918f30680a3012fb32542cc5 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Tue, 4 Jun 2024 18:31:18 +0200 Subject: [PATCH 038/161] Updated OPUS audio modes --- LXMF/LXMF.py | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/LXMF/LXMF.py b/LXMF/LXMF.py index 21567a5..a8bec6b 100644 --- a/LXMF/LXMF.py +++ b/LXMF/LXMF.py @@ -30,15 +30,16 @@ AM_CODEC2_2400 = 0x08 AM_CODEC2_3200 = 0x09 # Opus Audio Modes -AM_OPUS_LBW = 0x10 -AM_OPUS_MBW = 0x11 -AM_OPUS_PTT = 0x12 -AM_OPUS_RT_HDX = 0x13 -AM_OPUS_RT_FDX = 0x14 -AM_OPUS_STANDARD = 0x15 -AM_OPUS_HQ = 0x16 -AM_OPUS_BROADCAST = 0x17 -AM_OPUS_LOSSLESS = 0x18 +AM_OPUS_OGG = 0x10 +AM_OPUS_LBW = 0x11 +AM_OPUS_MBW = 0x12 +AM_OPUS_PTT = 0x13 +AM_OPUS_RT_HDX = 0x14 +AM_OPUS_RT_FDX = 0x15 +AM_OPUS_STANDARD = 0x16 +AM_OPUS_HQ = 0x17 +AM_OPUS_BROADCAST = 0x18 +AM_OPUS_LOSSLESS = 0x19 # Custom, unspecified audio mode, the # client must determined it itself From 4520507869d1725ef8cc161167fda2b7ebd4e3c5 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Tue, 4 Jun 2024 18:43:46 +0200 Subject: [PATCH 039/161] Updated readme --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 2f5c888..fb7c4dc 100644 --- a/README.md +++ b/README.md @@ -7,6 +7,7 @@ LXMF is efficient enough that it can deliver messages over extremely low-bandwid 
User-facing clients built on LXMF include: - [Sideband](https://unsigned.io/sideband) +- [Reticulum MeshChat](https://github.com/liamcottle/reticulum-meshchat) - [Nomad Network](https://unsigned.io/nomadnet) Community-provided tools and utilities for LXMF include: From b4ba9d628b04c69f068b2877ff69fc49b7d843f0 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Wed, 5 Jun 2024 00:32:59 +0200 Subject: [PATCH 040/161] Updated readme --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index fb7c4dc..5b7c9bd 100644 --- a/README.md +++ b/README.md @@ -7,7 +7,7 @@ LXMF is efficient enough that it can deliver messages over extremely low-bandwid User-facing clients built on LXMF include: - [Sideband](https://unsigned.io/sideband) -- [Reticulum MeshChat](https://github.com/liamcottle/reticulum-meshchat) +- [MeshChat](https://github.com/liamcottle/reticulum-meshchat) - [Nomad Network](https://unsigned.io/nomadnet) Community-provided tools and utilities for LXMF include: From 8b8008478da6148e0be8ba1fbc89cbefaede3e05 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 15 Jul 2024 18:35:25 +0000 Subject: [PATCH 041/161] Bump setuptools from 68.0.0 to 70.0.0 Bumps [setuptools](https://github.com/pypa/setuptools) from 68.0.0 to 70.0.0. - [Release notes](https://github.com/pypa/setuptools/releases) - [Changelog](https://github.com/pypa/setuptools/blob/main/NEWS.rst) - [Commits](https://github.com/pypa/setuptools/compare/v68.0.0...v70.0.0) --- updated-dependencies: - dependency-name: setuptools dependency-type: direct:production ... Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 48086e6..a322487 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,3 +1,3 @@ qrcode==7.4.2 rns==0.5.7 -setuptools==68.0.0 +setuptools==70.0.0 From 8e686b10e8c635c86384ebb9cd205dc13ba79ea7 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Fri, 30 Aug 2024 00:11:11 +0200 Subject: [PATCH 042/161] Updated version and dependencies --- LXMF/_version.py | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/LXMF/_version.py b/LXMF/_version.py index cd1ee63..98a433b 100644 --- a/LXMF/_version.py +++ b/LXMF/_version.py @@ -1 +1 @@ -__version__ = "0.4.4" +__version__ = "0.4.5" diff --git a/setup.py b/setup.py index da4639d..7d283d4 100644 --- a/setup.py +++ b/setup.py @@ -25,6 +25,6 @@ setuptools.setup( 'lxmd=LXMF.Utilities.lxmd:main', ] }, - install_requires=['rns>=0.7.4'], + install_requires=['rns>=0.7.6'], python_requires='>=3.7', ) From 0be569ccd6ba355ac4b81409d195fe687eef08fa Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Fri, 30 Aug 2024 00:11:32 +0200 Subject: [PATCH 043/161] Updated field specifiers and descriptions --- LXMF/LXMF.py | 41 +++++++++++++++++++++++++++++++++++------ 1 file changed, 35 insertions(+), 6 deletions(-) diff --git a/LXMF/LXMF.py b/LXMF/LXMF.py index a8bec6b..0edb6ad 100644 --- a/LXMF/LXMF.py +++ b/LXMF/LXMF.py @@ -1,9 +1,10 @@ APP_NAME = "lxmf" -# WARNING! These field specifiers are floating and not -# yet final! Consider highly experiemental, and expect -# them to change in the future! You have been warned :) - +########################################################## +# The following core fields are provided to facilitate # +# interoperability in data exchange between various LXMF # +# clients and systems. 
# +########################################################## FIELD_EMBEDDED_LXMS = 0x01 FIELD_TELEMETRY = 0x02 FIELD_TELEMETRY_STREAM = 0x03 @@ -16,6 +17,34 @@ FIELD_COMMANDS = 0x09 FIELD_RESULTS = 0x0A FIELD_GROUP = 0x0B +# For usecases such as including custom data structures, +# embedding or encapsulating other data types or protocols +# that are not native to LXMF, or bridging/tunneling +# external protocols or services over LXMF, the following +# fields are available. A format/type/protocol (or other) +# identifier can be included in the CUSTOM_TYPE field, and +# the embedded payload can be included in the CUSTOM_DATA +# field. It is up to the client application to correctly +# discern and potentially utilise any data embedded using +# this mechanism. +FIELD_CUSTOM_TYPE = 0xFB +FIELD_CUSTOM_DATA = 0xFC +FIELD_CUSTOM_META = 0xFD + +# The non-specific and debug fields are intended for +# development, testing and debugging use. +FIELD_NON_SPECIFIC = 0xFE +FIELD_DEBUG = 0xFF + +########################################################## +# The following section lists field-specific specifiers, # +# modes and identifiers that are native to LXMF. It is # +# optional for any client or system to support any of # +# these, and they are provided as template for easing # +# interoperability without sacrificing expandability # +# and flexibility of the format. # +########################################################## + # Audio modes for the data structure in FIELD_AUDIO # Codec2 Audio Modes @@ -41,6 +70,6 @@ AM_OPUS_HQ = 0x17 AM_OPUS_BROADCAST = 0x18 AM_OPUS_LOSSLESS = 0x19 -# Custom, unspecified audio mode, the -# client must determined it itself +# Custom, unspecified audio mode, the client must +# determine it itself based on the included data. AM_CUSTOM = 0xFF \ No newline at end of file From 68892091ec488337b38e34e0c45756b905008c2d Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Fri, 30 Aug 2024 00:23:09 +0200 Subject: [PATCH 044/161] Updated readme --- README.md | 37 +++++++++++++++++++++++++++++++++---- 1 file changed, 33 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 5b7c9bd..65018f4 100644 --- a/README.md +++ b/README.md @@ -130,10 +130,6 @@ You can also find the entire message in Date: Wed, 4 Sep 2024 18:58:25 +0200 Subject: [PATCH 048/161] Updated version --- LXMF/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LXMF/_version.py b/LXMF/_version.py index 98a433b..3d18726 100644 --- a/LXMF/_version.py +++ b/LXMF/_version.py @@ -1 +1 @@ -__version__ = "0.4.5" +__version__ = "0.5.0" From 775ac7ff689b50cf34ee7c06d2c34a937b11d4e6 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 5 Sep 2024 14:56:49 +0200 Subject: [PATCH 049/161] Allow ratchet enforcement and fixed opportunistic delivery queue handling --- LXMF/LXMRouter.py | 18 +++++++++++------- LXMF/LXMessage.py | 4 +++- 2 files changed, 14 insertions(+), 8 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 3dea0cd..f2daa2f 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -58,7 +58,7 @@ class LXMRouter: ####################################################### def __init__(self, identity = None, storagepath = None, autopeer = AUTOPEER, autopeer_maxdepth = None, - propagation_limit = PROPAGATION_LIMIT, delivery_limit = DELIVERY_LIMIT): + propagation_limit = PROPAGATION_LIMIT, delivery_limit = DELIVERY_LIMIT, enforce_ratchets = False): random.seed(os.urandom(10)) @@ -93,6 +93,7 @@ class LXMRouter: self.information_storage_limit = None 
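The FIELD_CUSTOM_TYPE, FIELD_CUSTOM_DATA and FIELD_CUSTOM_META identifiers defined above reserve space for application-specific payloads without colliding with the core field set. A short sketch of how a client could embed its own msgpack structure this way; the format identifier 0x01, the payload contents and the storage path are made-up examples, and the recipient is reduced to a locally generated identity for brevity:

    import RNS
    import LXMF
    import RNS.vendor.umsgpack as msgpack

    reticulum = RNS.Reticulum()
    router = LXMF.LXMRouter(storagepath="./lxmf_storage")
    source = router.register_delivery_identity(RNS.Identity(), display_name="Sender")

    # In practice the recipient identity would be recovered from an announce.
    recipient_identity = RNS.Identity()
    destination = RNS.Destination(recipient_identity, RNS.Destination.OUT,
                                  RNS.Destination.SINGLE, "lxmf", "delivery")

    # Hypothetical application-defined payload; LXMF itself does not interpret it.
    custom_payload = msgpack.packb({"sensor": "bme280", "temp_c": 21.4})
    fields = {
        LXMF.FIELD_CUSTOM_TYPE: 0x01,   # application-chosen format identifier
        LXMF.FIELD_CUSTOM_DATA: custom_payload,
    }

    lxm = LXMF.LXMessage(destination, source, "body text", "Reading",
                         fields=fields, desired_method=LXMF.LXMessage.DIRECT)
    router.handle_outbound(lxm)

As the comment in the patch notes, the receiving application is expected to check FIELD_CUSTOM_TYPE before attempting to interpret FIELD_CUSTOM_DATA.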
self.propagation_per_transfer_limit = propagation_limit self.delivery_per_transfer_limit = delivery_limit + self.enforce_ratchets = enforce_ratchets self.wants_download_on_path_available_from = None self.wants_download_on_path_available_to = None @@ -183,6 +184,8 @@ class LXMRouter: delivery_destination.set_packet_callback(self.delivery_packet) delivery_destination.set_link_established_callback(self.delivery_link_established) delivery_destination.display_name = display_name + if self.enforce_ratchets: + delivery_destination.enforce_ratchets() if display_name != None: delivery_destination.set_default_app_data(display_name.encode("utf-8")) @@ -1245,12 +1248,13 @@ class LXMRouter: delivery_destination = self.delivery_destinations[destination_hash] encrypted_lxmf_data = lxmf_data[LXMessage.DESTINATION_LENGTH:] decrypted_lxmf_data = delivery_destination.decrypt(encrypted_lxmf_data) - delivery_data = lxmf_data[:LXMessage.DESTINATION_LENGTH]+decrypted_lxmf_data - self.lxmf_delivery(delivery_data, delivery_destination.type) - self.locally_delivered_transient_ids[transient_id] = time.time() + if decrypted_lxmf_data != None: + delivery_data = lxmf_data[:LXMessage.DESTINATION_LENGTH]+decrypted_lxmf_data + self.lxmf_delivery(delivery_data, delivery_destination.type) + self.locally_delivered_transient_ids[transient_id] = time.time() - if signal_local_delivery != None: - return signal_local_delivery + if signal_local_delivery != None: + return signal_local_delivery else: if self.propagation_node: @@ -1326,7 +1330,7 @@ class LXMRouter: if lxmessage.state == LXMessage.DELIVERED: RNS.log("Delivery has occurred for "+str(lxmessage)+", removing from outbound queue", RNS.LOG_DEBUG) self.pending_outbound.remove(lxmessage) - elif lxmessage.state == LXMessage.SENT: + elif lxmessage.method == LXMessage.PROPAGATED and lxmessage.state == LXMessage.SENT: RNS.log("Propagation has occurred for "+str(lxmessage)+", removing from outbound queue", RNS.LOG_DEBUG) self.pending_outbound.remove(lxmessage) else: diff --git a/LXMF/LXMessage.py b/LXMF/LXMessage.py index e8dd1db..7442091 100644 --- a/LXMF/LXMessage.py +++ b/LXMF/LXMessage.py @@ -351,7 +351,9 @@ class LXMessage: def determine_transport_encryption(self): - # TODO: Update transport encryption descriptions to account for ratchets + # TODO: These descriptions are old and outdated. + # Update the transport encryption descriptions to + # account for ratchets and other changes. 
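Ratchet enforcement introduced in this patch is opt-in and flows from the router to every delivery destination registered on it. A minimal receiver-side sketch, with the storage path and display name chosen purely for illustration:

    import RNS
    import LXMF

    reticulum = RNS.Reticulum()
    router = LXMF.LXMRouter(storagepath="./lxmf_storage", enforce_ratchets=True)

    identity = RNS.Identity()
    # register_delivery_identity() calls enforce_ratchets() on the new
    # destination when the router was created with enforce_ratchets=True.
    local_destination = router.register_delivery_identity(identity, display_name="Example Peer")
    RNS.log("Ratchet-enforcing destination ready: " + RNS.prettyhexrep(local_destination.hash))

The same patch also guards the propagation-sync delivery path by checking that decryption actually succeeded before handing data to lxmf_delivery().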
if self.method == LXMessage.OPPORTUNISTIC: if self.__destination.type == RNS.Destination.SINGLE: self.transport_encrypted = True From 95ba8cba607f96b2e0d4bc19d525d380cab1e860 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 5 Sep 2024 14:57:20 +0200 Subject: [PATCH 050/161] Updated dependencies --- requirements.txt | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index a322487..8117b1f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,3 +1,3 @@ qrcode==7.4.2 -rns==0.5.7 +rns==0.7.7 setuptools==70.0.0 diff --git a/setup.py b/setup.py index 7d283d4..52489a1 100644 --- a/setup.py +++ b/setup.py @@ -25,6 +25,6 @@ setuptools.setup( 'lxmd=LXMF.Utilities.lxmd:main', ] }, - install_requires=['rns>=0.7.6'], + install_requires=['rns>=0.7.7'], python_requires='>=3.7', ) From 5366f895b23498b72f5e1a514fa751433039ed30 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Fri, 6 Sep 2024 00:51:36 +0200 Subject: [PATCH 051/161] Added stamp generation to LXMessage --- LXMF/LXMessage.py | 62 ++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 58 insertions(+), 4 deletions(-) diff --git a/LXMF/LXMessage.py b/LXMF/LXMessage.py index 7442091..c6eeb3f 100644 --- a/LXMF/LXMessage.py +++ b/LXMF/LXMessage.py @@ -1,6 +1,7 @@ import RNS import RNS.vendor.umsgpack as msgpack +import os import time import base64 @@ -90,7 +91,9 @@ class LXMessage: else: return "" - def __init__(self, destination, source, content = "", title = "", fields = None, desired_method = None, destination_hash = None, source_hash = None): + def __init__(self, destination, source, content = "", title = "", fields = None, desired_method = None, + destination_hash = None, source_hash = None, stamp_cost=None): + if isinstance(destination, RNS.Destination) or destination == None: self.__destination = destination if destination != None: @@ -118,9 +121,11 @@ class LXMessage: self.signature = None self.hash = None self.packed = None - self.progress = 0.0 + self.stamp = None + self.stamp_cost = stamp_cost self.state = LXMessage.DRAFT self.method = LXMessage.UNKNOWN + self.progress = 0.0 self.rssi = None self.snr = None self.q = None @@ -219,6 +224,41 @@ class LXMessage: def register_failed_callback(self, callback): self.failed_callback = callback + def validate_stamp(self, target_cost): + if self.stamp == None: + return False + else: + target = 0b1 << 256-target_cost + if int.from_bytes(RNS.Identity.full_hash(self.message_id+self.stamp)) > target: + return False + else: + return True + + def get_stamp(self, timeout=None): + if self.stamp_cost == None: + return None + + elif self.stamp != None: + # TODO: Check that message hash cannot actually + # change under any circumstances before handoff + return self.stamp + + else: + RNS.log(f"Generating stamp for {self}...", RNS.LOG_DEBUG) + start_time = time.time() + stamp = os.urandom(256//8); target = 0b1 << 256-self.stamp_cost; rounds = 1 + while int.from_bytes(RNS.Identity.full_hash(self.message_id+stamp)) > target: + if timeout != None and rounds % 10000 == 0: + if time.time() > start_time + timeout: + RNS.log(f"Stamp generation for {self} timed out", RNS.LOG_ERROR) + return None + + stamp = os.urandom(256//8) + rounds += 1 + + RNS.log(f"Stamp generated in {RNS.prettytime(time.time() - start_time)}", RNS.LOG_DEBUG) + return stamp + def pack(self): if not self.packed: if self.timestamp == None: @@ -235,6 +275,9 @@ class LXMessage: hashed_part += msgpack.packb(self.payload) self.hash = RNS.Identity.full_hash(hashed_part) 
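The stamp introduced here is a hashcash-style proof of work: a 32-byte random value is acceptable when the full hash of the message ID concatenated with the stamp, read as a 256-bit big-endian integer, does not exceed a target of 2^(256 - cost), so each unit of stamp cost halves the acceptance window. A standalone sketch of that acceptance test and search loop, on the assumption that RNS.Identity.full_hash is a plain SHA-256 (hashlib is used only to keep the example self-contained):

    import os
    import hashlib

    def stamp_meets_cost(message_id: bytes, stamp: bytes, cost: int) -> bool:
        # Valid when the hash value is at or below the target; each unit of
        # cost removes one bit of headroom, i.e. halves the acceptance window.
        target = 1 << (256 - cost)
        digest = hashlib.sha256(message_id + stamp).digest()
        return int.from_bytes(digest, byteorder="big") <= target

    # Brute-force search with fresh random candidates, mirroring get_stamp()
    message_id = os.urandom(32)
    cost = 8
    stamp = os.urandom(32)
    attempts = 1
    while not stamp_meets_cost(message_id, stamp, cost):
        stamp = os.urandom(32)
        attempts += 1
    print(f"Found stamp after {attempts} attempts")

Each added unit of cost doubles the expected work, which is why the following patches spread stamp generation across multiple worker processes.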
self.message_id = self.hash + self.stamp = self.get_stamp() + if self.stamp != None: + self.payload.append(self.stamp) signed_part = b"" signed_part += hashed_part @@ -242,11 +285,11 @@ class LXMessage: self.signature = self.__source.sign(signed_part) self.signature_validated = True + packed_payload = msgpack.packb(self.payload) self.packed = b"" self.packed += self.__destination.hash self.packed += self.__source.hash self.packed += self.signature - packed_payload = msgpack.packb(self.payload) self.packed += packed_payload self.packed_size = len(self.packed) content_size = len(packed_payload) @@ -566,10 +609,19 @@ class LXMessage: source_hash = lxmf_bytes[LXMessage.DESTINATION_LENGTH:2*LXMessage.DESTINATION_LENGTH] signature = lxmf_bytes[2*LXMessage.DESTINATION_LENGTH:2*LXMessage.DESTINATION_LENGTH+LXMessage.SIGNATURE_LENGTH] packed_payload = lxmf_bytes[2*LXMessage.DESTINATION_LENGTH+LXMessage.SIGNATURE_LENGTH:] + unpacked_payload = msgpack.unpackb(packed_payload) + + # Extract stamp from payload if included + if len(unpacked_payload) > 4: + stamp = unpacked_payload[4] + unpacked_payload = unpacked_payload[:4] + packed_payload = msgpack.packb(unpacked_payload) + else: + stamp = None + hashed_part = b"" + destination_hash + source_hash + packed_payload message_hash = RNS.Identity.full_hash(hashed_part) signed_part = b"" + hashed_part + message_hash - unpacked_payload = msgpack.unpackb(packed_payload) timestamp = unpacked_payload[0] title_bytes = unpacked_payload[1] content_bytes = unpacked_payload[2] @@ -598,7 +650,9 @@ class LXMessage: desired_method = original_method) message.hash = message_hash + message.message_id = message.hash message.signature = signature + message.stamp = stamp message.incoming = True message.timestamp = timestamp message.packed = lxmf_bytes From 4dca031441cd52300acb4f95be406e3470e04f17 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Fri, 6 Sep 2024 00:57:06 +0200 Subject: [PATCH 052/161] Updated fields --- LXMF/LXMF.py | 1 + 1 file changed, 1 insertion(+) diff --git a/LXMF/LXMF.py b/LXMF/LXMF.py index 0edb6ad..8d79bf9 100644 --- a/LXMF/LXMF.py +++ b/LXMF/LXMF.py @@ -16,6 +16,7 @@ FIELD_THREAD = 0x08 FIELD_COMMANDS = 0x09 FIELD_RESULTS = 0x0A FIELD_GROUP = 0x0B +FIELD_TICKET = 0x0C # For usecases such as including custom data structures, # embedding or encapsulating other data types or protocols From fe14f8744d5c61ba1e1d20f7aeddcb33603f38dc Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Fri, 6 Sep 2024 16:49:01 +0200 Subject: [PATCH 053/161] Added base code for stamp generation and validation --- LXMF/LXMessage.py | 94 ++++++++++++++++++++++++++++++++++------ docs/example_receiver.py | 7 +++ 2 files changed, 88 insertions(+), 13 deletions(-) diff --git a/LXMF/LXMessage.py b/LXMF/LXMessage.py index c6eeb3f..a79f600 100644 --- a/LXMF/LXMessage.py +++ b/LXMF/LXMessage.py @@ -4,6 +4,8 @@ import RNS.vendor.umsgpack as msgpack import os import time import base64 +import signal +import multiprocessing from .LXMF import APP_NAME @@ -224,15 +226,40 @@ class LXMessage: def register_failed_callback(self, callback): self.failed_callback = callback + @staticmethod + def stamp_workblock(message_id): + wb_st = time.time() + expand_rounds = 3000 + workblock = b"" + for n in range(expand_rounds): + workblock += RNS.Cryptography.hkdf( + length=256, + derive_from=message_id, + salt=RNS.Identity.full_hash(message_id+msgpack.packb(n)), + context=None, + ) + wb_time = time.time() - wb_st + RNS.log(f"Stamp workblock size {RNS.prettysize(len(workblock))}, generated in 
{round(wb_time*1000,2)}ms", RNS.LOG_DEBUG) + + return workblock + + @staticmethod + def stamp_valid(stamp, target_cost, workblock): + target = 0b1 << 256-target_cost + result = RNS.Identity.full_hash(workblock+stamp) + if int.from_bytes(result, byteorder="big") > target: + return False + else: + return True + def validate_stamp(self, target_cost): if self.stamp == None: return False else: - target = 0b1 << 256-target_cost - if int.from_bytes(RNS.Identity.full_hash(self.message_id+self.stamp)) > target: - return False - else: + if LXMessage.stamp_valid(self.stamp, target_cost, LXMessage.stamp_workblock(self.message_id)): return True + else: + return False def get_stamp(self, timeout=None): if self.stamp_cost == None: @@ -244,19 +271,60 @@ class LXMessage: return self.stamp else: - RNS.log(f"Generating stamp for {self}...", RNS.LOG_DEBUG) + RNS.log(f"Generating stamp with cost {self.stamp_cost} for {self}...", RNS.LOG_DEBUG) + workblock = LXMessage.stamp_workblock(self.message_id) start_time = time.time() - stamp = os.urandom(256//8); target = 0b1 << 256-self.stamp_cost; rounds = 1 - while int.from_bytes(RNS.Identity.full_hash(self.message_id+stamp)) > target: - if timeout != None and rounds % 10000 == 0: - if time.time() > start_time + timeout: - RNS.log(f"Stamp generation for {self} timed out", RNS.LOG_ERROR) - return None + total_rounds = 0 + + stop_event = multiprocessing.Event() + result_queue = multiprocessing.Queue(maxsize=1) + rounds_queue = multiprocessing.Queue() + def job(stop_event): + terminated = False + rounds = 0 stamp = os.urandom(256//8) - rounds += 1 + while not LXMessage.stamp_valid(stamp, self.stamp_cost, workblock): + if stop_event.is_set(): + break + + if timeout != None and rounds % 10000 == 0: + if time.time() > start_time + timeout: + RNS.log(f"Stamp generation for {self} timed out", RNS.LOG_ERROR) + return None + + stamp = os.urandom(256//8) + rounds += 1 + + rounds_queue.put(rounds) + if not stop_event.is_set(): + result_queue.put(stamp) + + job_procs = [] + jobs = multiprocessing.cpu_count() + for _ in range(jobs): + process = multiprocessing.Process(target=job, kwargs={"stop_event": stop_event},) + job_procs.append(process) + process.start() + + stamp = result_queue.get() + stop_event.set() + + for j in range(jobs): + process = job_procs[j] + process.join() + total_rounds += rounds_queue.get() + + duration = time.time() - start_time + rounds = total_rounds + + # TODO: Remove stats output + RNS.log(f"Stamp generated in {RNS.prettytime(duration)} / {rounds} rounds", RNS.LOG_DEBUG) + RNS.log(f"Rounds per second {int(rounds/duration)}", RNS.LOG_DEBUG) + RNS.log(f"Stamp: {RNS.hexrep(stamp)}", RNS.LOG_DEBUG) + RNS.log(f"Resulting hash: {RNS.hexrep(RNS.Identity.full_hash(workblock+stamp))}", RNS.LOG_DEBUG) + ########################### - RNS.log(f"Stamp generated in {RNS.prettytime(time.time() - start_time)}", RNS.LOG_DEBUG) return stamp def pack(self): diff --git a/docs/example_receiver.py b/docs/example_receiver.py index 75c628e..e6ea117 100644 --- a/docs/example_receiver.py +++ b/docs/example_receiver.py @@ -13,6 +13,12 @@ def delivery_callback(message): if message.unverified_reason == LXMF.LXMessage.SOURCE_UNKNOWN: signature_string = "Cannot verify, source is unknown" + stamp_cost = 12 + if message.validate_stamp(stamp_cost): + stamp_string = "Valid" + else: + stamp_string = "Not valid" + RNS.log("\t+--- LXMF Delivery ---------------------------------------------") RNS.log("\t| Source hash : "+RNS.prettyhexrep(message.source_hash)) RNS.log("\t| Source instance : 
"+str(message.get_source())) @@ -24,6 +30,7 @@ def delivery_callback(message): RNS.log("\t| Content : "+message.content_as_string()) RNS.log("\t| Fields : "+str(message.fields)) RNS.log("\t| Message signature : "+signature_string) + RNS.log("\t| Stamp : "+stamp_string) RNS.log("\t+---------------------------------------------------------------") r = RNS.Reticulum() From fc99010a3db7867ab2d52b79487f670218ded27a Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Fri, 6 Sep 2024 17:37:24 +0200 Subject: [PATCH 054/161] Updated example --- docs/example_receiver.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/example_receiver.py b/docs/example_receiver.py index e6ea117..02ea0c3 100644 --- a/docs/example_receiver.py +++ b/docs/example_receiver.py @@ -13,8 +13,8 @@ def delivery_callback(message): if message.unverified_reason == LXMF.LXMessage.SOURCE_UNKNOWN: signature_string = "Cannot verify, source is unknown" - stamp_cost = 12 - if message.validate_stamp(stamp_cost): + required_stamp_cost = 12 + if message.validate_stamp(required_stamp_cost): stamp_string = "Valid" else: stamp_string = "Not valid" From 537f1823b637ca37be60a9754fbf44f3f65fea5f Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Fri, 6 Sep 2024 19:54:28 +0200 Subject: [PATCH 055/161] Added utility functions --- LXMF/LXMF.py | 43 ++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 42 insertions(+), 1 deletion(-) diff --git a/LXMF/LXMF.py b/LXMF/LXMF.py index 8d79bf9..0fe4a2c 100644 --- a/LXMF/LXMF.py +++ b/LXMF/LXMF.py @@ -73,4 +73,45 @@ AM_OPUS_LOSSLESS = 0x19 # Custom, unspecified audio mode, the client must # determine it itself based on the included data. -AM_CUSTOM = 0xFF \ No newline at end of file +AM_CUSTOM = 0xFF + + +########################################################## +# The following helper functions makes it easier to # +# handle and operate on LXMF data in client programs # +########################################################## + +import RNS.vendor.umsgpack as msgpack +def display_name_from_app_data(app_data=None): + if app_data == None: + return None + else: + # Version 0.5.0+ announce format + if (app_data[0] >= 0x90 and app_data[0] <= 0x9f) or app_data[0] == 0xdc: + peer_data = msgpack.unpackb(app_data) + if type(peer_data) == list: + if len(peer_data) < 1: + return None + else: + return peer_data[0].decode("utf-8") + + # Original announce format + else: + return app_data.decode("utf-8") + +def stamp_cost_from_app_data(app_data=None): + if app_data == None: + return None + else: + # Version 0.5.0+ announce format + if (app_data[0] >= 0x90 and app_data[0] <= 0x9f) or app_data[0] == 0xdc: + peer_data = msgpack.unpackb(app_data) + if type(peer_data) == list: + if len(peer_data) < 2: + return None + else: + return peer_data[1] + + # Original announce format + else: + return None \ No newline at end of file From 015dcc563146fd601afd9f9a5c5e2dfa5e3a5399 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Fri, 6 Sep 2024 19:55:18 +0200 Subject: [PATCH 056/161] Extract announced stamp cost in handlers --- LXMF/Handlers.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/LXMF/Handlers.py b/LXMF/Handlers.py index c09bf1c..a33d13d 100644 --- a/LXMF/Handlers.py +++ b/LXMF/Handlers.py @@ -2,13 +2,14 @@ import time import RNS import RNS.vendor.umsgpack as msgpack -from .LXMF import APP_NAME +from .LXMF import APP_NAME, stamp_cost_from_app_data from .LXMessage import LXMessage class LXMFDeliveryAnnounceHandler: def __init__(self, lxmrouter): self.aspect_filter = 
APP_NAME+".delivery" + self.receive_path_responses = True self.lxmrouter = lxmrouter def received_announce(self, destination_hash, announced_identity, app_data): @@ -22,10 +23,18 @@ class LXMFDeliveryAnnounceHandler: self.lxmrouter.process_outbound() + try: + stamp_cost = stamp_cost_from_app_data(app_data) + if stamp_cost != None: + self.lxmrouter.update_stamp_cost(destination_hash, stamp_cost) + except Exception as e: + RNS.log(f"An error occurred while trying to decode announced stamp cost. The contained exception was: {e}", RNS.LOG_ERROR) + class LXMFPropagationAnnounceHandler: def __init__(self, lxmrouter): self.aspect_filter = APP_NAME+".propagation" + self.receive_path_responses = False self.lxmrouter = lxmrouter def received_announce(self, destination_hash, announced_identity, app_data): From dcb0a18cd748f0800d692e7aab5589a7b309ae77 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Fri, 6 Sep 2024 20:34:09 +0200 Subject: [PATCH 057/161] Added automatic signalling of stamp costs, stamp generation and validation --- LXMF/LXMRouter.py | 115 ++++++++++++++++++++++++++++++++++++++++++++-- LXMF/LXMessage.py | 1 + 2 files changed, 111 insertions(+), 5 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index f2daa2f..5e3bf36 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -24,6 +24,7 @@ class LXMRouter: P_LINK_MAX_INACTIVITY = 3*60 MESSAGE_EXPIRY = 30*24*60*60 + STAMP_COST_EXPIRY = 45*24*60*60 NODE_ANNOUNCE_DELAY = 20 @@ -58,7 +59,8 @@ class LXMRouter: ####################################################### def __init__(self, identity = None, storagepath = None, autopeer = AUTOPEER, autopeer_maxdepth = None, - propagation_limit = PROPAGATION_LIMIT, delivery_limit = DELIVERY_LIMIT, enforce_ratchets = False): + propagation_limit = PROPAGATION_LIMIT, delivery_limit = DELIVERY_LIMIT, enforce_ratchets = False, + enforce_stamps = False): random.seed(os.urandom(10)) @@ -94,6 +96,7 @@ class LXMRouter: self.propagation_per_transfer_limit = propagation_limit self.delivery_per_transfer_limit = delivery_limit self.enforce_ratchets = enforce_ratchets + self._enforce_stamps = enforce_stamps self.wants_download_on_path_available_from = None self.wants_download_on_path_available_to = None @@ -104,6 +107,9 @@ class LXMRouter: self.active_propagation_links = [] self.locally_delivered_transient_ids = {} self.locally_processed_transient_ids = {} + self.outbound_stamp_costs = {} + + self.cost_file_lock = threading.Lock() if identity == None: identity = RNS.Identity() @@ -148,6 +154,19 @@ class LXMRouter: except Exception as e: RNS.log("Could not load locally delivered message ID cache from storage. The contained exception was: "+str(e), RNS.LOG_ERROR) + try: + if os.path.isfile(self.storagepath+"/outbound_stamp_costs"): + with self.cost_file_lock: + with open(self.storagepath+"/outbound_stamp_costs", "rb") as outbound_stamp_cost_file: + data = outbound_stamp_cost_file.read() + self.outbound_stamp_costs = msgpack.unpackb(data) + + self.clean_outbound_stamp_costs() + self.save_outbound_stamp_costs() + + except Exception as e: + RNS.log("Could not load outbound stamp costs from storage. 
The contained exception was: "+str(e), RNS.LOG_ERROR) + atexit.register(self.exit_handler) job_thread = threading.Thread(target=self.jobloop) @@ -156,8 +175,7 @@ class LXMRouter: def announce(self, destination_hash): if destination_hash in self.delivery_destinations: - delivery_destination = self.delivery_destinations[destination_hash] - delivery_destination.announce(delivery_destination.display_name.encode("utf-8")) + self.delivery_destinations[destination_hash].announce(app_data=self.get_announce_app_data(destination_hash)) def announce_propagation_node(self): def delayed_announce(): @@ -175,7 +193,7 @@ class LXMRouter: da_thread.setDaemon(True) da_thread.start() - def register_delivery_identity(self, identity, display_name = None): + def register_delivery_identity(self, identity, display_name = None, stamp_cost = None): if not os.path.isdir(self.ratchetpath): os.makedirs(self.ratchetpath) @@ -184,11 +202,16 @@ class LXMRouter: delivery_destination.set_packet_callback(self.delivery_packet) delivery_destination.set_link_established_callback(self.delivery_link_established) delivery_destination.display_name = display_name + delivery_destination.stamp_cost = stamp_cost + if self.enforce_ratchets: delivery_destination.enforce_ratchets() if display_name != None: - delivery_destination.set_default_app_data(display_name.encode("utf-8")) + def get_app_data(): + return self.get_announce_app_data(delivery_destination) + + delivery_destination.set_default_app_data(get_app_data) self.delivery_destinations[delivery_destination.hash] = delivery_destination return delivery_destination @@ -380,6 +403,12 @@ class LXMRouter: self.propagation_node = False self.announce_propagation_node() + def enforce_stamps(self): + self._enforce_stamps = True + + def ignore_stamps(self): + self._enforce_stamps = False + def ignore_destination(self, destination_hash): if not destination_hash in self.ignored_list: self.ignored_list.append(destination_hash) @@ -553,6 +582,31 @@ class LXMRouter: self.locally_processed_transient_ids.pop(transient_id) RNS.log("Cleaned "+RNS.prettyhexrep(transient_id)+" from locally processed cache", RNS.LOG_DEBUG) + def update_stamp_cost(self, destination_hash, stamp_cost): + RNS.log(f"Updating outbound stamp cost for {RNS.prettyhexrep(destination_hash)} to {stamp_cost}", RNS.LOG_DEBUG) + self.outbound_stamp_costs[destination_hash] = [time.time(), stamp_cost] + + def job(): + self.save_outbound_stamp_costs() + threading.Thread(target=self.save_outbound_stamp_costs, daemon=True).start() + + def get_announce_app_data(self, destination_hash): + if destination_hash in self.delivery_destinations: + delivery_destination = self.delivery_destinations[destination_hash] + + display_name = None + if delivery_destination.display_name != None: + display_name = delivery_destination.display_name.encode("utf-8") + + stamp_cost = None + if delivery_destination.stamp_cost != None and type(delivery_destination.stamp_cost) == int: + if delivery_destination.stamp_cost > 0 and delivery_destination.stamp_cost < 255: + stamp_cost = delivery_destination.stamp_cost + + peer_data = [display_name, stamp_cost] + + return msgpack.packb(peer_data) + def get_weight(self, transient_id): dst_hash = self.propagation_entries[transient_id][0] lxm_rcvd = self.propagation_entries[transient_id][2] @@ -678,6 +732,36 @@ class LXMRouter: except Exception as e: RNS.log("Could not save locally processed message ID cache to storage. 
The contained exception was: "+str(e), RNS.LOG_ERROR) + def clean_outbound_stamp_costs(self): + try: + expired = [] + for destination_hash in self.outbound_stamp_costs: + entry = self.outbound_stamp_costs[destination_hash] + if time.time() > entry[0] + LXMRouter.STAMP_COST_EXPIRY: + expired.append(destination_hash) + + for destination_hash in expired: + RNS.log(f"Cleaning expired stamp cost for {destination_hash}") # TODO: Remove + self.outbound_stamp_costs.pop(destination_hash) + + except Exception as e: + RNS.log(f"Error while cleaning outbound stamp costs. The contained exception was: {e}", RNS.LOG_ERROR) + RNS.trace_exception(e) + + def save_outbound_stamp_costs(self): + with self.cost_file_lock: + try: + RNS.log("Saving outbound stamp costs...", RNS.LOG_DEBUG) # TODO: Remove + if not os.path.isdir(self.storagepath): + os.makedirs(self.storagepath) + + locally_processed_file = open(self.storagepath+"/outbound_stamp_costs", "wb") + locally_processed_file.write(msgpack.packb(self.outbound_stamp_costs)) + locally_processed_file.close() + + except Exception as e: + RNS.log("Could not save locally processed message ID cache to storage. The contained exception was: "+str(e), RNS.LOG_ERROR) + def exit_handler(self): if self.propagation_node: try: @@ -919,6 +1003,13 @@ class LXMRouter: return False def handle_outbound(self, lxmessage): + if lxmessage.stamp_cost == None: + destination_hash = lxmessage.get_destination().hash + if destination_hash in self.outbound_stamp_costs: + stamp_cost = self.outbound_stamp_costs[destination_hash][1] + lxmessage.stamp_cost = stamp_cost + RNS.log(f"No stamp cost set on LXM to {RNS.prettyhexrep(destination_hash)}, autoconfigured to {stamp_cost}, as required by latest announce", RNS.LOG_DEBUG) + lxmessage.state = LXMessage.OUTBOUND if not lxmessage.packed: lxmessage.pack() @@ -946,6 +1037,20 @@ class LXMRouter: try: message = LXMessage.unpack_from_bytes(lxmf_data) + required_stamp_cost = self.delivery_destinations[message.destination_hash].stamp_cost + if required_stamp_cost != None: + if message.validate_stamp(required_stamp_cost): + message.stamp_valid = True + else: + message.stamp_valid = False + + if not message.stamp_valid: + if self._enforce_stamps: + RNS.log(f"Dropping {message} with invalid stamp", RNS.LOG_NOTICE) + return False + else: + RNS.log(f"Received {message} with invalid stamp, but allowing anyway, since stamp enforcement is disabled", RNS.LOG_NOTICE) + if phy_stats != None: if "rssi" in phy_stats: message.rssi = phy_stats["rssi"] if "snr" in phy_stats: message.snr = phy_stats["snr"] diff --git a/LXMF/LXMessage.py b/LXMF/LXMessage.py index a79f600..76a8661 100644 --- a/LXMF/LXMessage.py +++ b/LXMF/LXMessage.py @@ -125,6 +125,7 @@ class LXMessage: self.packed = None self.stamp = None self.stamp_cost = stamp_cost + self.stamp_valid = False self.state = LXMessage.DRAFT self.method = LXMessage.UNKNOWN self.progress = 0.0 From 4b5e27a5e22620440cd1ae26e749d34c16fe9806 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Fri, 6 Sep 2024 20:34:45 +0200 Subject: [PATCH 058/161] Updated examples --- docs/example_receiver.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/docs/example_receiver.py b/docs/example_receiver.py index 02ea0c3..8effd3e 100644 --- a/docs/example_receiver.py +++ b/docs/example_receiver.py @@ -2,6 +2,9 @@ import RNS import LXMF import time +required_stamp_cost = 8 +enforce_stamps = False + def delivery_callback(message): time_string = time.strftime("%Y-%m-%d %H:%M:%S", 
time.localtime(message.timestamp)) signature_string = "Signature is invalid, reason undetermined" @@ -13,11 +16,10 @@ def delivery_callback(message): if message.unverified_reason == LXMF.LXMessage.SOURCE_UNKNOWN: signature_string = "Cannot verify, source is unknown" - required_stamp_cost = 12 - if message.validate_stamp(required_stamp_cost): - stamp_string = "Valid" + if message.stamp_valid: + stamp_string = "Validated" else: - stamp_string = "Not valid" + stamp_string = "Invalid" RNS.log("\t+--- LXMF Delivery ---------------------------------------------") RNS.log("\t| Source hash : "+RNS.prettyhexrep(message.source_hash)) @@ -35,9 +37,9 @@ def delivery_callback(message): r = RNS.Reticulum() -router = LXMF.LXMRouter(storagepath="./tmp1") +router = LXMF.LXMRouter(storagepath="./tmp1", enforce_stamps=enforce_stamps) identity = RNS.Identity() -my_lxmf_destination = router.register_delivery_identity(identity) +my_lxmf_destination = router.register_delivery_identity(identity, stamp_cost=required_stamp_cost) router.register_delivery_callback(delivery_callback) RNS.log("Ready to receive on: "+RNS.prettyhexrep(my_lxmf_destination.hash)) @@ -54,7 +56,7 @@ RNS.log("Ready to receive on: "+RNS.prettyhexrep(my_lxmf_destination.hash)) while True: input() RNS.log("Announcing lxmf.delivery destination...") - my_lxmf_destination.announce() + router.announce(my_lxmf_destination.hash) # input() # RNS.log("Requesting messages from propagation node...") From 0d76eee6cd25dafd3b0f7e15d28b346cea7d97e5 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sat, 7 Sep 2024 11:35:17 +0200 Subject: [PATCH 059/161] Stamp cost API functions and multi-process stamp generation on Android --- LXMF/LXMF.py | 13 +++- LXMF/LXMRouter.py | 59 +++++++++++++- LXMF/LXMessage.py | 164 +++++++++++++++++++++++++++++---------- docs/example_receiver.py | 2 +- 4 files changed, 189 insertions(+), 49 deletions(-) diff --git a/LXMF/LXMF.py b/LXMF/LXMF.py index 0fe4a2c..3618912 100644 --- a/LXMF/LXMF.py +++ b/LXMF/LXMF.py @@ -85,6 +85,8 @@ import RNS.vendor.umsgpack as msgpack def display_name_from_app_data(app_data=None): if app_data == None: return None + elif len(app_data) == 0: + return None else: # Version 0.5.0+ announce format if (app_data[0] >= 0x90 and app_data[0] <= 0x9f) or app_data[0] == 0xdc: @@ -93,7 +95,16 @@ def display_name_from_app_data(app_data=None): if len(peer_data) < 1: return None else: - return peer_data[0].decode("utf-8") + dn = peer_data[0] + if dn == None: + return None + else: + try: + decoded = dn.decode("utf-8") + return decoded + except: + RNS.log("Could not decode display name in included announce data. 
The contained exception was: {e}", RNS.LOG_ERROR) + return None # Original announce format else: diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 5e3bf36..e0d61fe 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -173,9 +173,9 @@ class LXMRouter: job_thread.setDaemon(True) job_thread.start() - def announce(self, destination_hash): + def announce(self, destination_hash, attached_interface=None): if destination_hash in self.delivery_destinations: - self.delivery_destinations[destination_hash].announce(app_data=self.get_announce_app_data(destination_hash)) + self.delivery_destinations[destination_hash].announce(app_data=self.get_announce_app_data(destination_hash), attached_interface=attached_interface) def announce_propagation_node(self): def delayed_announce(): @@ -202,7 +202,6 @@ class LXMRouter: delivery_destination.set_packet_callback(self.delivery_packet) delivery_destination.set_link_established_callback(self.delivery_link_established) delivery_destination.display_name = display_name - delivery_destination.stamp_cost = stamp_cost if self.enforce_ratchets: delivery_destination.enforce_ratchets() @@ -214,11 +213,38 @@ class LXMRouter: delivery_destination.set_default_app_data(get_app_data) self.delivery_destinations[delivery_destination.hash] = delivery_destination + self.set_inbound_stamp_cost(delivery_destination.hash, stamp_cost) + return delivery_destination def register_delivery_callback(self, callback): self.__delivery_callback = callback + def set_inbound_stamp_cost(self, destination_hash, stamp_cost): + if destination_hash in self.delivery_destinations: + delivery_destination = self.delivery_destinations[destination_hash] + if stamp_cost == None: + delivery_destination.stamp_cost = None + return True + elif type(stamp_cost) == int: + if stamp_cost < 1: + delivery_destination.stamp_cost = None + elif stamp_cost < 255: + delivery_destination.stamp_cost = stamp_cost + else: + return False + + return True + + return False + + def get_outbound_stamp_cost(self, destination_hash): + if destination_hash in self.outbound_stamp_costs: + stamp_cost = self.outbound_stamp_costs[destination_hash][1] + return stamp_cost + else: + return None + def set_outbound_propagation_node(self, destination_hash): if len(destination_hash) != RNS.Identity.TRUNCATED_HASHLENGTH//8 or type(destination_hash) != bytes: raise ValueError("Invalid destination hash for outbound propagation node") @@ -1020,7 +1046,13 @@ class LXMRouter: time.sleep(0.1) self.pending_outbound.append(lxmessage) - self.process_outbound() + + if lxmessage.defer_stamp and lxmessage.stamp_cost == None: + RNS.log(f"Deferred stamp generation was requested for {lxmessage}, but no stamp is required, processing immediately", RNS.LOG_DEBUG) + lxmessage.defer_stamp = False + + if not lxmessage.defer_stamp: + self.process_outbound() def get_outbound_progress(self, lxm_hash): for lxm in self.pending_outbound: @@ -1029,6 +1061,13 @@ class LXMRouter: return None + def get_outbound_lxm_stamp_cost(self, lxm_hash): + for lxm in self.pending_outbound: + if lxm.hash == lxm_hash: + return lxm.stamp_cost + + return None + ### Message Routing & Delivery ######################## ####################################################### @@ -1440,6 +1479,18 @@ class LXMRouter: self.pending_outbound.remove(lxmessage) else: RNS.log("Starting outbound processing for "+str(lxmessage)+" to "+RNS.prettyhexrep(lxmessage.get_destination().hash), RNS.LOG_DEBUG) + + # Handle potentially deferred stamp generation + if lxmessage.defer_stamp and lxmessage.stamp 
== None: + RNS.log(f"Generating deferred stamp for {lxmessage} now", RNS.LOG_DEBUG) + lxmessage.stamp = lxmessage.get_stamp() + lxmessage.defer_stamp = False + lxmessage.packed = None + lxmessage.pack() + + if lxmessage.progress == None or lxmessage.progress < 0.01: + lxmessage.progress = 0.01 + # Outbound handling for opportunistic messages if lxmessage.method == LXMessage.OPPORTUNISTIC: if lxmessage.delivery_attempts <= LXMRouter.MAX_DELIVERY_ATTEMPTS: diff --git a/LXMF/LXMessage.py b/LXMF/LXMessage.py index 76a8661..3b4956d 100644 --- a/LXMF/LXMessage.py +++ b/LXMF/LXMessage.py @@ -10,13 +10,13 @@ import multiprocessing from .LXMF import APP_NAME class LXMessage: - DRAFT = 0x00 + GENERATING = 0x00 OUTBOUND = 0x01 SENDING = 0x02 SENT = 0x04 DELIVERED = 0x08 FAILED = 0xFF - states = [DRAFT, OUTBOUND, SENDING, SENT, DELIVERED, FAILED] + states = [GENERATING, OUTBOUND, SENDING, SENT, DELIVERED, FAILED] UNKNOWN = 0x00 PACKET = 0x01 @@ -126,7 +126,8 @@ class LXMessage: self.stamp = None self.stamp_cost = stamp_cost self.stamp_valid = False - self.state = LXMessage.DRAFT + self.defer_stamp = False + self.state = LXMessage.GENERATING self.method = LXMessage.UNKNOWN self.progress = 0.0 self.rssi = None @@ -277,53 +278,128 @@ class LXMessage: start_time = time.time() total_rounds = 0 - stop_event = multiprocessing.Event() - result_queue = multiprocessing.Queue(maxsize=1) - rounds_queue = multiprocessing.Queue() - def job(stop_event): - terminated = False - rounds = 0 - - stamp = os.urandom(256//8) - while not LXMessage.stamp_valid(stamp, self.stamp_cost, workblock): - if stop_event.is_set(): - break - - if timeout != None and rounds % 10000 == 0: - if time.time() > start_time + timeout: - RNS.log(f"Stamp generation for {self} timed out", RNS.LOG_ERROR) - return None + if not RNS.vendor.platformutils.is_android(): + stop_event = multiprocessing.Event() + result_queue = multiprocessing.Queue(maxsize=1) + rounds_queue = multiprocessing.Queue() + def job(stop_event): + terminated = False + rounds = 0 stamp = os.urandom(256//8) - rounds += 1 + while not LXMessage.stamp_valid(stamp, self.stamp_cost, workblock): + if stop_event.is_set(): + break - rounds_queue.put(rounds) - if not stop_event.is_set(): - result_queue.put(stamp) + if timeout != None and rounds % 10000 == 0: + if time.time() > start_time + timeout: + RNS.log(f"Stamp generation for {self} timed out", RNS.LOG_ERROR) + return None - job_procs = [] - jobs = multiprocessing.cpu_count() - for _ in range(jobs): - process = multiprocessing.Process(target=job, kwargs={"stop_event": stop_event},) - job_procs.append(process) - process.start() + stamp = os.urandom(256//8) + rounds += 1 - stamp = result_queue.get() - stop_event.set() + rounds_queue.put(rounds) + if not stop_event.is_set(): + result_queue.put(stamp) - for j in range(jobs): - process = job_procs[j] - process.join() - total_rounds += rounds_queue.get() + job_procs = [] + jobs = multiprocessing.cpu_count() + for _ in range(jobs): + process = multiprocessing.Process(target=job, kwargs={"stop_event": stop_event},) + job_procs.append(process) + process.start() - duration = time.time() - start_time - rounds = total_rounds + stamp = result_queue.get() + stop_event.set() + + for j in range(jobs): + process = job_procs[j] + process.join() + total_rounds += rounds_queue.get() + + duration = time.time() - start_time + rounds = total_rounds + + else: + # Semaphore support is flaky to non-existent on + # Android, so we need to manually dispatch and + # manage workloads here, while periodically + # 
checking in on the progress. + + use_nacl = False + try: + import nacl.encoding + import nacl.hash + use_nacl = True + except: + pass + + def full_hash(m): + if use_nacl: + return nacl.hash.sha256(m, encoder=nacl.encoding.RawEncoder) + else: + return RNS.Identity.full_hash(m) + + def sv(s, c, w): + target = 0b1<<256-c + m = w+s + result = full_hash(m) + if int.from_bytes(result, byteorder="big") > target: + return False + else: + return True + + stamp = None + wm = multiprocessing.Manager() + jobs = multiprocessing.cpu_count() + + RNS.log(f"Dispatching {jobs} workers for stamp generation...") # TODO: Remove + + results_dict = wm.dict() + while stamp == None: + job_procs = [] + + def job(procnum=None, results_dict=None, wb=None): + RNS.log(f"Worker {procnum} starting...") # TODO: Remove + rounds = 0 + + stamp = os.urandom(256//8) + while not sv(stamp, self.stamp_cost, wb): + if rounds >= 500: + stamp = None + RNS.log(f"Worker {procnum} found no result in {rounds} rounds") # TODO: Remove + break + + stamp = os.urandom(256//8) + rounds += 1 + + results_dict[procnum] = [stamp, rounds] + + for pnum in range(jobs): + process = multiprocessing.Process(target=job, kwargs={"procnum":pnum, "results_dict": results_dict, "wb": workblock},) + job_procs.append(process) + process.start() + + for process in job_procs: + process.join() + + for j in results_dict: + r = results_dict[j] + RNS.log(f"Result from {r}: {r[1]} rounds, stamp: {r[0]}") # TODO: Remove + total_rounds += r[1] + if r[0] != None: + stamp = r[0] + RNS.log(f"Found stamp: {stamp}") # TODO: Remove + + duration = time.time() - start_time + rounds = total_rounds # TODO: Remove stats output RNS.log(f"Stamp generated in {RNS.prettytime(duration)} / {rounds} rounds", RNS.LOG_DEBUG) - RNS.log(f"Rounds per second {int(rounds/duration)}", RNS.LOG_DEBUG) - RNS.log(f"Stamp: {RNS.hexrep(stamp)}", RNS.LOG_DEBUG) - RNS.log(f"Resulting hash: {RNS.hexrep(RNS.Identity.full_hash(workblock+stamp))}", RNS.LOG_DEBUG) + # RNS.log(f"Rounds per second {int(rounds/duration)}", RNS.LOG_DEBUG) + # RNS.log(f"Stamp: {RNS.hexrep(stamp)}", RNS.LOG_DEBUG) + # RNS.log(f"Resulting hash: {RNS.hexrep(RNS.Identity.full_hash(workblock+stamp))}", RNS.LOG_DEBUG) ########################### return stamp @@ -344,9 +420,11 @@ class LXMessage: hashed_part += msgpack.packb(self.payload) self.hash = RNS.Identity.full_hash(hashed_part) self.message_id = self.hash - self.stamp = self.get_stamp() - if self.stamp != None: - self.payload.append(self.stamp) + + if not self.defer_stamp: + self.stamp = self.get_stamp() + if self.stamp != None: + self.payload.append(self.stamp) signed_part = b"" signed_part += hashed_part diff --git a/docs/example_receiver.py b/docs/example_receiver.py index 8effd3e..64f914a 100644 --- a/docs/example_receiver.py +++ b/docs/example_receiver.py @@ -39,7 +39,7 @@ r = RNS.Reticulum() router = LXMF.LXMRouter(storagepath="./tmp1", enforce_stamps=enforce_stamps) identity = RNS.Identity() -my_lxmf_destination = router.register_delivery_identity(identity, stamp_cost=required_stamp_cost) +my_lxmf_destination = router.register_delivery_identity(identity, display_name="Anonymous Peer", stamp_cost=required_stamp_cost) router.register_delivery_callback(delivery_callback) RNS.log("Ready to receive on: "+RNS.prettyhexrep(my_lxmf_destination.hash)) From 74cbd114736fbe3f4f651b0e15aa640ec3324000 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sat, 7 Sep 2024 11:37:39 +0200 Subject: [PATCH 060/161] Cleanup --- LXMF/LXMRouter.py | 5 +---- 1 file changed, 1 insertion(+), 4 
deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index e0d61fe..56ad297 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -58,10 +58,7 @@ class LXMRouter: ### Developer-facing API ############################## ####################################################### - def __init__(self, identity = None, storagepath = None, autopeer = AUTOPEER, autopeer_maxdepth = None, - propagation_limit = PROPAGATION_LIMIT, delivery_limit = DELIVERY_LIMIT, enforce_ratchets = False, - enforce_stamps = False): - + def __init__(self, identity = None, storagepath = None, autopeer = AUTOPEER, autopeer_maxdepth = None, propagation_limit = PROPAGATION_LIMIT, delivery_limit = DELIVERY_LIMIT, enforce_ratchets = False, enforce_stamps = False): random.seed(os.urandom(10)) self.pending_inbound = [] From 62f5a9eeadffccbd6e751dbc7da949f1ddc7f54c Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sat, 7 Sep 2024 15:17:48 +0200 Subject: [PATCH 061/161] Implemented ticket exchanges. Implemented stamp generation and validation by tickets. --- LXMF/LXMRouter.py | 175 +++++++++++++++++++++++++++++++++++++-- LXMF/LXMessage.py | 98 ++++++++++++++++------ docs/example_receiver.py | 7 ++ docs/example_sender.py | 6 +- 4 files changed, 251 insertions(+), 35 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 56ad297..2ff5d75 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -9,6 +9,7 @@ import RNS import RNS.vendor.umsgpack as msgpack from .LXMF import APP_NAME +from .LXMF import FIELD_TICKET from .LXMPeer import LXMPeer from .LXMessage import LXMessage @@ -105,8 +106,10 @@ class LXMRouter: self.locally_delivered_transient_ids = {} self.locally_processed_transient_ids = {} self.outbound_stamp_costs = {} + self.available_tickets = {"outbound": {}, "inbound": {}, "last_deliveries": {}} self.cost_file_lock = threading.Lock() + self.ticket_file_lock = threading.Lock() if identity == None: identity = RNS.Identity() @@ -164,6 +167,31 @@ class LXMRouter: except Exception as e: RNS.log("Could not load outbound stamp costs from storage. The contained exception was: "+str(e), RNS.LOG_ERROR) + try: + if os.path.isfile(self.storagepath+"/available_tickets"): + with self.ticket_file_lock: + with open(self.storagepath+"/available_tickets", "rb") as available_tickets_file: + data = available_tickets_file.read() + self.available_tickets = msgpack.unpackb(data) + if not type(self.available_tickets) == dict: + RNS.log("Invalid data format for loaded available tickets, recreating...", RNS.LOG_ERROR) + self.available_tickets = {"outbound": {}, "inbound": {}, "last_deliveries": {}} + if not "outbound" in self.available_tickets: + RNS.log("Missing outbound entry in loaded available tickets, recreating...", RNS.LOG_ERROR) + self.available_tickets["outbound"] = {} + if not "inbound" in self.available_tickets: + RNS.log("Missing inbound entry in loaded available tickets, recreating...", RNS.LOG_ERROR) + self.available_tickets["inbound"] = {} + if not "last_deliveries" in self.available_tickets: + RNS.log("Missing local_deliveries entry in loaded available tickets, recreating...", RNS.LOG_ERROR) + self.available_tickets["last_deliveries"] = {} + + self.clean_available_tickets() + self.save_available_tickets() + + except Exception as e: + RNS.log("Could not load outbound stamp costs from storage. 
The contained exception was: "+str(e), RNS.LOG_ERROR) + atexit.register(self.exit_handler) job_thread = threading.Thread(target=self.jobloop) @@ -191,6 +219,10 @@ class LXMRouter: da_thread.start() def register_delivery_identity(self, identity, display_name = None, stamp_cost = None): + if len(self.delivery_destinations) != 0: + RNS.log("Currently only one delivery identity is supported per LXMF router instance", RNS.LOG_ERROR) + return None + if not os.path.isdir(self.ratchetpath): os.makedirs(self.ratchetpath) @@ -647,6 +679,61 @@ class LXMRouter: return weight + def generate_ticket(self, destination_hash, expiry=LXMessage.TICKET_EXPIRY): + now = time.time() + ticket = None + if destination_hash in self.available_tickets["last_deliveries"]: + last_delivery = self.available_tickets["last_deliveries"][destination_hash] + elapsed = now - last_delivery + if elapsed < LXMessage.TICKET_INTERVAL: + RNS.log(f"A ticket for {RNS.prettyhexrep(destination_hash)} was already delivered {RNS.prettytime(elapsed)} ago, not including another ticket yet", RNS.LOG_DEBUG) + return None + + if destination_hash in self.available_tickets["inbound"]: + for ticket in self.available_tickets["inbound"][destination_hash]: + ticket_entry = self.available_tickets["inbound"][destination_hash][ticket] + expires = ticket_entry[0]; validity_left = expires - now + if validity_left > LXMessage.TICKET_RENEW: + RNS.log(f"Found generated ticket for {RNS.prettyhexrep(destination_hash)} with {RNS.prettytime(validity_left)} of validity left, re-using this one", RNS.LOG_DEBUG) + return [expires, ticket] + + else: + self.available_tickets["inbound"][destination_hash] = {} + + RNS.log(f"No generated tickets for {RNS.prettyhexrep(destination_hash)} with enough validity found, generating a new one", RNS.LOG_DEBUG) + expires = now+expiry + ticket = os.urandom(LXMessage.TICKET_LENGTH) + self.available_tickets["inbound"][destination_hash][ticket] = [expires] + self.save_available_tickets() + + return [expires, ticket] + + def remember_ticket(self, destination_hash, ticket_entry): + expires = ticket_entry[0]-time.time() + RNS.log(f"Remembering ticket for {RNS.prettyhexrep(destination_hash)}, expires in {RNS.prettytime(expires)}", RNS.LOG_DEBUG) + self.available_tickets["outbound"][destination_hash] = [ticket_entry[0], ticket_entry[1]] + + def get_outbound_ticket(self, destination_hash): + if destination_hash in self.available_tickets["outbound"]: + entry = self.available_tickets["outbound"][destination_hash] + if entry[0] > time.time(): + return entry[1] + + return None + + def get_inbound_tickets(self, destination_hash): + now = time.time() + available_tickets = [] + if destination_hash in self.available_tickets["inbound"]: + for inbound_ticket in self.available_tickets["inbound"][destination_hash]: + if now < self.available_tickets["inbound"][destination_hash][inbound_ticket][0]: + available_tickets.append(inbound_ticket) + + if len(available_tickets) == 0: + return None + else: + return available_tickets + def get_size(self, transient_id): lxm_size = self.propagation_entries[transient_id][3] return lxm_size @@ -778,13 +865,57 @@ class LXMRouter: if not os.path.isdir(self.storagepath): os.makedirs(self.storagepath) - locally_processed_file = open(self.storagepath+"/outbound_stamp_costs", "wb") - locally_processed_file.write(msgpack.packb(self.outbound_stamp_costs)) - locally_processed_file.close() + outbound_stamp_costs_file = open(self.storagepath+"/outbound_stamp_costs", "wb") + 
outbound_stamp_costs_file.write(msgpack.packb(self.outbound_stamp_costs)) + outbound_stamp_costs_file.close() except Exception as e: RNS.log("Could not save locally processed message ID cache to storage. The contained exception was: "+str(e), RNS.LOG_ERROR) + def clean_available_tickets(self): + try: + # Clean outbound tickets + expired_outbound = [] + for destination_hash in self.available_tickets["outbound"]: + entry = self.available_tickets["outbound"][destination_hash] + if time.time() > entry[0]: + expired_outbound.append(destination_hash) + + for destination_hash in expired_outbound: + RNS.log(f"Cleaning expired outbound ticket for {destination_hash}") # TODO: Remove + self.available_tickets["outbound"].pop(destination_hash) + + # Clean inbound tickets + for destination_hash in self.available_tickets["inbound"]: + expired_inbound = [] + for inbound_ticket in self.available_tickets["inbound"][destination_hash]: + entry = self.available_tickets["inbound"][destination_hash][inbound_ticket] + ticket_expiry = entry[0] + if time.time() > ticket_expiry+LXMessage.TICKET_GRACE: + expired_inbound.append(inbound_ticket) + + for inbound_ticket in expired_inbound: + RNS.log(f"Cleaning expired inbound ticket for {destination_hash}") # TODO: Remove + self.available_tickets["inbound"][destination_hash].pop(destination_hash) + + except Exception as e: + RNS.log(f"Error while cleaning outbound stamp costs. The contained exception was: {e}", RNS.LOG_ERROR) + RNS.trace_exception(e) + + def save_available_tickets(self): + with self.ticket_file_lock: + try: + RNS.log("Saving available tickets...", RNS.LOG_DEBUG) # TODO: Remove + if not os.path.isdir(self.storagepath): + os.makedirs(self.storagepath) + + available_tickets_file = open(self.storagepath+"/available_tickets", "wb") + available_tickets_file.write(msgpack.packb(self.available_tickets)) + available_tickets_file.close() + + except Exception as e: + RNS.log("Could not save available tickets to storage. The contained exception was: "+str(e), RNS.LOG_ERROR) + def exit_handler(self): if self.propagation_node: try: @@ -1026,14 +1157,29 @@ class LXMRouter: return False def handle_outbound(self, lxmessage): + destination_hash = lxmessage.get_destination().hash if lxmessage.stamp_cost == None: - destination_hash = lxmessage.get_destination().hash if destination_hash in self.outbound_stamp_costs: stamp_cost = self.outbound_stamp_costs[destination_hash][1] lxmessage.stamp_cost = stamp_cost RNS.log(f"No stamp cost set on LXM to {RNS.prettyhexrep(destination_hash)}, autoconfigured to {stamp_cost}, as required by latest announce", RNS.LOG_DEBUG) lxmessage.state = LXMessage.OUTBOUND + + # If an outbound ticket is available for this + # destination, attach it to the message. + lxmessage.outbound_ticket = self.get_outbound_ticket(destination_hash) + if lxmessage.outbound_ticket != None and lxmessage.defer_stamp: + RNS.log(f"Deferred stamp generation was requested for {lxmessage}, but outbound ticket was applied, processing immediately", RNS.LOG_DEBUG) + lxmessage.defer_stamp = False + + # If requested, include a ticket to allow the + # destination to reply without generating a stamp. 
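# Illustrative aside (not part of the patch; names below are hypothetical): the value
# attached in FIELD_TICKET is the [expires, ticket_bytes] pair returned by
# generate_ticket(), and lxmf_delivery() on the receiving side remembers it. A peer
# holding such a ticket can then reply without doing any proof-of-work, since
# handle_outbound() (just above) picks the remembered ticket up via
# get_outbound_ticket():
#
#     reply = LXMessage(peer_destination, my_destination, "Reply",
#                       desired_method=LXMessage.DIRECT)
#     router.handle_outbound(reply)   # stamp derived from the remembered ticket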
+ if lxmessage.include_ticket: + ticket = self.generate_ticket(lxmessage.destination_hash) + if ticket: + lxmessage.fields[FIELD_TICKET] = ticket + if not lxmessage.packed: lxmessage.pack() @@ -1073,9 +1219,23 @@ class LXMRouter: try: message = LXMessage.unpack_from_bytes(lxmf_data) + if message.signature_validated and FIELD_TICKET in message.fields: + ticket_entry = message.fields[FIELD_TICKET] + if type(ticket_entry) == list and len(ticket_entry) > 1: + expires = ticket_entry[0] + ticket = ticket_entry[1] + + if time.time() < expires: + if type(ticket) == bytes and len(ticket) == LXMessage.TICKET_LENGTH: + self.remember_ticket(message.source_hash, ticket_entry) + def save_job(): + self.save_available_tickets() + threading.Thread(target=save_job, daemon=True).start() + required_stamp_cost = self.delivery_destinations[message.destination_hash].stamp_cost if required_stamp_cost != None: - if message.validate_stamp(required_stamp_cost): + destination_tickets = self.get_inbound_tickets(message.source_hash) + if message.validate_stamp(required_stamp_cost, tickets=destination_tickets): message.stamp_valid = True else: message.stamp_valid = False @@ -1471,6 +1631,11 @@ class LXMRouter: if lxmessage.state == LXMessage.DELIVERED: RNS.log("Delivery has occurred for "+str(lxmessage)+", removing from outbound queue", RNS.LOG_DEBUG) self.pending_outbound.remove(lxmessage) + if lxmessage.include_ticket and FIELD_TICKET in lxmessage.fields: + RNS.log(f"Updating latest ticket delivery for {RNS.prettyhexrep(lxmessage.destination_hash)}", RNS.LOG_DEBUG) + self.available_tickets["last_deliveries"][lxmessage.destination_hash] = time.time() + self.save_available_tickets() + elif lxmessage.method == LXMessage.PROPAGATED and lxmessage.state == LXMessage.SENT: RNS.log("Propagation has occurred for "+str(lxmessage)+", removing from outbound queue", RNS.LOG_DEBUG) self.pending_outbound.remove(lxmessage) diff --git a/LXMF/LXMessage.py b/LXMF/LXMessage.py index 3b4956d..14ff299 100644 --- a/LXMF/LXMessage.py +++ b/LXMF/LXMessage.py @@ -35,6 +35,17 @@ class LXMessage: DESTINATION_LENGTH = RNS.Identity.TRUNCATED_HASHLENGTH//8 SIGNATURE_LENGTH = RNS.Identity.SIGLENGTH//8 + TICKET_LENGTH = RNS.Identity.TRUNCATED_HASHLENGTH//8 + + # Default ticket expiry is 3 weeks, with an + # additional grace period of 5 days, allowing + # for timekeeping inaccuracies. Tickets will + # automatically renew when there is less than + # 14 days to expiry. 
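# A rough worked example of the lifecycle the constants below describe, assuming a
# ticket issued on day 0: it can be used for stamp generation until day 21
# (TICKET_EXPIRY); the issuer keeps it on file until about day 26 (expiry plus
# TICKET_GRACE) to absorb clock drift before cleaning it out; once less than 14 days
# of validity remain (TICKET_RENEW), i.e. from roughly day 7 onwards, the next
# ticket-bearing message carries a freshly generated ticket; and TICKET_INTERVAL
# rate-limits how often a new ticket is actually delivered to the same peer.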
+ TICKET_EXPIRY = 21*24*60*60 + TICKET_GRACE = 5*24*60*60 + TICKET_RENEW = 14*24*60*60 + TICKET_INTERVAL = 3*24*60*60 # LXMF overhead is 111 bytes per message: # 16 bytes for destination hash @@ -93,8 +104,7 @@ class LXMessage: else: return "" - def __init__(self, destination, source, content = "", title = "", fields = None, desired_method = None, - destination_hash = None, source_hash = None, stamp_cost=None): + def __init__(self, destination, source, content = "", title = "", fields = None, desired_method = None, destination_hash = None, source_hash = None, stamp_cost=None, include_ticket=False): if isinstance(destination, RNS.Destination) or destination == None: self.__destination = destination @@ -114,25 +124,31 @@ class LXMessage: else: raise ValueError("LXMessage initialised with invalid source") + if title == None: + title = "" + self.set_title_from_string(title) self.set_content_from_string(content) self.set_fields(fields) - self.payload = None - self.timestamp = None - self.signature = None - self.hash = None - self.packed = None - self.stamp = None - self.stamp_cost = stamp_cost - self.stamp_valid = False - self.defer_stamp = False - self.state = LXMessage.GENERATING - self.method = LXMessage.UNKNOWN - self.progress = 0.0 - self.rssi = None - self.snr = None - self.q = None + self.payload = None + self.timestamp = None + self.signature = None + self.hash = None + self.packed = None + self.state = LXMessage.GENERATING + self.method = LXMessage.UNKNOWN + self.progress = 0.0 + self.rssi = None + self.snr = None + self.q = None + + self.stamp = None + self.stamp_cost = stamp_cost + self.stamp_valid = False + self.defer_stamp = False + self.outbound_ticket = None + self.include_ticket = include_ticket self.propagation_packed = None self.paper_packed = None @@ -254,7 +270,13 @@ class LXMessage: else: return True - def validate_stamp(self, target_cost): + def validate_stamp(self, target_cost, tickets=None): + if tickets != None: + for ticket in tickets: + if self.stamp == RNS.Identity.truncated_hash(ticket+self.message_id): + RNS.log(f"Stamp on {self} validated by inbound ticket", RNS.LOG_DEBUG) # TODO: Remove at some point + return True + if self.stamp == None: return False else: @@ -264,14 +286,25 @@ class LXMessage: return False def get_stamp(self, timeout=None): - if self.stamp_cost == None: + # If an outbound ticket exists, use this for + # generating a valid stamp. + if self.outbound_ticket != None and type(self.outbound_ticket) == bytes and len(self.outbound_ticket) == LXMessage.TICKET_LENGTH: + RNS.log(f"Generating stamp with outbound ticket for {self}", RNS.LOG_DEBUG) # TODO: Remove at some point + return RNS.Identity.truncated_hash(self.outbound_ticket+self.message_id) + + # If no stamp cost is required, we can just + # return immediately. + elif self.stamp_cost == None: return None + # If a stamp was already generated, return + # it immediately. elif self.stamp != None: - # TODO: Check that message hash cannot actually - # change under any circumstances before handoff return self.stamp + # Otherwise, we will need to generate a + # valid stamp according to the cost that + # the receiver has specified. 
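# In rough terms (an illustrative reading of the stamp_valid() check used here, not a
# formal specification): the workblock is derived from this message's ID, and a
# candidate stamp is accepted when sha256(workblock + stamp), read as a big-endian
# integer, is at most 2**(256 - stamp_cost), i.e. the digest effectively begins with
# stamp_cost zero bits. Each random attempt succeeds with probability about
# 2**-stamp_cost, so a cost of 8 averages around 256 hashes, while a cost of 16
# averages around 65,536.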
else: RNS.log(f"Generating stamp with cost {self.stamp_cost} for {self}...", RNS.LOG_DEBUG) workblock = LXMessage.stamp_workblock(self.message_id) @@ -279,6 +312,7 @@ class LXMessage: total_rounds = 0 if not RNS.vendor.platformutils.is_android(): + RNS.log("Preparing IPC semaphores", RNS.LOG_DEBUG) # TODO: Remove stop_event = multiprocessing.Event() result_queue = multiprocessing.Queue(maxsize=1) rounds_queue = multiprocessing.Queue() @@ -305,14 +339,17 @@ class LXMessage: job_procs = [] jobs = multiprocessing.cpu_count() + RNS.log("Starting workers", RNS.LOG_DEBUG) # TODO: Remove for _ in range(jobs): process = multiprocessing.Process(target=job, kwargs={"stop_event": stop_event},) job_procs.append(process) process.start() + RNS.log("Awaiting results on queue", RNS.LOG_DEBUG) # TODO: Remove stamp = result_queue.get() stop_event.set() + RNS.log("Joining worker processes", RNS.LOG_DEBUG) # TODO: Remove for j in range(jobs): process = job_procs[j] process.join() @@ -354,21 +391,21 @@ class LXMessage: wm = multiprocessing.Manager() jobs = multiprocessing.cpu_count() - RNS.log(f"Dispatching {jobs} workers for stamp generation...") # TODO: Remove + # RNS.log(f"Dispatching {jobs} workers for stamp generation...") # TODO: Remove results_dict = wm.dict() while stamp == None: job_procs = [] def job(procnum=None, results_dict=None, wb=None): - RNS.log(f"Worker {procnum} starting...") # TODO: Remove + # RNS.log(f"Worker {procnum} starting...") # TODO: Remove rounds = 0 stamp = os.urandom(256//8) while not sv(stamp, self.stamp_cost, wb): if rounds >= 500: stamp = None - RNS.log(f"Worker {procnum} found no result in {rounds} rounds") # TODO: Remove + # RNS.log(f"Worker {procnum} found no result in {rounds} rounds") # TODO: Remove break stamp = os.urandom(256//8) @@ -386,17 +423,24 @@ class LXMessage: for j in results_dict: r = results_dict[j] - RNS.log(f"Result from {r}: {r[1]} rounds, stamp: {r[0]}") # TODO: Remove + # RNS.log(f"Result from {r}: {r[1]} rounds, stamp: {r[0]}") # TODO: Remove total_rounds += r[1] if r[0] != None: stamp = r[0] - RNS.log(f"Found stamp: {stamp}") # TODO: Remove + # RNS.log(f"Found stamp: {stamp}") # TODO: Remove + + if stamp == None: + elapsed = time.time() - start_time + speed = total_rounds/elapsed + RNS.log(f"Stamp generation for {self} running. 
{total_rounds} rounds completed so far, {int(speed)} rounds per second", RNS.LOG_DEBUG) duration = time.time() - start_time rounds = total_rounds + + speed = total_rounds/duration # TODO: Remove stats output - RNS.log(f"Stamp generated in {RNS.prettytime(duration)} / {rounds} rounds", RNS.LOG_DEBUG) + RNS.log(f"Stamp generated in {RNS.prettytime(duration)}, {rounds} rounds, {int(speed)} rounds per second", RNS.LOG_DEBUG) # RNS.log(f"Rounds per second {int(rounds/duration)}", RNS.LOG_DEBUG) # RNS.log(f"Stamp: {RNS.hexrep(stamp)}", RNS.LOG_DEBUG) # RNS.log(f"Resulting hash: {RNS.hexrep(RNS.Identity.full_hash(workblock+stamp))}", RNS.LOG_DEBUG) diff --git a/docs/example_receiver.py b/docs/example_receiver.py index 64f914a..7d3aa20 100644 --- a/docs/example_receiver.py +++ b/docs/example_receiver.py @@ -6,6 +6,7 @@ required_stamp_cost = 8 enforce_stamps = False def delivery_callback(message): + global my_lxmf_destination, router time_string = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(message.timestamp)) signature_string = "Signature is invalid, reason undetermined" if message.signature_validated: @@ -35,6 +36,12 @@ def delivery_callback(message): RNS.log("\t| Stamp : "+stamp_string) RNS.log("\t+---------------------------------------------------------------") + # Optionally, send a reply + # source = my_lxmf_destination + # dest = message.source + # lxm = LXMF.LXMessage(dest, source, "Reply", None, desired_method=LXMF.LXMessage.DIRECT, include_ticket=True) + # router.handle_outbound(lxm) + r = RNS.Reticulum() router = LXMF.LXMRouter(storagepath="./tmp1", enforce_stamps=enforce_stamps) diff --git a/docs/example_sender.py b/docs/example_sender.py index 3dc6494..bcb8d36 100644 --- a/docs/example_sender.py +++ b/docs/example_sender.py @@ -14,7 +14,7 @@ r = RNS.Reticulum() router = LXMF.LXMRouter(storagepath="./tmp2") router.register_delivery_callback(delivery_callback) ident = RNS.Identity() -source = router.register_delivery_identity(ident, display_name=random_names[random.randint(0,len(random_names)-1)]) +source = router.register_delivery_identity(ident, display_name=random_names[random.randint(0,len(random_names)-1)], stamp_cost=8) router.announce(source.hash) RNS.log("Source announced") @@ -42,14 +42,14 @@ while True: lxm = LXMF.LXMessage(dest, source, random_msgs[random.randint(0,len(random_msgs)-1)], random_titles[random.randint(0,len(random_titles)-1)], - desired_method=LXMF.LXMessage.DIRECT) + desired_method=LXMF.LXMessage.DIRECT, include_ticket=True) # Or, create an oppertunistic, single-packet message # for sending without first establishing a link: # lxm = LXMF.LXMessage(dest, source, "This is a test", # random_titles[random.randint(0,len(random_titles)-1)], - # desired_method=LXMF.LXMessage.OPPORTUNISTIC) + # desired_method=LXMF.LXMessage.OPPORTUNISTIC, include_ticket=True) # Or, try sending the message via a propagation node: From 40eb014c917c33927384c59ddd9b66533d839580 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sat, 7 Sep 2024 22:40:13 +0200 Subject: [PATCH 062/161] Implemented deferred multiprocessor stamp generation in the background --- LXMF/LXMRouter.py | 101 ++++++++++++++++++++---- LXMF/LXMessage.py | 190 +++++++++++++++++++++++++++------------------- 2 files changed, 200 insertions(+), 91 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 2ff5d75..9a82a13 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -95,6 +95,7 @@ class LXMRouter: self.delivery_per_transfer_limit = delivery_limit self.enforce_ratchets = enforce_ratchets 
self._enforce_stamps = enforce_stamps + self.pending_deferred_stamps = {} self.wants_download_on_path_available_from = None self.wants_download_on_path_available_to = None @@ -110,6 +111,7 @@ class LXMRouter: self.cost_file_lock = threading.Lock() self.ticket_file_lock = threading.Lock() + self.stamp_gen_lock = threading.Lock() if identity == None: identity = RNS.Identity() @@ -237,8 +239,7 @@ class LXMRouter: if display_name != None: def get_app_data(): - return self.get_announce_app_data(delivery_destination) - + return self.get_announce_app_data(delivery_destination.hash) delivery_destination.set_default_app_data(get_app_data) self.delivery_destinations[delivery_destination.hash] = delivery_destination @@ -540,6 +541,7 @@ class LXMRouter: ####################################################### JOB_OUTBOUND_INTERVAL = 1 + JOB_STAMPS_INTERVAL = 1 JOB_LINKS_INTERVAL = 1 JOB_TRANSIENT_INTERVAL = 60 JOB_STORE_INTERVAL = 120 @@ -550,6 +552,9 @@ class LXMRouter: if self.processing_count % LXMRouter.JOB_OUTBOUND_INTERVAL == 0: self.process_outbound() + if self.processing_count % LXMRouter.JOB_STAMPS_INTERVAL == 0: + threading.Thread(target=self.process_deferred_stamps, daemon=True).start() + if self.processing_count % LXMRouter.JOB_LINKS_INTERVAL == 0: self.clean_links() @@ -721,6 +726,14 @@ class LXMRouter: return None + def get_outbound_ticket_expiry(self, destination_hash): + if destination_hash in self.available_tickets["outbound"]: + entry = self.available_tickets["outbound"][destination_hash] + if entry[0] > time.time(): + return entry[0] + + return None + def get_inbound_tickets(self, destination_hash): now = time.time() available_tickets = [] @@ -916,6 +929,29 @@ class LXMRouter: except Exception as e: RNS.log("Could not save available tickets to storage. 
The contained exception was: "+str(e), RNS.LOG_ERROR) + def reload_available_tickets(self): + RNS.log("Reloading available tickets from storage", RNS.LOG_DEBUG) + try: + with self.ticket_file_lock: + with open(self.storagepath+"/available_tickets", "rb") as available_tickets_file: + data = available_tickets_file.read() + self.available_tickets = msgpack.unpackb(data) + if not type(self.available_tickets) == dict: + RNS.log("Invalid data format for loaded available tickets, recreating...", RNS.LOG_ERROR) + self.available_tickets = {"outbound": {}, "inbound": {}, "last_deliveries": {}} + if not "outbound" in self.available_tickets: + RNS.log("Missing outbound entry in loaded available tickets, recreating...", RNS.LOG_ERROR) + self.available_tickets["outbound"] = {} + if not "inbound" in self.available_tickets: + RNS.log("Missing inbound entry in loaded available tickets, recreating...", RNS.LOG_ERROR) + self.available_tickets["inbound"] = {} + if not "last_deliveries" in self.available_tickets: + RNS.log("Missing local_deliveries entry in loaded available tickets, recreating...", RNS.LOG_ERROR) + self.available_tickets["last_deliveries"] = {} + + except Exception as e: + RNS.log(f"An error occurred while reloading available tickets from storage: {e}", RNS.LOG_ERROR) + def exit_handler(self): if self.propagation_node: try: @@ -1188,19 +1224,24 @@ class LXMRouter: while self.processing_outbound: time.sleep(0.1) - self.pending_outbound.append(lxmessage) - if lxmessage.defer_stamp and lxmessage.stamp_cost == None: RNS.log(f"Deferred stamp generation was requested for {lxmessage}, but no stamp is required, processing immediately", RNS.LOG_DEBUG) lxmessage.defer_stamp = False if not lxmessage.defer_stamp: + self.pending_outbound.append(lxmessage) self.process_outbound() + else: + self.pending_deferred_stamps[lxmessage.message_id] = lxmessage def get_outbound_progress(self, lxm_hash): for lxm in self.pending_outbound: if lxm.hash == lxm_hash: return lxm.progress + + for lxm_id in self.pending_deferred_stamps: + if self.pending_deferred_stamps[lxm_id].hash == lxm_hash: + return self.pending_deferred_stamps[lxm_id].progress return None @@ -1208,6 +1249,10 @@ class LXMRouter: for lxm in self.pending_outbound: if lxm.hash == lxm_hash: return lxm.stamp_cost + + for lxm_id in self.pending_deferred_stamps: + if self.pending_deferred_stamps[lxm_id].hash == lxm_hash: + return self.pending_deferred_stamps[lxm_id].stamp_cost return None @@ -1616,13 +1661,51 @@ class LXMRouter: def fail_message(self, lxmessage): RNS.log(str(lxmessage)+" failed to send", RNS.LOG_DEBUG) - self.pending_outbound.remove(lxmessage) + if lxmessage in self.pending_outbound: + self.pending_outbound.remove(lxmessage) + self.failed_outbound.append(lxmessage) lxmessage.state = LXMessage.FAILED if lxmessage.failed_callback != None and callable(lxmessage.failed_callback): lxmessage.failed_callback(lxmessage) + def process_deferred_stamps(self): + if len(self.pending_deferred_stamps) > 0: + RNS.log(f"Processing deferred stamps...", RNS.LOG_DEBUG) # TODO: Remove + + if self.stamp_gen_lock.locked(): + RNS.log(f"A stamp is already generating, returning...", RNS.LOG_DEBUG) # TODO: Remove + return + + else: + with self.stamp_gen_lock: + selected_lxm = None + selected_message_id = None + for message_id in self.pending_deferred_stamps: + lxmessage = self.pending_deferred_stamps[message_id] + if selected_lxm == None: + selected_lxm = lxmessage + selected_message_id = message_id + + if selected_lxm != None: + RNS.log(f"Starting stamp generation for 
{selected_lxm}...", RNS.LOG_DEBUG) + generated_stamp = selected_lxm.get_stamp() + if generated_stamp: + selected_lxm.stamp = generated_stamp + selected_lxm.defer_stamp = False + selected_lxm.packed = None + selected_lxm.pack() + self.pending_deferred_stamps.pop(selected_message_id) + self.pending_outbound.append(selected_lxm) + RNS.log(f"Stamp generation completed for {selected_lxm}", RNS.LOG_DEBUG) + else: + RNS.log(f"Deferred stamp generation did not succeed. Failing {selected_lxm}.", RNS.LOG_ERROR) + selected_lxm.stamp_generation_failed = True + self.pending_deferred_stamps.pop(selected_message_id) + self.fail_message(selected_lxm) + + def process_outbound(self, sender = None): if self.processing_outbound: return @@ -1641,14 +1724,6 @@ class LXMRouter: self.pending_outbound.remove(lxmessage) else: RNS.log("Starting outbound processing for "+str(lxmessage)+" to "+RNS.prettyhexrep(lxmessage.get_destination().hash), RNS.LOG_DEBUG) - - # Handle potentially deferred stamp generation - if lxmessage.defer_stamp and lxmessage.stamp == None: - RNS.log(f"Generating deferred stamp for {lxmessage} now", RNS.LOG_DEBUG) - lxmessage.stamp = lxmessage.get_stamp() - lxmessage.defer_stamp = False - lxmessage.packed = None - lxmessage.pack() if lxmessage.progress == None or lxmessage.progress < 0.01: lxmessage.progress = 0.01 diff --git a/LXMF/LXMessage.py b/LXMF/LXMessage.py index 14ff299..86b637d 100644 --- a/LXMF/LXMessage.py +++ b/LXMF/LXMessage.py @@ -45,7 +45,7 @@ class LXMessage: TICKET_EXPIRY = 21*24*60*60 TICKET_GRACE = 5*24*60*60 TICKET_RENEW = 14*24*60*60 - TICKET_INTERVAL = 3*24*60*60 + TICKET_INTERVAL = 1*24*60*60 # LXMF overhead is 111 bytes per message: # 16 bytes for destination hash @@ -131,24 +131,24 @@ class LXMessage: self.set_content_from_string(content) self.set_fields(fields) - self.payload = None - self.timestamp = None - self.signature = None - self.hash = None - self.packed = None - self.state = LXMessage.GENERATING - self.method = LXMessage.UNKNOWN - self.progress = 0.0 - self.rssi = None - self.snr = None - self.q = None + self.payload = None + self.timestamp = None + self.signature = None + self.hash = None + self.packed = None + self.state = LXMessage.GENERATING + self.method = LXMessage.UNKNOWN + self.progress = 0.0 + self.rssi = None + self.snr = None + self.q = None - self.stamp = None - self.stamp_cost = stamp_cost - self.stamp_valid = False - self.defer_stamp = False - self.outbound_ticket = None - self.include_ticket = include_ticket + self.stamp = None + self.stamp_cost = stamp_cost + self.stamp_valid = False + self.defer_stamp = True + self.outbound_ticket = None + self.include_ticket = include_ticket self.propagation_packed = None self.paper_packed = None @@ -166,7 +166,9 @@ class LXMessage: self.resource_representation = None self.__delivery_destination = None self.__delivery_callback = None - self.failed_callback = None + self.failed_callback = None + + self.deferred_stamp_generating = False def set_title_from_string(self, title_string): self.title = title_string.encode("utf-8") @@ -312,50 +314,79 @@ class LXMessage: total_rounds = 0 if not RNS.vendor.platformutils.is_android(): - RNS.log("Preparing IPC semaphores", RNS.LOG_DEBUG) # TODO: Remove + mp_debug = True + + jobs = multiprocessing.cpu_count() stop_event = multiprocessing.Event() - result_queue = multiprocessing.Queue(maxsize=1) + result_queue = multiprocessing.Queue(1) rounds_queue = multiprocessing.Queue() - def job(stop_event): + + def job(stop_event, pn, sc, wb): terminated = False rounds = 0 + pstamp = 
os.urandom(256//8) - stamp = os.urandom(256//8) - while not LXMessage.stamp_valid(stamp, self.stamp_cost, workblock): - if stop_event.is_set(): - break + def sv(s, c, w): + target = 0b1<<256-c; m = w+s + result = RNS.Identity.full_hash(m) + if int.from_bytes(result, byteorder="big") > target: + return False + else: + return True - if timeout != None and rounds % 10000 == 0: - if time.time() > start_time + timeout: - RNS.log(f"Stamp generation for {self} timed out", RNS.LOG_ERROR) - return None + while not stop_event.is_set() and not sv(pstamp, sc, wb): + pstamp = os.urandom(256//8); rounds += 1 - stamp = os.urandom(256//8) - rounds += 1 - - rounds_queue.put(rounds) if not stop_event.is_set(): - result_queue.put(stamp) - + stop_event.set() + result_queue.put(pstamp) + rounds_queue.put(rounds) + job_procs = [] - jobs = multiprocessing.cpu_count() - RNS.log("Starting workers", RNS.LOG_DEBUG) # TODO: Remove - for _ in range(jobs): - process = multiprocessing.Process(target=job, kwargs={"stop_event": stop_event},) + RNS.log(f"Starting {jobs} workers", RNS.LOG_DEBUG) # TODO: Remove + for jpn in range(jobs): + process = multiprocessing.Process(target=job, kwargs={"stop_event": stop_event, "pn": jpn, "sc": self.stamp_cost, "wb": workblock},) job_procs.append(process) process.start() - RNS.log("Awaiting results on queue", RNS.LOG_DEBUG) # TODO: Remove stamp = result_queue.get() - stop_event.set() - - RNS.log("Joining worker processes", RNS.LOG_DEBUG) # TODO: Remove - for j in range(jobs): - process = job_procs[j] - process.join() - total_rounds += rounds_queue.get() - + RNS.log("Got stamp result from worker", RNS.LOG_DEBUG) # TODO: Remove duration = time.time() - start_time + + spurious_results = 0 + try: + while True: + result_queue.get_nowait() + spurious_results += 1 + except: + pass + + for j in range(jobs): + nrounds = 0 + try: + nrounds = rounds_queue.get(timeout=2) + except Exception as e: + RNS.log(f"Failed to get round stats part {j}: {e}", RNS.LOG_ERROR) # TODO: Remove + total_rounds += nrounds + + all_exited = False + exit_timeout = time.time() + 5 + while time.time() < exit_timeout: + if not any(p.is_alive() for p in job_procs): + all_exited = True + break + time.sleep(0.1) + + if not all_exited: + RNS.log("Stamp generation IPC timeout, possible worker deadlock", RNS.LOG_ERROR) + return None + + else: + for j in range(jobs): + process = job_procs[j] + process.join() + # RNS.log(f"Joined {j} / {process}", RNS.LOG_DEBUG) # TODO: Remove + rounds = total_rounds else: @@ -365,17 +396,21 @@ class LXMessage: # checking in on the progress. 
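# A hedged reading of the pattern used below: instead of blocking on queues or
# semaphores, each worker process hashes for at most a fixed number of attempts
# (rounds_per_worker) and writes its result and round count into a Manager dict, so
# the parent loop can total the rounds, log progress, and dispatch a fresh batch of
# workers until a stamp is found. The price is some process start-up overhead per
# batch, traded for regular check-ins on platforms where IPC primitives are unreliable.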
use_nacl = False - try: - import nacl.encoding - import nacl.hash - use_nacl = True - except: - pass + rounds_per_worker = 1000 + if RNS.vendor.platformutils.is_android(): + rounds_per_worker = 500 + try: + import nacl.encoding + import nacl.hash + use_nacl = True + except: + pass - def full_hash(m): - if use_nacl: + if use_nacl: + def full_hash(m): return nacl.hash.sha256(m, encoder=nacl.encoding.RawEncoder) - else: + else: + def full_hash(m): return RNS.Identity.full_hash(m) def sv(s, c, w): @@ -391,30 +426,35 @@ class LXMessage: wm = multiprocessing.Manager() jobs = multiprocessing.cpu_count() - # RNS.log(f"Dispatching {jobs} workers for stamp generation...") # TODO: Remove + RNS.log(f"Dispatching {jobs} workers for stamp generation...", RNS.LOG_DEBUG) # TODO: Remove results_dict = wm.dict() while stamp == None: job_procs = [] - def job(procnum=None, results_dict=None, wb=None): - # RNS.log(f"Worker {procnum} starting...") # TODO: Remove + def job(procnum=None, results_dict=None, wb=None, sc=None, jr=None): + RNS.log(f"Worker {procnum} starting for {jr} rounds...") # TODO: Remove rounds = 0 + found_stamp = None + found_time = None - stamp = os.urandom(256//8) - while not sv(stamp, self.stamp_cost, wb): - if rounds >= 500: - stamp = None + while True: + pstamp = os.urandom(256//8) + rounds += 1 + if sv(pstamp, sc, wb): + found_stamp = pstamp + found_time = time.time() + break + + if rounds >= jr: # RNS.log(f"Worker {procnum} found no result in {rounds} rounds") # TODO: Remove break - stamp = os.urandom(256//8) - rounds += 1 - - results_dict[procnum] = [stamp, rounds] + results_dict[procnum] = [found_stamp, rounds, found_time] for pnum in range(jobs): - process = multiprocessing.Process(target=job, kwargs={"procnum":pnum, "results_dict": results_dict, "wb": workblock},) + pargs = {"procnum":pnum, "results_dict": results_dict, "wb": workblock, "sc":self.stamp_cost, "jr":rounds_per_worker} + process = multiprocessing.Process(target=job, kwargs=pargs) job_procs.append(process) process.start() @@ -423,14 +463,13 @@ class LXMessage: for j in results_dict: r = results_dict[j] - # RNS.log(f"Result from {r}: {r[1]} rounds, stamp: {r[0]}") # TODO: Remove total_rounds += r[1] if r[0] != None: stamp = r[0] - # RNS.log(f"Found stamp: {stamp}") # TODO: Remove + found_time = r[2] if stamp == None: - elapsed = time.time() - start_time + elapsed = found_time - start_time speed = total_rounds/elapsed RNS.log(f"Stamp generation for {self} running. 
{total_rounds} rounds completed so far, {int(speed)} rounds per second", RNS.LOG_DEBUG) @@ -439,12 +478,7 @@ class LXMessage: speed = total_rounds/duration - # TODO: Remove stats output RNS.log(f"Stamp generated in {RNS.prettytime(duration)}, {rounds} rounds, {int(speed)} rounds per second", RNS.LOG_DEBUG) - # RNS.log(f"Rounds per second {int(rounds/duration)}", RNS.LOG_DEBUG) - # RNS.log(f"Stamp: {RNS.hexrep(stamp)}", RNS.LOG_DEBUG) - # RNS.log(f"Resulting hash: {RNS.hexrep(RNS.Identity.full_hash(workblock+stamp))}", RNS.LOG_DEBUG) - ########################### return stamp From 6d83b019e1a8d17fb164d52945df5f6344c2b1a8 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sun, 8 Sep 2024 01:22:00 +0200 Subject: [PATCH 063/161] Added stamp value property on inbound --- LXMF/Handlers.py | 4 ++-- LXMF/LXMRouter.py | 2 ++ LXMF/LXMessage.py | 34 +++++++++++++++++++++++++++++----- 3 files changed, 33 insertions(+), 7 deletions(-) diff --git a/LXMF/Handlers.py b/LXMF/Handlers.py index a33d13d..eb10a76 100644 --- a/LXMF/Handlers.py +++ b/LXMF/Handlers.py @@ -25,8 +25,8 @@ class LXMFDeliveryAnnounceHandler: try: stamp_cost = stamp_cost_from_app_data(app_data) - if stamp_cost != None: - self.lxmrouter.update_stamp_cost(destination_hash, stamp_cost) + self.lxmrouter.update_stamp_cost(destination_hash, stamp_cost) + except Exception as e: RNS.log(f"An error occurred while trying to decode announced stamp cost. The contained exception was: {e}", RNS.LOG_ERROR) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 9a82a13..49793cb 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -1282,8 +1282,10 @@ class LXMRouter: destination_tickets = self.get_inbound_tickets(message.source_hash) if message.validate_stamp(required_stamp_cost, tickets=destination_tickets): message.stamp_valid = True + message.stamp_checked = True else: message.stamp_valid = False + message.stamp_checked = True if not message.stamp_valid: if self._enforce_stamps: diff --git a/LXMF/LXMessage.py b/LXMF/LXMessage.py index 86b637d..adbac64 100644 --- a/LXMF/LXMessage.py +++ b/LXMF/LXMessage.py @@ -46,6 +46,7 @@ class LXMessage: TICKET_GRACE = 5*24*60*60 TICKET_RENEW = 14*24*60*60 TICKET_INTERVAL = 1*24*60*60 + COST_TICKET = 0x100 # LXMF overhead is 111 bytes per message: # 16 bytes for destination hash @@ -145,7 +146,9 @@ class LXMessage: self.stamp = None self.stamp_cost = stamp_cost + self.stamp_value = None self.stamp_valid = False + self.stamp_checked = False self.defer_stamp = True self.outbound_ticket = None self.include_ticket = include_ticket @@ -162,6 +165,7 @@ class LXMessage: self.delivery_attempts = 0 self.transport_encrypted = False self.transport_encryption = None + self.ratchet_id = None self.packet_representation = None self.resource_representation = None self.__delivery_destination = None @@ -272,17 +276,31 @@ class LXMessage: else: return True + @staticmethod + def stamp_value(material): + bits = 256 + value = 0 + i = int.from_bytes(material) + while ((i & (1 << (bits - 1))) == 0): + i = (i << 1) + value += 1 + + return value + def validate_stamp(self, target_cost, tickets=None): if tickets != None: for ticket in tickets: if self.stamp == RNS.Identity.truncated_hash(ticket+self.message_id): RNS.log(f"Stamp on {self} validated by inbound ticket", RNS.LOG_DEBUG) # TODO: Remove at some point + self.stamp_value = LXMessage.COST_TICKET return True if self.stamp == None: return False else: - if LXMessage.stamp_valid(self.stamp, target_cost, LXMessage.stamp_workblock(self.message_id)): + workblock = 
LXMessage.stamp_workblock(self.message_id) + if LXMessage.stamp_valid(self.stamp, target_cost, workblock): + self.stamp_value = LXMessage.stamp_value(RNS.Identity.full_hash(workblock+self.stamp)) return True else: return False @@ -314,8 +332,8 @@ class LXMessage: total_rounds = 0 if not RNS.vendor.platformutils.is_android(): - mp_debug = True - + allow_kill = True + stamp = None jobs = multiprocessing.cpu_count() stop_event = multiprocessing.Event() result_queue = multiprocessing.Queue(1) @@ -345,7 +363,7 @@ class LXMessage: job_procs = [] RNS.log(f"Starting {jobs} workers", RNS.LOG_DEBUG) # TODO: Remove for jpn in range(jobs): - process = multiprocessing.Process(target=job, kwargs={"stop_event": stop_event, "pn": jpn, "sc": self.stamp_cost, "wb": workblock},) + process = multiprocessing.Process(target=job, kwargs={"stop_event": stop_event, "pn": jpn, "sc": self.stamp_cost, "wb": workblock}, daemon=True) job_procs.append(process) process.start() @@ -379,7 +397,13 @@ class LXMessage: if not all_exited: RNS.log("Stamp generation IPC timeout, possible worker deadlock", RNS.LOG_ERROR) - return None + if allow_kill: + for j in range(jobs): + process = job_procs[j] + process.kill() + return stamp + else: + return None else: for j in range(jobs): From a20f380284a728fa704fd16bc4504200eb386ece Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sun, 8 Sep 2024 14:52:54 +0200 Subject: [PATCH 064/161] Get message ratchet IDs --- LXMF/LXMRouter.py | 21 +++++++++++++++++---- LXMF/LXMessage.py | 26 +++++++++++++++++++++----- 2 files changed, 38 insertions(+), 9 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 49793cb..659c47b 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -1260,9 +1260,14 @@ class LXMRouter: ### Message Routing & Delivery ######################## ####################################################### - def lxmf_delivery(self, lxmf_data, destination_type = None, phy_stats = None): + def lxmf_delivery(self, lxmf_data, destination_type = None, phy_stats = None, ratchet_id = None, method = None): try: message = LXMessage.unpack_from_bytes(lxmf_data) + if ratchet_id and not message.ratchet_id: + message.ratchet_id = ratchet_id + + if method: + message.method = method if message.signature_validated and FIELD_TICKET in message.fields: ticket_entry = message.fields[FIELD_TICKET] @@ -1299,6 +1304,7 @@ class LXMRouter: if "snr" in phy_stats: message.snr = phy_stats["snr"] if "q" in phy_stats: message.q = phy_stats["q"] + # TODO: Update these descriptions to account for ratchets if destination_type == RNS.Destination.SINGLE: message.transport_encrypted = True message.transport_encryption = LXMessage.ENCRYPTION_DESCRIPTION_EC @@ -1339,11 +1345,14 @@ class LXMRouter: def delivery_packet(self, data, packet): packet.prove() try: + method = None if packet.destination_type != RNS.Destination.LINK: + method = LXMessage.OPPORTUNISTIC lxmf_data = b"" lxmf_data += packet.destination.hash lxmf_data += data else: + method = LXMessage.DIRECT lxmf_data = data try: @@ -1356,7 +1365,7 @@ class LXMRouter: phy_stats = {"rssi": packet.rssi, "snr": packet.snr, "q": packet.q} - self.lxmf_delivery(lxmf_data, packet.destination_type, phy_stats=phy_stats) + self.lxmf_delivery(lxmf_data, packet.destination_type, phy_stats=phy_stats, ratchet_id=packet.ratchet_id, method=method) except Exception as e: RNS.log("Exception occurred while parsing incoming LXMF data.", RNS.LOG_ERROR) @@ -1388,8 +1397,12 @@ class LXMRouter: def delivery_resource_concluded(self, resource): RNS.log("Transfer concluded 
for LXMF delivery resource "+str(resource), RNS.LOG_DEBUG) if resource.status == RNS.Resource.COMPLETE: + ratchet_id = None + # Set ratchet ID to link ID if available + if resource.link and hasattr(resource.link, "link_id"): + ratchet_id = resource.link.link_id phy_stats = {"rssi": resource.link.rssi, "snr": resource.link.snr, "q": resource.link.q} - self.lxmf_delivery(resource.data.read(), resource.link.type, phy_stats=phy_stats) + self.lxmf_delivery(resource.data.read(), resource.link.type, phy_stats=phy_stats, ratchet_id=ratchet_id, method=LXMessage.DIRECT) ### Peer Sync & Propagation ########################### @@ -1598,7 +1611,7 @@ class LXMRouter: decrypted_lxmf_data = delivery_destination.decrypt(encrypted_lxmf_data) if decrypted_lxmf_data != None: delivery_data = lxmf_data[:LXMessage.DESTINATION_LENGTH]+decrypted_lxmf_data - self.lxmf_delivery(delivery_data, delivery_destination.type) + self.lxmf_delivery(delivery_data, delivery_destination.type, ratchet_id=delivery_destination.latest_ratchet_id, method=LXMessage.PROPAGATED) self.locally_delivered_transient_ids[transient_id] = time.time() if signal_local_delivery != None: diff --git a/LXMF/LXMessage.py b/LXMF/LXMessage.py index adbac64..a41560f 100644 --- a/LXMF/LXMessage.py +++ b/LXMF/LXMessage.py @@ -159,6 +159,7 @@ class LXMessage: self.incoming = False self.signature_validated = False self.unverified_reason = None + self.ratchet_id = None self.representation = LXMessage.UNKNOWN self.desired_method = desired_method @@ -310,11 +311,13 @@ class LXMessage: # generating a valid stamp. if self.outbound_ticket != None and type(self.outbound_ticket) == bytes and len(self.outbound_ticket) == LXMessage.TICKET_LENGTH: RNS.log(f"Generating stamp with outbound ticket for {self}", RNS.LOG_DEBUG) # TODO: Remove at some point + self.stamp_value = LXMessage.COST_TICKET return RNS.Identity.truncated_hash(self.outbound_ticket+self.message_id) # If no stamp cost is required, we can just # return immediately. 
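# For context (illustrative, not from the patch): the ticket branch above and
# validate_stamp() are two halves of the same shortcut. Both ends derive
#
#     RNS.Identity.truncated_hash(ticket + message_id)
#
# so the receiver that issued the ticket can recognise the stamp with a single hash,
# while anyone without the ticket still has to pay the full proof-of-work cost below.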
elif self.stamp_cost == None: + self.stamp_value = None return None # If a stamp was already generated, return @@ -504,6 +507,9 @@ class LXMessage: RNS.log(f"Stamp generated in {RNS.prettytime(duration)}, {rounds} rounds, {int(speed)} rounds per second", RNS.LOG_DEBUG) + self.stamp_value = LXMessage.stamp_value(RNS.Identity.full_hash(workblock+stamp)) + self.stamp_valid = True + return stamp def pack(self): @@ -575,6 +581,7 @@ class LXMessage: single_packet_content_limit = LXMessage.LINK_PACKET_MAX_CONTENT encrypted_data = self.__destination.encrypt(self.packed[LXMessage.DESTINATION_LENGTH:]) + self.ratchet_id = self.__destination.latest_ratchet_id self.propagation_packed = msgpack.packb([time.time(), [self.packed[:LXMessage.DESTINATION_LENGTH]+encrypted_data]]) content_size = len(self.propagation_packed) @@ -589,6 +596,7 @@ class LXMessage: paper_content_limit = LXMessage.PAPER_MDU encrypted_data = self.__destination.encrypt(self.packed[LXMessage.DESTINATION_LENGTH:]) + self.ratchet_id = self.__destination.latest_ratchet_id self.paper_packed = self.packed[:LXMessage.DESTINATION_LENGTH]+encrypted_data content_size = len(self.paper_packed) @@ -605,14 +613,18 @@ class LXMessage: self.determine_transport_encryption() if self.method == LXMessage.OPPORTUNISTIC: - self.__as_packet().send().set_delivery_callback(self.__mark_delivered) + lxm_packet = self.__as_packet() + lxm_packet.send().set_delivery_callback(self.__mark_delivered) + self.ratchet_id = lxm_packet.ratchet_id self.state = LXMessage.SENT elif self.method == LXMessage.DIRECT: self.state = LXMessage.SENDING if self.representation == LXMessage.PACKET: - receipt = self.__as_packet().send() + lxm_packet = self.__as_packet() + receipt = lxm_packet.send() + self.ratchet_id = self.__delivery_destination.link_id if receipt: receipt.set_delivery_callback(self.__mark_delivered) receipt.set_timeout_callback(self.__link_packet_timed_out) @@ -623,6 +635,7 @@ class LXMessage: elif self.representation == LXMessage.RESOURCE: self.resource_representation = self.__as_resource() + self.ratchet_id = self.__delivery_destination.link_id self.progress = 0.10 elif self.method == LXMessage.PROPAGATED: @@ -692,7 +705,8 @@ class LXMessage: try: self.__delivery_callback(self) except Exception as e: - RNS.log("An error occurred in the external delivery callback for "+str(message), RNS.LOG_ERROR) + RNS.log("An error occurred in the external delivery callback for "+str(self), RNS.LOG_ERROR) + RNS.trace_exception(e) def __mark_propagated(self, receipt = None): RNS.log("Received propagation success notification for "+str(self), RNS.LOG_DEBUG) @@ -703,7 +717,8 @@ class LXMessage: try: self.__delivery_callback(self) except Exception as e: - RNS.log("An error occurred in the external delivery callback for "+str(message), RNS.LOG_ERROR) + RNS.log("An error occurred in the external delivery callback for "+str(self), RNS.LOG_ERROR) + RNS.trace_exception(e) def __mark_paper_generated(self, receipt = None): RNS.log("Paper message generation succeeded for "+str(self), RNS.LOG_DEBUG) @@ -714,7 +729,8 @@ class LXMessage: try: self.__delivery_callback(self) except Exception as e: - RNS.log("An error occurred in the external delivery callback for "+str(message), RNS.LOG_ERROR) + RNS.log("An error occurred in the external delivery callback for "+str(self), RNS.LOG_ERROR) + RNS.trace_exception(e) def __resource_concluded(self, resource): if resource.status == RNS.Resource.COMPLETE: From ae0952455b881d7b9a58498f863665fd3893be8a Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sun, 8 
Sep 2024 17:54:11 +0200 Subject: [PATCH 065/161] Ignore stamp requirements for paper messages --- LXMF/LXMRouter.py | 34 ++++++++++++++++++++-------------- LXMF/LXMessage.py | 1 + 2 files changed, 21 insertions(+), 14 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 659c47b..260911c 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -853,7 +853,7 @@ class LXMRouter: locally_processed_file.close() except Exception as e: - RNS.log("Could not save locally processed message ID cache to storage. The contained exception was: "+str(e), RNS.LOG_ERROR) + RNS.log("Could not save locally processed transient ID cache to storage. The contained exception was: "+str(e), RNS.LOG_ERROR) def clean_outbound_stamp_costs(self): try: @@ -864,7 +864,6 @@ class LXMRouter: expired.append(destination_hash) for destination_hash in expired: - RNS.log(f"Cleaning expired stamp cost for {destination_hash}") # TODO: Remove self.outbound_stamp_costs.pop(destination_hash) except Exception as e: @@ -874,7 +873,6 @@ class LXMRouter: def save_outbound_stamp_costs(self): with self.cost_file_lock: try: - RNS.log("Saving outbound stamp costs...", RNS.LOG_DEBUG) # TODO: Remove if not os.path.isdir(self.storagepath): os.makedirs(self.storagepath) @@ -883,7 +881,7 @@ class LXMRouter: outbound_stamp_costs_file.close() except Exception as e: - RNS.log("Could not save locally processed message ID cache to storage. The contained exception was: "+str(e), RNS.LOG_ERROR) + RNS.log("Could not save outbound stamp costs to storage. The contained exception was: "+str(e), RNS.LOG_ERROR) def clean_available_tickets(self): try: @@ -912,7 +910,7 @@ class LXMRouter: self.available_tickets["inbound"][destination_hash].pop(destination_hash) except Exception as e: - RNS.log(f"Error while cleaning outbound stamp costs. The contained exception was: {e}", RNS.LOG_ERROR) + RNS.log(f"Error while cleaning available tickets. 
The contained exception was: {e}", RNS.LOG_ERROR) RNS.trace_exception(e) def save_available_tickets(self): @@ -1260,7 +1258,7 @@ class LXMRouter: ### Message Routing & Delivery ######################## ####################################################### - def lxmf_delivery(self, lxmf_data, destination_type = None, phy_stats = None, ratchet_id = None, method = None): + def lxmf_delivery(self, lxmf_data, destination_type = None, phy_stats = None, ratchet_id = None, method = None, no_stamp_enforcement=False): try: message = LXMessage.unpack_from_bytes(lxmf_data) if ratchet_id and not message.ratchet_id: @@ -1293,11 +1291,16 @@ class LXMRouter: message.stamp_checked = True if not message.stamp_valid: - if self._enforce_stamps: - RNS.log(f"Dropping {message} with invalid stamp", RNS.LOG_NOTICE) - return False + if no_stamp_enforcement: + RNS.log(f"Received {message} with invalid stamp, but allowing anyway, since stamp enforcement was temporarily disabled", RNS.LOG_NOTICE) else: - RNS.log(f"Received {message} with invalid stamp, but allowing anyway, since stamp enforcement is disabled", RNS.LOG_NOTICE) + if self._enforce_stamps: + RNS.log(f"Dropping {message} with invalid stamp", RNS.LOG_NOTICE) + return False + else: + RNS.log(f"Received {message} with invalid stamp, but allowing anyway, since stamp enforcement is disabled", RNS.LOG_NOTICE) + else: + RNS.log(f"Received {message} valid stamp", RNS.LOG_DEBUG) if phy_stats != None: if "rssi" in phy_stats: message.rssi = phy_stats["rssi"] @@ -1593,7 +1596,11 @@ class LXMRouter: except Exception as e: RNS.log("Error while unpacking received propagation resource", RNS.LOG_DEBUG) - def lxmf_propagation(self, lxmf_data, signal_local_delivery=None, signal_duplicate=None): + def lxmf_propagation(self, lxmf_data, signal_local_delivery=None, signal_duplicate=None, is_paper_message=False): + no_stamp_enforcement = False + if is_paper_message: + no_stamp_enforcement = True + try: if len(lxmf_data) >= LXMessage.LXMF_OVERHEAD: transient_id = RNS.Identity.full_hash(lxmf_data) @@ -1611,7 +1618,7 @@ class LXMRouter: decrypted_lxmf_data = delivery_destination.decrypt(encrypted_lxmf_data) if decrypted_lxmf_data != None: delivery_data = lxmf_data[:LXMessage.DESTINATION_LENGTH]+decrypted_lxmf_data - self.lxmf_delivery(delivery_data, delivery_destination.type, ratchet_id=delivery_destination.latest_ratchet_id, method=LXMessage.PROPAGATED) + self.lxmf_delivery(delivery_data, delivery_destination.type, ratchet_id=delivery_destination.latest_ratchet_id, method=LXMessage.PROPAGATED, no_stamp_enforcement=no_stamp_enforcement) self.locally_delivered_transient_ids[transient_id] = time.time() if signal_local_delivery != None: @@ -1661,7 +1668,7 @@ class LXMRouter: lxmf_data = base64.urlsafe_b64decode(uri.replace(LXMessage.URI_SCHEMA+"://", "").replace("/", "")+"==") transient_id = RNS.Identity.full_hash(lxmf_data) - router_propagation_result = self.lxmf_propagation(lxmf_data, signal_local_delivery=signal_local_delivery, signal_duplicate=signal_duplicate) + router_propagation_result = self.lxmf_propagation(lxmf_data, signal_local_delivery=signal_local_delivery, signal_duplicate=signal_duplicate, is_paper_message=True) if router_propagation_result != False: RNS.log("LXM with transient ID "+RNS.prettyhexrep(transient_id)+" was ingested.", RNS.LOG_DEBUG) return router_propagation_result @@ -1687,7 +1694,6 @@ class LXMRouter: def process_deferred_stamps(self): if len(self.pending_deferred_stamps) > 0: - RNS.log(f"Processing deferred stamps...", RNS.LOG_DEBUG) # TODO: Remove if 
self.stamp_gen_lock.locked(): RNS.log(f"A stamp is already generating, returning...", RNS.LOG_DEBUG) # TODO: Remove diff --git a/LXMF/LXMessage.py b/LXMF/LXMessage.py index a41560f..3564260 100644 --- a/LXMF/LXMessage.py +++ b/LXMF/LXMessage.py @@ -301,6 +301,7 @@ class LXMessage: else: workblock = LXMessage.stamp_workblock(self.message_id) if LXMessage.stamp_valid(self.stamp, target_cost, workblock): + RNS.log(f"Stamp on {self} validated", RNS.LOG_DEBUG) # TODO: Remove at some point self.stamp_value = LXMessage.stamp_value(RNS.Identity.full_hash(workblock+self.stamp)) return True else: From 813467243603517c2938763eaad937a05a07d8bc Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Mon, 9 Sep 2024 02:12:27 +0200 Subject: [PATCH 066/161] Cleanup --- LXMF/LXMRouter.py | 1 - LXMF/LXMessage.py | 205 ++++++++++++++++++++++----------------- docs/example_receiver.py | 2 + 3 files changed, 118 insertions(+), 90 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 260911c..199ff63 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -1696,7 +1696,6 @@ class LXMRouter: if len(self.pending_deferred_stamps) > 0: if self.stamp_gen_lock.locked(): - RNS.log(f"A stamp is already generating, returning...", RNS.LOG_DEBUG) # TODO: Remove return else: diff --git a/LXMF/LXMessage.py b/LXMF/LXMessage.py index 3564260..b70c66f 100644 --- a/LXMF/LXMessage.py +++ b/LXMF/LXMessage.py @@ -335,7 +335,121 @@ class LXMessage: start_time = time.time() total_rounds = 0 - if not RNS.vendor.platformutils.is_android(): + if RNS.vendor.platformutils.is_windows() or RNS.vendor.platformutils.is_darwin(): + platform = RNS.vendor.platformutils.get_platform() + RNS.log(f"Running stamp generation on {platform}, work limited to single CPU core. This will be slow.", RNS.LOG_WARNING) + rounds = 0 + pstamp = os.urandom(256//8) + + def sv(s, c, w): + target = 0b1<<256-c; m = w+s + result = RNS.Identity.full_hash(m) + if int.from_bytes(result, byteorder="big") > target: + return False + else: + return True + + while not sv(pstamp, self.stamp_cost, workblock): + pstamp = os.urandom(256//8); rounds += 1 + + stamp = pstamp + duration = time.time() - start_time + + elif RNS.vendor.platformutils.is_android(): + # Semaphore support is flaky to non-existent on + # Android, so we need to manually dispatch and + # manage workloads here, while periodically + # checking in on the progress. 
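For reference, the comment above sums up the Android strategy: rather than coordinating workers through shared events or semaphores, each worker runs for a fixed number of rounds, is joined, and its result is inspected before the next batch is dispatched. A minimal sketch of that batched pattern, assuming hashlib.sha256 as a stand-in for RNS.Identity.full_hash (the nacl.hash.sha256 fallback below treats the two as interchangeable):

import os
import hashlib
import multiprocessing

def try_rounds(args):
    # One batch of proof-of-work attempts; returns a valid stamp or None.
    workblock, cost, rounds = args
    target = 1 << (256 - cost)
    for _ in range(rounds):
        candidate = os.urandom(32)
        digest = hashlib.sha256(workblock + candidate).digest()
        if int.from_bytes(digest, byteorder="big") <= target:
            return candidate
    return None

def find_stamp_batched(workblock, cost, rounds_per_batch=1000):
    # Dispatch one batch per CPU core, join, inspect, repeat. Progress
    # reporting can happen between batches, as the code above does.
    jobs = multiprocessing.cpu_count()
    with multiprocessing.Pool(jobs) as pool:
        while True:
            batch = [(workblock, cost, rounds_per_batch)] * jobs
            for stamp in pool.map(try_rounds, batch):
                if stamp is not None:
                    return stamp

The patch itself keeps bare multiprocessing.Process workers and a Manager dict rather than a Pool, presumably because the higher-level primitives a Pool depends on are exactly what the comment above flags as unreliable on Android.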
+ + use_nacl = False + rounds_per_worker = 1000 + try: + import nacl.encoding + import nacl.hash + use_nacl = True + except: + pass + + if use_nacl: + def full_hash(m): + return nacl.hash.sha256(m, encoder=nacl.encoding.RawEncoder) + else: + def full_hash(m): + return RNS.Identity.full_hash(m) + + def sv(s, c, w): + target = 0b1<<256-c + m = w+s + result = full_hash(m) + if int.from_bytes(result, byteorder="big") > target: + return False + else: + return True + + stamp = None + wm = multiprocessing.Manager() + jobs = multiprocessing.cpu_count() + + RNS.log(f"Dispatching {jobs} workers for stamp generation...", RNS.LOG_DEBUG) # TODO: Remove + + results_dict = wm.dict() + while stamp == None: + job_procs = [] + + def job(procnum=None, results_dict=None, wb=None, sc=None, jr=None): + # RNS.log(f"Worker {procnum} starting for {jr} rounds...") # TODO: Remove + try: + rounds = 0 + found_stamp = None + found_time = None + + while True: + pstamp = os.urandom(256//8) + rounds += 1 + if sv(pstamp, sc, wb): + found_stamp = pstamp + found_time = time.time() + break + + if rounds >= jr: + # RNS.log(f"Worker {procnum} found no result in {rounds} rounds") # TODO: Remove + break + + results_dict[procnum] = [found_stamp, rounds, found_time] + except Exception as e: + RNS.log("WORKER ERROR") + RNS.trace_exception(e) + + try: + for pnum in range(jobs): + pargs = {"procnum":pnum, "results_dict": results_dict, "wb": workblock, "sc":self.stamp_cost, "jr":rounds_per_worker} + process = multiprocessing.Process(target=job, kwargs=pargs) + job_procs.append(process) + process.start() + + for process in job_procs: + process.join() + + for j in results_dict: + r = results_dict[j] + total_rounds += r[1] + if r[0] != None: + stamp = r[0] + found_time = r[2] + + if stamp == None: + elapsed = time.time() - start_time + speed = total_rounds/elapsed + RNS.log(f"Stamp generation for {self} running. {total_rounds} rounds completed so far, {int(speed)} rounds per second", RNS.LOG_DEBUG) + + except Exception as e: + RNS.log("ERROR") + RNS.trace_exception(e) + + duration = time.time() - start_time + rounds = total_rounds + + else: allow_kill = True stamp = None jobs = multiprocessing.cpu_count() @@ -417,94 +531,7 @@ class LXMessage: rounds = total_rounds - else: - # Semaphore support is flaky to non-existent on - # Android, so we need to manually dispatch and - # manage workloads here, while periodically - # checking in on the progress. 
- - use_nacl = False - rounds_per_worker = 1000 - if RNS.vendor.platformutils.is_android(): - rounds_per_worker = 500 - try: - import nacl.encoding - import nacl.hash - use_nacl = True - except: - pass - - if use_nacl: - def full_hash(m): - return nacl.hash.sha256(m, encoder=nacl.encoding.RawEncoder) - else: - def full_hash(m): - return RNS.Identity.full_hash(m) - - def sv(s, c, w): - target = 0b1<<256-c - m = w+s - result = full_hash(m) - if int.from_bytes(result, byteorder="big") > target: - return False - else: - return True - - stamp = None - wm = multiprocessing.Manager() - jobs = multiprocessing.cpu_count() - - RNS.log(f"Dispatching {jobs} workers for stamp generation...", RNS.LOG_DEBUG) # TODO: Remove - - results_dict = wm.dict() - while stamp == None: - job_procs = [] - - def job(procnum=None, results_dict=None, wb=None, sc=None, jr=None): - RNS.log(f"Worker {procnum} starting for {jr} rounds...") # TODO: Remove - rounds = 0 - found_stamp = None - found_time = None - - while True: - pstamp = os.urandom(256//8) - rounds += 1 - if sv(pstamp, sc, wb): - found_stamp = pstamp - found_time = time.time() - break - - if rounds >= jr: - # RNS.log(f"Worker {procnum} found no result in {rounds} rounds") # TODO: Remove - break - - results_dict[procnum] = [found_stamp, rounds, found_time] - - for pnum in range(jobs): - pargs = {"procnum":pnum, "results_dict": results_dict, "wb": workblock, "sc":self.stamp_cost, "jr":rounds_per_worker} - process = multiprocessing.Process(target=job, kwargs=pargs) - job_procs.append(process) - process.start() - - for process in job_procs: - process.join() - - for j in results_dict: - r = results_dict[j] - total_rounds += r[1] - if r[0] != None: - stamp = r[0] - found_time = r[2] - - if stamp == None: - elapsed = found_time - start_time - speed = total_rounds/elapsed - RNS.log(f"Stamp generation for {self} running. 
{total_rounds} rounds completed so far, {int(speed)} rounds per second", RNS.LOG_DEBUG) - - duration = time.time() - start_time - rounds = total_rounds - - speed = total_rounds/duration + speed = rounds/duration RNS.log(f"Stamp generated in {RNS.prettytime(duration)}, {rounds} rounds, {int(speed)} rounds per second", RNS.LOG_DEBUG) diff --git a/docs/example_receiver.py b/docs/example_receiver.py index 7d3aa20..999f6a3 100644 --- a/docs/example_receiver.py +++ b/docs/example_receiver.py @@ -32,6 +32,8 @@ def delivery_callback(message): RNS.log("\t| Title : "+message.title_as_string()) RNS.log("\t| Content : "+message.content_as_string()) RNS.log("\t| Fields : "+str(message.fields)) + if message.ratchet_id: + RNS.log("\t| Ratchet : "+str(RNS.Identity._get_ratchet_id(message.ratchet_id))) RNS.log("\t| Message signature : "+signature_string) RNS.log("\t| Stamp : "+stamp_string) RNS.log("\t+---------------------------------------------------------------") From 058186dfdc87b24dd84f2f96bd837b374880305d Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Mon, 9 Sep 2024 13:15:37 +0200 Subject: [PATCH 067/161] Cleanup --- LXMF/LXMessage.py | 250 ++----------------------------------------- LXMF/LXStamper.py | 263 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 274 insertions(+), 239 deletions(-) create mode 100644 LXMF/LXStamper.py diff --git a/LXMF/LXMessage.py b/LXMF/LXMessage.py index b70c66f..5a8a26f 100644 --- a/LXMF/LXMessage.py +++ b/LXMF/LXMessage.py @@ -4,11 +4,12 @@ import RNS.vendor.umsgpack as msgpack import os import time import base64 -import signal import multiprocessing +import LXMF.LXStamper as LXStamper from .LXMF import APP_NAME + class LXMessage: GENERATING = 0x00 OUTBOUND = 0x01 @@ -251,23 +252,6 @@ class LXMessage: def register_failed_callback(self, callback): self.failed_callback = callback - @staticmethod - def stamp_workblock(message_id): - wb_st = time.time() - expand_rounds = 3000 - workblock = b"" - for n in range(expand_rounds): - workblock += RNS.Cryptography.hkdf( - length=256, - derive_from=message_id, - salt=RNS.Identity.full_hash(message_id+msgpack.packb(n)), - context=None, - ) - wb_time = time.time() - wb_st - RNS.log(f"Stamp workblock size {RNS.prettysize(len(workblock))}, generated in {round(wb_time*1000,2)}ms", RNS.LOG_DEBUG) - - return workblock - @staticmethod def stamp_valid(stamp, target_cost, workblock): target = 0b1 << 256-target_cost @@ -277,17 +261,6 @@ class LXMessage: else: return True - @staticmethod - def stamp_value(material): - bits = 256 - value = 0 - i = int.from_bytes(material) - while ((i & (1 << (bits - 1))) == 0): - i = (i << 1) - value += 1 - - return value - def validate_stamp(self, target_cost, tickets=None): if tickets != None: for ticket in tickets: @@ -299,10 +272,10 @@ class LXMessage: if self.stamp == None: return False else: - workblock = LXMessage.stamp_workblock(self.message_id) + workblock = LXStamper.stamp_workblock(self.message_id) if LXMessage.stamp_valid(self.stamp, target_cost, workblock): RNS.log(f"Stamp on {self} validated", RNS.LOG_DEBUG) # TODO: Remove at some point - self.stamp_value = LXMessage.stamp_value(RNS.Identity.full_hash(workblock+self.stamp)) + self.stamp_value = LXStamper.stamp_value(workblock, self.stamp) return True else: return False @@ -330,215 +303,14 @@ class LXMessage: # valid stamp according to the cost that # the receiver has specified. 
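For reference, "a valid stamp according to the cost that the receiver has specified" means, per the validity checks in these patches, that the full hash of workblock + stamp, read as a 256-bit big-endian integer, may not exceed 2**(256 - cost); each additional unit of cost therefore roughly doubles the expected number of random attempts. A minimal checker, assuming hashlib.sha256 as a stand-in for RNS.Identity.full_hash:

import hashlib

def stamp_is_valid(stamp: bytes, cost: int, workblock: bytes) -> bool:
    # Valid when the digest value does not exceed the cost-derived target.
    target = 1 << (256 - cost)
    digest = hashlib.sha256(workblock + stamp).digest()
    return int.from_bytes(digest, byteorder="big") <= target

Verification is a single hash regardless of cost; only generation needs the parallel search handled by the platform-specific jobs.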
else: - RNS.log(f"Generating stamp with cost {self.stamp_cost} for {self}...", RNS.LOG_DEBUG) - workblock = LXMessage.stamp_workblock(self.message_id) - start_time = time.time() - total_rounds = 0 - - if RNS.vendor.platformutils.is_windows() or RNS.vendor.platformutils.is_darwin(): - platform = RNS.vendor.platformutils.get_platform() - RNS.log(f"Running stamp generation on {platform}, work limited to single CPU core. This will be slow.", RNS.LOG_WARNING) - rounds = 0 - pstamp = os.urandom(256//8) - - def sv(s, c, w): - target = 0b1<<256-c; m = w+s - result = RNS.Identity.full_hash(m) - if int.from_bytes(result, byteorder="big") > target: - return False - else: - return True - - while not sv(pstamp, self.stamp_cost, workblock): - pstamp = os.urandom(256//8); rounds += 1 - - stamp = pstamp - duration = time.time() - start_time - - elif RNS.vendor.platformutils.is_android(): - # Semaphore support is flaky to non-existent on - # Android, so we need to manually dispatch and - # manage workloads here, while periodically - # checking in on the progress. - - use_nacl = False - rounds_per_worker = 1000 - try: - import nacl.encoding - import nacl.hash - use_nacl = True - except: - pass - - if use_nacl: - def full_hash(m): - return nacl.hash.sha256(m, encoder=nacl.encoding.RawEncoder) - else: - def full_hash(m): - return RNS.Identity.full_hash(m) - - def sv(s, c, w): - target = 0b1<<256-c - m = w+s - result = full_hash(m) - if int.from_bytes(result, byteorder="big") > target: - return False - else: - return True - - stamp = None - wm = multiprocessing.Manager() - jobs = multiprocessing.cpu_count() - - RNS.log(f"Dispatching {jobs} workers for stamp generation...", RNS.LOG_DEBUG) # TODO: Remove - - results_dict = wm.dict() - while stamp == None: - job_procs = [] - - def job(procnum=None, results_dict=None, wb=None, sc=None, jr=None): - # RNS.log(f"Worker {procnum} starting for {jr} rounds...") # TODO: Remove - try: - rounds = 0 - found_stamp = None - found_time = None - - while True: - pstamp = os.urandom(256//8) - rounds += 1 - if sv(pstamp, sc, wb): - found_stamp = pstamp - found_time = time.time() - break - - if rounds >= jr: - # RNS.log(f"Worker {procnum} found no result in {rounds} rounds") # TODO: Remove - break - - results_dict[procnum] = [found_stamp, rounds, found_time] - except Exception as e: - RNS.log("WORKER ERROR") - RNS.trace_exception(e) - - try: - for pnum in range(jobs): - pargs = {"procnum":pnum, "results_dict": results_dict, "wb": workblock, "sc":self.stamp_cost, "jr":rounds_per_worker} - process = multiprocessing.Process(target=job, kwargs=pargs) - job_procs.append(process) - process.start() - - for process in job_procs: - process.join() - - for j in results_dict: - r = results_dict[j] - total_rounds += r[1] - if r[0] != None: - stamp = r[0] - found_time = r[2] - - if stamp == None: - elapsed = time.time() - start_time - speed = total_rounds/elapsed - RNS.log(f"Stamp generation for {self} running. 
{total_rounds} rounds completed so far, {int(speed)} rounds per second", RNS.LOG_DEBUG) - - except Exception as e: - RNS.log("ERROR") - RNS.trace_exception(e) - - duration = time.time() - start_time - rounds = total_rounds - - else: - allow_kill = True - stamp = None - jobs = multiprocessing.cpu_count() - stop_event = multiprocessing.Event() - result_queue = multiprocessing.Queue(1) - rounds_queue = multiprocessing.Queue() - - def job(stop_event, pn, sc, wb): - terminated = False - rounds = 0 - pstamp = os.urandom(256//8) - - def sv(s, c, w): - target = 0b1<<256-c; m = w+s - result = RNS.Identity.full_hash(m) - if int.from_bytes(result, byteorder="big") > target: - return False - else: - return True - - while not stop_event.is_set() and not sv(pstamp, sc, wb): - pstamp = os.urandom(256//8); rounds += 1 - - if not stop_event.is_set(): - stop_event.set() - result_queue.put(pstamp) - rounds_queue.put(rounds) - - job_procs = [] - RNS.log(f"Starting {jobs} workers", RNS.LOG_DEBUG) # TODO: Remove - for jpn in range(jobs): - process = multiprocessing.Process(target=job, kwargs={"stop_event": stop_event, "pn": jpn, "sc": self.stamp_cost, "wb": workblock}, daemon=True) - job_procs.append(process) - process.start() - - stamp = result_queue.get() - RNS.log("Got stamp result from worker", RNS.LOG_DEBUG) # TODO: Remove - duration = time.time() - start_time - - spurious_results = 0 - try: - while True: - result_queue.get_nowait() - spurious_results += 1 - except: - pass - - for j in range(jobs): - nrounds = 0 - try: - nrounds = rounds_queue.get(timeout=2) - except Exception as e: - RNS.log(f"Failed to get round stats part {j}: {e}", RNS.LOG_ERROR) # TODO: Remove - total_rounds += nrounds - - all_exited = False - exit_timeout = time.time() + 5 - while time.time() < exit_timeout: - if not any(p.is_alive() for p in job_procs): - all_exited = True - break - time.sleep(0.1) - - if not all_exited: - RNS.log("Stamp generation IPC timeout, possible worker deadlock", RNS.LOG_ERROR) - if allow_kill: - for j in range(jobs): - process = job_procs[j] - process.kill() - return stamp - else: - return None - - else: - for j in range(jobs): - process = job_procs[j] - process.join() - # RNS.log(f"Joined {j} / {process}", RNS.LOG_DEBUG) # TODO: Remove - - rounds = total_rounds + generated_stamp, value = LXStamper.generate_stamp(self.message_id, self.stamp_cost) + if generated_stamp: + self.stamp_value = value + self.stamp_valid = True + return generated_stamp - speed = rounds/duration - - RNS.log(f"Stamp generated in {RNS.prettytime(duration)}, {rounds} rounds, {int(speed)} rounds per second", RNS.LOG_DEBUG) - - self.stamp_value = LXMessage.stamp_value(RNS.Identity.full_hash(workblock+stamp)) - self.stamp_valid = True - - return stamp + else: + return None def pack(self): if not self.packed: diff --git a/LXMF/LXStamper.py b/LXMF/LXStamper.py new file mode 100644 index 0000000..9bd58de --- /dev/null +++ b/LXMF/LXStamper.py @@ -0,0 +1,263 @@ +import RNS +import RNS.vendor.umsgpack as msgpack + +import os +import time +import multiprocessing + +WORKBLOCK_EXPAND_ROUNDS = 3000 + +def stamp_workblock(message_id): + wb_st = time.time() + expand_rounds = WORKBLOCK_EXPAND_ROUNDS + workblock = b"" + for n in range(expand_rounds): + workblock += RNS.Cryptography.hkdf( + length=256, + derive_from=message_id, + salt=RNS.Identity.full_hash(message_id+msgpack.packb(n)), + context=None, + ) + wb_time = time.time() - wb_st + RNS.log(f"Stamp workblock size {RNS.prettysize(len(workblock))}, generated in {round(wb_time*1000,2)}ms", 
RNS.LOG_DEBUG) + + return workblock + +def stamp_value(workblock, stamp): + value = 0 + bits = 256 + material = RNS.Identity.full_hash(workblock+stamp) + i = int.from_bytes(material) + while ((i & (1 << (bits - 1))) == 0): + i = (i << 1) + value += 1 + + return value + +def generate_stamp(message_id, stamp_cost): + RNS.log(f"Generating stamp with cost {stamp_cost} for {RNS.prettyhexrep(message_id)}...", RNS.LOG_DEBUG) + workblock = stamp_workblock(message_id) + + start_time = time.time() + stamp = None + rounds = 0 + value = 0 + + if RNS.vendor.platformutils.is_windows() or RNS.vendor.platformutils.is_darwin(): + stamp, rounds = job_simple(stamp_cost, workblock) + + elif RNS.vendor.platformutils.is_android(): + stamp, rounds = job_android(stamp_cost, workblock) + + else: + stamp, rounds = job_linux(stamp_cost, workblock) + + duration = time.time() - start_time + speed = rounds/duration + value = stamp_value(workblock, stamp) + + RNS.log(f"Stamp with value {value} generated in {RNS.prettytime(duration)}, {rounds} rounds, {int(speed)} rounds per second", RNS.LOG_DEBUG) + + return stamp, value + +def job_simple(stamp_cost, workblock): + # A simple, single-process stamp generator. + # should work on any platform, and is used + # as a fall-back, in case of limited multi- + # processing and/or acceleration support. + + platform = RNS.vendor.platformutils.get_platform() + RNS.log(f"Running stamp generation on {platform}, work limited to single CPU core. This will be slower than ideal.", RNS.LOG_WARNING) + + rounds = 0 + pstamp = os.urandom(256//8) + st = time.time() + + def sv(s, c, w): + target = 0b1<<256-c; m = w+s + result = RNS.Identity.full_hash(m) + if int.from_bytes(result, byteorder="big") > target: + return False + else: + return True + + while not sv(pstamp, stamp_cost, workblock): + pstamp = os.urandom(256//8); rounds += 1 + if rounds % 2500 == 0: + speed = rounds / (time.time()-st) + RNS.log(f"Stamp generation running. {rounds} rounds completed so far, {int(speed)} rounds per second", RNS.LOG_DEBUG) + + return pstamp, rounds + +def job_linux(stamp_cost, workblock): + allow_kill = True + stamp = None + total_rounds = 0 + jobs = multiprocessing.cpu_count() + stop_event = multiprocessing.Event() + result_queue = multiprocessing.Queue(1) + rounds_queue = multiprocessing.Queue() + + def job(stop_event, pn, sc, wb): + terminated = False + rounds = 0 + pstamp = os.urandom(256//8) + + def sv(s, c, w): + target = 0b1<<256-c; m = w+s + result = RNS.Identity.full_hash(m) + if int.from_bytes(result, byteorder="big") > target: + return False + else: + return True + + while not stop_event.is_set() and not sv(pstamp, sc, wb): + pstamp = os.urandom(256//8); rounds += 1 + + if not stop_event.is_set(): + stop_event.set() + result_queue.put(pstamp) + rounds_queue.put(rounds) + + job_procs = [] + RNS.log(f"Starting {jobs} stamp generation workers", RNS.LOG_DEBUG) + for jpn in range(jobs): + process = multiprocessing.Process(target=job, kwargs={"stop_event": stop_event, "pn": jpn, "sc": stamp_cost, "wb": workblock}, daemon=True) + job_procs.append(process) + process.start() + + stamp = result_queue.get() + RNS.log("Got stamp result from worker", RNS.LOG_DEBUG) # TODO: Remove + + # Collect any potential spurious + # results from worker queue. 
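For reference, job_linux above is a "first finder wins" race: every worker tries random candidates, the first success sets a shared Event so the others stop, and the stamp is handed back over a queue. A stripped-down sketch of that coordination, assuming hashlib.sha256 in place of RNS.Identity.full_hash and an unbounded result queue so a simultaneous second find can never block:

import os
import hashlib
import multiprocessing

def _worker(stop_event, result_queue, workblock, cost):
    # Race on random candidates until one validates or another worker wins.
    target = 1 << (256 - cost)
    while not stop_event.is_set():
        candidate = os.urandom(32)
        digest = hashlib.sha256(workblock + candidate).digest()
        if int.from_bytes(digest, byteorder="big") <= target:
            stop_event.set()            # signal the other workers to stop
            result_queue.put(candidate)
            return

def parallel_stamp(workblock, cost):
    stop_event = multiprocessing.Event()
    result_queue = multiprocessing.Queue()
    workers = []
    for _ in range(multiprocessing.cpu_count()):
        p = multiprocessing.Process(target=_worker, daemon=True,
                                    args=(stop_event, result_queue, workblock, cost))
        workers.append(p)
        p.start()
    stamp = result_queue.get()          # blocks until some worker succeeds
    for p in workers:
        p.join()
    return stamp

Like the patch, this assumes a fork-capable platform; under the spawn start method the process setup would need to sit behind an if __name__ == "__main__": guard.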
+ try: + while True: + result_queue.get_nowait() + except: + pass + + for j in range(jobs): + nrounds = 0 + try: + nrounds = rounds_queue.get(timeout=2) + except Exception as e: + RNS.log(f"Failed to get round stats part {j}: {e}", RNS.LOG_ERROR) + total_rounds += nrounds + + all_exited = False + exit_timeout = time.time() + 5 + while time.time() < exit_timeout: + if not any(p.is_alive() for p in job_procs): + all_exited = True + break + time.sleep(0.1) + + if not all_exited: + RNS.log("Stamp generation IPC timeout, possible worker deadlock. Terminating remaining processes.", RNS.LOG_ERROR) + if allow_kill: + for j in range(jobs): + process = job_procs[j] + process.kill() + else: + return None + + else: + for j in range(jobs): + process = job_procs[j] + process.join() + # RNS.log(f"Joined {j} / {process}", RNS.LOG_DEBUG) # TODO: Remove + + return stamp, total_rounds + +def job_android(stamp_cost, workblock): + # Semaphore support is flaky to non-existent on + # Android, so we need to manually dispatch and + # manage workloads here, while periodically + # checking in on the progress. + + stamp = None + start_time = time.time() + total_rounds = 0 + rounds_per_worker = 1000 + + use_nacl = False + try: + import nacl.encoding + import nacl.hash + use_nacl = True + except: + pass + + if use_nacl: + def full_hash(m): + return nacl.hash.sha256(m, encoder=nacl.encoding.RawEncoder) + else: + def full_hash(m): + return RNS.Identity.full_hash(m) + + def sv(s, c, w): + target = 0b1<<256-c + m = w+s + result = full_hash(m) + if int.from_bytes(result, byteorder="big") > target: + return False + else: + return True + + wm = multiprocessing.Manager() + jobs = multiprocessing.cpu_count() + + def job(procnum=None, results_dict=None, wb=None, sc=None, jr=None): + # RNS.log(f"Worker {procnum} starting for {jr} rounds...") # TODO: Remove + try: + rounds = 0 + found_stamp = None + + while True: + pstamp = os.urandom(256//8) + rounds += 1 + if sv(pstamp, sc, wb): + found_stamp = pstamp + break + + if rounds >= jr: + # RNS.log(f"Worker {procnum} found no result in {rounds} rounds") # TODO: Remove + break + + results_dict[procnum] = [found_stamp, rounds] + except Exception as e: + RNS.log(f"Stamp generation worker error: {e}", RNS.LOG_ERROR) + RNS.trace_exception(e) + + RNS.log(f"Dispatching {jobs} workers for stamp generation...", RNS.LOG_DEBUG) # TODO: Remove + + results_dict = wm.dict() + while stamp == None: + job_procs = [] + try: + for pnum in range(jobs): + pargs = {"procnum":pnum, "results_dict": results_dict, "wb": workblock, "sc":stamp_cost, "jr":rounds_per_worker} + process = multiprocessing.Process(target=job, kwargs=pargs) + job_procs.append(process) + process.start() + + for process in job_procs: + process.join() + + for j in results_dict: + r = results_dict[j] + total_rounds += r[1] + if r[0] != None: + stamp = r[0] + + if stamp == None: + elapsed = time.time() - start_time + speed = total_rounds/elapsed + RNS.log(f"Stamp generation running. 
{total_rounds} rounds completed so far, {int(speed)} rounds per second", RNS.LOG_DEBUG) + + except Exception as e: + RNS.log(f"Stamp generation job error: {e}") + RNS.trace_exception(e) + + return stamp, total_rounds \ No newline at end of file From ea49d22bceca5bae19207cd612f816b6a5f2ec04 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Mon, 9 Sep 2024 13:20:54 +0200 Subject: [PATCH 068/161] Add test entrypoint to LXStamper --- LXMF/LXStamper.py | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/LXMF/LXStamper.py b/LXMF/LXStamper.py index 9bd58de..28cf9cd 100644 --- a/LXMF/LXStamper.py +++ b/LXMF/LXStamper.py @@ -260,4 +260,21 @@ def job_android(stamp_cost, workblock): RNS.log(f"Stamp generation job error: {e}") RNS.trace_exception(e) - return stamp, total_rounds \ No newline at end of file + return stamp, total_rounds + +if __name__ == "__main__": + import sys + if len(sys.argv) < 2: + RNS.log("No cost argument provided", RNS.LOG_ERROR) + exit(1) + else: + try: + cost = int(sys.argv[1]) + except Exception as e: + RNS.log(f"Invalid cost argument provided: {e}", RNS.LOG_ERROR) + exit(1) + + RNS.loglevel = RNS.LOG_DEBUG + RNS.log("Testing LXMF stamp generation", RNS.LOG_DEBUG) + message_id = os.urandom(32) + generate_stamp(message_id, cost) \ No newline at end of file From 98eea1171ee618c572f83295ae1683b916709466 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Mon, 9 Sep 2024 15:24:36 +0200 Subject: [PATCH 069/161] Added event field --- LXMF/LXMF.py | 1 + 1 file changed, 1 insertion(+) diff --git a/LXMF/LXMF.py b/LXMF/LXMF.py index 3618912..146d8ec 100644 --- a/LXMF/LXMF.py +++ b/LXMF/LXMF.py @@ -17,6 +17,7 @@ FIELD_COMMANDS = 0x09 FIELD_RESULTS = 0x0A FIELD_GROUP = 0x0B FIELD_TICKET = 0x0C +FIELD_EVENT = 0x0D # For usecases such as including custom data structures, # embedding or encapsulating other data types or protocols From 453772859d4e806577839ac82a985e54ad15857d Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Tue, 10 Sep 2024 20:17:09 +0200 Subject: [PATCH 070/161] Removed deprecated query destination --- LXMF/LXMRouter.py | 1 - 1 file changed, 1 deletion(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 199ff63..28e3854 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -117,7 +117,6 @@ class LXMRouter: identity = RNS.Identity() self.identity = identity - self.lxmf_query_destination = RNS.Destination(None, RNS.Destination.IN, RNS.Destination.PLAIN, APP_NAME, "query") self.propagation_destination = RNS.Destination(self.identity, RNS.Destination.IN, RNS.Destination.SINGLE, APP_NAME, "propagation") if autopeer != None: From 717240c8d404dcad7e09eab45cf94ea3ab6cb6b7 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Wed, 11 Sep 2024 02:09:43 +0200 Subject: [PATCH 071/161] Cleanup --- LXMF/LXMRouter.py | 2 +- LXMF/LXMessage.py | 17 +++++++++++------ 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 28e3854..d26f155 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -1299,7 +1299,7 @@ class LXMRouter: else: RNS.log(f"Received {message} with invalid stamp, but allowing anyway, since stamp enforcement is disabled", RNS.LOG_NOTICE) else: - RNS.log(f"Received {message} valid stamp", RNS.LOG_DEBUG) + RNS.log(f"Received {message} with valid stamp", RNS.LOG_DEBUG) if phy_stats != None: if "rssi" in phy_stats: message.rssi = phy_stats["rssi"] diff --git a/LXMF/LXMessage.py b/LXMF/LXMessage.py index 5a8a26f..08e1077 100644 --- a/LXMF/LXMessage.py +++ b/LXMF/LXMessage.py @@ -264,10 +264,14 
@@ class LXMessage: def validate_stamp(self, target_cost, tickets=None): if tickets != None: for ticket in tickets: - if self.stamp == RNS.Identity.truncated_hash(ticket+self.message_id): - RNS.log(f"Stamp on {self} validated by inbound ticket", RNS.LOG_DEBUG) # TODO: Remove at some point - self.stamp_value = LXMessage.COST_TICKET - return True + try: + if self.stamp == RNS.Identity.truncated_hash(ticket+self.message_id): + RNS.log(f"Stamp on {self} validated by inbound ticket", RNS.LOG_DEBUG) # TODO: Remove at some point + self.stamp_value = LXMessage.COST_TICKET + return True + except Exception as e: + RNS.log(f"Error while validating ticket: {e}", RNS.LOG_ERROR) + RNS.trace_exception(e) if self.stamp == None: return False @@ -284,9 +288,10 @@ class LXMessage: # If an outbound ticket exists, use this for # generating a valid stamp. if self.outbound_ticket != None and type(self.outbound_ticket) == bytes and len(self.outbound_ticket) == LXMessage.TICKET_LENGTH: - RNS.log(f"Generating stamp with outbound ticket for {self}", RNS.LOG_DEBUG) # TODO: Remove at some point + generated_stamp = RNS.Identity.truncated_hash(self.outbound_ticket+self.message_id) self.stamp_value = LXMessage.COST_TICKET - return RNS.Identity.truncated_hash(self.outbound_ticket+self.message_id) + RNS.log(f"Generated stamp with outbound ticket {RNS.hexrep(self.outbound_ticket)} for {self}", RNS.LOG_DEBUG) # TODO: Remove at some point + return generated_stamp # If no stamp cost is required, we can just # return immediately. From 0fc15e6054605d9afb8b72a6e59272b716fc72e4 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Wed, 11 Sep 2024 11:48:36 +0200 Subject: [PATCH 072/161] Updated version --- LXMF/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LXMF/_version.py b/LXMF/_version.py index 3d18726..dd9b22c 100644 --- a/LXMF/_version.py +++ b/LXMF/_version.py @@ -1 +1 @@ -__version__ = "0.5.0" +__version__ = "0.5.1" From d8e2e2a45f590eb5c4e650dc10ba02444f713619 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 12 Sep 2024 10:11:49 +0200 Subject: [PATCH 073/161] Fix stamp cost extractor function --- LXMF/LXMF.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LXMF/LXMF.py b/LXMF/LXMF.py index 146d8ec..23b10b3 100644 --- a/LXMF/LXMF.py +++ b/LXMF/LXMF.py @@ -112,7 +112,7 @@ def display_name_from_app_data(app_data=None): return app_data.decode("utf-8") def stamp_cost_from_app_data(app_data=None): - if app_data == None: + if app_data == None or app_data == b"": return None else: # Version 0.5.0+ announce format From bbf1eda3b0b42d8b0f4b48b16270d9ac10b4e7f3 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 12 Sep 2024 17:57:26 +0200 Subject: [PATCH 074/161] Added backchannel delivery --- LXMF/LXMRouter.py | 49 +++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 43 insertions(+), 6 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index d26f155..7f529b1 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -66,6 +66,7 @@ class LXMRouter: self.pending_outbound = [] self.failed_outbound = [] self.direct_links = {} + self.backchannel_links = {} self.delivery_destinations = {} self.prioritised_list = [] @@ -1380,6 +1381,7 @@ class LXMRouter: link.set_resource_callback(self.delivery_resource_advertised) link.set_resource_started_callback(self.resource_transfer_began) link.set_resource_concluded_callback(self.delivery_resource_concluded) + link.set_remote_identified_callback(self.delivery_remote_identified) def delivery_link_closed(self, link): pass @@ 
-1406,6 +1408,11 @@ class LXMRouter: phy_stats = {"rssi": resource.link.rssi, "snr": resource.link.snr, "q": resource.link.q} self.lxmf_delivery(resource.data.read(), resource.link.type, phy_stats=phy_stats, ratchet_id=ratchet_id, method=LXMessage.DIRECT) + def delivery_remote_identified(self, link, identity): + destination_hash = RNS.Destination.hash_from_name_and_identity("lxmf.delivery", identity) + self.backchannel_links[destination_hash] = link + RNS.log(f"Backchannel became available for {RNS.prettyhexrep(destination_hash)} on delivery link {link}", RNS.LOG_DEBUG) + ### Peer Sync & Propagation ########################### ####################################################### @@ -1733,11 +1740,26 @@ class LXMRouter: if lxmessage.state == LXMessage.DELIVERED: RNS.log("Delivery has occurred for "+str(lxmessage)+", removing from outbound queue", RNS.LOG_DEBUG) self.pending_outbound.remove(lxmessage) + + # Udate ticket delivery stats if lxmessage.include_ticket and FIELD_TICKET in lxmessage.fields: RNS.log(f"Updating latest ticket delivery for {RNS.prettyhexrep(lxmessage.destination_hash)}", RNS.LOG_DEBUG) self.available_tickets["last_deliveries"][lxmessage.destination_hash] = time.time() self.save_available_tickets() + # Prepare link for backchannel communications + delivery_destination_hash = lxmessage.get_destination().hash + if delivery_destination_hash in self.direct_links: + direct_link = self.direct_links[delivery_destination_hash] + if not hasattr(direct_link, "backchannel_identified") or direct_link.backchannel_identified == False: + if direct_link.initiator == True: + source_destination_hash = lxmessage.get_source().hash + if source_destination_hash in self.delivery_destinations: + backchannel_identity = self.delivery_destinations[source_destination_hash].identity + direct_link.identify(backchannel_identity) + self.delivery_link_established(direct_link) + RNS.log(f"Performed backchannel identification as {backchannel_identity} on {direct_link}", RNS.LOG_DEBUG) + elif lxmessage.method == LXMessage.PROPAGATED and lxmessage.state == LXMessage.SENT: RNS.log("Propagation has occurred for "+str(lxmessage)+", removing from outbound queue", RNS.LOG_DEBUG) self.pending_outbound.remove(lxmessage) @@ -1764,16 +1786,28 @@ class LXMRouter: elif lxmessage.method == LXMessage.DIRECT: if lxmessage.delivery_attempts <= LXMRouter.MAX_DELIVERY_ATTEMPTS: delivery_destination_hash = lxmessage.get_destination().hash - + direct_link = None + if delivery_destination_hash in self.direct_links: - # A link already exists, so we'll try to use it - # to deliver the message + # An established direct link already exists to + # the destination, so we'll try to use it for + # delivering the message direct_link = self.direct_links[delivery_destination_hash] + RNS.log(f"Using available direct link {direct_link} to {RNS.prettyhexrep(delivery_destination_hash)}", RNS.LOG_DEBUG) + + elif delivery_destination_hash in self.backchannel_links: + # An established backchannel link exists to + # the destination, so we'll try to use it for + # delivering the message + direct_link = self.backchannel_links[delivery_destination_hash] + RNS.log(f"Using available backchannel link {direct_link} to {RNS.prettyhexrep(delivery_destination_hash)}", RNS.LOG_DEBUG) + + if direct_link != None: if direct_link.status == RNS.Link.ACTIVE: if lxmessage.progress == None or lxmessage.progress < 0.05: lxmessage.progress = 0.05 if lxmessage.state != LXMessage.SENDING: - RNS.log("Starting transfer of "+str(lxmessage)+" to 
"+RNS.prettyhexrep(lxmessage.get_destination().hash), RNS.LOG_DEBUG) + RNS.log("Starting transfer of "+str(lxmessage)+" to "+RNS.prettyhexrep(lxmessage.get_destination().hash)+" on link "+str(direct_link), RNS.LOG_DEBUG) lxmessage.set_delivery_destination(direct_link) lxmessage.send() else: @@ -1783,7 +1817,7 @@ class LXMRouter: RNS.log("Waiting for proof for "+str(lxmessage)+" sent as link packet", RNS.LOG_DEBUG) elif direct_link.status == RNS.Link.CLOSED: if direct_link.activated_at != None: - RNS.log("The link to "+RNS.prettyhexrep(lxmessage.get_destination().hash)+" was closed", RNS.LOG_DEBUG) + RNS.log("The link to "+RNS.prettyhexrep(lxmessage.get_destination().hash)+" was closed, reason: "+str(direct_link.teardown_reason), RNS.LOG_DEBUG) else: if not hasattr(lxmessage, "path_request_retried"): RNS.log("The link to "+RNS.prettyhexrep(lxmessage.get_destination().hash)+" was never activated, retrying path request...", RNS.LOG_DEBUG) @@ -1795,7 +1829,10 @@ class LXMRouter: lxmessage.next_delivery_attempt = time.time() + LXMRouter.PATH_REQUEST_WAIT lxmessage.set_delivery_destination(None) - self.direct_links.pop(delivery_destination_hash) + if delivery_destination_hash in self.direct_links: + self.direct_links.pop(delivery_destination_hash) + if delivery_destination_hash in self.backchannel_links: + self.backchannel_links.pop(delivery_destination_hash) lxmessage.next_delivery_attempt = time.time() + LXMRouter.DELIVERY_RETRY_WAIT else: # Simply wait for the link to become active or close From 9193aa5e023a8c4fdd466cfe8a1b7ab2d4c173ed Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 12 Sep 2024 18:24:14 +0200 Subject: [PATCH 075/161] Improved logging --- LXMF/LXMRouter.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 7f529b1..533785f 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -1756,9 +1756,10 @@ class LXMRouter: source_destination_hash = lxmessage.get_source().hash if source_destination_hash in self.delivery_destinations: backchannel_identity = self.delivery_destinations[source_destination_hash].identity + backchannel_desthash = RNS.Destination.hash_from_name_and_identity("lxmf.delivery", backchannel_identity) direct_link.identify(backchannel_identity) self.delivery_link_established(direct_link) - RNS.log(f"Performed backchannel identification as {backchannel_identity} on {direct_link}", RNS.LOG_DEBUG) + RNS.log(f"Performed backchannel identification as {RNS.prettyhexrep(backchannel_desthash)} on {direct_link}", RNS.LOG_DEBUG) elif lxmessage.method == LXMessage.PROPAGATED and lxmessage.state == LXMessage.SENT: RNS.log("Propagation has occurred for "+str(lxmessage)+", removing from outbound queue", RNS.LOG_DEBUG) @@ -1817,7 +1818,7 @@ class LXMRouter: RNS.log("Waiting for proof for "+str(lxmessage)+" sent as link packet", RNS.LOG_DEBUG) elif direct_link.status == RNS.Link.CLOSED: if direct_link.activated_at != None: - RNS.log("The link to "+RNS.prettyhexrep(lxmessage.get_destination().hash)+" was closed, reason: "+str(direct_link.teardown_reason), RNS.LOG_DEBUG) + RNS.log("The link to "+RNS.prettyhexrep(lxmessage.get_destination().hash)+" was closed", RNS.LOG_DEBUG) else: if not hasattr(lxmessage, "path_request_retried"): RNS.log("The link to "+RNS.prettyhexrep(lxmessage.get_destination().hash)+" was never activated, retrying path request...", RNS.LOG_DEBUG) From 19696d206d34eddfa72542da6fd974e15403c9ef Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 12 Sep 2024 21:28:50 +0200 Subject: [PATCH 076/161] 
Actually set property --- LXMF/LXMRouter.py | 1 + 1 file changed, 1 insertion(+) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 533785f..b7d327f 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -1758,6 +1758,7 @@ class LXMRouter: backchannel_identity = self.delivery_destinations[source_destination_hash].identity backchannel_desthash = RNS.Destination.hash_from_name_and_identity("lxmf.delivery", backchannel_identity) direct_link.identify(backchannel_identity) + direct_link.backchannel_identified = True self.delivery_link_established(direct_link) RNS.log(f"Performed backchannel identification as {RNS.prettyhexrep(backchannel_desthash)} on {direct_link}", RNS.LOG_DEBUG) From 0e2f0fb09098ffc3450a25b5e236bebb09842302 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Mon, 16 Sep 2024 17:49:22 +0200 Subject: [PATCH 077/161] Updated example --- docs/example_receiver.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/example_receiver.py b/docs/example_receiver.py index 999f6a3..9bf1c61 100644 --- a/docs/example_receiver.py +++ b/docs/example_receiver.py @@ -29,8 +29,8 @@ def delivery_callback(message): RNS.log("\t| Destination instance : "+str(message.get_destination())) RNS.log("\t| Transport Encryption : "+str(message.transport_encryption)) RNS.log("\t| Timestamp : "+time_string) - RNS.log("\t| Title : "+message.title_as_string()) - RNS.log("\t| Content : "+message.content_as_string()) + RNS.log("\t| Title : "+str(message.title_as_string())) + RNS.log("\t| Content : "+str(message.content_as_string())) RNS.log("\t| Fields : "+str(message.fields)) if message.ratchet_id: RNS.log("\t| Ratchet : "+str(RNS.Identity._get_ratchet_id(message.ratchet_id))) From 7789e0bc26ac926ce5f4ea4f769236e525d945a4 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Mon, 16 Sep 2024 17:49:54 +0200 Subject: [PATCH 078/161] Improved opportunistic delivery logic and performance --- LXMF/LXMRouter.py | 26 ++++++++++++++++++-------- LXMF/LXMessage.py | 40 +++++++++++++++++++++++++++------------- 2 files changed, 45 insertions(+), 21 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index b7d327f..0a603ed 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -21,6 +21,7 @@ class LXMRouter: PROCESSING_INTERVAL = 4 DELIVERY_RETRY_WAIT = 10 PATH_REQUEST_WAIT = 7 + MAX_PATHLESS_TRIES = 2 LINK_MAX_INACTIVITY = 10*60 P_LINK_MAX_INACTIVITY = 3*60 @@ -367,7 +368,6 @@ class LXMRouter: self.propagation_transfer_state = LXMRouter.PR_PATH_REQUESTED self.request_messages_path_job() else: - # TODO: Remove at some point RNS.log("Waiting for propagation node link to become active", RNS.LOG_EXTREME) else: RNS.log("Cannot request LXMF propagation node sync, no default propagation node configured", RNS.LOG_WARNING) @@ -893,7 +893,6 @@ class LXMRouter: expired_outbound.append(destination_hash) for destination_hash in expired_outbound: - RNS.log(f"Cleaning expired outbound ticket for {destination_hash}") # TODO: Remove self.available_tickets["outbound"].pop(destination_hash) # Clean inbound tickets @@ -906,7 +905,6 @@ class LXMRouter: expired_inbound.append(inbound_ticket) for inbound_ticket in expired_inbound: - RNS.log(f"Cleaning expired inbound ticket for {destination_hash}") # TODO: Remove self.available_tickets["inbound"][destination_hash].pop(destination_hash) except Exception as e: @@ -916,7 +914,6 @@ class LXMRouter: def save_available_tickets(self): with self.ticket_file_lock: try: - RNS.log("Saving available tickets...", RNS.LOG_DEBUG) # TODO: Remove if not 
os.path.isdir(self.storagepath): os.makedirs(self.storagepath) @@ -1774,11 +1771,24 @@ class LXMRouter: # Outbound handling for opportunistic messages if lxmessage.method == LXMessage.OPPORTUNISTIC: if lxmessage.delivery_attempts <= LXMRouter.MAX_DELIVERY_ATTEMPTS: - if not hasattr(lxmessage, "next_delivery_attempt") or time.time() > lxmessage.next_delivery_attempt: + if lxmessage.delivery_attempts >= LXMRouter.MAX_PATHLESS_TRIES and not RNS.Transport.has_path(lxmessage.get_destination().hash): + RNS.log(f"Requesting path to {RNS.prettyhexrep(lxmessage.get_destination().hash)} after {lxmessage.delivery_attempts} pathless tries for {lxmessage}", RNS.LOG_DEBUG) lxmessage.delivery_attempts += 1 - lxmessage.next_delivery_attempt = time.time() + LXMRouter.DELIVERY_RETRY_WAIT - RNS.log("Opportunistic delivery attempt "+str(lxmessage.delivery_attempts)+" for "+str(lxmessage)+" to "+RNS.prettyhexrep(lxmessage.get_destination().hash), RNS.LOG_DEBUG) - lxmessage.send() + RNS.Transport.request_path(lxmessage.get_destination().hash) + lxmessage.next_delivery_attempt = time.time() + LXMRouter.PATH_REQUEST_WAIT + lxmessage.progress = 0.00 + elif lxmessage.delivery_attempts == LXMRouter.MAX_PATHLESS_TRIES+2 and RNS.Transport.has_path(lxmessage.get_destination().hash): + RNS.log(f"Opportunistic delivery for {lxmessage} still unsuccessful after {lxmessage.delivery_attempts} attempts, trying to update path to {RNS.prettyhexrep(lxmessage.get_destination().hash)}", RNS.LOG_DEBUG) + lxmessage.delivery_attempts += 1 + RNS.Transport.request_path(lxmessage.get_destination().hash) + lxmessage.next_delivery_attempt = time.time() + LXMRouter.PATH_REQUEST_WAIT + lxmessage.progress = 0.00 + else: + if not hasattr(lxmessage, "next_delivery_attempt") or time.time() > lxmessage.next_delivery_attempt: + lxmessage.delivery_attempts += 1 + lxmessage.next_delivery_attempt = time.time() + LXMRouter.DELIVERY_RETRY_WAIT + RNS.log("Opportunistic delivery attempt "+str(lxmessage.delivery_attempts)+" for "+str(lxmessage)+" to "+RNS.prettyhexrep(lxmessage.get_destination().hash), RNS.LOG_DEBUG) + lxmessage.send() else: RNS.log("Max delivery attempts reached for oppertunistic "+str(lxmessage)+" to "+RNS.prettyhexrep(lxmessage.get_destination().hash), RNS.LOG_DEBUG) self.fail_message(lxmessage) diff --git a/LXMF/LXMessage.py b/LXMF/LXMessage.py index 08e1077..21334a8 100644 --- a/LXMF/LXMessage.py +++ b/LXMF/LXMessage.py @@ -49,18 +49,20 @@ class LXMessage: TICKET_INTERVAL = 1*24*60*60 COST_TICKET = 0x100 - # LXMF overhead is 111 bytes per message: + # LXMF overhead is 112 bytes per message: # 16 bytes for destination hash # 16 bytes for source hash # 64 bytes for Ed25519 signature # 8 bytes for timestamp - # 7 bytes for msgpack structure - LXMF_OVERHEAD = 2*DESTINATION_LENGTH + SIGNATURE_LENGTH + 8 + 7 + # 8 bytes for msgpack structure + TIMESTAMP_SIZE = 8 + STRUCT_OVERHEAD = 8 + LXMF_OVERHEAD = 2*DESTINATION_LENGTH + SIGNATURE_LENGTH + TIMESTAMP_SIZE + STRUCT_OVERHEAD # With an MTU of 500, the maximum amount of data # we can send in a single encrypted packet is - # 383 bytes. - ENCRYPTED_PACKET_MDU = RNS.Packet.ENCRYPTED_MDU + # 391 bytes. + ENCRYPTED_PACKET_MDU = RNS.Packet.ENCRYPTED_MDU + TIMESTAMP_SIZE # The max content length we can fit in LXMF message # inside a single RNS packet is the encrypted MDU, minus @@ -69,7 +71,7 @@ class LXMessage: # field of the packet, therefore we also add the length # of a destination hash to the calculation. 
With default # RNS and LXMF parameters, the largest single-packet - # LXMF message we can send is 288 bytes. If a message + # LXMF message we can send is 295 bytes. If a message # is larger than that, a Reticulum link will be used. ENCRYPTED_PACKET_MAX_CONTENT = ENCRYPTED_PACKET_MDU - LXMF_OVERHEAD + DESTINATION_LENGTH @@ -79,13 +81,13 @@ class LXMessage: LINK_PACKET_MDU = RNS.Link.MDU # Which means that we can deliver single-packet LXMF - # messages with content of up to 320 bytes over a link. + # messages with content of up to 319 bytes over a link. # If a message is larger than that, LXMF will sequence # and transfer it as a RNS resource over the link instead. LINK_PACKET_MAX_CONTENT = LINK_PACKET_MDU - LXMF_OVERHEAD # For plain packets without encryption, we can - # fit up to 369 bytes of content. + # fit up to 368 bytes of content. PLAIN_PACKET_MDU = RNS.Packet.PLAIN_MDU PLAIN_PACKET_MAX_CONTENT = PLAIN_PACKET_MDU - LXMF_OVERHEAD + DESTINATION_LENGTH @@ -129,8 +131,16 @@ class LXMessage: if title == None: title = "" - self.set_title_from_string(title) - self.set_content_from_string(content) + if type(title) == bytes: + self.set_title_from_bytes(title) + else: + self.set_title_from_string(title) + + if type(content) == bytes: + self.set_content_from_bytes(content) + else: + self.set_content_from_string(content) + self.set_fields(fields) self.payload = None @@ -192,7 +202,11 @@ class LXMessage: self.content = content_bytes def content_as_string(self): - return self.content.decode("utf-8") + try: + return self.content.decode("utf-8") + except Exception as e: + RNS.log(f"{self} could not decode message content as string: {e}") + return None def set_fields(self, fields): if isinstance(fields, dict) or fields == None: @@ -352,7 +366,7 @@ class LXMessage: self.packed += self.signature self.packed += packed_payload self.packed_size = len(self.packed) - content_size = len(packed_payload) + content_size = len(packed_payload)-LXMessage.TIMESTAMP_SIZE-LXMessage.STRUCT_OVERHEAD # If no desired delivery method has been defined, # one will be chosen according to these rules: @@ -367,7 +381,7 @@ class LXMessage: single_packet_content_limit = LXMessage.PLAIN_PACKET_MAX_CONTENT if content_size > single_packet_content_limit: - raise TypeError("LXMessage desired opportunistic delivery method, but content exceeds single-packet size.") + raise TypeError(f"LXMessage desired opportunistic delivery method, but content of length {content_size} exceeds single-packet content limit of {single_packet_content_limit}.") else: self.method = LXMessage.OPPORTUNISTIC self.representation = LXMessage.PACKET From b5d6ed3d9b45a7959498dc81873f6d7904b7ccf5 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Mon, 16 Sep 2024 20:08:26 +0200 Subject: [PATCH 079/161] Automatically reconfigure to direct delivery if opportunistic message content is too large --- LXMF/LXMRouter.py | 8 +++++++- LXMF/LXMessage.py | 11 ++++++++++- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 0a603ed..aa0578a 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -536,6 +536,12 @@ class LXMRouter: def information_storage_size(self): pass + def delivery_link_available(self, destination_hash): + if destination_hash in self.direct_links or destination_hash in self.backchannel_links: + return True + else: + return False + ### Utility & Maintenance ############################# ####################################################### @@ -1746,7 +1752,7 @@ class LXMRouter: # Prepare link for 
backchannel communications delivery_destination_hash = lxmessage.get_destination().hash - if delivery_destination_hash in self.direct_links: + if lxmessage.method == LXMessage.DIRECT and delivery_destination_hash in self.direct_links: direct_link = self.direct_links[delivery_destination_hash] if not hasattr(direct_link, "backchannel_identified") or direct_link.backchannel_identified == False: if direct_link.initiator == True: diff --git a/LXMF/LXMessage.py b/LXMF/LXMessage.py index 21334a8..253085b 100644 --- a/LXMF/LXMessage.py +++ b/LXMF/LXMessage.py @@ -372,8 +372,16 @@ class LXMessage: # one will be chosen according to these rules: if self.desired_method == None: self.desired_method = LXMessage.DIRECT - # TODO: Expand rules to something more intelligent + + # If opportunistic delivery was requested, check + # that message will fit within packet size limits + if self.desired_method == LXMessage.OPPORTUNISTIC: + if self.__destination.type == RNS.Destination.SINGLE: + if content_size > LXMessage.ENCRYPTED_PACKET_MAX_CONTENT: + RNS.log(f"Opportunistic delivery was requested for {self}, but content exceeds packet size limit. Falling back to link-based delivery.", RNS.LOG_DEBUG) + self.desired_method = LXMessage.DIRECT + # Set delivery parameters according to delivery method if self.desired_method == LXMessage.OPPORTUNISTIC: if self.__destination.type == RNS.Destination.SINGLE: single_packet_content_limit = LXMessage.ENCRYPTED_PACKET_MAX_CONTENT @@ -434,6 +442,7 @@ class LXMessage: if self.method == LXMessage.OPPORTUNISTIC: lxm_packet = self.__as_packet() lxm_packet.send().set_delivery_callback(self.__mark_delivered) + self.progress = 0.50 self.ratchet_id = lxm_packet.ratchet_id self.state = LXMessage.SENT From efc15bde269efdf7e0de7b7b526e1387b8a6b733 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Mon, 16 Sep 2024 20:09:57 +0200 Subject: [PATCH 080/161] Updated version --- LXMF/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LXMF/_version.py b/LXMF/_version.py index dd9b22c..7225152 100644 --- a/LXMF/_version.py +++ b/LXMF/_version.py @@ -1 +1 @@ -__version__ = "0.5.1" +__version__ = "0.5.2" From 67d21deff9d4038e443891b4bc441135af4b8d8d Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Mon, 16 Sep 2024 20:11:53 +0200 Subject: [PATCH 081/161] Updated dependencies --- requirements.txt | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index 8117b1f..6b7926a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,3 +1,3 @@ qrcode==7.4.2 -rns==0.7.7 +rns==0.7.8 setuptools==70.0.0 diff --git a/setup.py b/setup.py index 52489a1..da87c8e 100644 --- a/setup.py +++ b/setup.py @@ -25,6 +25,6 @@ setuptools.setup( 'lxmd=LXMF.Utilities.lxmd:main', ] }, - install_requires=['rns>=0.7.7'], + install_requires=['rns>=0.7.8'], python_requires='>=3.7', ) From cf6fc4a5105689ba1932b0dc309e6e2e1bf82958 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Tue, 17 Sep 2024 12:56:56 +0200 Subject: [PATCH 082/161] Update max pathless tries --- LXMF/LXMRouter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index aa0578a..2a05f5b 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -21,7 +21,7 @@ class LXMRouter: PROCESSING_INTERVAL = 4 DELIVERY_RETRY_WAIT = 10 PATH_REQUEST_WAIT = 7 - MAX_PATHLESS_TRIES = 2 + MAX_PATHLESS_TRIES = 1 LINK_MAX_INACTIVITY = 10*60 P_LINK_MAX_INACTIVITY = 3*60 From e1a56be5c1e440c6eca869ad3b02ada940741fd0 Mon Sep 17 00:00:00 2001 
From: Mark Qvist Date: Tue, 17 Sep 2024 13:18:16 +0200 Subject: [PATCH 083/161] Pre-emptively request unknown paths for opportunistic messages --- LXMF/LXMRouter.py | 28 +++++++++++++++++++++++----- 1 file changed, 23 insertions(+), 5 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 2a05f5b..6aafce3 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -276,6 +276,10 @@ class LXMRouter: else: return None + def set_active_propagation_node(self, destination_hash): + self.set_outbound_propagation_node(destination_hash) + # self.set_inbound_propagation_node(destination_hash) + def set_outbound_propagation_node(self, destination_hash): if len(destination_hash) != RNS.Identity.TRUNCATED_HASHLENGTH//8 or type(destination_hash) != bytes: raise ValueError("Invalid destination hash for outbound propagation node") @@ -289,6 +293,13 @@ class LXMRouter: def get_outbound_propagation_node(self): return self.outbound_propagation_node + def set_inbound_propagation_node(self, destination_hash): + # TODO: Implement + raise NotImplementedError("Inbound/outbound propagation node differentiation is currently not implemented") + + def get_inbound_propagation_node(self): + return self.get_outbound_propagation_node() + def set_retain_node_lxms(self, retain): if retain == True: self.retain_synced_on_node = True @@ -1195,6 +1206,7 @@ class LXMRouter: def handle_outbound(self, lxmessage): destination_hash = lxmessage.get_destination().hash + if lxmessage.stamp_cost == None: if destination_hash in self.outbound_stamp_costs: stamp_cost = self.outbound_stamp_costs[destination_hash][1] @@ -1220,10 +1232,13 @@ class LXMRouter: if not lxmessage.packed: lxmessage.pack() - lxmessage.determine_transport_encryption() + unknown_path_requested = False + if not RNS.Transport.has_path(destination_hash) and lxmessage.method == LXMessage.OPPORTUNISTIC: + RNS.log(f"Pre-emptively requesting unknown path for opportunistic {lxmessage}", RNS.LOG_DEBUG) + RNS.Transport.request_path(destination_hash) + unknown_path_requested = True - while self.processing_outbound: - time.sleep(0.1) + lxmessage.determine_transport_encryption() if lxmessage.defer_stamp and lxmessage.stamp_cost == None: RNS.log(f"Deferred stamp generation was requested for {lxmessage}, but no stamp is required, processing immediately", RNS.LOG_DEBUG) @@ -1231,7 +1246,10 @@ class LXMRouter: if not lxmessage.defer_stamp: self.pending_outbound.append(lxmessage) - self.process_outbound() + if not unknown_path_requested: + while self.processing_outbound: + time.sleep(0.05) + self.process_outbound() else: self.pending_deferred_stamps[lxmessage.message_id] = lxmessage @@ -1783,7 +1801,7 @@ class LXMRouter: RNS.Transport.request_path(lxmessage.get_destination().hash) lxmessage.next_delivery_attempt = time.time() + LXMRouter.PATH_REQUEST_WAIT lxmessage.progress = 0.00 - elif lxmessage.delivery_attempts == LXMRouter.MAX_PATHLESS_TRIES+2 and RNS.Transport.has_path(lxmessage.get_destination().hash): + elif lxmessage.delivery_attempts == LXMRouter.MAX_PATHLESS_TRIES+3 and RNS.Transport.has_path(lxmessage.get_destination().hash): RNS.log(f"Opportunistic delivery for {lxmessage} still unsuccessful after {lxmessage.delivery_attempts} attempts, trying to update path to {RNS.prettyhexrep(lxmessage.get_destination().hash)}", RNS.LOG_DEBUG) lxmessage.delivery_attempts += 1 RNS.Transport.request_path(lxmessage.get_destination().hash) From ef1c3331adaabf23fa3e4cdf552f9d0ee06b23d7 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Tue, 17 Sep 2024 13:29:36 +0200 Subject: 
[PATCH 084/161] Improved outbound handling --- LXMF/LXMRouter.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 6aafce3..34cfbc8 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -1236,6 +1236,7 @@ class LXMRouter: if not RNS.Transport.has_path(destination_hash) and lxmessage.method == LXMessage.OPPORTUNISTIC: RNS.log(f"Pre-emptively requesting unknown path for opportunistic {lxmessage}", RNS.LOG_DEBUG) RNS.Transport.request_path(destination_hash) + lxmessage.next_delivery_attempt = time.time() + LXMRouter.PATH_REQUEST_WAIT unknown_path_requested = True lxmessage.determine_transport_encryption() @@ -1245,11 +1246,13 @@ class LXMRouter: lxmessage.defer_stamp = False if not lxmessage.defer_stamp: + while not unknown_path_requested and self.processing_outbound: + time.sleep(0.05) + self.pending_outbound.append(lxmessage) if not unknown_path_requested: - while self.processing_outbound: - time.sleep(0.05) self.process_outbound() + else: self.pending_deferred_stamps[lxmessage.message_id] = lxmessage @@ -1786,6 +1789,7 @@ class LXMRouter: elif lxmessage.method == LXMessage.PROPAGATED and lxmessage.state == LXMessage.SENT: RNS.log("Propagation has occurred for "+str(lxmessage)+", removing from outbound queue", RNS.LOG_DEBUG) self.pending_outbound.remove(lxmessage) + else: RNS.log("Starting outbound processing for "+str(lxmessage)+" to "+RNS.prettyhexrep(lxmessage.get_destination().hash), RNS.LOG_DEBUG) From a461fd415afcf80ff15c2a1d1cbd61bb5ccf6d04 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 19 Sep 2024 23:00:24 +0200 Subject: [PATCH 085/161] Rediscover path during opportunistic delivery if path fails --- LXMF/LXMRouter.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 34cfbc8..e3480cb 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -1805,10 +1805,14 @@ class LXMRouter: RNS.Transport.request_path(lxmessage.get_destination().hash) lxmessage.next_delivery_attempt = time.time() + LXMRouter.PATH_REQUEST_WAIT lxmessage.progress = 0.00 - elif lxmessage.delivery_attempts == LXMRouter.MAX_PATHLESS_TRIES+3 and RNS.Transport.has_path(lxmessage.get_destination().hash): - RNS.log(f"Opportunistic delivery for {lxmessage} still unsuccessful after {lxmessage.delivery_attempts} attempts, trying to update path to {RNS.prettyhexrep(lxmessage.get_destination().hash)}", RNS.LOG_DEBUG) + elif lxmessage.delivery_attempts == LXMRouter.MAX_PATHLESS_TRIES+1 and RNS.Transport.has_path(lxmessage.get_destination().hash): + RNS.log(f"Opportunistic delivery for {lxmessage} still unsuccessful after {lxmessage.delivery_attempts} attempts, trying to rediscover path to {RNS.prettyhexrep(lxmessage.get_destination().hash)}", RNS.LOG_DEBUG) lxmessage.delivery_attempts += 1 - RNS.Transport.request_path(lxmessage.get_destination().hash) + RNS.Reticulum.get_instance().drop_path(lxmessage.get_destination().hash) + def rediscover_job(): + time.sleep(0.5) + RNS.Transport.request_path(lxmessage.get_destination().hash) + threading.Thread(target=rediscover_job, daemon=True).start() lxmessage.next_delivery_attempt = time.time() + LXMRouter.PATH_REQUEST_WAIT lxmessage.progress = 0.00 else: From 05f144ae506568c822284f73c3809097e22bdbc2 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 19 Sep 2024 23:07:58 +0200 Subject: [PATCH 086/161] Updated version --- LXMF/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LXMF/_version.py b/LXMF/_version.py 
index 7225152..43a1e95 100644 --- a/LXMF/_version.py +++ b/LXMF/_version.py @@ -1 +1 @@ -__version__ = "0.5.2" +__version__ = "0.5.3" From ac2c9c3a9b0964f4f212663de4fc0362c02e1d13 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Mon, 30 Sep 2024 19:27:40 +0200 Subject: [PATCH 087/161] Improved opportunistic delivery performance --- LXMF/Handlers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LXMF/Handlers.py b/LXMF/Handlers.py index eb10a76..5ce5571 100644 --- a/LXMF/Handlers.py +++ b/LXMF/Handlers.py @@ -15,7 +15,7 @@ class LXMFDeliveryAnnounceHandler: def received_announce(self, destination_hash, announced_identity, app_data): for lxmessage in self.lxmrouter.pending_outbound: if destination_hash == lxmessage.destination_hash: - if lxmessage.method == LXMessage.DIRECT: + if lxmessage.method == LXMessage.DIRECT or lxmessage.method == LXMessage.OPPORTUNISTIC: lxmessage.next_delivery_attempt = time.time() while self.lxmrouter.processing_outbound: From 0e0d01a0b23896cb60423478cb37b68094dbf6a0 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Mon, 30 Sep 2024 19:28:13 +0200 Subject: [PATCH 088/161] Updated versions --- LXMF/_version.py | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/LXMF/_version.py b/LXMF/_version.py index 43a1e95..6b27eee 100644 --- a/LXMF/_version.py +++ b/LXMF/_version.py @@ -1 +1 @@ -__version__ = "0.5.3" +__version__ = "0.5.4" diff --git a/setup.py b/setup.py index da87c8e..9832796 100644 --- a/setup.py +++ b/setup.py @@ -25,6 +25,6 @@ setuptools.setup( 'lxmd=LXMF.Utilities.lxmd:main', ] }, - install_requires=['rns>=0.7.8'], + install_requires=['rns>=0.8.1'], python_requires='>=3.7', ) From eefb1c8349827185350075ea54f123f2e12b3b11 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Wed, 2 Oct 2024 02:06:00 +0200 Subject: [PATCH 089/161] Updated versions --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 9832796..109d799 100644 --- a/setup.py +++ b/setup.py @@ -25,6 +25,6 @@ setuptools.setup( 'lxmd=LXMF.Utilities.lxmd:main', ] }, - install_requires=['rns>=0.8.1'], + install_requires=['rns>=0.8.0'], python_requires='>=3.7', ) From 19f0fa77242cfdf2f3a05b1fc52cd4ad4ccdf68d Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Fri, 4 Oct 2024 11:22:39 +0200 Subject: [PATCH 090/161] Don't try creating LXMF peer destination until identity is resolved. Fixes #19. 
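The diff below applies a simple guard: the peer's RNS.Destination is only constructed once RNS.Identity.recall() actually returns an identity, and the recall is retried on the next sync otherwise. A minimal sketch of that pattern, using the same calls that appear in the LXMPeer diff (APP_NAME is the LXMF app name constant imported there):

    identity = RNS.Identity.recall(destination_hash)   # returns None if the identity is not yet known locally
    destination = None
    if identity != None:
        destination = RNS.Destination(identity, RNS.Destination.OUT,
                                      RNS.Destination.SINGLE, APP_NAME, "propagation")
    else:
        # Defer: keep the destination unset and retry the recall on the next sync,
        # instead of constructing a Destination from a None identity
        pass
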
--- LXMF/LXMPeer.py | 10 +++++++--- LXMF/_version.py | 2 +- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/LXMF/LXMPeer.py b/LXMF/LXMPeer.py index a0b61a5..0962b26 100644 --- a/LXMF/LXMPeer.py +++ b/LXMF/LXMPeer.py @@ -108,7 +108,10 @@ class LXMPeer: self.router = router self.destination_hash = destination_hash self.identity = RNS.Identity.recall(destination_hash) - self.destination = RNS.Destination(self.identity, RNS.Destination.OUT, RNS.Destination.SINGLE, APP_NAME, "propagation") + if self.identity != None: + self.destination = RNS.Destination(self.identity, RNS.Destination.OUT, RNS.Destination.SINGLE, APP_NAME, "propagation") + else: + RNS.log(f"Could not recall identity for LXMF propagation peer {RNS.prettyhexrep(self.destination_hash)}, will retry identity resolution on next sync", RNS.LOG_WARNING) def sync(self): RNS.log("Initiating LXMF Propagation Node sync with peer "+RNS.prettyhexrep(self.destination_hash), RNS.LOG_DEBUG) @@ -126,9 +129,10 @@ class LXMPeer: else: if self.identity == None: self.identity = RNS.Identity.recall(destination_hash) - self.destination = RNS.Destination(self.identity, RNS.Destination.OUT, RNS.Destination.SINGLE, APP_NAME, "propagation") + if self.identity != None: + self.destination = RNS.Destination(self.identity, RNS.Destination.OUT, RNS.Destination.SINGLE, APP_NAME, "propagation") - if self.identity != None: + if self.destination != None: if len(self.unhandled_messages) > 0: if self.state == LXMPeer.IDLE: RNS.log("Establishing link for sync to peer "+RNS.prettyhexrep(self.destination_hash)+"...", RNS.LOG_DEBUG) diff --git a/LXMF/_version.py b/LXMF/_version.py index 6b27eee..86716a7 100644 --- a/LXMF/_version.py +++ b/LXMF/_version.py @@ -1 +1 @@ -__version__ = "0.5.4" +__version__ = "0.5.5" From 19d8909b104263227922310cf08a9fbc7affe0c2 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sat, 5 Oct 2024 15:26:18 +0200 Subject: [PATCH 091/161] Fixed inbound ticket cleaning bug --- LXMF/LXMRouter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index e3480cb..4157741 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -922,7 +922,7 @@ class LXMRouter: expired_inbound.append(inbound_ticket) for inbound_ticket in expired_inbound: - self.available_tickets["inbound"][destination_hash].pop(destination_hash) + self.available_tickets["inbound"][destination_hash].pop(inbound_ticket) except Exception as e: RNS.log(f"Error while cleaning available tickets. 
The contained exception was: {e}", RNS.LOG_ERROR) From 0178fb0d4f822fe2172d117d001ef96f2ce226a7 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sat, 5 Oct 2024 22:40:14 +0200 Subject: [PATCH 092/161] Don't reset propagation node address and links if already set to the same destination --- LXMF/LXMRouter.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 4157741..7968854 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -284,11 +284,12 @@ class LXMRouter: if len(destination_hash) != RNS.Identity.TRUNCATED_HASHLENGTH//8 or type(destination_hash) != bytes: raise ValueError("Invalid destination hash for outbound propagation node") else: - self.outbound_propagation_node = destination_hash - if self.outbound_propagation_link != None: - if self.outbound_propagation_link.destination.hash != destination_hash: - self.outbound_propagation_link.teardown() - self.outbound_propagation_link = None + if self.outbound_propagation_node != destination_hash: + self.outbound_propagation_node = destination_hash + if self.outbound_propagation_link != None: + if self.outbound_propagation_link.destination.hash != destination_hash: + self.outbound_propagation_link.teardown() + self.outbound_propagation_link = None def get_outbound_propagation_node(self): return self.outbound_propagation_node From 672d7542386375bfb1c015162336973d31b6f62e Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sun, 6 Oct 2024 11:13:38 +0200 Subject: [PATCH 093/161] Updated dependency --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 109d799..1fb47e9 100644 --- a/setup.py +++ b/setup.py @@ -25,6 +25,6 @@ setuptools.setup( 'lxmd=LXMF.Utilities.lxmd:main', ] }, - install_requires=['rns>=0.8.0'], + install_requires=['rns>=0.8.2'], python_requires='>=3.7', ) From 0cb771439fa5b4f0d6d632f46e02e54952e7b3f5 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Fri, 11 Oct 2024 23:40:27 +0200 Subject: [PATCH 094/161] Fixed incorrect progress values on path waiting --- LXMF/LXMRouter.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 7968854..068dc44 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -1805,7 +1805,7 @@ class LXMRouter: lxmessage.delivery_attempts += 1 RNS.Transport.request_path(lxmessage.get_destination().hash) lxmessage.next_delivery_attempt = time.time() + LXMRouter.PATH_REQUEST_WAIT - lxmessage.progress = 0.00 + lxmessage.progress = 0.01 elif lxmessage.delivery_attempts == LXMRouter.MAX_PATHLESS_TRIES+1 and RNS.Transport.has_path(lxmessage.get_destination().hash): RNS.log(f"Opportunistic delivery for {lxmessage} still unsuccessful after {lxmessage.delivery_attempts} attempts, trying to rediscover path to {RNS.prettyhexrep(lxmessage.get_destination().hash)}", RNS.LOG_DEBUG) lxmessage.delivery_attempts += 1 @@ -1815,7 +1815,7 @@ class LXMRouter: RNS.Transport.request_path(lxmessage.get_destination().hash) threading.Thread(target=rediscover_job, daemon=True).start() lxmessage.next_delivery_attempt = time.time() + LXMRouter.PATH_REQUEST_WAIT - lxmessage.progress = 0.00 + lxmessage.progress = 0.01 else: if not hasattr(lxmessage, "next_delivery_attempt") or time.time() > lxmessage.next_delivery_attempt: lxmessage.delivery_attempts += 1 @@ -1901,7 +1901,7 @@ class LXMRouter: RNS.log("No path known for delivery attempt "+str(lxmessage.delivery_attempts)+" to "+RNS.prettyhexrep(lxmessage.get_destination().hash)+". 
Requesting path...", RNS.LOG_DEBUG) RNS.Transport.request_path(lxmessage.get_destination().hash) lxmessage.next_delivery_attempt = time.time() + LXMRouter.PATH_REQUEST_WAIT - lxmessage.progress = 0.00 + lxmessage.progress = 0.01 else: RNS.log("Max delivery attempts reached for direct "+str(lxmessage)+" to "+RNS.prettyhexrep(lxmessage.get_destination().hash), RNS.LOG_DEBUG) self.fail_message(lxmessage) From aa406d15521b4a15a58d6df1efc2ceb06856ccd7 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Fri, 11 Oct 2024 23:45:24 +0200 Subject: [PATCH 095/161] Updated version --- LXMF/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LXMF/_version.py b/LXMF/_version.py index 86716a7..a779a44 100644 --- a/LXMF/_version.py +++ b/LXMF/_version.py @@ -1 +1 @@ -__version__ = "0.5.5" +__version__ = "0.5.6" From 36f0c17c8b73d707bb4351b9f1d379417763b2c4 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sun, 13 Oct 2024 13:05:52 +0200 Subject: [PATCH 096/161] Added RNR_REFS field --- LXMF/LXMF.py | 1 + 1 file changed, 1 insertion(+) diff --git a/LXMF/LXMF.py b/LXMF/LXMF.py index 23b10b3..c0d3039 100644 --- a/LXMF/LXMF.py +++ b/LXMF/LXMF.py @@ -18,6 +18,7 @@ FIELD_RESULTS = 0x0A FIELD_GROUP = 0x0B FIELD_TICKET = 0x0C FIELD_EVENT = 0x0D +FIELD_RNR_REFS = 0x0E # For usecases such as including custom data structures, # embedding or encapsulating other data types or protocols From c9272c9218de9d12047f595c494c377142211248 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sun, 13 Oct 2024 13:08:10 +0200 Subject: [PATCH 097/161] Fixed missing byteorder argument in stamp value calculation. Fixes #21. --- LXMF/LXStamper.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LXMF/LXStamper.py b/LXMF/LXStamper.py index 28cf9cd..2023ec0 100644 --- a/LXMF/LXStamper.py +++ b/LXMF/LXStamper.py @@ -27,7 +27,7 @@ def stamp_value(workblock, stamp): value = 0 bits = 256 material = RNS.Identity.full_hash(workblock+stamp) - i = int.from_bytes(material) + i = int.from_bytes(material, byteorder="big") while ((i & (1 << (bits - 1))) == 0): i = (i << 1) value += 1 From 9ff76c0473e9d4107e079f266dd08144bb74c7c8 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sun, 13 Oct 2024 14:01:10 +0200 Subject: [PATCH 098/161] Updated version --- LXMF/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LXMF/_version.py b/LXMF/_version.py index a779a44..1cc82e6 100644 --- a/LXMF/_version.py +++ b/LXMF/_version.py @@ -1 +1 @@ -__version__ = "0.5.6" +__version__ = "0.5.7" From 61331b58d72ae1f88adcc6b7085b260d95dffb14 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sat, 23 Nov 2024 12:47:31 +0100 Subject: [PATCH 099/161] Updated version --- LXMF/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LXMF/_version.py b/LXMF/_version.py index 1cc82e6..fc0a843 100644 --- a/LXMF/_version.py +++ b/LXMF/_version.py @@ -1 +1 @@ -__version__ = "0.5.7" +__version__ = "0.5.8" From b172c7fcd40c7b470fa0d5140be4e7f22b89ada8 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sat, 23 Nov 2024 12:49:01 +0100 Subject: [PATCH 100/161] Added PN announce data validation to announce handler --- LXMF/Handlers.py | 32 ++++++++++++++++---------------- LXMF/LXMF.py | 28 +++++++++++++++++++++++++--- 2 files changed, 41 insertions(+), 19 deletions(-) diff --git a/LXMF/Handlers.py b/LXMF/Handlers.py index 5ce5571..7420ea5 100644 --- a/LXMF/Handlers.py +++ b/LXMF/Handlers.py @@ -2,8 +2,7 @@ import time import RNS import RNS.vendor.umsgpack as msgpack -from .LXMF import APP_NAME, 
stamp_cost_from_app_data - +from .LXMF import APP_NAME, stamp_cost_from_app_data, pn_announce_data_is_valid from .LXMessage import LXMessage class LXMFDeliveryAnnounceHandler: @@ -40,23 +39,24 @@ class LXMFPropagationAnnounceHandler: def received_announce(self, destination_hash, announced_identity, app_data): try: if type(app_data) == bytes: - data = msgpack.unpackb(app_data) - if self.lxmrouter.propagation_node and self.lxmrouter.autopeer: - node_timebase = data[1] - propagation_transfer_limit = None - if len(data) >= 3: - try: - propagation_transfer_limit = float(data[2]) - except: - propagation_transfer_limit = None + data = msgpack.unpackb(app_data) - if data[0] == True: - if RNS.Transport.hops_to(destination_hash) <= self.lxmrouter.autopeer_maxdepth: - self.lxmrouter.peer(destination_hash, node_timebase, propagation_transfer_limit) + if pn_announce_data_is_valid(data): + node_timebase = data[1] + propagation_transfer_limit = None + if len(data) >= 3: + try: + propagation_transfer_limit = float(data[2]) + except: + propagation_transfer_limit = None - elif data[0] == False: - self.lxmrouter.unpeer(destination_hash, node_timebase) + if data[0] == True: + if RNS.Transport.hops_to(destination_hash) <= self.lxmrouter.autopeer_maxdepth: + self.lxmrouter.peer(destination_hash, node_timebase, propagation_transfer_limit) + + elif data[0] == False: + self.lxmrouter.unpeer(destination_hash, node_timebase) except Exception as e: RNS.log("Error while evaluating propagation node announce, ignoring announce.", RNS.LOG_DEBUG) diff --git a/LXMF/LXMF.py b/LXMF/LXMF.py index c0d3039..184e530 100644 --- a/LXMF/LXMF.py +++ b/LXMF/LXMF.py @@ -83,6 +83,7 @@ AM_CUSTOM = 0xFF # handle and operate on LXMF data in client programs # ########################################################## +import RNS import RNS.vendor.umsgpack as msgpack def display_name_from_app_data(app_data=None): if app_data == None: @@ -104,8 +105,8 @@ def display_name_from_app_data(app_data=None): try: decoded = dn.decode("utf-8") return decoded - except: - RNS.log("Could not decode display name in included announce data. The contained exception was: {e}", RNS.LOG_ERROR) + except Exception as e: + RNS.log(f"Could not decode display name in included announce data. 
The contained exception was: {e}", RNS.LOG_ERROR) return None # Original announce format @@ -127,4 +128,25 @@ def stamp_cost_from_app_data(app_data=None): # Original announce format else: - return None \ No newline at end of file + return None + +def pn_announce_data_is_valid(data): + try: + if type(data) == bytes: + data = msgpack.unpackb(data) + + if len(data) < 3: + raise ValueError("Invalid announce data: Insufficient peer data") + else: + if data[0] != True and data[0] != False: + raise ValueError("Invalid announce data: Indeterminate propagation node status") + try: + int(data[1]) + except: + raise ValueError("Invalid announce data: Could not decode peer timebase") + + except Exception as e: + RNS.log(f"Could not validate propagation node announce data: {e}", RNS.LOG_DEBUG) + return False + + return True \ No newline at end of file From c21da895b6510952e1ae3771c13c84b5b3b04ba4 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sat, 23 Nov 2024 13:20:24 +0100 Subject: [PATCH 101/161] Improved duplicate message detection when syncing from multiple different PNs --- LXMF/LXMRouter.py | 31 +++++++++++++++++++------------ 1 file changed, 19 insertions(+), 12 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 068dc44..743cfa8 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -351,6 +351,7 @@ class LXMRouter: if self.outbound_propagation_node != None: if self.outbound_propagation_link != None and self.outbound_propagation_link.status == RNS.Link.ACTIVE: self.propagation_transfer_state = LXMRouter.PR_LINK_ESTABLISHED + RNS.log("Requesting message list from propagation node", RNS.LOG_DEBUG) self.outbound_propagation_link.identify(identity) self.outbound_propagation_link.request( LXMPeer.MESSAGE_GET_PATH, @@ -643,7 +644,7 @@ class LXMRouter: removed_entries = [] for transient_id in self.locally_delivered_transient_ids: timestamp = self.locally_delivered_transient_ids[transient_id] - if now > timestamp+LXMRouter.MESSAGE_EXPIRY*1.25: + if now > timestamp+LXMRouter.MESSAGE_EXPIRY*6.0: removed_entries.append(transient_id) for transient_id in removed_entries: @@ -653,7 +654,7 @@ class LXMRouter: removed_entries = [] for transient_id in self.locally_processed_transient_ids: timestamp = self.locally_processed_transient_ids[transient_id] - if now > timestamp+LXMRouter.MESSAGE_EXPIRY*1.25: + if now > timestamp+LXMRouter.MESSAGE_EXPIRY*6.0: removed_entries.append(transient_id) for transient_id in removed_entries: @@ -854,9 +855,8 @@ class LXMRouter: if not os.path.isdir(self.storagepath): os.makedirs(self.storagepath) - locally_delivered_file = open(self.storagepath+"/local_deliveries", "wb") - locally_delivered_file.write(msgpack.packb(self.locally_delivered_transient_ids)) - locally_delivered_file.close() + with open(self.storagepath+"/local_deliveries", "wb") as locally_delivered_file: + locally_delivered_file.write(msgpack.packb(self.locally_delivered_transient_ids)) except Exception as e: RNS.log("Could not save locally delivered message ID cache to storage. 
The contained exception was: "+str(e), RNS.LOG_ERROR) @@ -866,9 +866,8 @@ class LXMRouter: if not os.path.isdir(self.storagepath): os.makedirs(self.storagepath) - locally_processed_file = open(self.storagepath+"/locally_processed", "wb") - locally_processed_file.write(msgpack.packb(self.locally_processed_transient_ids)) - locally_processed_file.close() + with open(self.storagepath+"/locally_processed", "wb") as locally_processed_file: + locally_processed_file.write(msgpack.packb(self.locally_processed_transient_ids)) except Exception as e: RNS.log("Could not save locally processed transient ID cache to storage. The contained exception was: "+str(e), RNS.LOG_ERROR) @@ -1124,24 +1123,32 @@ class LXMRouter: wants = [] if len(request_receipt.response) > 0: for transient_id in request_receipt.response: - if not self.retain_synced_on_node and self.has_message(transient_id): - haves.append(transient_id) + if self.has_message(transient_id): + if not self.retain_synced_on_node: + haves.append(transient_id) else: if self.propagation_transfer_max_messages == LXMRouter.PR_ALL_MESSAGES or len(wants) < self.propagation_transfer_max_messages: wants.append(transient_id) + ms = "" if len(wants) == 1 else "s" + RNS.log(f"Requesting {len(wants)} message{ms} from propagation node", RNS.LOG_DEBUG) request_receipt.link.request( LXMPeer.MESSAGE_GET_PATH, [wants, haves, self.delivery_per_transfer_limit], response_callback=self.message_get_response, failed_callback=self.message_get_failed, - progress_callback=self.message_get_progress - ) + progress_callback=self.message_get_progress) + else: self.propagation_transfer_state = LXMRouter.PR_COMPLETE self.propagation_transfer_progress = 1.0 self.propagation_transfer_last_result = 0 + else: + RNS.log("Invalid message list data received from propagation node", RNS.LOG_DEBUG) + if self.outbound_propagation_link != None: + self.outbound_propagation_link.teardown() + def message_get_response(self, request_receipt): if request_receipt.response == LXMPeer.ERROR_NO_IDENTITY: RNS.log("Propagation node indicated missing identification on get request, tearing down link.", RNS.LOG_DEBUG) From 575fbc9ffe820feb5a92acc0c00b7a8582f603a0 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sat, 23 Nov 2024 13:20:43 +0100 Subject: [PATCH 102/161] Updated dependencies --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 1fb47e9..bb56614 100644 --- a/setup.py +++ b/setup.py @@ -25,6 +25,6 @@ setuptools.setup( 'lxmd=LXMF.Utilities.lxmd:main', ] }, - install_requires=['rns>=0.8.2'], + install_requires=['rns>=0.8.6'], python_requires='>=3.7', ) From 1a43d93da2d35e78e9e40fd6e84ac6c6bf237fa3 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Mon, 9 Dec 2024 18:16:12 +0100 Subject: [PATCH 103/161] Added message renderer field --- LXMF/LXMF.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/LXMF/LXMF.py b/LXMF/LXMF.py index 184e530..db0edb7 100644 --- a/LXMF/LXMF.py +++ b/LXMF/LXMF.py @@ -19,6 +19,7 @@ FIELD_GROUP = 0x0B FIELD_TICKET = 0x0C FIELD_EVENT = 0x0D FIELD_RNR_REFS = 0x0E +FIELD_RENDERER = 0x0F # For usecases such as including custom data structures, # embedding or encapsulating other data types or protocols @@ -77,6 +78,18 @@ AM_OPUS_LOSSLESS = 0x19 # determine it itself based on the included data. AM_CUSTOM = 0xFF +# Message renderer specifications for FIELD_RENDERER. +# The renderer specification is completely optional, +# and only serves as an indication to the receiving +# client on how to render the message contents. 
It is +# not mandatory to implement, either on sending or +# receiving sides, but is the recommended way to +# signal how to render a message, if non-plaintext +# formatting is used. +RENDERER_PLAIN = 0x00 +RENDERER_MICRON = 0x01 +RENDERER_MARKDOWN = 0x02 +RENDERER_BBCODE = 0x03 ########################################################## # The following helper functions makes it easier to # From c426c93cc5d63a3dae18ad2264b1299a7ad9e46c Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Mon, 9 Dec 2024 22:10:17 +0100 Subject: [PATCH 104/161] Updated versions --- LXMF/_version.py | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/LXMF/_version.py b/LXMF/_version.py index fc0a843..40e294f 100644 --- a/LXMF/_version.py +++ b/LXMF/_version.py @@ -1 +1 @@ -__version__ = "0.5.8" +__version__ = "0.5.9" diff --git a/setup.py b/setup.py index bb56614..6ee237c 100644 --- a/setup.py +++ b/setup.py @@ -25,6 +25,6 @@ setuptools.setup( 'lxmd=LXMF.Utilities.lxmd:main', ] }, - install_requires=['rns>=0.8.6'], + install_requires=['rns>=0.8.7'], python_requires='>=3.7', ) From 96dddf1b3a0328f22b59ac7d4fcc8193d9884a44 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Mon, 23 Dec 2024 12:36:53 +0100 Subject: [PATCH 105/161] Added handling of corrupted transient ID cache files --- LXMF/LXMRouter.py | 30 +++++++++++++++++++++--------- 1 file changed, 21 insertions(+), 9 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 743cfa8..fa168ef 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -143,19 +143,31 @@ class LXMRouter: if os.path.isfile(self.storagepath+"/local_deliveries"): locally_delivered_file = open(self.storagepath+"/local_deliveries", "rb") data = locally_delivered_file.read() - self.locally_delivered_transient_ids = msgpack.unpackb(data) locally_delivered_file.close() - - if os.path.isfile(self.storagepath+"/locally_processed"): - locally_processed_file = open(self.storagepath+"/locally_processed", "rb") - data = locally_processed_file.read() - self.locally_processed_transient_ids = msgpack.unpackb(data) - locally_processed_file.close() - - self.clean_transient_id_caches() + self.locally_delivered_transient_ids = msgpack.unpackb(data) except Exception as e: RNS.log("Could not load locally delivered message ID cache from storage. The contained exception was: "+str(e), RNS.LOG_ERROR) + self.locally_delivered_transient_ids = {} + + try: + if os.path.isfile(self.storagepath+"/locally_processed"): + locally_processed_file = open(self.storagepath+"/locally_processed", "rb") + data = locally_processed_file.read() + locally_processed_file.close() + self.locally_processed_transient_ids = msgpack.unpackb(data) + + except Exception as e: + RNS.log("Could not load locally processed message ID cache from storage. The contained exception was: "+str(e), RNS.LOG_ERROR) + self.locally_processed_transient_ids = {} + + try: + self.clean_transient_id_caches() + + except Exception as e: + RNS.log("Could not clean transient ID caches. 
The contained exception was : "+str(e), RNS.LOG_ERROR) + self.locally_delivered_transient_ids = {} + self.locally_processed_transient_ids = {} try: if os.path.isfile(self.storagepath+"/outbound_stamp_costs"): From a6a42eff80cd469f734dbfd24991e5b0713e0666 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Mon, 13 Jan 2025 14:35:14 +0100 Subject: [PATCH 106/161] Add sync transfer rate to peer stats --- LXMF/LXMPeer.py | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/LXMF/LXMPeer.py b/LXMF/LXMPeer.py index 0962b26..a88f6da 100644 --- a/LXMF/LXMPeer.py +++ b/LXMF/LXMPeer.py @@ -47,6 +47,11 @@ class LXMPeer: peer.link_establishment_rate = dictionary["link_establishment_rate"] else: peer.link_establishment_rate = 0 + + if "sync_transfer_rate" in dictionary: + peer.sync_transfer_rate = dictionary["sync_transfer_rate"] + else: + peer.sync_transfer_rate = 0 if "propagation_transfer_limit" in dictionary: try: @@ -73,6 +78,7 @@ class LXMPeer: dictionary["last_heard"] = self.last_heard dictionary["destination_hash"] = self.destination_hash dictionary["link_establishment_rate"] = self.link_establishment_rate + dictionary["sync_transfer_rate"] = self.sync_transfer_rate dictionary["propagation_transfer_limit"] = self.propagation_transfer_limit handled_ids = [] @@ -96,6 +102,7 @@ class LXMPeer: self.sync_backoff = 0 self.peering_timebase = 0 self.link_establishment_rate = 0 + self.sync_transfer_rate = 0 self.propagation_transfer_limit = None self.link = None @@ -257,6 +264,7 @@ class LXMPeer: data = msgpack.packb([time.time(), lxm_list]) resource = RNS.Resource(data, self.link, callback = self.resource_concluded) resource.transferred_messages = wanted_message_ids + resource.sync_transfer_started = time.time() self.state = LXMPeer.RESOURCE_TRANSFERRING else: @@ -289,7 +297,12 @@ class LXMPeer: self.link = None self.state = LXMPeer.IDLE - RNS.log("Sync to peer "+RNS.prettyhexrep(self.destination_hash)+" completed", RNS.LOG_DEBUG) + rate_str = "" + if hasattr(resource, "sync_transfer_started") and resource.sync_transfer_started: + self.sync_transfer_rate = (resource.get_transfer_size()*8)/(time.time()-resource.sync_transfer_started) + rate_str = f" at {RNS.prettyspeed(self.sync_transfer_rate)}" + + RNS.log("Sync to peer "+RNS.prettyhexrep(self.destination_hash)+" completed"+rate_str, RNS.LOG_DEBUG) self.alive = True self.last_heard = time.time() From 976305b791a1db8b11996fe8e6dd81cfdc4be863 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Mon, 13 Jan 2025 14:37:51 +0100 Subject: [PATCH 107/161] Sort waiting peers by sync transfer rate --- LXMF/LXMRouter.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index fa168ef..5410a54 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -1514,12 +1514,12 @@ class LXMRouter: if len(waiting_peers) > 0: fastest_peers = sorted( waiting_peers, - key=lambda p: p.link_establishment_rate, + key=lambda p: p.sync_transfer_rate, reverse=True )[0:min(LXMRouter.FASTEST_N_RANDOM_POOL, len(waiting_peers))] peer_pool.extend(fastest_peers) - unknown_speed_peers = [p for p in waiting_peers if p.link_establishment_rate == 0] + unknown_speed_peers = [p for p in waiting_peers if p.sync_transfer_rate == 0] if len(unknown_speed_peers) > 0: peer_pool.extend( unknown_speed_peers[ From 2d175a331fb259d0c0f01dd50907f58c28ccdf0d Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Mon, 13 Jan 2025 15:26:27 +0100 Subject: [PATCH 108/161] Updated dependencies --- setup.py | 2 +- 1 file changed, 1 insertion(+), 
1 deletion(-) diff --git a/setup.py b/setup.py index 6ee237c..6d25b47 100644 --- a/setup.py +++ b/setup.py @@ -25,6 +25,6 @@ setuptools.setup( 'lxmd=LXMF.Utilities.lxmd:main', ] }, - install_requires=['rns>=0.8.7'], + install_requires=['rns>=0.9.0'], python_requires='>=3.7', ) From d97c4f292e43dfe49b91fb457dab13ded15e9947 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Tue, 14 Jan 2025 21:32:10 +0100 Subject: [PATCH 109/161] Fixed missing checks for file corruption --- LXMF/LXMRouter.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 5410a54..64677a8 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -145,6 +145,9 @@ class LXMRouter: data = locally_delivered_file.read() locally_delivered_file.close() self.locally_delivered_transient_ids = msgpack.unpackb(data) + if not type(self.locally_delivered_transient_ids) == dict: + RNS.log("Invalid data format for loaded locally delivered transient IDs, recreating...", RNS.LOG_ERROR) + self.locally_delivered_transient_ids = {} except Exception as e: RNS.log("Could not load locally delivered message ID cache from storage. The contained exception was: "+str(e), RNS.LOG_ERROR) @@ -156,6 +159,10 @@ class LXMRouter: data = locally_processed_file.read() locally_processed_file.close() self.locally_processed_transient_ids = msgpack.unpackb(data) + if not type(self.locally_processed_transient_ids) == dict: + RNS.log("Invalid data format for loaded locally processed transient IDs, recreating...", RNS.LOG_ERROR) + self.locally_processed_transient_ids = {} + except Exception as e: RNS.log("Could not load locally processed message ID cache from storage. The contained exception was: "+str(e), RNS.LOG_ERROR) @@ -175,6 +182,9 @@ class LXMRouter: with open(self.storagepath+"/outbound_stamp_costs", "rb") as outbound_stamp_cost_file: data = outbound_stamp_cost_file.read() self.outbound_stamp_costs = msgpack.unpackb(data) + if not type(self.outbound_stamp_costs) == dict: + RNS.log("Invalid data format for loaded outbound stamp costs, recreating...", RNS.LOG_ERROR) + self.outbound_stamp_costs = {} self.clean_outbound_stamp_costs() self.save_outbound_stamp_costs() From a67695411697e1899f417f23156115283014928f Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sat, 18 Jan 2025 19:13:43 +0100 Subject: [PATCH 110/161] Added ability to cancel outbound messages --- LXMF/LXMRouter.py | 45 ++++++++++++++++++++++++++++++++++++++++----- LXMF/LXMessage.py | 23 +++++++++++++---------- 2 files changed, 53 insertions(+), 15 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 64677a8..5c71f93 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -1234,6 +1234,30 @@ class LXMRouter: else: return False + def cancel_outbound(self, message_id): + try: + lxmessage = None + for lxm in self.pending_outbound: + if lxm.message_id == message_id: + lxmessage = lxm + + if message_id in self.pending_deferred_stamps: + RNS.log(f"Cancelling deferred stamp generation for {lxmessage}", RNS.LOG_DEBUG) + + if lxmessage != None: + lxmessage.state = LXMessage.CANCELLED + if lxmessage in self.pending_outbound: + RNS.log(f"Cancelling {lxmessage} in outbound queue", RNS.LOG_DEBUG) + if lxmessage.representation == LXMessage.RESOURCE: + if lxmessage.resource_representation != None: + lxmessage.resource_representation.cancel() + + self.process_outbound() + + except Exception as e: + RNS.log(f"An error occurred while cancelling {lxmessage}: {e}", RNS.LOG_ERROR) + RNS.trace_exception(e) + def handle_outbound(self, lxmessage): 
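        # A short summary of the outbound flow that handle_outbound() implements,
        # based on the versions of this method appearing earlier in this patch series:
        #   1. If lxmessage.stamp_cost is unset, look up a cached cost for the
        #      destination in self.outbound_stamp_costs.
        #   2. Pack the message and determine transport encryption.
        #   3. For OPPORTUNISTIC messages with no known path, pre-emptively call
        #      RNS.Transport.request_path() and set next_delivery_attempt to
        #      time.time() + LXMRouter.PATH_REQUEST_WAIT.
        #   4. If no deferred stamp generation is required, append the message to
        #      self.pending_outbound and, unless a path request was just issued,
        #      call process_outbound() immediately; otherwise park it in
        #      self.pending_deferred_stamps keyed by message_id.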
destination_hash = lxmessage.get_destination().hash @@ -1780,10 +1804,15 @@ class LXMRouter: self.pending_outbound.append(selected_lxm) RNS.log(f"Stamp generation completed for {selected_lxm}", RNS.LOG_DEBUG) else: - RNS.log(f"Deferred stamp generation did not succeed. Failing {selected_lxm}.", RNS.LOG_ERROR) - selected_lxm.stamp_generation_failed = True - self.pending_deferred_stamps.pop(selected_message_id) - self.fail_message(selected_lxm) + if selected_lxm.state == LXMessage.CANCELLED: + RNS.log(f"Message cancelled during deferred stamp generation for {selected_lxm}.", RNS.LOG_ERROR) + selected_lxm.stamp_generation_failed = True + self.pending_deferred_stamps.pop(selected_message_id) + else: + RNS.log(f"Deferred stamp generation did not succeed. Failing {selected_lxm}.", RNS.LOG_ERROR) + selected_lxm.stamp_generation_failed = True + self.pending_deferred_stamps.pop(selected_message_id) + self.fail_message(selected_lxm) def process_outbound(self, sender = None): @@ -1820,8 +1849,14 @@ class LXMRouter: RNS.log("Propagation has occurred for "+str(lxmessage)+", removing from outbound queue", RNS.LOG_DEBUG) self.pending_outbound.remove(lxmessage) + elif lxmessage.state == LXMessage.CANCELLED: + RNS.log("Cancellation requested for "+str(lxmessage)+", removing from outbound queue", RNS.LOG_DEBUG) + self.pending_outbound.remove(lxmessage) + if lxmessage.failed_callback != None and callable(lxmessage.failed_callback): + lxmessage.failed_callback(lxmessage) + else: - RNS.log("Starting outbound processing for "+str(lxmessage)+" to "+RNS.prettyhexrep(lxmessage.get_destination().hash), RNS.LOG_DEBUG) + RNS.log("Outbound processing for "+str(lxmessage)+" to "+RNS.prettyhexrep(lxmessage.get_destination().hash), RNS.LOG_DEBUG) if lxmessage.progress == None or lxmessage.progress < 0.01: lxmessage.progress = 0.01 diff --git a/LXMF/LXMessage.py b/LXMF/LXMessage.py index 253085b..316e798 100644 --- a/LXMF/LXMessage.py +++ b/LXMF/LXMessage.py @@ -16,8 +16,9 @@ class LXMessage: SENDING = 0x02 SENT = 0x04 DELIVERED = 0x08 + CANCELLED = 0xFE FAILED = 0xFF - states = [GENERATING, OUTBOUND, SENDING, SENT, DELIVERED, FAILED] + states = [GENERATING, OUTBOUND, SENDING, SENT, DELIVERED, CANCELLED, FAILED] UNKNOWN = 0x00 PACKET = 0x01 @@ -564,22 +565,24 @@ class LXMessage: if resource.status == RNS.Resource.COMPLETE: self.__mark_delivered() else: - resource.link.teardown() - self.state = LXMessage.OUTBOUND + if self.state != LXMessage.CANCELLED: + resource.link.teardown() + self.state = LXMessage.OUTBOUND def __propagation_resource_concluded(self, resource): if resource.status == RNS.Resource.COMPLETE: self.__mark_propagated() else: - resource.link.teardown() - self.state = LXMessage.OUTBOUND + if self.state != LXMessage.CANCELLED: + resource.link.teardown() + self.state = LXMessage.OUTBOUND def __link_packet_timed_out(self, packet_receipt): - if packet_receipt: - packet_receipt.destination.teardown() - - self.state = LXMessage.OUTBOUND - + if self.state != LXMessage.CANCELLED: + if packet_receipt: + packet_receipt.destination.teardown() + + self.state = LXMessage.OUTBOUND def __update_transfer_progress(self, resource): self.progress = 0.10 + (resource.get_progress()*0.90) From d6b1b9c94dc245b3255f3d5b5830ede1b5f97ef7 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sat, 18 Jan 2025 20:11:31 +0100 Subject: [PATCH 111/161] Added ability to cancel stamp generation --- LXMF/LXMRouter.py | 24 ++++++++++++--- LXMF/LXStamper.py | 66 ++++++++++++++++++++++++++++++++++------ docs/example_receiver.py | 2 +- 3 files changed, 
78 insertions(+), 14 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 5c71f93..2ec4e8e 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -16,6 +16,8 @@ from .LXMessage import LXMessage from .Handlers import LXMFDeliveryAnnounceHandler from .Handlers import LXMFPropagationAnnounceHandler +import LXMF.LXStamper as LXStamper + class LXMRouter: MAX_DELIVERY_ATTEMPTS = 5 PROCESSING_INTERVAL = 4 @@ -1236,14 +1238,17 @@ class LXMRouter: def cancel_outbound(self, message_id): try: + if message_id in self.pending_deferred_stamps: + lxm = self.pending_deferred_stamps[message_id] + RNS.log(f"Cancelling deferred stamp generation for {lxm}", RNS.LOG_DEBUG) + lxm.state = LXMessage.CANCELLED + LXStamper.cancel_work(message_id) + lxmessage = None for lxm in self.pending_outbound: if lxm.message_id == message_id: lxmessage = lxm - if message_id in self.pending_deferred_stamps: - RNS.log(f"Cancelling deferred stamp generation for {lxmessage}", RNS.LOG_DEBUG) - if lxmessage != None: lxmessage.state = LXMessage.CANCELLED if lxmessage in self.pending_outbound: @@ -1793,6 +1798,15 @@ class LXMRouter: selected_message_id = message_id if selected_lxm != None: + if selected_lxm.state == LXMessage.CANCELLED: + RNS.log(f"Message cancelled during deferred stamp generation for {selected_lxm}.", RNS.LOG_DEBUG) + selected_lxm.stamp_generation_failed = True + self.pending_deferred_stamps.pop(selected_message_id) + if selected_lxm.failed_callback != None and callable(selected_lxm.failed_callback): + selected_lxm.failed_callback(lxmessage) + + return + RNS.log(f"Starting stamp generation for {selected_lxm}...", RNS.LOG_DEBUG) generated_stamp = selected_lxm.get_stamp() if generated_stamp: @@ -1805,9 +1819,11 @@ class LXMRouter: RNS.log(f"Stamp generation completed for {selected_lxm}", RNS.LOG_DEBUG) else: if selected_lxm.state == LXMessage.CANCELLED: - RNS.log(f"Message cancelled during deferred stamp generation for {selected_lxm}.", RNS.LOG_ERROR) + RNS.log(f"Message cancelled during deferred stamp generation for {selected_lxm}.", RNS.LOG_DEBUG) selected_lxm.stamp_generation_failed = True self.pending_deferred_stamps.pop(selected_message_id) + if selected_lxm.failed_callback != None and callable(selected_lxm.failed_callback): + selected_lxm.failed_callback(lxmessage) else: RNS.log(f"Deferred stamp generation did not succeed. 
Failing {selected_lxm}.", RNS.LOG_ERROR) selected_lxm.stamp_generation_failed = True diff --git a/LXMF/LXStamper.py b/LXMF/LXStamper.py index 2023ec0..bcfa95b 100644 --- a/LXMF/LXStamper.py +++ b/LXMF/LXStamper.py @@ -7,6 +7,8 @@ import multiprocessing WORKBLOCK_EXPAND_ROUNDS = 3000 +active_jobs = {} + def stamp_workblock(message_id): wb_st = time.time() expand_rounds = WORKBLOCK_EXPAND_ROUNDS @@ -44,23 +46,56 @@ def generate_stamp(message_id, stamp_cost): value = 0 if RNS.vendor.platformutils.is_windows() or RNS.vendor.platformutils.is_darwin(): - stamp, rounds = job_simple(stamp_cost, workblock) + stamp, rounds = job_simple(stamp_cost, workblock, message_id) elif RNS.vendor.platformutils.is_android(): - stamp, rounds = job_android(stamp_cost, workblock) + stamp, rounds = job_android(stamp_cost, workblock, message_id) else: - stamp, rounds = job_linux(stamp_cost, workblock) + stamp, rounds = job_linux(stamp_cost, workblock, message_id) duration = time.time() - start_time speed = rounds/duration - value = stamp_value(workblock, stamp) + if stamp != None: + value = stamp_value(workblock, stamp) RNS.log(f"Stamp with value {value} generated in {RNS.prettytime(duration)}, {rounds} rounds, {int(speed)} rounds per second", RNS.LOG_DEBUG) return stamp, value -def job_simple(stamp_cost, workblock): +def cancel_work(message_id): + if RNS.vendor.platformutils.is_windows() or RNS.vendor.platformutils.is_darwin(): + try: + if message_id in active_jobs: + active_jobs[message_id] = True + + except Exception as e: + RNS.log("Error while terminating stamp generation workers: {e}", RNS.LOG_ERROR) + RNS.trace_exception(e) + + elif RNS.vendor.platformutils.is_android(): + try: + if message_id in active_jobs: + active_jobs[message_id] = True + + except Exception as e: + RNS.log("Error while terminating stamp generation workers: {e}", RNS.LOG_ERROR) + RNS.trace_exception(e) + + else: + try: + if message_id in active_jobs: + stop_event = active_jobs[message_id][0] + result_queue = active_jobs[message_id][1] + stop_event.set() + result_queue.put(None) + active_jobs.pop(message_id) + + except Exception as e: + RNS.log("Error while terminating stamp generation workers: {e}", RNS.LOG_ERROR) + RNS.trace_exception(e) + +def job_simple(stamp_cost, workblock, message_id): # A simple, single-process stamp generator. # should work on any platform, and is used # as a fall-back, in case of limited multi- @@ -73,6 +108,8 @@ def job_simple(stamp_cost, workblock): pstamp = os.urandom(256//8) st = time.time() + active_jobs[message_id] = False; + def sv(s, c, w): target = 0b1<<256-c; m = w+s result = RNS.Identity.full_hash(m) @@ -81,15 +118,20 @@ def job_simple(stamp_cost, workblock): else: return True - while not sv(pstamp, stamp_cost, workblock): + while not sv(pstamp, stamp_cost, workblock) and not active_jobs[message_id]: pstamp = os.urandom(256//8); rounds += 1 if rounds % 2500 == 0: speed = rounds / (time.time()-st) RNS.log(f"Stamp generation running. 
{rounds} rounds completed so far, {int(speed)} rounds per second", RNS.LOG_DEBUG) + if active_jobs[message_id] == True: + pstamp = None + + active_jobs.pop(message_id) + return pstamp, rounds -def job_linux(stamp_cost, workblock): +def job_linux(stamp_cost, workblock, message_id): allow_kill = True stamp = None total_rounds = 0 @@ -126,6 +168,8 @@ def job_linux(stamp_cost, workblock): job_procs.append(process) process.start() + active_jobs[message_id] = [stop_event, result_queue] + stamp = result_queue.get() RNS.log("Got stamp result from worker", RNS.LOG_DEBUG) # TODO: Remove @@ -170,7 +214,7 @@ def job_linux(stamp_cost, workblock): return stamp, total_rounds -def job_android(stamp_cost, workblock): +def job_android(stamp_cost, workblock, message_id): # Semaphore support is flaky to non-existent on # Android, so we need to manually dispatch and # manage workloads here, while periodically @@ -230,10 +274,12 @@ def job_android(stamp_cost, workblock): RNS.log(f"Stamp generation worker error: {e}", RNS.LOG_ERROR) RNS.trace_exception(e) + active_jobs[message_id] = False; + RNS.log(f"Dispatching {jobs} workers for stamp generation...", RNS.LOG_DEBUG) # TODO: Remove results_dict = wm.dict() - while stamp == None: + while stamp == None and active_jobs[message_id] == False: job_procs = [] try: for pnum in range(jobs): @@ -260,6 +306,8 @@ def job_android(stamp_cost, workblock): RNS.log(f"Stamp generation job error: {e}") RNS.trace_exception(e) + active_jobs.pop(message_id) + return stamp, total_rounds if __name__ == "__main__": diff --git a/docs/example_receiver.py b/docs/example_receiver.py index 9bf1c61..20c2efe 100644 --- a/docs/example_receiver.py +++ b/docs/example_receiver.py @@ -69,4 +69,4 @@ while True: # input() # RNS.log("Requesting messages from propagation node...") - # router.request_messages_from_propagation_node(identity) \ No newline at end of file + # router.request_messages_from_propagation_node(identity) From 3948c9a1875417179b1d0ec4c1298ea0230a422f Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sat, 18 Jan 2025 21:36:08 +0100 Subject: [PATCH 112/161] Added message reject on too large transfer size --- LXMF/LXMRouter.py | 10 +++++++++- LXMF/LXMessage.py | 8 ++++++-- 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 2ec4e8e..79678c6 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -1777,7 +1777,9 @@ class LXMRouter: self.failed_outbound.append(lxmessage) - lxmessage.state = LXMessage.FAILED + if lxmessage.state != LXMessage.REJECTED: + lxmessage.state = LXMessage.FAILED + if lxmessage.failed_callback != None and callable(lxmessage.failed_callback): lxmessage.failed_callback(lxmessage) @@ -1871,6 +1873,12 @@ class LXMRouter: if lxmessage.failed_callback != None and callable(lxmessage.failed_callback): lxmessage.failed_callback(lxmessage) + elif lxmessage.state == LXMessage.REJECTED: + RNS.log("Receiver rejected "+str(lxmessage)+", removing from outbound queue", RNS.LOG_DEBUG) + self.pending_outbound.remove(lxmessage) + if lxmessage.failed_callback != None and callable(lxmessage.failed_callback): + lxmessage.failed_callback(lxmessage) + else: RNS.log("Outbound processing for "+str(lxmessage)+" to "+RNS.prettyhexrep(lxmessage.get_destination().hash), RNS.LOG_DEBUG) diff --git a/LXMF/LXMessage.py b/LXMF/LXMessage.py index 316e798..2342708 100644 --- a/LXMF/LXMessage.py +++ b/LXMF/LXMessage.py @@ -16,9 +16,10 @@ class LXMessage: SENDING = 0x02 SENT = 0x04 DELIVERED = 0x08 + REJECTED = 0xFD CANCELLED = 0xFE FAILED = 0xFF 
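    # How these terminal states are used by LXMRouter (summarised from the router
    # diffs elsewhere in this series):
    #   REJECTED  (0xFD) - the receiving end rejected the transfer resource, which the
    #                      commit above ties to too-large transfer sizes; fail_message()
    #                      preserves this state rather than overwriting it with FAILED.
    #   CANCELLED (0xFE) - the sending application cancelled the message via
    #                      LXMRouter.cancel_outbound(message_id), or stamp generation
    #                      was cancelled while still pending.
    #   FAILED    (0xFF) - delivery attempts were exhausted or deferred stamp
    #                      generation did not succeed.
    # In each case the router drops the message from its outbound queue and, if set,
    # invokes the message's failed_callback, where lxmessage.state can be inspected
    # to tell the outcomes apart.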
- states = [GENERATING, OUTBOUND, SENDING, SENT, DELIVERED, CANCELLED, FAILED] + states = [GENERATING, OUTBOUND, SENDING, SENT, DELIVERED, REJECTED, CANCELLED, FAILED] UNKNOWN = 0x00 PACKET = 0x01 @@ -565,7 +566,10 @@ class LXMessage: if resource.status == RNS.Resource.COMPLETE: self.__mark_delivered() else: - if self.state != LXMessage.CANCELLED: + if resource.status == RNS.Resource.REJECTED: + self.state = LXMessage.REJECTED + + elif self.state != LXMessage.CANCELLED: resource.link.teardown() self.state = LXMessage.OUTBOUND From 7bd3cf986d490eef2d1b0ec15e42e62e84b37a0d Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sat, 18 Jan 2025 21:39:39 +0100 Subject: [PATCH 113/161] Updated versions --- LXMF/_version.py | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/LXMF/_version.py b/LXMF/_version.py index 40e294f..906d362 100644 --- a/LXMF/_version.py +++ b/LXMF/_version.py @@ -1 +1 @@ -__version__ = "0.5.9" +__version__ = "0.6.0" diff --git a/setup.py b/setup.py index 6d25b47..cabf20a 100644 --- a/setup.py +++ b/setup.py @@ -25,6 +25,6 @@ setuptools.setup( 'lxmd=LXMF.Utilities.lxmd:main', ] }, - install_requires=['rns>=0.9.0'], + install_requires=['rns>=0.9.1'], python_requires='>=3.7', ) From 356cb6412fbfda050dd37d8a680bb6b13351b52f Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Tue, 21 Jan 2025 10:46:59 +0100 Subject: [PATCH 114/161] Optimise structure overhead --- LXMF/LXMPeer.py | 30 +++++++++++++++++------------- LXMF/LXMRouter.py | 14 +++++++++++++- 2 files changed, 30 insertions(+), 14 deletions(-) diff --git a/LXMF/LXMPeer.py b/LXMF/LXMPeer.py index a88f6da..2b10987 100644 --- a/LXMF/LXMPeer.py +++ b/LXMF/LXMPeer.py @@ -63,12 +63,13 @@ class LXMPeer: for transient_id in dictionary["handled_ids"]: if transient_id in router.propagation_entries: - peer.handled_messages[transient_id] = router.propagation_entries[transient_id] + peer.handled_messages.append(transient_id) for transient_id in dictionary["unhandled_ids"]: if transient_id in router.propagation_entries: - peer.unhandled_messages[transient_id] = router.propagation_entries[transient_id] + peer.unhandled_messages.append(transient_id) + del dictionary return peer def to_bytes(self): @@ -108,8 +109,8 @@ class LXMPeer: self.link = None self.state = LXMPeer.IDLE - self.unhandled_messages = {} - self.handled_messages = {} + self.unhandled_messages = [] + self.handled_messages = [] self.last_offer = [] self.router = router @@ -118,6 +119,7 @@ class LXMPeer: if self.identity != None: self.destination = RNS.Destination(self.identity, RNS.Destination.OUT, RNS.Destination.SINGLE, APP_NAME, "propagation") else: + self.destination = None RNS.log(f"Could not recall identity for LXMF propagation peer {RNS.prettyhexrep(self.destination_hash)}, will retry identity resolution on next sync", RNS.LOG_WARNING) def sync(self): @@ -171,7 +173,7 @@ class LXMPeer: for transient_id in purged_ids: RNS.log("Dropping unhandled message "+RNS.prettyhexrep(transient_id)+" for peer "+RNS.prettyhexrep(self.destination_hash)+" since it no longer exists in the message store.", RNS.LOG_DEBUG) - self.unhandled_messages.pop(transient_id) + self.unhandled_messages.remove(transient_id) unhandled_entries.sort(key=lambda e: e[1], reverse=False) per_message_overhead = 16 # Really only 2 bytes, but set a bit higher for now @@ -189,7 +191,7 @@ class LXMPeer: RNS.log("Sending sync request to peer "+str(self.destination), RNS.LOG_DEBUG) self.last_offer = unhandled_ids - self.link.request(LXMPeer.OFFER_REQUEST_PATH, self.last_offer, 
response_callback=self.offer_response, failed_callback=self.request_failed) + self.link.request(LXMPeer.OFFER_REQUEST_PATH, unhandled_ids, response_callback=self.offer_response, failed_callback=self.request_failed) self.state = LXMPeer.REQUEST_SENT else: @@ -226,13 +228,14 @@ class LXMPeer: # Peer already has all advertised messages for transient_id in self.last_offer: if transient_id in self.unhandled_messages: - self.handled_messages[transient_id] = self.unhandled_messages.pop(transient_id) + self.handled_messages.append(transient_id) + self.unhandled_messages.remove(transient_id) elif response == True: # Peer wants all advertised messages for transient_id in self.last_offer: - wanted_messages.append(self.unhandled_messages[transient_id]) + wanted_messages.append(self.router.propagation_entries[transient_id]) wanted_message_ids.append(transient_id) else: @@ -242,10 +245,11 @@ class LXMPeer: # already received it from another peer. if not transient_id in response: if transient_id in self.unhandled_messages: - self.handled_messages[transient_id] = self.unhandled_messages.pop(transient_id) + self.handled_messages.append(transient_id) + self.unhandled_messages.remove(transient_id) for transient_id in response: - wanted_messages.append(self.unhandled_messages[transient_id]) + wanted_messages.append(self.router.propagation_entries[transient_id]) wanted_message_ids.append(transient_id) if len(wanted_messages) > 0: @@ -288,8 +292,8 @@ class LXMPeer: def resource_concluded(self, resource): if resource.status == RNS.Resource.COMPLETE: for transient_id in resource.transferred_messages: - message = self.unhandled_messages.pop(transient_id) - self.handled_messages[transient_id] = message + self.handled_messages.append(transient_id) + self.unhandled_messages.remove(transient_id) if self.link != None: self.link.teardown() @@ -330,7 +334,7 @@ class LXMPeer: def handle_message(self, transient_id): if not transient_id in self.handled_messages and not transient_id in self.unhandled_messages: - self.unhandled_messages[transient_id] = self.router.propagation_entries[transient_id] + self.unhandled_messages.append(transient_id) def __str__(self): if self.destination_hash: diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 79678c6..a19f401 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -1,5 +1,6 @@ import os import time +import math import random import base64 import atexit @@ -427,6 +428,8 @@ class LXMRouter: os.makedirs(self.messagepath) self.propagation_entries = {} + + st = time.time(); RNS.log("Indexing messagestore...", RNS.LOG_NOTICE) for filename in os.listdir(self.messagepath): components = filename.split("_") if len(components) == 2: @@ -452,9 +455,13 @@ class LXMRouter: except Exception as e: RNS.log("Could not read LXM from message store. 
The contained exception was: "+str(e), RNS.LOG_ERROR) + et = time.time(); RNS.log(f"Indexed {len(self.propagation_entries)} messages in {RNS.prettytime(et-st)}, {math.floor(len(self.propagation_entries)/(et-st))} msgs/s", RNS.LOG_NOTICE) + st = time.time(); RNS.log("Loading propagation node peers...", RNS.LOG_NOTICE) + if os.path.isfile(self.storagepath+"/peers"): peers_file = open(self.storagepath+"/peers", "rb") peers_data = peers_file.read() + peers_file.close() if len(peers_data) > 0: serialised_peers = msgpack.unpackb(peers_data) @@ -468,8 +475,13 @@ class LXMRouter: lim_str = ", "+RNS.prettysize(peer.propagation_transfer_limit*1000)+" transfer limit" RNS.log("Loaded peer "+RNS.prettyhexrep(peer.destination_hash)+" with "+str(len(peer.unhandled_messages))+" unhandled messages"+lim_str, RNS.LOG_DEBUG) else: + del peer RNS.log("Peer "+RNS.prettyhexrep(peer.destination_hash)+" could not be loaded, because its identity could not be recalled. Dropping peer.", RNS.LOG_DEBUG) + del serialised_peers + del peers_data + + RNS.log(f"Loaded {len(self.peers)} peers in {RNS.prettytime(time.time()-st)}", RNS.LOG_NOTICE) self.propagation_node = True self.propagation_destination.set_link_established_callback(self.propagation_link_established) @@ -1676,7 +1688,7 @@ class LXMRouter: if remote_hash != None and remote_hash in self.peers: transient_id = RNS.Identity.full_hash(lxmf_data) peer = self.peers[remote_hash] - peer.handled_messages[transient_id] = [transient_id, remote_timebase, lxmf_data] + peer.handled_messages.append(transient_id) self.lxmf_propagation(lxmf_data) else: From 7701f326d99b20bfed3d64c3a80809e02755a06f Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Tue, 21 Jan 2025 16:33:39 +0100 Subject: [PATCH 115/161] Memory optimisations --- LXMF/LXMPeer.py | 120 +++++++++++++++++++++++++++++++------ LXMF/LXMRouter.py | 149 +++++++++++++++++++++++++++++++++------------- 2 files changed, 209 insertions(+), 60 deletions(-) diff --git a/LXMF/LXMPeer.py b/LXMF/LXMPeer.py index 2b10987..f4c522c 100644 --- a/LXMF/LXMPeer.py +++ b/LXMF/LXMPeer.py @@ -38,11 +38,16 @@ class LXMPeer: @staticmethod def from_bytes(peer_bytes, router): dictionary = msgpack.unpackb(peer_bytes) + peer_destination_hash = dictionary["destination_hash"] + peer_peering_timebase = dictionary["peering_timebase"] + peer_alive = dictionary["alive"] + peer_last_heard = dictionary["last_heard"] + + peer = LXMPeer(router, peer_destination_hash) + peer.peering_timebase = peer_peering_timebase + peer.alive = peer_alive + peer.last_heard = peer_last_heard - peer = LXMPeer(router, dictionary["destination_hash"]) - peer.peering_timebase = dictionary["peering_timebase"] - peer.alive = dictionary["alive"] - peer.last_heard = dictionary["last_heard"] if "link_establishment_rate" in dictionary: peer.link_establishment_rate = dictionary["link_establishment_rate"] else: @@ -61,13 +66,22 @@ class LXMPeer: else: peer.propagation_transfer_limit = None + hm_count = 0 for transient_id in dictionary["handled_ids"]: if transient_id in router.propagation_entries: - peer.handled_messages.append(transient_id) + peer.add_handled_message(transient_id) + hm_count += 1 + um_count = 0 for transient_id in dictionary["unhandled_ids"]: if transient_id in router.propagation_entries: - peer.unhandled_messages.append(transient_id) + peer.add_unhandled_message(transient_id) + um_count += 1 + + peer._hm_count = hm_count + peer._um_count = um_count + peer._hm_counts_synced = True + peer._um_counts_synced = True del dictionary return peer @@ -93,7 +107,10 @@ class 
LXMPeer: dictionary["handled_ids"] = handled_ids dictionary["unhandled_ids"] = unhandled_ids - return msgpack.packb(dictionary) + peer_bytes = msgpack.packb(dictionary) + del dictionary + + return peer_bytes def __init__(self, router, destination_hash): self.alive = False @@ -106,11 +123,14 @@ class LXMPeer: self.sync_transfer_rate = 0 self.propagation_transfer_limit = None + self._hm_count = 0 + self._um_count = 0 + self._hm_counts_synced = False + self._um_counts_synced = False + self.link = None self.state = LXMPeer.IDLE - self.unhandled_messages = [] - self.handled_messages = [] self.last_offer = [] self.router = router @@ -173,7 +193,7 @@ class LXMPeer: for transient_id in purged_ids: RNS.log("Dropping unhandled message "+RNS.prettyhexrep(transient_id)+" for peer "+RNS.prettyhexrep(self.destination_hash)+" since it no longer exists in the message store.", RNS.LOG_DEBUG) - self.unhandled_messages.remove(transient_id) + self.remove_unhandled_message(transient_id) unhandled_entries.sort(key=lambda e: e[1], reverse=False) per_message_overhead = 16 # Really only 2 bytes, but set a bit higher for now @@ -228,8 +248,8 @@ class LXMPeer: # Peer already has all advertised messages for transient_id in self.last_offer: if transient_id in self.unhandled_messages: - self.handled_messages.append(transient_id) - self.unhandled_messages.remove(transient_id) + self.add_handled_message(transient_id) + self.remove_unhandled_message(transient_id) elif response == True: @@ -244,9 +264,8 @@ class LXMPeer: # If the peer did not want the message, it has # already received it from another peer. if not transient_id in response: - if transient_id in self.unhandled_messages: - self.handled_messages.append(transient_id) - self.unhandled_messages.remove(transient_id) + self.add_handled_message(transient_id) + self.remove_unhandled_message(transient_id) for transient_id in response: wanted_messages.append(self.router.propagation_entries[transient_id]) @@ -292,8 +311,8 @@ class LXMPeer: def resource_concluded(self, resource): if resource.status == RNS.Resource.COMPLETE: for transient_id in resource.transferred_messages: - self.handled_messages.append(transient_id) - self.unhandled_messages.remove(transient_id) + self.add_handled_message(transient_id) + self.remove_unhandled_message(transient_id) if self.link != None: self.link.teardown() @@ -332,9 +351,72 @@ class LXMPeer: self.link = None self.state = LXMPeer.IDLE - def handle_message(self, transient_id): + def new_propagation_message(self, transient_id): if not transient_id in self.handled_messages and not transient_id in self.unhandled_messages: - self.unhandled_messages.append(transient_id) + self.add_unhandled_message(transient_id) + + @property + def handled_messages(self): + pes = self.router.propagation_entries.copy() + hm = list(filter(lambda tid: self.destination_hash in self.router.propagation_entries[tid][4], pes)) + self._hm_count = len(hm); del pes + return hm + + @property + def unhandled_messages(self): + pes = self.router.propagation_entries.copy() + um = list(filter(lambda tid: self.destination_hash in self.router.propagation_entries[tid][5], pes)) + self._um_count = len(um); del pes + return um + + @property + def handled_message_count(self): + if not self._hm_counts_synced: + self._update_counts() + + return self._hm_count + + @property + def unhandled_message_count(self): + if not self._um_counts_synced: + self._update_counts() + + return self._um_count + + def _update_counts(self): + if not self._hm_counts_synced: + RNS.log("UPDATE HM COUNTS") + 
hm = self.handled_messages; del hm + self._hm_counts_synced = True + + if not self._um_counts_synced: + RNS.log("UPDATE UM COUNTS") + um = self.unhandled_messages; del um + self._um_counts_synced = True + + def add_handled_message(self, transient_id): + if transient_id in self.router.propagation_entries: + if not self.destination_hash in self.router.propagation_entries[transient_id][4]: + self.router.propagation_entries[transient_id][4].append(self.destination_hash) + self._hm_counts_synced = False + + def add_unhandled_message(self, transient_id): + if transient_id in self.router.propagation_entries: + if not self.destination_hash in self.router.propagation_entries[transient_id][5]: + self.router.propagation_entries[transient_id][5].append(self.destination_hash) + self._um_count += 1 + + def remove_handled_message(self, transient_id): + if transient_id in self.router.propagation_entries: + if self.destination_hash in self.router.propagation_entries[transient_id][4]: + self.router.propagation_entries[transient_id][4].remove(self.destination_hash) + self._hm_counts_synced = False + + def remove_unhandled_message(self, transient_id): + if transient_id in self.router.propagation_entries: + if self.destination_hash in self.router.propagation_entries[transient_id][5]: + self.router.propagation_entries[transient_id][5].remove(self.destination_hash) + self._um_counts_synced = False def __str__(self): if self.destination_hash: diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index a19f401..9163824 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -1,9 +1,11 @@ import os +import sys import time import math import random import base64 import atexit +import signal import threading import RNS @@ -94,6 +96,9 @@ class LXMRouter: self.outbound_propagation_node = None self.outbound_propagation_link = None + if delivery_limit == None: + delivery_limit = LXMRouter.DELIVERY_LIMIT + self.message_storage_limit = None self.information_storage_limit = None self.propagation_per_transfer_limit = propagation_limit @@ -117,6 +122,7 @@ class LXMRouter: self.cost_file_lock = threading.Lock() self.ticket_file_lock = threading.Lock() self.stamp_gen_lock = threading.Lock() + self.exit_handler_running = False if identity == None: identity = RNS.Identity() @@ -221,6 +227,8 @@ class LXMRouter: RNS.log("Could not load outbound stamp costs from storage. The contained exception was: "+str(e), RNS.LOG_ERROR) atexit.register(self.exit_handler) + signal.signal(signal.SIGINT, self.sigint_handler) + signal.signal(signal.SIGTERM, self.sigterm_handler) job_thread = threading.Thread(target=self.jobloop) job_thread.setDaemon(True) @@ -446,17 +454,19 @@ class LXMRouter: file.close() self.propagation_entries[transient_id] = [ - destination_hash, - filepath, - received, - msg_size, + destination_hash, # 0: Destination hash + filepath, # 1: Storage location + received, # 2: Receive timestamp + msg_size, # 3: Message size + [], # 4: Handled peers + [], # 5: Unhandled peers ] except Exception as e: RNS.log("Could not read LXM from message store. 
The contained exception was: "+str(e), RNS.LOG_ERROR) et = time.time(); RNS.log(f"Indexed {len(self.propagation_entries)} messages in {RNS.prettytime(et-st)}, {math.floor(len(self.propagation_entries)/(et-st))} msgs/s", RNS.LOG_NOTICE) - st = time.time(); RNS.log("Loading propagation node peers...", RNS.LOG_NOTICE) + st = time.time(); RNS.log("Rebuilding peer synchronisation states...", RNS.LOG_NOTICE) if os.path.isfile(self.storagepath+"/peers"): peers_file = open(self.storagepath+"/peers", "rb") @@ -465,23 +475,25 @@ class LXMRouter: if len(peers_data) > 0: serialised_peers = msgpack.unpackb(peers_data) + del peers_data - for serialised_peer in serialised_peers: + while len(serialised_peers) > 0: + serialised_peer = serialised_peers.pop() peer = LXMPeer.from_bytes(serialised_peer, self) + del serialised_peer if peer.identity != None: self.peers[peer.destination_hash] = peer lim_str = ", no transfer limit" if peer.propagation_transfer_limit != None: lim_str = ", "+RNS.prettysize(peer.propagation_transfer_limit*1000)+" transfer limit" - RNS.log("Loaded peer "+RNS.prettyhexrep(peer.destination_hash)+" with "+str(len(peer.unhandled_messages))+" unhandled messages"+lim_str, RNS.LOG_DEBUG) + RNS.log("Rebuilt peer "+RNS.prettyhexrep(peer.destination_hash)+" with "+str(peer.unhandled_message_count)+" unhandled messages"+lim_str, RNS.LOG_DEBUG) else: - del peer RNS.log("Peer "+RNS.prettyhexrep(peer.destination_hash)+" could not be loaded, because its identity could not be recalled. Dropping peer.", RNS.LOG_DEBUG) + del peer del serialised_peers - del peers_data - RNS.log(f"Loaded {len(self.peers)} peers in {RNS.prettytime(time.time()-st)}", RNS.LOG_NOTICE) + RNS.log(f"Rebuilt synchronisation state for {len(self.peers)} peers in {RNS.prettytime(time.time()-st)}", RNS.LOG_NOTICE) self.propagation_node = True self.propagation_destination.set_link_established_callback(self.propagation_link_established) @@ -602,36 +614,37 @@ class LXMRouter: JOB_STORE_INTERVAL = 120 JOB_PEERSYNC_INTERVAL = 12 def jobs(self): - self.processing_count += 1 + if not self.exit_handler_running: + self.processing_count += 1 - if self.processing_count % LXMRouter.JOB_OUTBOUND_INTERVAL == 0: - self.process_outbound() + if self.processing_count % LXMRouter.JOB_OUTBOUND_INTERVAL == 0: + self.process_outbound() - if self.processing_count % LXMRouter.JOB_STAMPS_INTERVAL == 0: - threading.Thread(target=self.process_deferred_stamps, daemon=True).start() + if self.processing_count % LXMRouter.JOB_STAMPS_INTERVAL == 0: + threading.Thread(target=self.process_deferred_stamps, daemon=True).start() - if self.processing_count % LXMRouter.JOB_LINKS_INTERVAL == 0: - self.clean_links() + if self.processing_count % LXMRouter.JOB_LINKS_INTERVAL == 0: + self.clean_links() - if self.processing_count % LXMRouter.JOB_TRANSIENT_INTERVAL == 0: - self.clean_transient_id_caches() + if self.processing_count % LXMRouter.JOB_TRANSIENT_INTERVAL == 0: + self.clean_transient_id_caches() - if self.processing_count % LXMRouter.JOB_STORE_INTERVAL == 0: - self.clean_message_store() + if self.processing_count % LXMRouter.JOB_STORE_INTERVAL == 0: + self.clean_message_store() - if self.processing_count % LXMRouter.JOB_PEERSYNC_INTERVAL == 0: - self.sync_peers() + if self.processing_count % LXMRouter.JOB_PEERSYNC_INTERVAL == 0: + self.sync_peers() def jobloop(self): while (True): # TODO: Improve this to scheduling, so manual # triggers can delay next run - try: self.jobs() except Exception as e: RNS.log("An error ocurred while running LXMF Router jobs.", 
RNS.LOG_ERROR) RNS.log("The contained exception was: "+str(e), RNS.LOG_ERROR) + RNS.trace_exception(e) time.sleep(LXMRouter.PROCESSING_INTERVAL) def clean_links(self): @@ -888,22 +901,24 @@ class LXMRouter: def save_locally_delivered_transient_ids(self): try: - if not os.path.isdir(self.storagepath): - os.makedirs(self.storagepath) + if len(self.locally_delivered_transient_ids) > 0: + if not os.path.isdir(self.storagepath): + os.makedirs(self.storagepath) - with open(self.storagepath+"/local_deliveries", "wb") as locally_delivered_file: - locally_delivered_file.write(msgpack.packb(self.locally_delivered_transient_ids)) + with open(self.storagepath+"/local_deliveries", "wb") as locally_delivered_file: + locally_delivered_file.write(msgpack.packb(self.locally_delivered_transient_ids)) except Exception as e: RNS.log("Could not save locally delivered message ID cache to storage. The contained exception was: "+str(e), RNS.LOG_ERROR) def save_locally_processed_transient_ids(self): try: - if not os.path.isdir(self.storagepath): - os.makedirs(self.storagepath) + if len(self.locally_processed_transient_ids) > 0: + if not os.path.isdir(self.storagepath): + os.makedirs(self.storagepath) - with open(self.storagepath+"/locally_processed", "wb") as locally_processed_file: - locally_processed_file.write(msgpack.packb(self.locally_processed_transient_ids)) + with open(self.storagepath+"/locally_processed", "wb") as locally_processed_file: + locally_processed_file.write(msgpack.packb(self.locally_processed_transient_ids)) except Exception as e: RNS.log("Could not save locally processed transient ID cache to storage. The contained exception was: "+str(e), RNS.LOG_ERROR) @@ -1001,10 +1016,43 @@ class LXMRouter: RNS.log(f"An error occurred while reloading available tickets from storage: {e}", RNS.LOG_ERROR) def exit_handler(self): + if self.exit_handler_running: + return + + self.exit_handler_running = True + + RNS.log("Tearing down delivery destinations...", RNS.LOG_NOTICE) + for destination_hash in self.delivery_destinations: + delivery_destination = self.delivery_destinations[destination_hash] + delivery_destination.set_packet_callback(None) + delivery_destination.set_link_established_callback(None) + for link in delivery_destination.links: + try: + if link.status == RNS.Link.ACTIVE: + link.teardown() + except Exception as e: + RNS.log(f"Error while tearing down delivery link: {e}", RNS.LOG_ERROR) + + if self.propagation_node: + RNS.log("Tearing down propagation node destination...", RNS.LOG_NOTICE) + self.propagation_destination.set_link_established_callback(None) + self.propagation_destination.set_packet_callback(None) + self.propagation_destination.deregister_request_handler(LXMPeer.OFFER_REQUEST_PATH) + self.propagation_destination.deregister_request_handler(LXMPeer.MESSAGE_GET_PATH) + for link in self.active_propagation_links: + try: + if link.status == RNS.Link.ACTIVE: + link.teardown() + except Exception as e: + RNS.log(f"Error while tearing down propagation link: {e}", RNS.LOG_ERROR) + + RNS.log("Persisting LXMF state data to storage...", RNS.LOG_NOTICE) if self.propagation_node: try: + st = time.time(); RNS.log("Saving peer synchronisation states to storage...", RNS.LOG_NOTICE) serialised_peers = [] - for peer_id in self.peers: + peer_dict = self.peers.copy() + for peer_id in peer_dict: peer = self.peers[peer_id] serialised_peers.append(peer.to_bytes()) @@ -1012,7 +1060,7 @@ class LXMRouter: peers_file.write(msgpack.packb(serialised_peers)) peers_file.close() - RNS.log("Saved 
"+str(len(serialised_peers))+" peers to storage", RNS.LOG_DEBUG) + RNS.log(f"Saved {len(serialised_peers)} peers to storage in {RNS.prettytime(time.time()-st)}", RNS.LOG_NOTICE) except Exception as e: RNS.log("Could not save propagation node peers to storage. The contained exception was: "+str(e), RNS.LOG_ERROR) @@ -1020,6 +1068,20 @@ class LXMRouter: self.save_locally_delivered_transient_ids() self.save_locally_processed_transient_ids() + def sigint_handler(self, signal, frame): + if not self.exit_handler_running: + RNS.log("Received SIGINT, shutting down now!", RNS.LOG_WARNING) + sys.exit(0) + else: + RNS.log("Received SIGINT, but exit handler is running, keeping process alive until storage persist is complete", RNS.LOG_WARNING) + + def sigterm_handler(self, signal, frame): + if not self.exit_handler_running: + RNS.log("Received SIGTERM, shutting down now!", RNS.LOG_WARNING) + sys.exit(0) + else: + RNS.log("Received SIGTERM, but exit handler is running, keeping process alive until storage persist is complete", RNS.LOG_WARNING) + def __str__(self): return "" @@ -1685,19 +1747,23 @@ class LXMRouter: messages = data[1] for lxmf_data in messages: + peer = None + transient_id = RNS.Identity.full_hash(lxmf_data) if remote_hash != None and remote_hash in self.peers: - transient_id = RNS.Identity.full_hash(lxmf_data) peer = self.peers[remote_hash] - peer.handled_messages.append(transient_id) - self.lxmf_propagation(lxmf_data) + self.lxmf_propagation(lxmf_data, from_peer=peer) + if peer != None: + peer.add_handled_message(transient_id) + else: RNS.log("Invalid data structure received at propagation destination, ignoring", RNS.LOG_DEBUG) except Exception as e: RNS.log("Error while unpacking received propagation resource", RNS.LOG_DEBUG) + RNS.trace_exception(e) - def lxmf_propagation(self, lxmf_data, signal_local_delivery=None, signal_duplicate=None, is_paper_message=False): + def lxmf_propagation(self, lxmf_data, signal_local_delivery=None, signal_duplicate=None, is_paper_message=False, from_peer=None): no_stamp_enforcement = False if is_paper_message: no_stamp_enforcement = True @@ -1708,7 +1774,6 @@ class LXMRouter: if not transient_id in self.propagation_entries and not transient_id in self.locally_processed_transient_ids: received = time.time() - propagation_entry = [transient_id, received, lxmf_data] destination_hash = lxmf_data[:LXMessage.DESTINATION_LENGTH] self.locally_processed_transient_ids[transient_id] = received @@ -1732,12 +1797,13 @@ class LXMRouter: msg_file.write(lxmf_data) msg_file.close() - self.propagation_entries[transient_id] = [destination_hash, file_path, time.time(), len(lxmf_data)] + self.propagation_entries[transient_id] = [destination_hash, file_path, time.time(), len(lxmf_data), [], []] RNS.log("Received propagated LXMF message "+RNS.prettyhexrep(transient_id)+", adding to peer distribution queues...", RNS.LOG_DEBUG) for peer_id in self.peers: peer = self.peers[peer_id] - peer.handle_message(transient_id) + if peer != from_peer: + peer.new_propagation_message(transient_id) else: # TODO: Add message to sneakernet queues when implemented @@ -1757,6 +1823,7 @@ class LXMRouter: except Exception as e: RNS.log("Could not assemble propagated LXMF message from received data", RNS.LOG_DEBUG) RNS.log("The contained exception was: "+str(e), RNS.LOG_DEBUG) + RNS.trace_exception(e) return False def ingest_lxm_uri(self, uri, signal_local_delivery=None, signal_duplicate=None): From 44d1d992f8a9be4b81c5f6b302f6f48b1e46e161 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Tue, 21 Jan 
2025 16:34:00 +0100 Subject: [PATCH 116/161] Updated version --- LXMF/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LXMF/_version.py b/LXMF/_version.py index 906d362..43c4ab0 100644 --- a/LXMF/_version.py +++ b/LXMF/_version.py @@ -1 +1 @@ -__version__ = "0.6.0" +__version__ = "0.6.1" From bfed126a7c17fd90551204afd0bbab3fac1441f9 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Tue, 21 Jan 2025 16:44:24 +0100 Subject: [PATCH 117/161] Memory optimisations --- LXMF/LXMPeer.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/LXMF/LXMPeer.py b/LXMF/LXMPeer.py index f4c522c..d133027 100644 --- a/LXMF/LXMPeer.py +++ b/LXMF/LXMPeer.py @@ -358,14 +358,14 @@ class LXMPeer: @property def handled_messages(self): pes = self.router.propagation_entries.copy() - hm = list(filter(lambda tid: self.destination_hash in self.router.propagation_entries[tid][4], pes)) + hm = list(filter(lambda tid: self.destination_hash in pes[tid][4], pes)) self._hm_count = len(hm); del pes return hm @property def unhandled_messages(self): pes = self.router.propagation_entries.copy() - um = list(filter(lambda tid: self.destination_hash in self.router.propagation_entries[tid][5], pes)) + um = list(filter(lambda tid: self.destination_hash in pes[tid][5], pes)) self._um_count = len(um); del pes return um From 1c9c74410790188db976dbecdff3b994d33ac5d9 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Tue, 21 Jan 2025 16:51:25 +0100 Subject: [PATCH 118/161] Memory optimisations --- LXMF/LXMPeer.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/LXMF/LXMPeer.py b/LXMF/LXMPeer.py index d133027..add54da 100644 --- a/LXMF/LXMPeer.py +++ b/LXMF/LXMPeer.py @@ -360,6 +360,7 @@ class LXMPeer: pes = self.router.propagation_entries.copy() hm = list(filter(lambda tid: self.destination_hash in pes[tid][4], pes)) self._hm_count = len(hm); del pes + self._hm_counts_synced = True return hm @property @@ -367,6 +368,7 @@ class LXMPeer: pes = self.router.propagation_entries.copy() um = list(filter(lambda tid: self.destination_hash in pes[tid][5], pes)) self._um_count = len(um); del pes + self._um_counts_synced = True return um @property @@ -387,12 +389,10 @@ class LXMPeer: if not self._hm_counts_synced: RNS.log("UPDATE HM COUNTS") hm = self.handled_messages; del hm - self._hm_counts_synced = True if not self._um_counts_synced: RNS.log("UPDATE UM COUNTS") um = self.unhandled_messages; del um - self._um_counts_synced = True def add_handled_message(self, transient_id): if transient_id in self.router.propagation_entries: From 1430b1ce90b989e9627d07841b5634e6f3a1f8e1 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Tue, 21 Jan 2025 20:20:39 +0100 Subject: [PATCH 119/161] Enqueue and batch process distribution queue mappings --- LXMF/LXMPeer.py | 40 ++++++++++++++++++++++++++++++----- LXMF/LXMRouter.py | 53 +++++++++++++++++++++++++++++++++++++++-------- 2 files changed, 79 insertions(+), 14 deletions(-) diff --git a/LXMF/LXMPeer.py b/LXMF/LXMPeer.py index add54da..74a40c7 100644 --- a/LXMF/LXMPeer.py +++ b/LXMF/LXMPeer.py @@ -4,6 +4,7 @@ import time import RNS import RNS.vendor.umsgpack as msgpack +from collections import deque from .LXMF import APP_NAME class LXMPeer: @@ -122,6 +123,8 @@ class LXMPeer: self.link_establishment_rate = 0 self.sync_transfer_rate = 0 self.propagation_transfer_limit = None + self.handled_messages_queue = deque() + self.unhandled_messages_queue = deque() self._hm_count = 0 self._um_count = 0 @@ -351,9 +354,38 @@ class LXMPeer: self.link = None self.state = 
LXMPeer.IDLE - def new_propagation_message(self, transient_id): - if not transient_id in self.handled_messages and not transient_id in self.unhandled_messages: - self.add_unhandled_message(transient_id) + def queued_items(self): + return len(self.handled_messages_queue) > 0 or len(self.unhandled_messages_queue) > 0 + + def queue_unhandled_message(self, transient_id): + self.unhandled_messages_queue.append(transient_id) + + def queue_handled_message(self, transient_id): + self.handled_messages_queue.append(transient_id) + + def process_queues(self): + if len(self.unhandled_messages_queue) > 0 or len(self.handled_messages_queue) > 0: + # TODO: Remove debug + # st = time.time(); lu = len(self.unhandled_messages_queue); lh = len(self.handled_messages_queue) + + handled_messages = self.handled_messages + unhandled_messages = self.unhandled_messages + + while len(self.handled_messages_queue) > 0: + transient_id = self.handled_messages_queue.pop() + if not transient_id in handled_messages: + self.add_handled_message(transient_id) + if transient_id in unhandled_messages: + self.remove_unhandled_message(transient_id) + + while len(self.unhandled_messages_queue) > 0: + transient_id = self.unhandled_messages_queue.pop() + if not transient_id in handled_messages and not transient_id in unhandled_messages: + self.add_unhandled_message(transient_id) + + del handled_messages, unhandled_messages + # TODO: Remove debug + # RNS.log(f"{self} processed {lh}/{lu} in {RNS.prettytime(time.time()-st)}") @property def handled_messages(self): @@ -387,11 +419,9 @@ class LXMPeer: def _update_counts(self): if not self._hm_counts_synced: - RNS.log("UPDATE HM COUNTS") hm = self.handled_messages; del hm if not self._um_counts_synced: - RNS.log("UPDATE UM COUNTS") um = self.unhandled_messages; del um def add_handled_message(self, transient_id): diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 9163824..1e62914 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -8,6 +8,8 @@ import atexit import signal import threading +from collections import deque + import RNS import RNS.vendor.umsgpack as msgpack @@ -143,6 +145,8 @@ class LXMRouter: self.peers = {} self.propagation_entries = {} + self.peer_distribution_queue = deque() + RNS.Transport.register_announce_handler(LXMFDeliveryAnnounceHandler(self)) RNS.Transport.register_announce_handler(LXMFPropagationAnnounceHandler(self)) @@ -613,6 +617,7 @@ class LXMRouter: JOB_TRANSIENT_INTERVAL = 60 JOB_STORE_INTERVAL = 120 JOB_PEERSYNC_INTERVAL = 12 + JOB_PEERINGEST_INTERVAL= JOB_PEERSYNC_INTERVAL def jobs(self): if not self.exit_handler_running: self.processing_count += 1 @@ -632,6 +637,9 @@ class LXMRouter: if self.processing_count % LXMRouter.JOB_STORE_INTERVAL == 0: self.clean_message_store() + if self.processing_count % LXMRouter.JOB_PEERINGEST_INTERVAL == 0: + self.flush_queues() + if self.processing_count % LXMRouter.JOB_PEERSYNC_INTERVAL == 0: self.sync_peers() @@ -647,6 +655,17 @@ class LXMRouter: RNS.trace_exception(e) time.sleep(LXMRouter.PROCESSING_INTERVAL) + def flush_queues(self): + self.flush_peer_distribution_queue() + RNS.log("Calculating peer distribution queue mappings...", RNS.LOG_DEBUG); st = time.time() + for peer_id in self.peers.copy(): + if peer_id in self.peers: + peer = self.peers[peer_id] + if peer.queued_items(): + peer.process_queues() + + RNS.log(f"Distribution queue mapping completed in {RNS.prettytime(time.time()-st)}", RNS.LOG_DEBUG) + def clean_links(self): closed_links = [] for link_hash in self.direct_links: @@ -1047,6 +1066,7 @@ 
class LXMRouter: RNS.log("Error while tearing down propagation link: {e}", RNS.LOG_ERROR) RNS.log("Persisting LXMF state data to storage...", RNS.LOG_NOTICE) + self.flush_queues() if self.propagation_node: try: st = time.time(); RNS.log("Saving peer synchronisation states to storage...", RNS.LOG_NOTICE) @@ -1608,8 +1628,9 @@ class LXMRouter: culled_peers = [] waiting_peers = [] unresponsive_peers = [] - for peer_id in self.peers: - peer = self.peers[peer_id] + peers = self.peers.copy() + for peer_id in peers: + peer = peers[peer_id] if time.time() > peer.last_heard + LXMPeer.MAX_UNREACHABLE: culled_peers.append(peer_id) else: @@ -1754,7 +1775,7 @@ class LXMRouter: self.lxmf_propagation(lxmf_data, from_peer=peer) if peer != None: - peer.add_handled_message(transient_id) + peer.queue_handled_message(transient_id) else: RNS.log("Invalid data structure received at propagation destination, ignoring", RNS.LOG_DEBUG) @@ -1763,6 +1784,24 @@ class LXMRouter: RNS.log("Error while unpacking received propagation resource", RNS.LOG_DEBUG) RNS.trace_exception(e) + def enqueue_peer_distribution(self, transient_id, from_peer): + self.peer_distribution_queue.append([transient_id, from_peer]) + + def flush_peer_distribution_queue(self): + if len(self.peer_distribution_queue) > 0: + entries = [] + while len(self.peer_distribution_queue) > 0: + entries.append(self.peer_distribution_queue.pop()) + + for peer_id in self.peers.copy(): + if peer_id in self.peers: + peer = self.peers[peer_id] + for entry in entries: + transient_id = entry[0] + from_peer = entry[1] + if peer != from_peer: + peer.queue_unhandled_message(transient_id) + def lxmf_propagation(self, lxmf_data, signal_local_delivery=None, signal_duplicate=None, is_paper_message=False, from_peer=None): no_stamp_enforcement = False if is_paper_message: @@ -1797,13 +1836,9 @@ class LXMRouter: msg_file.write(lxmf_data) msg_file.close() - self.propagation_entries[transient_id] = [destination_hash, file_path, time.time(), len(lxmf_data), [], []] - RNS.log("Received propagated LXMF message "+RNS.prettyhexrep(transient_id)+", adding to peer distribution queues...", RNS.LOG_DEBUG) - for peer_id in self.peers: - peer = self.peers[peer_id] - if peer != from_peer: - peer.new_propagation_message(transient_id) + self.propagation_entries[transient_id] = [destination_hash, file_path, time.time(), len(lxmf_data), [], []] + self.enqueue_peer_distribution(transient_id, from_peer) else: # TODO: Add message to sneakernet queues when implemented From c2a08ef35588ccd512a7ea7c9898c83e5fd2864e Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Tue, 21 Jan 2025 20:44:11 +0100 Subject: [PATCH 120/161] Enqueue and batch process distribution queue mappings --- LXMF/LXMRouter.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 1e62914..ee1dca8 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -656,15 +656,16 @@ class LXMRouter: time.sleep(LXMRouter.PROCESSING_INTERVAL) def flush_queues(self): - self.flush_peer_distribution_queue() - RNS.log("Calculating peer distribution queue mappings...", RNS.LOG_DEBUG); st = time.time() - for peer_id in self.peers.copy(): - if peer_id in self.peers: - peer = self.peers[peer_id] - if peer.queued_items(): - peer.process_queues() + if len(self.peers) > 0: + self.flush_peer_distribution_queue() + RNS.log("Calculating peer distribution queue mappings...", RNS.LOG_DEBUG); st = time.time() + for peer_id in self.peers.copy(): + if peer_id in self.peers: + peer = 
self.peers[peer_id] + if peer.queued_items(): + peer.process_queues() - RNS.log(f"Distribution queue mapping completed in {RNS.prettytime(time.time()-st)}", RNS.LOG_DEBUG) + RNS.log(f"Distribution queue mapping completed in {RNS.prettytime(time.time()-st)}", RNS.LOG_DEBUG) def clean_links(self): closed_links = [] From e69da2ed2a29b33af0acda059aa9a624b475a6e7 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Wed, 22 Jan 2025 01:37:09 +0100 Subject: [PATCH 121/161] Added static peers and peering limit --- LXMF/Handlers.py | 14 ++++-- LXMF/LXMPeer.py | 59 ++++++++++++++++++++--- LXMF/LXMRouter.py | 107 +++++++++++++++++++++++++++++++++++------ LXMF/Utilities/lxmd.py | 29 +++++++++-- 4 files changed, 179 insertions(+), 30 deletions(-) diff --git a/LXMF/Handlers.py b/LXMF/Handlers.py index 7420ea5..22c6cd3 100644 --- a/LXMF/Handlers.py +++ b/LXMF/Handlers.py @@ -51,12 +51,16 @@ class LXMFPropagationAnnounceHandler: except: propagation_transfer_limit = None - if data[0] == True: - if RNS.Transport.hops_to(destination_hash) <= self.lxmrouter.autopeer_maxdepth: - self.lxmrouter.peer(destination_hash, node_timebase, propagation_transfer_limit) + if destination_hash in self.lxmrouter.static_peers: + self.lxmrouter.peer(destination_hash, node_timebase, propagation_transfer_limit) - elif data[0] == False: - self.lxmrouter.unpeer(destination_hash, node_timebase) + else: + if data[0] == True: + if RNS.Transport.hops_to(destination_hash) <= self.lxmrouter.autopeer_maxdepth: + self.lxmrouter.peer(destination_hash, node_timebase, propagation_transfer_limit) + + elif data[0] == False: + self.lxmrouter.unpeer(destination_hash, node_timebase) except Exception as e: RNS.log("Error while evaluating propagation node announce, ignoring announce.", RNS.LOG_DEBUG) diff --git a/LXMF/LXMPeer.py b/LXMF/LXMPeer.py index 74a40c7..ec0cfe2 100644 --- a/LXMF/LXMPeer.py +++ b/LXMF/LXMPeer.py @@ -66,6 +66,31 @@ class LXMPeer: peer.propagation_transfer_limit = None else: peer.propagation_transfer_limit = None + + if "offered" in dictionary: + peer.offered = dictionary["offered"] + else: + peer.offered = 0 + + if "outgoing" in dictionary: + peer.outgoing = dictionary["outgoing"] + else: + peer.outgoing = 0 + + if "incoming" in dictionary: + peer.incoming = dictionary["incoming"] + else: + peer.incoming = 0 + + if "rx_bytes" in dictionary: + peer.rx_bytes = dictionary["rx_bytes"] + else: + peer.rx_bytes = 0 + + if "tx_bytes" in dictionary: + peer.tx_bytes = dictionary["tx_bytes"] + else: + peer.tx_bytes = 0 hm_count = 0 for transient_id in dictionary["handled_ids"]: @@ -96,6 +121,11 @@ class LXMPeer: dictionary["link_establishment_rate"] = self.link_establishment_rate dictionary["sync_transfer_rate"] = self.sync_transfer_rate dictionary["propagation_transfer_limit"] = self.propagation_transfer_limit + dictionary["offered"] = self.offered + dictionary["outgoing"] = self.outgoing + dictionary["incoming"] = self.incoming + dictionary["rx_bytes"] = self.rx_bytes + dictionary["tx_bytes"] = self.tx_bytes handled_ids = [] for transient_id in self.handled_messages: @@ -126,6 +156,12 @@ class LXMPeer: self.handled_messages_queue = deque() self.unhandled_messages_queue = deque() + self.offered = 0 # Messages offered to this peer + self.outgoing = 0 # Messages transferred to this peer + self.incoming = 0 # Messages received from this peer + self.rx_bytes = 0 # Bytes received from this peer + self.tx_bytes = 0 # Bytes sent to this peer + self._hm_count = 0 self._um_count = 0 self._hm_counts_synced = False @@ -212,7 +248,7 @@ class LXMPeer: 
cumulative_size += (lxm_size+per_message_overhead) unhandled_ids.append(transient_id) - RNS.log("Sending sync request to peer "+str(self.destination), RNS.LOG_DEBUG) + RNS.log(f"Offering {len(unhandled_ids)} messages to peer {RNS.prettyhexrep(self.destination.hash)}", RNS.LOG_VERBOSE) self.last_offer = unhandled_ids self.link.request(LXMPeer.OFFER_REQUEST_PATH, unhandled_ids, response_callback=self.offer_response, failed_callback=self.request_failed) self.state = LXMPeer.REQUEST_SENT @@ -242,10 +278,16 @@ class LXMPeer: if response == LXMPeer.ERROR_NO_IDENTITY: if self.link != None: - RNS.log("Remote peer indicated that no identification was received, retrying...", RNS.LOG_DEBUG) + RNS.log("Remote peer indicated that no identification was received, retrying...", RNS.LOG_VERBOSE) self.link.identify() self.state = LXMPeer.LINK_READY self.sync() + return + + elif response == LXMPeer.ERROR_NO_ACCESS: + RNS.log("Remote indicated that access was denied, breaking peering", RNS.LOG_VERBOSE) + self.router.unpeer(self.destination_hash) + return elif response == False: # Peer already has all advertised messages @@ -275,10 +317,9 @@ class LXMPeer: wanted_message_ids.append(transient_id) if len(wanted_messages) > 0: - RNS.log("Peer wanted "+str(len(wanted_messages))+" of the available messages", RNS.LOG_DEBUG) + RNS.log("Peer wanted "+str(len(wanted_messages))+" of the available messages", RNS.LOG_VERBOSE) lxm_list = [] - for message_entry in wanted_messages: file_path = message_entry[1] if os.path.isfile(file_path): @@ -294,7 +335,8 @@ class LXMPeer: self.state = LXMPeer.RESOURCE_TRANSFERRING else: - RNS.log("Peer "+RNS.prettyhexrep(self.destination_hash)+" did not request any of the available messages, sync completed", RNS.LOG_DEBUG) + RNS.log("Peer "+RNS.prettyhexrep(self.destination_hash)+" did not request any of the available messages, sync completed", RNS.LOG_VERBOSE) + self.offered += len(self.last_offer) if self.link != None: self.link.teardown() @@ -328,12 +370,15 @@ class LXMPeer: self.sync_transfer_rate = (resource.get_transfer_size()*8)/(time.time()-resource.sync_transfer_started) rate_str = f" at {RNS.prettyspeed(self.sync_transfer_rate)}" - RNS.log("Sync to peer "+RNS.prettyhexrep(self.destination_hash)+" completed"+rate_str, RNS.LOG_DEBUG) + RNS.log(f"Syncing {len(resource.transferred_messages)} messages to peer {RNS.prettyhexrep(self.destination_hash)} completed{rate_str}", RNS.LOG_VERBOSE) self.alive = True self.last_heard = time.time() + self.offered += len(self.last_offer) + self.outgoing += len(resource.transferred_messages) + self.tx_bytes += resource.get_data_size() else: - RNS.log("Resource transfer for LXMF peer sync failed to "+str(self.destination), RNS.LOG_DEBUG) + RNS.log("Resource transfer for LXMF peer sync failed to "+str(self.destination), RNS.LOG_VERBOSE) if self.link != None: self.link.teardown() diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index ee1dca8..bfe863d 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -37,6 +37,7 @@ class LXMRouter: NODE_ANNOUNCE_DELAY = 20 + MAX_PEERS = 50 AUTOPEER = True AUTOPEER_MAXDEPTH = 4 FASTEST_N_RANDOM_POOL = 2 @@ -67,7 +68,10 @@ class LXMRouter: ### Developer-facing API ############################## ####################################################### - def __init__(self, identity = None, storagepath = None, autopeer = AUTOPEER, autopeer_maxdepth = None, propagation_limit = PROPAGATION_LIMIT, delivery_limit = DELIVERY_LIMIT, enforce_ratchets = False, enforce_stamps = False): + def __init__(self, identity=None, 
storagepath=None, autopeer=AUTOPEER, autopeer_maxdepth=None, + propagation_limit=PROPAGATION_LIMIT, delivery_limit=DELIVERY_LIMIT, enforce_ratchets=False, + enforce_stamps=False, static_peers = [], max_peers=None, from_static_only=False): + random.seed(os.urandom(10)) self.pending_inbound = [] @@ -142,6 +146,27 @@ class LXMRouter: else: self.autopeer_maxdepth = LXMRouter.AUTOPEER_MAXDEPTH + if max_peers == None: + self.max_peers = LXMRouter.MAX_PEERS + else: + if type(max_peers) == int and max_peers >= 0: + self.max_peers = max_peers + else: + raise ValueError(f"Invalid value for max_peers: {max_peers}") + + self.from_static_only = from_static_only + if type(static_peers) != list: + raise ValueError(f"Invalid type supplied for static peer list: {type(static_peers)}") + else: + for static_peer in static_peers: + if type(static_peer) != bytes: + raise ValueError(f"Invalid static peer destination hash: {static_peer}") + else: + if len(static_peer) != RNS.Reticulum.TRUNCATED_HASHLENGTH//8: + raise ValueError(f"Invalid static peer destination hash: {static_peer}") + + self.static_peers = static_peers + self.peers = {} self.propagation_entries = {} @@ -245,8 +270,9 @@ class LXMRouter: def announce_propagation_node(self): def delayed_announce(): time.sleep(LXMRouter.NODE_ANNOUNCE_DELAY) + node_state = self.propagation_node and not self.from_static_only announce_data = [ - self.propagation_node, # Boolean flag signalling propagation node state + node_state, # Boolean flag signalling propagation node state int(time.time()), # Current node timebase self.propagation_per_transfer_limit, # Per-transfer limit for message propagation in kilobytes ] @@ -485,6 +511,11 @@ class LXMRouter: serialised_peer = serialised_peers.pop() peer = LXMPeer.from_bytes(serialised_peer, self) del serialised_peer + if peer.destination_hash in self.static_peers and peer.last_heard == 0: + # TODO: Allow path request responses through announce handler + # momentarily here, so peering config can be updated even if + # the static peer is not available to directly send an announce. + RNS.Transport.request_path(peer.destination_hash) if peer.identity != None: self.peers[peer.destination_hash] = peer lim_str = ", no transfer limit" @@ -497,6 +528,17 @@ class LXMRouter: del serialised_peers + if len(self.static_peers) > 0: + for static_peer in self.static_peers: + if not static_peer in self.peers: + RNS.log(f"Activating static peering with {RNS.prettyhexrep(static_peer)}", RNS.LOG_NOTICE) + self.peers[static_peer] = LXMPeer(self, static_peer) + if self.peers[static_peer].last_heard == 0: + # TODO: Allow path request responses through announce handler + # momentarily here, so peering config can be updated even if + # the static peer is not available to directly send an announce. 
+ RNS.Transport.request_path(static_peer) + RNS.log(f"Rebuilt synchronisation state for {len(self.peers)} peers in {RNS.prettytime(time.time()-st)}", RNS.LOG_NOTICE) self.propagation_node = True @@ -643,6 +685,11 @@ class LXMRouter: if self.processing_count % LXMRouter.JOB_PEERSYNC_INTERVAL == 0: self.sync_peers() + # def syncstats(self): + # for peer_id in self.peers: + # p = self.peers[peer_id] + # RNS.log(f"{RNS.prettyhexrep(peer_id)} O={p.offered} S={p.outgoing} I={p.incoming} TX={RNS.prettysize(p.tx_bytes)} RX={RNS.prettysize(p.rx_bytes)}") + def jobloop(self): while (True): # TODO: Improve this to scheduling, so manual @@ -1070,7 +1117,7 @@ class LXMRouter: self.flush_queues() if self.propagation_node: try: - st = time.time(); RNS.log("Saving peer synchronisation states to storage...", RNS.LOG_NOTICE) + st = time.time(); RNS.log(f"Saving {len(self.peers)} peer synchronisation states to storage...", RNS.LOG_NOTICE) serialised_peers = [] peer_dict = self.peers.copy() for peer_id in peer_dict: @@ -1081,7 +1128,7 @@ class LXMRouter: peers_file.write(msgpack.packb(serialised_peers)) peers_file.close() - RNS.log(f"Saved {len(serialised_peers)} peers to storage in {RNS.prettytime(time.time()-st)}", RNS.LOG_NOTICE) + RNS.log(f"Saved {len(serialised_peers)} peers to storage in {RNS.prettyshorttime(time.time()-st)}", RNS.LOG_NOTICE) except Exception as e: RNS.log("Could not save propagation node peers to storage. The contained exception was: "+str(e), RNS.LOG_ERROR) @@ -1605,14 +1652,18 @@ class LXMRouter: peer.peering_timebase = timestamp peer.last_heard = time.time() peer.propagation_transfer_limit = propagation_transfer_limit + RNS.log(f"Peering config updated for {RNS.prettyhexrep(destination_hash)}", RNS.LOG_VERBOSE) else: - peer = LXMPeer(self, destination_hash) - peer.alive = True - peer.last_heard = time.time() - peer.propagation_transfer_limit = propagation_transfer_limit - self.peers[destination_hash] = peer - RNS.log("Peered with "+str(peer.destination)) + if len(self.peers) < self.max_peers: + peer = LXMPeer(self, destination_hash) + peer.alive = True + peer.last_heard = time.time() + peer.propagation_transfer_limit = propagation_transfer_limit + self.peers[destination_hash] = peer + RNS.log(f"Peered with {RNS.prettyhexrep(destination_hash)}", RNS.LOG_NOTICE) + else: + RNS.log(f"Max peers reached, not peering with {RNS.prettyhexrep(destination_hash)}", RNS.LOG_DEBUG) def unpeer(self, destination_hash, timestamp = None): if timestamp == None: @@ -1633,7 +1684,8 @@ class LXMRouter: for peer_id in peers: peer = peers[peer_id] if time.time() > peer.last_heard + LXMPeer.MAX_UNREACHABLE: - culled_peers.append(peer_id) + if not peer_id in self.static_peers: + culled_peers.append(peer_id) else: if peer.state == LXMPeer.IDLE and len(peer.unhandled_messages) > 0: if peer.alive: @@ -1693,10 +1745,23 @@ class LXMRouter: self.active_propagation_links.append(link) def propagation_resource_advertised(self, resource): + if self.from_static_only: + remote_identity = resource.link.get_remote_identity() + if remote_identity == None: + RNS.log(f"Rejecting propagation resource from unidentified peer", RNS.LOG_DEBUG) + return False + else: + remote_destination = RNS.Destination(remote_identity, RNS.Destination.OUT, RNS.Destination.SINGLE, APP_NAME, "propagation") + remote_hash = remote_destination.hash + remote_str = RNS.prettyhexrep(remote_hash) + if not remote_hash in self.static_peers: + RNS.log(f"Rejecting propagation resource from {remote_str} not in static peers list", RNS.LOG_DEBUG) + return 
False + size = resource.get_data_size() limit = self.propagation_per_transfer_limit*1000 if limit != None and size > limit: - RNS.log("Rejecting "+RNS.prettysize(size)+" incoming LXMF propagation resource, since it exceeds the limit of "+RNS.prettysize(limit), RNS.LOG_DEBUG) + RNS.log(f"Rejecting {RNS.prettysize(size)} incoming propagation resource, since it exceeds the limit of {RNS.prettysize(limit)}", RNS.LOG_DEBUG) return False else: return True @@ -1723,6 +1788,14 @@ class LXMRouter: if remote_identity == None: return LXMPeer.ERROR_NO_IDENTITY else: + if self.from_static_only: + remote_destination = RNS.Destination(remote_identity, RNS.Destination.OUT, RNS.Destination.SINGLE, APP_NAME, "propagation") + remote_hash = remote_destination.hash + remote_str = RNS.prettyhexrep(remote_hash) + if not remote_hash in self.static_peers: + RNS.log(f"Rejecting propagation request from {remote_str} not in static peers list", RNS.LOG_DEBUG) + return LXMPeer.ERROR_NO_ACCESS + try: transient_ids = data wanted_ids = [] @@ -1745,7 +1818,6 @@ class LXMRouter: return None def propagation_resource_concluded(self, resource): - RNS.log("Transfer concluded for incoming propagation resource "+str(resource), RNS.LOG_DEBUG) if resource.status == RNS.Resource.COMPLETE: # TODO: The peer this was received from should # have the transient id added to its list of @@ -1757,22 +1829,29 @@ class LXMRouter: # This is a series of propagation messages from a peer or originator remote_timebase = data[0] remote_hash = None + remote_str = "unknown peer" remote_identity = resource.link.get_remote_identity() if remote_identity != None: remote_destination = RNS.Destination(remote_identity, RNS.Destination.OUT, RNS.Destination.SINGLE, APP_NAME, "propagation") remote_hash = remote_destination.hash + remote_str = RNS.prettyhexrep(remote_hash) if not remote_hash in self.peers: if self.autopeer and RNS.Transport.hops_to(remote_hash) <= self.autopeer_maxdepth: self.peer(remote_hash, remote_timebase) + else: + remote_str = f"peer {remote_str}" messages = data[1] + RNS.log(f"Received {len(messages)} messages from {remote_str}", RNS.LOG_VERBOSE) for lxmf_data in messages: peer = None transient_id = RNS.Identity.full_hash(lxmf_data) if remote_hash != None and remote_hash in self.peers: peer = self.peers[remote_hash] + peer.incoming += 1 + peer.rx_bytes += len(lxmf_data) self.lxmf_propagation(lxmf_data, from_peer=peer) if peer != None: @@ -1837,7 +1916,7 @@ class LXMRouter: msg_file.write(lxmf_data) msg_file.close() - RNS.log("Received propagated LXMF message "+RNS.prettyhexrep(transient_id)+", adding to peer distribution queues...", RNS.LOG_DEBUG) + RNS.log("Received propagated LXMF message "+RNS.prettyhexrep(transient_id)+", adding to peer distribution queues...", RNS.LOG_EXTREME) self.propagation_entries[transient_id] = [destination_hash, file_path, time.time(), len(lxmf_data), [], []] self.enqueue_peer_distribution(transient_id, from_peer) diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index 38e71b1..0c87a73 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -140,6 +140,24 @@ def apply_config(): else: active_configuration["prioritised_lxmf_destinations"] = [] + if "propagation" in lxmd_config and "static_peers" in lxmd_config["propagation"]: + static_peers = lxmd_config["propagation"].as_list("static_peers") + active_configuration["static_peers"] = [] + for static_peer in static_peers: + active_configuration["static_peers"].append(bytes.fromhex(static_peer)) + else: + active_configuration["static_peers"] 
= [] + + if "propagation" in lxmd_config and "max_peers" in lxmd_config["propagation"]: + active_configuration["max_peers"] = lxmd_config["propagation"].as_int("max_peers") + else: + active_configuration["max_peers"] = None + + if "propagation" in lxmd_config and "from_static_only" in lxmd_config["propagation"]: + active_configuration["from_static_only"] = lxmd_config["propagation"].as_bool("from_static_only") + else: + active_configuration["from_static_only"] = False + # Load various settings if "logging" in lxmd_config and "loglevel" in lxmd_config["logging"]: targetloglevel = lxmd_config["logging"].as_int("loglevel") @@ -305,7 +323,10 @@ def program_setup(configdir = None, rnsconfigdir = None, run_pn = False, on_inbo autopeer_maxdepth = active_configuration["autopeer_maxdepth"], propagation_limit = active_configuration["propagation_transfer_max_accepted_size"], delivery_limit = active_configuration["delivery_transfer_max_accepted_size"], - ) + max_peers = active_configuration["max_peers"], + static_peers = active_configuration["static_peers"], + from_static_only = active_configuration["from_static_only"]) + message_router.register_delivery_callback(lxmf_delivery) for destination_hash in active_configuration["ignored_lxmf_destinations"]: @@ -362,13 +383,13 @@ def jobs(): try: if "peer_announce_interval" in active_configuration and active_configuration["peer_announce_interval"] != None: if time.time() > last_peer_announce + active_configuration["peer_announce_interval"]: - RNS.log("Sending announce for LXMF delivery destination", RNS.LOG_EXTREME) + RNS.log("Sending announce for LXMF delivery destination", RNS.LOG_VERBOSE) message_router.announce(lxmf_destination.hash) last_peer_announce = time.time() if "node_announce_interval" in active_configuration and active_configuration["node_announce_interval"] != None: if time.time() > last_node_announce + active_configuration["node_announce_interval"]: - RNS.log("Sending announce for LXMF Propagation Node", RNS.LOG_EXTREME) + RNS.log("Sending announce for LXMF Propagation Node", RNS.LOG_VERBOSE) message_router.announce_propagation_node() last_node_announce = time.time() @@ -381,7 +402,7 @@ def deferred_start_jobs(): global active_configuration, last_peer_announce, last_node_announce global message_router, lxmf_destination time.sleep(DEFFERED_JOBS_DELAY) - RNS.log("Running deferred start jobs") + RNS.log("Running deferred start jobs", RNS.LOG_DEBUG) if active_configuration["peer_announce_at_start"]: RNS.log("Sending announce for LXMF delivery destination", RNS.LOG_EXTREME) message_router.announce(lxmf_destination.hash) From 68257a441ff1029054378185b09f4b61020e9d3e Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Wed, 22 Jan 2025 09:44:03 +0100 Subject: [PATCH 122/161] Set transfer limit on reverse auto-peer --- LXMF/LXMRouter.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index bfe863d..5465356 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -1839,7 +1839,12 @@ class LXMRouter: if not remote_hash in self.peers: if self.autopeer and RNS.Transport.hops_to(remote_hash) <= self.autopeer_maxdepth: - self.peer(remote_hash, remote_timebase) + # TODO: Query cache for an announce and get propagation + # transfer limit from that. For now, initialise it to a + # sane default value, and wait for an announce to arrive + # that will update the peering config to the actual limit. 
+ propagation_transfer_limit = LXMRouter.PROPAGATION_LIMIT//4 + self.peer(remote_hash, remote_timebase, propagation_transfer_limit) else: remote_str = f"peer {remote_str}" From 61b1ecce276631a4ec2c1165c33b5195e46e946d Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Wed, 22 Jan 2025 10:10:57 +0100 Subject: [PATCH 123/161] Updated readme --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index faced95..ed7e4f0 100644 --- a/README.md +++ b/README.md @@ -12,6 +12,7 @@ User-facing clients built on LXMF include: Community-provided tools and utilities for LXMF include: +- [LXMFy](https://lxmfy.quad4.io/) - [LXMF-Bot](https://github.com/randogoth/lxmf-bot) - [LXMF Messageboard](https://github.com/chengtripp/lxmf_messageboard) - [LXMEvent](https://github.com/faragher/LXMEvent) From 2c71cea7a0d2fc0a3ab5bbd26883befb5a0dd9fc Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 23 Jan 2025 14:13:08 +0100 Subject: [PATCH 124/161] Added local node stats request handler --- LXMF/LXMRouter.py | 134 ++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 131 insertions(+), 3 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 5465356..22ef3ac 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -64,6 +64,8 @@ class LXMRouter: PR_ALL_MESSAGES = 0x00 + STATS_GET_PATH = "/pn/get/stats" + ### Developer-facing API ############################## ####################################################### @@ -92,6 +94,7 @@ class LXMRouter: self.processing_count = 0 self.propagation_node = False + self.propagation_node_start_time = None if storagepath == None: raise ValueError("LXMF cannot be initialised without a storage path") @@ -135,6 +138,11 @@ class LXMRouter: self.identity = identity self.propagation_destination = RNS.Destination(self.identity, RNS.Destination.IN, RNS.Destination.SINGLE, APP_NAME, "propagation") + self.control_destination = None + self.client_propagation_messages_received = 0 + self.client_propagation_messages_served = 0 + self.unpeered_propagation_incoming = 0 + self.unpeered_propagation_rx_bytes = 0 if autopeer != None: self.autopeer = autopeer @@ -541,13 +549,35 @@ class LXMRouter: RNS.log(f"Rebuilt synchronisation state for {len(self.peers)} peers in {RNS.prettytime(time.time()-st)}", RNS.LOG_NOTICE) + try: + if os.path.isfile(self.storagepath+"/node_stats"): + node_stats_file = open(self.storagepath+"/node_stats", "rb") + data = node_stats_file.read() + node_stats_file.close() + node_stats = msgpack.unpackb(data) + + if not type(node_stats) == dict: + RNS.log("Invalid data format for loaded local node stats, node stats will be reset", RNS.LOG_ERROR) + else: + self.client_propagation_messages_received = node_stats["client_propagation_messages_received"] + self.client_propagation_messages_served = node_stats["client_propagation_messages_served"] + self.unpeered_propagation_incoming = node_stats["unpeered_propagation_incoming"] + self.unpeered_propagation_rx_bytes = node_stats["unpeered_propagation_rx_bytes"] + + except Exception as e: + RNS.log("Could not load local node stats. 
The contained exception was: "+str(e), RNS.LOG_ERROR) + self.propagation_node = True + self.propagation_node_start_time = time.time() self.propagation_destination.set_link_established_callback(self.propagation_link_established) self.propagation_destination.set_packet_callback(self.propagation_packet) self.propagation_destination.register_request_handler(LXMPeer.OFFER_REQUEST_PATH, self.offer_request, allow = RNS.Destination.ALLOW_ALL) self.propagation_destination.register_request_handler(LXMPeer.MESSAGE_GET_PATH, self.message_get_request, allow = RNS.Destination.ALLOW_ALL) + self.control_destination = RNS.Destination(self.identity, RNS.Destination.IN, RNS.Destination.SINGLE, APP_NAME, "propagation", "control") + self.control_destination.register_request_handler(LXMRouter.STATS_GET_PATH, self.stats_get_request, allow = RNS.Destination.ALLOW_LIST, allowed_list=[self.identity.hash]) + if self.message_storage_limit != None: limit_str = ", limit is "+RNS.prettysize(self.message_storage_limit) else: @@ -650,6 +680,76 @@ class LXMRouter: return False + ### Propagation Node Control ########################## + ####################################################### + + def compile_stats(self): + if not self.propagation_node: + return None + else: + peer_stats = {} + for peer_id in self.peers.copy(): + peer = self.peers[peer_id] + peer_stats[peer_id] = { + "type": "static" if peer_id in self.static_peers else "discovered", + "state": peer.state, + "alive": peer.alive, + "last_heard": int(peer.last_heard), + "next_sync_attempt": peer.next_sync_attempt, + "last_sync_attempt": peer.last_sync_attempt, + "sync_backoff": peer.sync_backoff, + "peering_timebase": peer.peering_timebase, + "ler": int(peer.link_establishment_rate), + "str": int(peer.sync_transfer_rate), + "transfer_limit": peer.propagation_transfer_limit, + "network_distance": RNS.Transport.hops_to(peer_id), + "rx_bytes": peer.rx_bytes, + "tx_bytes": peer.tx_bytes, + "messages": { + "offered": peer.offered, + "outgoing": peer.outgoing, + "incoming": peer.incoming, + }, + } + + node_stats = { + "identity_hash": self.identity.hash, + "destination_hash": self.propagation_destination.hash, + "uptime": time.time()-self.propagation_node_start_time, + "delivery_limit": self.delivery_per_transfer_limit, + "propagation_limit": self.propagation_per_transfer_limit, + "autopeer_maxdepth": self.autopeer_maxdepth, + "from_static_only": self.from_static_only, + "messagestore": { + "count": len(self.propagation_entries), + "bytes": self.message_storage_size(), + "limit": self.message_storage_limit, + }, + "clients" : { + "client_propagation_messages_received": self.client_propagation_messages_received, + "client_propagation_messages_served": self.client_propagation_messages_served, + }, + "unpeered_propagation_incoming": self.unpeered_propagation_incoming, + "unpeered_propagation_rx_bytes": self.unpeered_propagation_rx_bytes, + "static_peers": len(self.static_peers), + "discovered_peers": len(self.peers)-len(self.static_peers), + "total_peers": len(self.peers), + "max_peers": self.max_peers, + "peers": peer_stats, + } + + return node_stats + + def stats_get_request(self, path, data, request_id, remote_identity, requested_at): + RNS.log("Stats request", RNS.LOG_DEBUG) # TODO: Remove debug + if remote_identity == None: + return LXMPeer.ERROR_NO_IDENTITY + elif remote_identity.hash != self.identity.hash: + return LXMPeer.ERROR_NO_ACCESS + else: + return self.compile_stats() + + ### Utility & Maintenance ############################# 
####################################################### @@ -970,7 +1070,7 @@ class LXMRouter: try: if len(self.locally_delivered_transient_ids) > 0: if not os.path.isdir(self.storagepath): - os.makedirs(self.storagepath) + os.makedirs(self.storagepath) with open(self.storagepath+"/local_deliveries", "wb") as locally_delivered_file: locally_delivered_file.write(msgpack.packb(self.locally_delivered_transient_ids)) @@ -982,7 +1082,7 @@ class LXMRouter: try: if len(self.locally_processed_transient_ids) > 0: if not os.path.isdir(self.storagepath): - os.makedirs(self.storagepath) + os.makedirs(self.storagepath) with open(self.storagepath+"/locally_processed", "wb") as locally_processed_file: locally_processed_file.write(msgpack.packb(self.locally_processed_transient_ids)) @@ -990,6 +1090,24 @@ class LXMRouter: except Exception as e: RNS.log("Could not save locally processed transient ID cache to storage. The contained exception was: "+str(e), RNS.LOG_ERROR) + def save_node_stats(self): + try: + if not os.path.isdir(self.storagepath): + os.makedirs(self.storagepath) + + with open(self.storagepath+"/node_stats", "wb") as stats_file: + node_stats = { + "client_propagation_messages_received": self.client_propagation_messages_received, + "client_propagation_messages_served": self.client_propagation_messages_served, + "unpeered_propagation_incoming": self.unpeered_propagation_incoming, + "unpeered_propagation_rx_bytes": self.unpeered_propagation_rx_bytes, + } + stats_file.write(msgpack.packb(node_stats)) + + except Exception as e: + RNS.log("Could not save local node stats to storage. The contained exception was: "+str(e), RNS.LOG_ERROR) + + def clean_outbound_stamp_costs(self): try: expired = [] @@ -1106,6 +1224,7 @@ class LXMRouter: self.propagation_destination.set_packet_callback(None) self.propagation_destination.deregister_request_handler(LXMPeer.OFFER_REQUEST_PATH) self.propagation_destination.deregister_request_handler(LXMPeer.MESSAGE_GET_PATH) + self.propagation_destination.deregister_request_handler(LXMRouter.STATS_GET_PATH) for link in self.active_propagation_links: try: if link.status == RNS.Link.ACTIVE: @@ -1135,6 +1254,7 @@ class LXMRouter: self.save_locally_delivered_transient_ids() self.save_locally_processed_transient_ids() + self.save_node_stats() def sigint_handler(self, signal, frame): if not self.exit_handler_running: @@ -1263,6 +1383,7 @@ class LXMRouter: except Exception as e: RNS.log("Error while processing message download request from "+RNS.prettyhexrep(remote_destination.hash)+". 
The contained exception was: "+str(e), RNS.LOG_ERROR) + self.client_propagation_messages_served += len(response_messages) return response_messages @@ -1777,6 +1898,7 @@ class LXMRouter: messages = data[1] for lxmf_data in messages: self.lxmf_propagation(lxmf_data) + self.client_propagation_messages_received += 1 packet.prove() @@ -1849,7 +1971,7 @@ class LXMRouter: remote_str = f"peer {remote_str}" messages = data[1] - RNS.log(f"Received {len(messages)} messages from {remote_str}", RNS.LOG_VERBOSE) + RNS.log(f"Received {len(messages)} message{"" if len(messages) == 1 else "s"} from {remote_str}", RNS.LOG_VERBOSE) for lxmf_data in messages: peer = None transient_id = RNS.Identity.full_hash(lxmf_data) @@ -1857,6 +1979,12 @@ class LXMRouter: peer = self.peers[remote_hash] peer.incoming += 1 peer.rx_bytes += len(lxmf_data) + else: + if remote_identity != None: + self.unpeered_propagation_incoming += 1 + self.unpeered_propagation_rx_bytes += len(lxmf_data) + else: + self.client_propagation_messages_received += 1 self.lxmf_propagation(lxmf_data, from_peer=peer) if peer != None: From f683e038910e45cf9be83b4dc01465ce8c8877ff Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 23 Jan 2025 14:15:12 +0100 Subject: [PATCH 125/161] Added lxmd status getter --- LXMF/Utilities/lxmd.py | 96 ++++++++++++++++++++++++++++++++++++++---- 1 file changed, 87 insertions(+), 9 deletions(-) diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index 0c87a73..1bc1d12 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -35,6 +35,7 @@ import time import os from LXMF._version import __version__ +from LXMF import APP_NAME from RNS.vendor.configobj import ConfigObj @@ -415,6 +416,75 @@ def deferred_start_jobs(): last_node_announce = time.time() threading.Thread(target=jobs, daemon=True).start() +def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = 0, timeout=5): + global configpath, identitypath, storagedir, lxmdir + global lxmd_config, active_configuration, targetloglevel + targetlogdest = RNS.LOG_STDOUT + + if configdir == None: + if os.path.isdir("/etc/lxmd") and os.path.isfile("/etc/lxmd/config"): + configdir = "/etc/lxmd" + elif os.path.isdir(RNS.Reticulum.userdir+"/.config/lxmd") and os.path.isfile(Reticulum.userdir+"/.config/lxmd/config"): + configdir = RNS.Reticulum.userdir+"/.config/lxmd" + else: + configdir = RNS.Reticulum.userdir+"/.lxmd" + + configpath = configdir+"/config" + identitypath = configdir+"/identity" + identity = None + + if not os.path.isdir(configdir): + RNS.log("Specified configuration directory does not exist, exiting now", RNS.LOG_ERROR) + exit(201) + if not os.path.isfile(identitypath): + RNS.log("Identity file not found in specified configuration directory, exiting now", RNS.LOG_ERROR) + exit(202) + else: + identity = RNS.Identity.from_file(identitypath) + if identity == None: + RNS.log("Could not load the Primary Identity from "+identitypath, RNS.LOG_ERROR) + exit(4) + + if targetloglevel == None: + targetloglevel = 3 + if verbosity != 0 or quietness != 0: + targetloglevel = targetloglevel+verbosity-quietness + + reticulum = RNS.Reticulum(configdir=rnsconfigdir, loglevel=targetloglevel, logdest=targetlogdest) + control_destination = RNS.Destination(identity, RNS.Destination.OUT, RNS.Destination.SINGLE, APP_NAME, "propagation", "control") + + timeout = time.time()+timeout + def check_timeout(): + if time.time() > timeout: + RNS.log("Getting lxmd statistics timed out, exiting now", RNS.LOG_ERROR) + exit(200) + else: + time.sleep(0.1) + + if 
not RNS.Transport.has_path(control_destination.hash): + RNS.Transport.request_path(control_destination.hash) + while not RNS.Transport.has_path(control_destination.hash): + check_timeout() + + link = RNS.Link(control_destination) + while not link.status == RNS.Link.ACTIVE: + check_timeout() + + link.identify(identity) + request_receipt = link.request(LXMF.LXMRouter.STATS_GET_PATH, data=None, response_callback=None, failed_callback=None) + while not request_receipt.get_status() == RNS.RequestReceipt.READY: + check_timeout() + + response = request_receipt.get_response() + if response == LXMF.LXMPeer.LXMPeer.ERROR_NO_IDENTITY: + RNS.log("Remote received no identity") + exit(203) + if response == LXMF.LXMPeer.LXMPeer.ERROR_NO_ACCESS: + RNS.log("Access denied") + exit(204) + else: + # TODO: Output stats + def main(): try: parser = argparse.ArgumentParser(description="Lightweight Extensible Messaging Daemon") @@ -425,6 +495,8 @@ def main(): parser.add_argument("-v", "--verbose", action="count", default=0) parser.add_argument("-q", "--quiet", action="count", default=0) parser.add_argument("-s", "--service", action="store_true", default=False, help="lxmd is running as a service and should log to file") + parser.add_argument("--status", action="store_true", default=False, help="display node status") + parser.add_argument("--timeout", action="store", default=5, help="timeout in seconds for query operations", type=float) parser.add_argument("--exampleconfig", action="store_true", default=False, help="print verbose configuration example to stdout and exit") parser.add_argument("--version", action="version", version="lxmd {version}".format(version=__version__)) @@ -434,15 +506,21 @@ def main(): print(__default_lxmd_config__) exit() - program_setup( - configdir = args.config, - rnsconfigdir=args.rnsconfig, - run_pn=args.propagation_node, - on_inbound=args.on_inbound, - verbosity=args.verbose, - quietness=args.quiet, - service=args.service - ) + if args.status: + get_status(configdir = args.config, + rnsconfigdir=args.rnsconfig, + verbosity=args.verbose, + quietness=args.quiet, + timeout=args.timeout) + exit() + + program_setup(configdir = args.config, + rnsconfigdir=args.rnsconfig, + run_pn=args.propagation_node, + on_inbound=args.on_inbound, + verbosity=args.verbose, + quietness=args.quiet, + service=args.service) except KeyboardInterrupt: print("") From 460645cea2abc0a72b8f5d6444184286c4c676e8 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 23 Jan 2025 14:15:31 +0100 Subject: [PATCH 126/161] Added lxmd status getter --- LXMF/Utilities/lxmd.py | 1 + 1 file changed, 1 insertion(+) diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index 1bc1d12..d8b24d3 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -484,6 +484,7 @@ def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = exit(204) else: # TODO: Output stats + pass def main(): try: From e3be7e0cfdb529dece6e51165b67f697c70724b3 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 23 Jan 2025 16:27:01 +0100 Subject: [PATCH 127/161] Persist last sync attempt --- LXMF/LXMPeer.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/LXMF/LXMPeer.py b/LXMF/LXMPeer.py index ec0cfe2..61602c3 100644 --- a/LXMF/LXMPeer.py +++ b/LXMF/LXMPeer.py @@ -91,6 +91,11 @@ class LXMPeer: peer.tx_bytes = dictionary["tx_bytes"] else: peer.tx_bytes = 0 + + if "last_sync_attempt" in dictionary: + peer.last_sync_attempt = dictionary["last_sync_attempt"] + else: + peer.last_sync_attempt = 0 hm_count = 0 for 
transient_id in dictionary["handled_ids"]: @@ -121,6 +126,7 @@ class LXMPeer: dictionary["link_establishment_rate"] = self.link_establishment_rate dictionary["sync_transfer_rate"] = self.sync_transfer_rate dictionary["propagation_transfer_limit"] = self.propagation_transfer_limit + dictionary["last_sync_attempt"] = self.last_sync_attempt dictionary["offered"] = self.offered dictionary["outgoing"] = self.outgoing dictionary["incoming"] = self.incoming From a198e96064fa47af3f8e1dc8db225fbb39f77f80 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 23 Jan 2025 16:27:23 +0100 Subject: [PATCH 128/161] Include unhandled message count in stats --- LXMF/LXMRouter.py | 1 + 1 file changed, 1 insertion(+) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 22ef3ac..8e824e4 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -709,6 +709,7 @@ class LXMRouter: "offered": peer.offered, "outgoing": peer.outgoing, "incoming": peer.incoming, + "unhandled": peer.unhandled_message_count }, } From 35dd70c59e09a7fa093f7e24b60065317e7507c9 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 23 Jan 2025 16:27:48 +0100 Subject: [PATCH 129/161] Format status and peers output --- LXMF/Utilities/lxmd.py | 140 +++++++++++++++++++++++++++++++++-------- 1 file changed, 115 insertions(+), 25 deletions(-) diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index d8b24d3..dd070fc 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -416,34 +416,45 @@ def deferred_start_jobs(): last_node_announce = time.time() threading.Thread(target=jobs, daemon=True).start() -def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = 0, timeout=5): +def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = 0, timeout=5, show_status=False, show_peers=False, identity_path=None): global configpath, identitypath, storagedir, lxmdir global lxmd_config, active_configuration, targetloglevel targetlogdest = RNS.LOG_STDOUT - if configdir == None: - if os.path.isdir("/etc/lxmd") and os.path.isfile("/etc/lxmd/config"): - configdir = "/etc/lxmd" - elif os.path.isdir(RNS.Reticulum.userdir+"/.config/lxmd") and os.path.isfile(Reticulum.userdir+"/.config/lxmd/config"): - configdir = RNS.Reticulum.userdir+"/.config/lxmd" + if identity_path == None: + if configdir == None: + if os.path.isdir("/etc/lxmd") and os.path.isfile("/etc/lxmd/config"): + configdir = "/etc/lxmd" + elif os.path.isdir(RNS.Reticulum.userdir+"/.config/lxmd") and os.path.isfile(Reticulum.userdir+"/.config/lxmd/config"): + configdir = RNS.Reticulum.userdir+"/.config/lxmd" + else: + configdir = RNS.Reticulum.userdir+"/.lxmd" + + configpath = configdir+"/config" + identitypath = configdir+"/identity" + identity = None + + if not os.path.isdir(configdir): + RNS.log("Specified configuration directory does not exist, exiting now", RNS.LOG_ERROR) + exit(201) + if not os.path.isfile(identitypath): + RNS.log("Identity file not found in specified configuration directory, exiting now", RNS.LOG_ERROR) + exit(202) else: - configdir = RNS.Reticulum.userdir+"/.lxmd" + identity = RNS.Identity.from_file(identitypath) + if identity == None: + RNS.log("Could not load the Primary Identity from "+identitypath, RNS.LOG_ERROR) + exit(4) - configpath = configdir+"/config" - identitypath = configdir+"/identity" - identity = None - - if not os.path.isdir(configdir): - RNS.log("Specified configuration directory does not exist, exiting now", RNS.LOG_ERROR) - exit(201) - if not os.path.isfile(identitypath): - RNS.log("Identity file 
not found in specified configuration directory, exiting now", RNS.LOG_ERROR) - exit(202) else: - identity = RNS.Identity.from_file(identitypath) - if identity == None: - RNS.log("Could not load the Primary Identity from "+identitypath, RNS.LOG_ERROR) - exit(4) + if not os.path.isfile(identity_path): + RNS.log("Identity file not found in specified configuration directory, exiting now", RNS.LOG_ERROR) + exit(202) + else: + identity = RNS.Identity.from_file(identity_path) + if identity == None: + RNS.log("Could not load the Primary Identity from "+identity_path, RNS.LOG_ERROR) + exit(4) if targetloglevel == None: targetloglevel = 3 @@ -483,8 +494,82 @@ def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = RNS.log("Access denied") exit(204) else: - # TODO: Output stats - pass + s = response + ms_util = f"{round((s["messagestore"]["bytes"]/s["messagestore"]["limit"])*100, 2)}%" + if s["from_static_only"]: + who_str = "static peers only" + else: + who_str = "all nodes" + + available_peers = 0 + unreachable_peers = 0 + peered_incoming = 0 + peered_outgoing = 0 + peered_rx_bytes = 0 + peered_tx_bytes = 0 + for peer_id in s["peers"]: + p = s["peers"][peer_id] + pm = p["messages"] + peered_incoming += pm["incoming"] + peered_outgoing += pm["outgoing"] + peered_rx_bytes += p["rx_bytes"] + peered_tx_bytes += p["tx_bytes"] + if p["alive"]: + available_peers += 1 + else: + unreachable_peers += 1 + + total_incoming = peered_incoming+s["unpeered_propagation_incoming"]+s["clients"]["client_propagation_messages_received"] + total_rx_bytes = peered_rx_bytes+s["unpeered_propagation_rx_bytes"] + df = round(peered_outgoing/total_incoming, 2) + + print(f"\nLXMF Propagation Node running on {RNS.prettyhexrep(s["destination_hash"])}, uptime is {RNS.prettytime(s["uptime"])}") + + if show_status: + print(f"Messagestore contains {s["messagestore"]["count"]} messages, {RNS.prettysize(s["messagestore"]["bytes"])} of {RNS.prettysize(s["messagestore"]["limit"])} ({ms_util} utilised)") + print(f"Accepting propagated messages from {who_str}, {RNS.prettysize(s["propagation_limit"]*1000)} per-transfer limit") + print(f"") + print(f"Peers : {s["total_peers"]} total (peer limit is {s["max_peers"]})") + print(f" {s["discovered_peers"]} discovered, {s["static_peers"]} static") + print(f" {available_peers} available, {unreachable_peers} unreachable") + print(f"") + print(f"Traffic : {s["unpeered_propagation_incoming"]} messages received from unpeered nodes ({RNS.prettysize(s["unpeered_propagation_rx_bytes"])})") + print(f" {peered_incoming} messages received from peered nodes ({RNS.prettysize(peered_rx_bytes)})") + print(f" {total_incoming} messages received from peered nodes ({RNS.prettysize(total_rx_bytes)})") + print(f" {peered_outgoing} messages transferred to peered nodes ({RNS.prettysize(peered_tx_bytes)})") + print(f" {s["clients"]["client_propagation_messages_received"]} messages received from clients") + print(f" {s["clients"]["client_propagation_messages_served"]} messages served to clients") + print(f" Distribution factor is {df}") + print(f"") + + if show_peers: + for peer_id in s["peers"]: + ind = " " + p = s["peers"][peer_id] + if p["type"] == "static": + t = "Static peer " + elif p["type"] == "discovered": + t = "Discovered peer " + else: + t = "Unknown peer " + a = "Available" if p["alive"] == True else "Unreachable" + h = max(time.time()-p["last_heard"], 0) + hops = p["network_distance"] + hs = f"{hops} hop away" if hops == 1 else f"{hops} hops away" + pm = p["messages"] + if 
p["last_sync_attempt"] != 0: + ls = f"last synced {RNS.prettytime(max(time.time()-p["last_sync_attempt"], 0))} ago" + else: + ls = "never synced" + + print(f"{ind}{t}{RNS.prettyhexrep(peer_id)}") + print(f"{ind*2}Status : {a}, {hs}, last heard {RNS.prettytime(h)} ago") + print(f"{ind*2}Speeds : {RNS.prettyspeed(p["str"])} STR, {RNS.prettyspeed(p["ler"])} LER, {RNS.prettysize(p["transfer_limit"]*1000)} transfer limit") + print(f"{ind*2}Messages : {pm["offered"]} offered, {pm["outgoing"]} outgoing, {pm["incoming"]} incoming") + print(f"{ind*2}Traffic : {RNS.prettysize(p["rx_bytes"])} received, {RNS.prettysize(p["tx_bytes"])} sent") + print(f"{ind*2}Sync state : {pm["unhandled"]} unhandled message{"" if pm["unhandled"] == 1 else "s"}, {ls}") + print("") + def main(): try: @@ -497,7 +582,9 @@ def main(): parser.add_argument("-q", "--quiet", action="count", default=0) parser.add_argument("-s", "--service", action="store_true", default=False, help="lxmd is running as a service and should log to file") parser.add_argument("--status", action="store_true", default=False, help="display node status") + parser.add_argument("--peers", action="store_true", default=False, help="display peered nodes") parser.add_argument("--timeout", action="store", default=5, help="timeout in seconds for query operations", type=float) + parser.add_argument("--identity", action="store", default=None, help="path to identity used for query request", type=str) parser.add_argument("--exampleconfig", action="store_true", default=False, help="print verbose configuration example to stdout and exit") parser.add_argument("--version", action="version", version="lxmd {version}".format(version=__version__)) @@ -507,12 +594,15 @@ def main(): print(__default_lxmd_config__) exit() - if args.status: + if args.status or args.peers: get_status(configdir = args.config, rnsconfigdir=args.rnsconfig, verbosity=args.verbose, quietness=args.quiet, - timeout=args.timeout) + timeout=args.timeout, + show_status=args.status, + show_peers=args.peers, + identity_path=args.identity) exit() program_setup(configdir = args.config, From a87458d25f794d84c68f0c4212fedc91bcd7e7fb Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 23 Jan 2025 16:28:11 +0100 Subject: [PATCH 130/161] Updated version --- LXMF/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LXMF/_version.py b/LXMF/_version.py index 43c4ab0..22049ab 100644 --- a/LXMF/_version.py +++ b/LXMF/_version.py @@ -1 +1 @@ -__version__ = "0.6.1" +__version__ = "0.6.2" From fe59b265c50ce87ca33e183b2b154b3eaaf163c0 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 23 Jan 2025 16:54:12 +0100 Subject: [PATCH 131/161] Fixed fstrings not working on Python < 3.12 --- LXMF/LXMRouter.py | 3 ++- LXMF/Utilities/lxmd.py | 40 ++++++++++++++++++++++++++-------------- 2 files changed, 28 insertions(+), 15 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 8e824e4..0358428 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -1972,7 +1972,8 @@ class LXMRouter: remote_str = f"peer {remote_str}" messages = data[1] - RNS.log(f"Received {len(messages)} message{"" if len(messages) == 1 else "s"} from {remote_str}", RNS.LOG_VERBOSE) + ms = "" if len(messages) == 1 else "s" + RNS.log(f"Received {len(messages)} message{ms} from {remote_str}", RNS.LOG_VERBOSE) for lxmf_data in messages: peer = None transient_id = RNS.Identity.full_hash(lxmf_data) diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index dd070fc..54e0b54 100644 --- a/LXMF/Utilities/lxmd.py +++ 
b/LXMF/Utilities/lxmd.py @@ -495,7 +495,8 @@ def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = exit(204) else: s = response - ms_util = f"{round((s["messagestore"]["bytes"]/s["messagestore"]["limit"])*100, 2)}%" + mutil = round((s["messagestore"]["bytes"]/s["messagestore"]["limit"])*100, 2) + ms_util = f"{mutil}%" if s["from_static_only"]: who_str = "static peers only" else: @@ -523,22 +524,28 @@ def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = total_rx_bytes = peered_rx_bytes+s["unpeered_propagation_rx_bytes"] df = round(peered_outgoing/total_incoming, 2) - print(f"\nLXMF Propagation Node running on {RNS.prettyhexrep(s["destination_hash"])}, uptime is {RNS.prettytime(s["uptime"])}") + dhs = RNS.prettyhexrep(s["destination_hash"]); uts = RNS.prettytime(s["uptime"]) + print(f"\nLXMF Propagation Node running on {dhs}, uptime is {uts}") if show_status: - print(f"Messagestore contains {s["messagestore"]["count"]} messages, {RNS.prettysize(s["messagestore"]["bytes"])} of {RNS.prettysize(s["messagestore"]["limit"])} ({ms_util} utilised)") - print(f"Accepting propagated messages from {who_str}, {RNS.prettysize(s["propagation_limit"]*1000)} per-transfer limit") + msb = RNS.prettysize(s["messagestore"]["bytes"]); msl = RNS.prettysize(s["messagestore"]["limit"]) + ptl = RNS.prettysize(s["propagation_limit"]*1000); uprx = RNS.prettysize(s["unpeered_propagation_rx_bytes"]) + mscnt = s["messagestore"]["count"]; stp = s["total_peers"]; smp = s["max_peers"]; sdp = s["discovered_peers"] + ssp = s["static_peers"]; cprr = s["clients"]["client_propagation_messages_received"] + cprs = s["clients"]["client_propagation_messages_served"]; upi = s["unpeered_propagation_incoming"] + print(f"Messagestore contains {mscnt} messages, {msb} ({ms_util} utilised of {msl})") + print(f"Accepting propagated messages from {who_str}, {ptl} per-transfer limit") print(f"") - print(f"Peers : {s["total_peers"]} total (peer limit is {s["max_peers"]})") - print(f" {s["discovered_peers"]} discovered, {s["static_peers"]} static") + print(f"Peers : {stp} total (peer limit is {smp})") + print(f" {sdp} discovered, {ssp} static") print(f" {available_peers} available, {unreachable_peers} unreachable") print(f"") - print(f"Traffic : {s["unpeered_propagation_incoming"]} messages received from unpeered nodes ({RNS.prettysize(s["unpeered_propagation_rx_bytes"])})") + print(f"Traffic : {upi} messages received from unpeered nodes ({uprx})") print(f" {peered_incoming} messages received from peered nodes ({RNS.prettysize(peered_rx_bytes)})") print(f" {total_incoming} messages received from peered nodes ({RNS.prettysize(total_rx_bytes)})") print(f" {peered_outgoing} messages transferred to peered nodes ({RNS.prettysize(peered_tx_bytes)})") - print(f" {s["clients"]["client_propagation_messages_received"]} messages received from clients") - print(f" {s["clients"]["client_propagation_messages_served"]} messages served to clients") + print(f" {cprr} messages received from clients") + print(f" {cprs} messages served to clients") print(f" Distribution factor is {df}") print(f"") @@ -558,16 +565,21 @@ def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = hs = f"{hops} hop away" if hops == 1 else f"{hops} hops away" pm = p["messages"] if p["last_sync_attempt"] != 0: - ls = f"last synced {RNS.prettytime(max(time.time()-p["last_sync_attempt"], 0))} ago" + lsa = p["last_sync_attempt"] + ls = f"last synced {RNS.prettytime(max(time.time()-lsa, 0))} ago" else: ls = "never 
synced" + sstr = RNS.prettyspeed(p["str"]); sler = RNS.prettyspeed(p["ler"]); stl = RNS.prettysize(p["transfer_limit"]*1000) + srxb = RNS.prettysize(p["rx_bytes"]); stxb = RNS.prettysize(p["tx_bytes"]); pmo = pm["offered"]; pmout = pm["outgoing"] + pmi = pm["incoming"]; pmuh = pm["unhandled"] print(f"{ind}{t}{RNS.prettyhexrep(peer_id)}") print(f"{ind*2}Status : {a}, {hs}, last heard {RNS.prettytime(h)} ago") - print(f"{ind*2}Speeds : {RNS.prettyspeed(p["str"])} STR, {RNS.prettyspeed(p["ler"])} LER, {RNS.prettysize(p["transfer_limit"]*1000)} transfer limit") - print(f"{ind*2}Messages : {pm["offered"]} offered, {pm["outgoing"]} outgoing, {pm["incoming"]} incoming") - print(f"{ind*2}Traffic : {RNS.prettysize(p["rx_bytes"])} received, {RNS.prettysize(p["tx_bytes"])} sent") - print(f"{ind*2}Sync state : {pm["unhandled"]} unhandled message{"" if pm["unhandled"] == 1 else "s"}, {ls}") + print(f"{ind*2}Speeds : {sstr} STR, {sler} LER, {stl} transfer limit") + print(f"{ind*2}Messages : {pmo} offered, {pmout} outgoing, {pmi} incoming") + print(f"{ind*2}Traffic : {srxb} received, {stxb} sent") + ms = "" if pm["unhandled"] == 1 else "s" + print(f"{ind*2}Sync state : {pmuh} unhandled message{ms}, {ls}") print("") From 70186cf8d9fc780eba6ce39494964c31b2519a57 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 23 Jan 2025 17:07:20 +0100 Subject: [PATCH 132/161] Fixed typo --- LXMF/Utilities/lxmd.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index 54e0b54..bb29661 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -542,7 +542,7 @@ def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = print(f"") print(f"Traffic : {upi} messages received from unpeered nodes ({uprx})") print(f" {peered_incoming} messages received from peered nodes ({RNS.prettysize(peered_rx_bytes)})") - print(f" {total_incoming} messages received from peered nodes ({RNS.prettysize(total_rx_bytes)})") + print(f" {total_incoming} messages received from all nodes ({RNS.prettysize(total_rx_bytes)})") print(f" {peered_outgoing} messages transferred to peered nodes ({RNS.prettysize(peered_tx_bytes)})") print(f" {cprr} messages received from clients") print(f" {cprs} messages served to clients") From a3e3868f9258ed1f0b930e85a8993234440d448d Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 23 Jan 2025 17:09:40 +0100 Subject: [PATCH 133/161] Changed formatting --- LXMF/Utilities/lxmd.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index bb29661..415ecb6 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -542,7 +542,7 @@ def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = print(f"") print(f"Traffic : {upi} messages received from unpeered nodes ({uprx})") print(f" {peered_incoming} messages received from peered nodes ({RNS.prettysize(peered_rx_bytes)})") - print(f" {total_incoming} messages received from all nodes ({RNS.prettysize(total_rx_bytes)})") + print(f" {total_incoming} messages received in total ({RNS.prettysize(total_rx_bytes)})") print(f" {peered_outgoing} messages transferred to peered nodes ({RNS.prettysize(peered_tx_bytes)})") print(f" {cprr} messages received from clients") print(f" {cprs} messages served to clients") From fb4bf9b0b9307cb872e97619a685e8adf44a467e Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 23 Jan 2025 17:36:30 +0100 Subject: [PATCH 134/161] Cleanup --- LXMF/LXMRouter.py | 1 - 1 
file changed, 1 deletion(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 0358428..bd63e17 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -742,7 +742,6 @@ class LXMRouter: return node_stats def stats_get_request(self, path, data, request_id, remote_identity, requested_at): - RNS.log("Stats request", RNS.LOG_DEBUG) # TODO: Remove debug if remote_identity == None: return LXMPeer.ERROR_NO_IDENTITY elif remote_identity.hash != self.identity.hash: From cdea838a6c38f0b9a4f7d983b48361565bbc835f Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 23 Jan 2025 17:43:24 +0100 Subject: [PATCH 135/161] Updated status output --- LXMF/Utilities/lxmd.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index 415ecb6..b52d5ae 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -544,8 +544,8 @@ def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = print(f" {peered_incoming} messages received from peered nodes ({RNS.prettysize(peered_rx_bytes)})") print(f" {total_incoming} messages received in total ({RNS.prettysize(total_rx_bytes)})") print(f" {peered_outgoing} messages transferred to peered nodes ({RNS.prettysize(peered_tx_bytes)})") - print(f" {cprr} messages received from clients") - print(f" {cprs} messages served to clients") + print(f" {cprr} propagation messages received diretly from clients") + print(f" {cprs} propagation messages served to clients") print(f" Distribution factor is {df}") print(f"") From aa57b16cf562d8f9409e877f959d9751f8c5b300 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Fri, 24 Jan 2025 00:09:36 +0100 Subject: [PATCH 136/161] Fixed #23 --- LXMF/LXMRouter.py | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index bd63e17..b0a4cc8 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -777,13 +777,16 @@ class LXMRouter: self.clean_transient_id_caches() if self.processing_count % LXMRouter.JOB_STORE_INTERVAL == 0: - self.clean_message_store() + if self.propagation_node == True: + self.clean_message_store() if self.processing_count % LXMRouter.JOB_PEERINGEST_INTERVAL == 0: - self.flush_queues() + if self.propagation_node == True: + self.flush_queues() if self.processing_count % LXMRouter.JOB_PEERSYNC_INTERVAL == 0: - self.sync_peers() + if self.propagation_node == True: + self.sync_peers() # def syncstats(self): # for peer_id in self.peers: @@ -986,12 +989,12 @@ class LXMRouter: lxm_size = self.propagation_entries[transient_id][3] return lxm_size - def clean_message_store(self): + RNS.log("Cleaning message store", RNS.LOG_VERBOSE) # Check and remove expired messages now = time.time() removed_entries = {} - for transient_id in self.propagation_entries: + for transient_id in self.propagation_entries.copy(): entry = self.propagation_entries[transient_id] filepath = entry[1] components = filepath.split("_") @@ -999,7 +1002,7 @@ class LXMRouter: if len(components) == 2 and float(components[1]) > 0 and len(os.path.split(components[0])[1]) == (RNS.Identity.HASHLENGTH//8)*2: timestamp = float(components[1]) if now > timestamp+LXMRouter.MESSAGE_EXPIRY: - RNS.log("Purging message "+RNS.prettyhexrep(transient_id)+" due to expiry", RNS.LOG_DEBUG) + RNS.log("Purging message "+RNS.prettyhexrep(transient_id)+" due to expiry", RNS.LOG_EXTREME) removed_entries[transient_id] = filepath else: RNS.log("Purging message "+RNS.prettyhexrep(transient_id)+" due to invalid file path", 
RNS.LOG_WARNING) @@ -1017,7 +1020,7 @@ class LXMRouter: RNS.log("Could not remove "+RNS.prettyhexrep(transient_id)+" from message store. The contained exception was: "+str(e), RNS.LOG_ERROR) if removed_count > 0: - RNS.log("Cleaned "+str(removed_count)+" entries from the message store", RNS.LOG_DEBUG) + RNS.log("Cleaned "+str(removed_count)+" entries from the message store", RNS.LOG_VERBOSE) # Check size of message store and cull if needed try: @@ -1029,7 +1032,7 @@ class LXMRouter: bytes_cleaned = 0 weighted_entries = [] - for transient_id in self.propagation_entries: + for transient_id in self.propagation_entries.copy(): weighted_entries.append([ self.propagation_entries[transient_id], self.get_weight(transient_id), From a8cc5f41cf92a7e35b80bc2f6b55292ad4cf170d Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Fri, 24 Jan 2025 00:21:37 +0100 Subject: [PATCH 137/161] Fixed typo --- LXMF/Utilities/lxmd.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index b52d5ae..2f21108 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -540,11 +540,11 @@ def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = print(f" {sdp} discovered, {ssp} static") print(f" {available_peers} available, {unreachable_peers} unreachable") print(f"") - print(f"Traffic : {upi} messages received from unpeered nodes ({uprx})") + print(f"Traffic : {total_incoming} messages received in total ({RNS.prettysize(total_rx_bytes)})") print(f" {peered_incoming} messages received from peered nodes ({RNS.prettysize(peered_rx_bytes)})") - print(f" {total_incoming} messages received in total ({RNS.prettysize(total_rx_bytes)})") + print(f" {upi} messages received from unpeered nodes ({uprx})") print(f" {peered_outgoing} messages transferred to peered nodes ({RNS.prettysize(peered_tx_bytes)})") - print(f" {cprr} propagation messages received diretly from clients") + print(f" {cprr} propagation messages received directly from clients") print(f" {cprs} propagation messages served to clients") print(f" Distribution factor is {df}") print(f"") From 6d2eb4f97375dc2c637dd531d94a11738d2cb7ed Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Fri, 24 Jan 2025 00:26:47 +0100 Subject: [PATCH 138/161] Updated default config --- LXMF/Utilities/lxmd.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index 2f21108..7f54835 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -127,7 +127,7 @@ def apply_config(): if active_configuration["message_storage_limit"] < 0.005: active_configuration["message_storage_limit"] = 0.005 else: - active_configuration["message_storage_limit"] = 2000 + active_configuration["message_storage_limit"] = 500 if "propagation" in lxmd_config and "propagation_transfer_max_accepted_size" in lxmd_config["propagation"]: active_configuration["propagation_transfer_max_accepted_size"] = lxmd_config["propagation"].as_float("propagation_transfer_max_accepted_size") @@ -679,9 +679,9 @@ propagation_transfer_max_accepted_size = 256 # LXMF prioritises keeping messages that are # new and small. Large and old messages will # be removed first. This setting is optional -# and defaults to 2 gigabytes. +# and defaults to 500 megabytes. 
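
With the status getter and the formatted status/peer output from the patches above in place, a running propagation node can be queried directly from the shell. A minimal usage sketch, using only the --status, --peers, --timeout and --identity arguments added earlier in this series (the identity path below is illustrative, not a fixed location):

    # Show node status using the identity found in the default configuration directory
    lxmd --status

    # Show status and the full peer list, allowing a longer query timeout
    lxmd --status --peers --timeout 10

    # Query using an explicit identity file
    lxmd --peers --identity /path/to/identity

Note that get_status() identifies the query link with the supplied identity, and stats_get_request() on the node compares that identity against its own, so a successful query effectively requires the node's own identity, which lxmd loads from the configuration directory when --identity is not given.
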
-# message_storage_limit = 2000 +# message_storage_limit = 500 # You can tell the LXMF message router to # prioritise storage for one or more From 962d9c90d1c468e95589b15ccaf2384a379dea35 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Fri, 24 Jan 2025 13:50:56 +0100 Subject: [PATCH 139/161] Added wanted inbound peers to PN announce data --- LXMF/Handlers.py | 9 +++++++-- LXMF/LXMRouter.py | 10 ++++++++-- 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/LXMF/Handlers.py b/LXMF/Handlers.py index 22c6cd3..ea8960d 100644 --- a/LXMF/Handlers.py +++ b/LXMF/Handlers.py @@ -45,6 +45,11 @@ class LXMFPropagationAnnounceHandler: if pn_announce_data_is_valid(data): node_timebase = data[1] propagation_transfer_limit = None + if len(data) >= 4: + try: + wanted_inbound_peers = int(data[3]) + except: + wanted_inbound_peers = None if len(data) >= 3: try: propagation_transfer_limit = float(data[2]) @@ -52,12 +57,12 @@ class LXMFPropagationAnnounceHandler: propagation_transfer_limit = None if destination_hash in self.lxmrouter.static_peers: - self.lxmrouter.peer(destination_hash, node_timebase, propagation_transfer_limit) + self.lxmrouter.peer(destination_hash, node_timebase, propagation_transfer_limit, wanted_inbound_peers) else: if data[0] == True: if RNS.Transport.hops_to(destination_hash) <= self.lxmrouter.autopeer_maxdepth: - self.lxmrouter.peer(destination_hash, node_timebase, propagation_transfer_limit) + self.lxmrouter.peer(destination_hash, node_timebase, propagation_transfer_limit, wanted_inbound_peers) elif data[0] == False: self.lxmrouter.unpeer(destination_hash, node_timebase) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index b0a4cc8..9eccedc 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -283,6 +283,7 @@ class LXMRouter: node_state, # Boolean flag signalling propagation node state int(time.time()), # Current node timebase self.propagation_per_transfer_limit, # Per-transfer limit for message propagation in kilobytes + self.get_wanted_inbound_peers(), # How many more inbound peers this node wants ] data = msgpack.packb(announce_data) @@ -888,6 +889,10 @@ class LXMRouter: self.save_outbound_stamp_costs() threading.Thread(target=self.save_outbound_stamp_costs, daemon=True).start() + def get_wanted_inbound_peers(self): + # TODO: Implement + return None + def get_announce_app_data(self, destination_hash): if destination_hash in self.delivery_destinations: delivery_destination = self.delivery_destinations[destination_hash] @@ -1766,7 +1771,7 @@ class LXMRouter: ### Peer Sync & Propagation ########################### ####################################################### - def peer(self, destination_hash, timestamp, propagation_transfer_limit): + def peer(self, destination_hash, timestamp, propagation_transfer_limit, wanted_inbound_peers = None): if destination_hash in self.peers: peer = self.peers[destination_hash] if timestamp > peer.peering_timebase: @@ -1969,7 +1974,8 @@ class LXMRouter: # sane default value, and wait for an announce to arrive # that will update the peering config to the actual limit. 
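
For reference, the announce application data assembled in the LXMRouter hunk above is a msgpack-encoded list of up to four fields: the propagation-node state flag, the node's current timebase, the per-transfer limit in kilobytes, and, as of this patch, an optional wanted-inbound-peers count that may be None. A minimal decoding sketch follows; it assumes the RNS-vendored umsgpack module and omits the additional sanity checks that pn_announce_data_is_valid() performs in Handlers.py:

    import RNS.vendor.umsgpack as msgpack

    def parse_pn_announce_app_data(app_data):
        # Expected layout: [node_state, node_timebase, per_transfer_limit_kb, wanted_inbound_peers]
        data = msgpack.unpackb(app_data)
        node_state     = data[0]          # True if the node is currently acting as a propagation node
        node_timebase  = int(data[1])     # node's current time, seconds since epoch
        transfer_limit = float(data[2]) if len(data) > 2 and data[2] != None else None
        wanted_inbound = int(data[3])   if len(data) > 3 and data[3] != None else None
        return node_state, node_timebase, transfer_limit, wanted_inbound
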
propagation_transfer_limit = LXMRouter.PROPAGATION_LIMIT//4 - self.peer(remote_hash, remote_timebase, propagation_transfer_limit) + wanted_inbound_peers = None + self.peer(remote_hash, remote_timebase, propagation_transfer_limit, wanted_inbound_peers) else: remote_str = f"peer {remote_str}" From cec903a4dcc878f14f8cd8be6a9abc54868cbea6 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Fri, 24 Jan 2025 14:05:12 +0100 Subject: [PATCH 140/161] Added status query API function --- LXMF/Handlers.py | 1 + LXMF/LXMPeer.py | 1 + LXMF/Utilities/lxmd.py | 60 ++++++++++++++++++++++++------------------ 3 files changed, 37 insertions(+), 25 deletions(-) diff --git a/LXMF/Handlers.py b/LXMF/Handlers.py index ea8960d..26a5df6 100644 --- a/LXMF/Handlers.py +++ b/LXMF/Handlers.py @@ -45,6 +45,7 @@ class LXMFPropagationAnnounceHandler: if pn_announce_data_is_valid(data): node_timebase = data[1] propagation_transfer_limit = None + wanted_inbound_peers = None if len(data) >= 4: try: wanted_inbound_peers = int(data[3]) diff --git a/LXMF/LXMPeer.py b/LXMF/LXMPeer.py index 61602c3..5036528 100644 --- a/LXMF/LXMPeer.py +++ b/LXMF/LXMPeer.py @@ -20,6 +20,7 @@ class LXMPeer: ERROR_NO_IDENTITY = 0xf0 ERROR_NO_ACCESS = 0xf1 + ERROR_TIMEOUT = 0xfe # Maximum amount of time a peer can # be unreachable before it is removed diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index 7f54835..a06d60c 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -416,6 +416,36 @@ def deferred_start_jobs(): last_node_announce = time.time() threading.Thread(target=jobs, daemon=True).start() +def query_status(identity, timeout=5, exit_on_fail=False): + control_destination = RNS.Destination(identity, RNS.Destination.OUT, RNS.Destination.SINGLE, APP_NAME, "propagation", "control") + + timeout = time.time()+timeout + def check_timeout(): + if time.time() > timeout: + RNS.log("Getting lxmd statistics timed out, exiting now", RNS.LOG_ERROR) + if exit_on_fail: + exit(200) + else: + return LXMF.LXMPeer.LXMPeer.ERROR_TIMEOUT + else: + time.sleep(0.1) + + if not RNS.Transport.has_path(control_destination.hash): + RNS.Transport.request_path(control_destination.hash) + while not RNS.Transport.has_path(control_destination.hash): + check_timeout() + + link = RNS.Link(control_destination) + while not link.status == RNS.Link.ACTIVE: + check_timeout() + + link.identify(identity) + request_receipt = link.request(LXMF.LXMRouter.STATS_GET_PATH, data=None, response_callback=None, failed_callback=None) + while not request_receipt.get_status() == RNS.RequestReceipt.READY: + check_timeout() + + return request_receipt.get_response() + def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = 0, timeout=5, show_status=False, show_peers=False, identity_path=None): global configpath, identitypath, storagedir, lxmdir global lxmd_config, active_configuration, targetloglevel @@ -462,31 +492,8 @@ def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = targetloglevel = targetloglevel+verbosity-quietness reticulum = RNS.Reticulum(configdir=rnsconfigdir, loglevel=targetloglevel, logdest=targetlogdest) - control_destination = RNS.Destination(identity, RNS.Destination.OUT, RNS.Destination.SINGLE, APP_NAME, "propagation", "control") + response = query_status(identity, timeout=timeout, exit_on_fail=True) - timeout = time.time()+timeout - def check_timeout(): - if time.time() > timeout: - RNS.log("Getting lxmd statistics timed out, exiting now", RNS.LOG_ERROR) - exit(200) - else: - time.sleep(0.1) - - if not 
RNS.Transport.has_path(control_destination.hash): - RNS.Transport.request_path(control_destination.hash) - while not RNS.Transport.has_path(control_destination.hash): - check_timeout() - - link = RNS.Link(control_destination) - while not link.status == RNS.Link.ACTIVE: - check_timeout() - - link.identify(identity) - request_receipt = link.request(LXMF.LXMRouter.STATS_GET_PATH, data=None, response_callback=None, failed_callback=None) - while not request_receipt.get_status() == RNS.RequestReceipt.READY: - check_timeout() - - response = request_receipt.get_response() if response == LXMF.LXMPeer.LXMPeer.ERROR_NO_IDENTITY: RNS.log("Remote received no identity") exit(203) @@ -550,6 +557,9 @@ def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = print(f"") if show_peers: + if not show_status: + print("") + for peer_id in s["peers"]: ind = " " p = s["peers"][peer_id] @@ -562,7 +572,7 @@ def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = a = "Available" if p["alive"] == True else "Unreachable" h = max(time.time()-p["last_heard"], 0) hops = p["network_distance"] - hs = f"{hops} hop away" if hops == 1 else f"{hops} hops away" + hs = "hops unknown" if hops == RNS.Transport.PATHFINDER_M else f"{hops} hop away" if hops == 1 else f"{hops} hops away" pm = p["messages"] if p["last_sync_attempt"] != 0: lsa = p["last_sync_attempt"] From 26a10cce8f8a572553084c69603ca6605f2672fd Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Sun, 26 Jan 2025 01:13:11 +0100 Subject: [PATCH 141/161] Status query return code --- LXMF/Utilities/lxmd.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index a06d60c..73d0eb0 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -422,8 +422,8 @@ def query_status(identity, timeout=5, exit_on_fail=False): timeout = time.time()+timeout def check_timeout(): if time.time() > timeout: - RNS.log("Getting lxmd statistics timed out, exiting now", RNS.LOG_ERROR) if exit_on_fail: + RNS.log("Getting lxmd statistics timed out, exiting now", RNS.LOG_ERROR) exit(200) else: return LXMF.LXMPeer.LXMPeer.ERROR_TIMEOUT @@ -433,16 +433,22 @@ def query_status(identity, timeout=5, exit_on_fail=False): if not RNS.Transport.has_path(control_destination.hash): RNS.Transport.request_path(control_destination.hash) while not RNS.Transport.has_path(control_destination.hash): - check_timeout() + tc = check_timeout() + if tc: + return tc link = RNS.Link(control_destination) while not link.status == RNS.Link.ACTIVE: - check_timeout() + tc = check_timeout() + if tc: + return tc link.identify(identity) request_receipt = link.request(LXMF.LXMRouter.STATS_GET_PATH, data=None, response_callback=None, failed_callback=None) while not request_receipt.get_status() == RNS.RequestReceipt.READY: - check_timeout() + tc = check_timeout() + if tc: + return tc return request_receipt.get_response() From e0163e100a5541ed9abf4c57bb38960739ca23ea Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Mon, 27 Jan 2025 10:26:11 +0100 Subject: [PATCH 142/161] Updated issue template --- .github/ISSUE_TEMPLATE/πŸ›-bug-report.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/πŸ›-bug-report.md b/.github/ISSUE_TEMPLATE/πŸ›-bug-report.md index 77ad6c2..65b492e 100644 --- a/.github/ISSUE_TEMPLATE/πŸ›-bug-report.md +++ b/.github/ISSUE_TEMPLATE/πŸ›-bug-report.md @@ -12,7 +12,7 @@ Before creating a bug report on this issue tracker, you **must** read the 
[Contr - The issue tracker is used by developers of this project. **Do not use it to ask general questions, or for support requests**. - Ideas and feature requests can be made on the [Discussions](https://github.com/markqvist/Reticulum/discussions). **Only** feature requests accepted by maintainers and developers are tracked and included on the issue tracker. **Do not post feature requests here**. -- After reading the [Contribution Guidelines](https://github.com/markqvist/Reticulum/blob/master/Contributing.md), delete this section from your bug report. +- After reading the [Contribution Guidelines](https://github.com/markqvist/Reticulum/blob/master/Contributing.md), **delete this section only** (*"Read the Contribution Guidelines"*) from your bug report, **and fill in all the other sections**. **Describe the Bug** A clear and concise description of what the bug is. From 886ac69a8284e8ca3c3c0a4e2106f7160e8c7f62 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Mon, 27 Jan 2025 12:04:05 +0100 Subject: [PATCH 143/161] Tear down control link after use --- LXMF/Utilities/lxmd.py | 1 + 1 file changed, 1 insertion(+) diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index 73d0eb0..f9a2ef6 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -450,6 +450,7 @@ def query_status(identity, timeout=5, exit_on_fail=False): if tc: return tc + link.teardown() return request_receipt.get_response() def get_status(configdir = None, rnsconfigdir = None, verbosity = 0, quietness = 0, timeout=5, show_status=False, show_peers=False, identity_path=None): From e0e901291e20515d12abe105fef30010db7fb1f1 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Mon, 27 Jan 2025 12:04:16 +0100 Subject: [PATCH 144/161] Updated logging --- LXMF/LXMessage.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LXMF/LXMessage.py b/LXMF/LXMessage.py index 2342708..515ab11 100644 --- a/LXMF/LXMessage.py +++ b/LXMF/LXMessage.py @@ -380,7 +380,7 @@ class LXMessage: if self.desired_method == LXMessage.OPPORTUNISTIC: if self.__destination.type == RNS.Destination.SINGLE: if content_size > LXMessage.ENCRYPTED_PACKET_MAX_CONTENT: - RNS.log(f"Opportunistic delivery was requested for {self}, but content exceeds packet size limit. Falling back to link-based delivery.", RNS.LOG_DEBUG) + RNS.log(f"Opportunistic delivery was requested for {self}, but content of length {content_size} exceeds packet size limit. 
Falling back to link-based delivery.", RNS.LOG_DEBUG) self.desired_method = LXMessage.DIRECT # Set delivery parameters according to delivery method From f1d060a92ef9ea9b0a0f3402ff46fc9d91fddd5c Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Wed, 29 Jan 2025 01:26:36 +0100 Subject: [PATCH 145/161] Added peer rotation --- LXMF/LXMPeer.py | 4 +++ LXMF/LXMRouter.py | 68 +++++++++++++++++++++++++++++++++++++++++++---- 2 files changed, 67 insertions(+), 5 deletions(-) diff --git a/LXMF/LXMPeer.py b/LXMF/LXMPeer.py index 5036528..e2f951a 100644 --- a/LXMF/LXMPeer.py +++ b/LXMF/LXMPeer.py @@ -469,6 +469,10 @@ class LXMPeer: return self._um_count + @property + def acceptance_rate(self): + return 0 if self.offered == 0 else (self.outgoing/self.offered) + def _update_counts(self): if not self._hm_counts_synced: hm = self.handled_messages; del hm diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 9eccedc..4bbd24c 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -41,6 +41,7 @@ class LXMRouter: AUTOPEER = True AUTOPEER_MAXDEPTH = 4 FASTEST_N_RANDOM_POOL = 2 + ROTATION_HEADROOM_PCT = 10 PROPAGATION_LIMIT = 256 DELIVERY_LIMIT = 1000 @@ -122,6 +123,7 @@ class LXMRouter: self.propagation_transfer_progress = 0.0 self.propagation_transfer_last_result = None self.propagation_transfer_max_messages = None + self.prioritise_rotating_unreachable_peers = False self.active_propagation_links = [] self.locally_delivered_transient_ids = {} self.locally_processed_transient_ids = {} @@ -783,17 +785,13 @@ class LXMRouter: if self.processing_count % LXMRouter.JOB_PEERINGEST_INTERVAL == 0: if self.propagation_node == True: + self.rotate_peers() self.flush_queues() if self.processing_count % LXMRouter.JOB_PEERSYNC_INTERVAL == 0: if self.propagation_node == True: self.sync_peers() - # def syncstats(self): - # for peer_id in self.peers: - # p = self.peers[peer_id] - # RNS.log(f"{RNS.prettyhexrep(peer_id)} O={p.offered} S={p.outgoing} I={p.incoming} TX={RNS.prettysize(p.tx_bytes)} RX={RNS.prettysize(p.rx_bytes)}") - def jobloop(self): while (True): # TODO: Improve this to scheduling, so manual @@ -1805,6 +1803,66 @@ class LXMRouter: self.peers.pop(destination_hash) RNS.log("Broke peering with "+str(peer.destination)) + def rotate_peers(self): + try: + rotation_headroom = max(1, math.floor(self.max_peers*(LXMRouter.ROTATION_HEADROOM_PCT/100.0))) + required_drops = len(self.peers) - (self.max_peers - rotation_headroom) + if required_drops > 0 and len(self.peers) - required_drops > 1: + peers = self.peers.copy() + untested_peers = [] + for peer_id in self.peers: + peer = self.peers[peer_id] + if peer.last_sync_attempt == 0: + untested_peers.append(peer) + + if len(untested_peers) >= rotation_headroom: + RNS.log("Newly added peer threshold reached, postponing peer rotation", RNS.LOG_DEBUG) + return + + culled_peers = [] + waiting_peers = [] + unresponsive_peers = [] + for peer_id in peers: + peer = peers[peer_id] + if not peer_id in self.static_peers and peer.state == LXMPeer.IDLE: + if peer.alive: + if peer.offered == 0: + # Don't consider for unpeering until at + # least one message has been offered + pass + else: + waiting_peers.append(peer) + else: + unresponsive_peers.append(peer) + + drop_pool = [] + if len(unresponsive_peers) > 0: + drop_pool.extend(unresponsive_peers) + if not self.prioritise_rotating_unreachable_peers: + drop_pool.extend(waiting_peers) + + else: + drop_pool.extend(waiting_peers) + + if len(drop_pool) > 0: + drop_count = min(required_drops, len(drop_pool)) + low_acceptance_rate_peers = 
sorted( + drop_pool, + key=lambda p: ( 0 if p.offered == 0 else (p.outgoing/p.offered) ), + reverse=False + )[0:drop_count] + + ms = "" if len(low_acceptance_rate_peers) == 1 else "s" + RNS.log(f"Dropping {len(low_acceptance_rate_peers)} lowest acceptance rate peer{ms} to increase peering headroom", RNS.LOG_DEBUG) + for peer in low_acceptance_rate_peers: + ar = 0 if peer.offered == 0 else round((peer.outgoing/peer.offered)*100, 2) + RNS.log(f"Acceptance rate for {RNS.prettyhexrep(peer.destination_hash)} was: {ar}% ({peer.outgoing} / {peer.offered})", RNS.LOG_DEBUG) + self.unpeer(peer.destination_hash) + + except Exception as e: + RNS.log(f"An error occurred during peer rotation: {e}", RNS.LOG_ERROR) + RNS.trace_exception(e) + def sync_peers(self): culled_peers = [] waiting_peers = [] From 40fc75f5593aee19563ed9403170c7b1c938e7fd Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Wed, 29 Jan 2025 14:24:09 +0100 Subject: [PATCH 146/161] Refined peer rotation algorithm --- LXMF/LXMRouter.py | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 4bbd24c..1d8f50e 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -763,6 +763,7 @@ class LXMRouter: JOB_STORE_INTERVAL = 120 JOB_PEERSYNC_INTERVAL = 12 JOB_PEERINGEST_INTERVAL= JOB_PEERSYNC_INTERVAL + JOB_ROTATE_INTERVAL = 675 def jobs(self): if not self.exit_handler_running: self.processing_count += 1 @@ -785,9 +786,12 @@ class LXMRouter: if self.processing_count % LXMRouter.JOB_PEERINGEST_INTERVAL == 0: if self.propagation_node == True: - self.rotate_peers() self.flush_queues() + if self.processing_count % LXMRouter.JOB_ROTATE_INTERVAL == 0: + if self.propagation_node == True: + self.rotate_peers() + if self.processing_count % LXMRouter.JOB_PEERSYNC_INTERVAL == 0: if self.propagation_node == True: self.sync_peers() @@ -1819,6 +1823,17 @@ class LXMRouter: RNS.log("Newly added peer threshold reached, postponing peer rotation", RNS.LOG_DEBUG) return + fully_synced_peers = {} + for peer_id in peers: + peer = peers[peer_id] + if peer.unhandled_message_count == 0: + fully_synced_peers[peer_id] = peer + + if len(fully_synced_peers) > 0: + peers = fully_synced_peers + ms = "" if len(fully_synced_peers) == 1 else "s" + RNS.log(f"Found {len(fully_synced_peers)} fully synced peer{ms}, using as peer rotation pool basis", RNS.LOG_DEBUG) + culled_peers = [] waiting_peers = [] unresponsive_peers = [] @@ -1856,7 +1871,8 @@ class LXMRouter: RNS.log(f"Dropping {len(low_acceptance_rate_peers)} lowest acceptance rate peer{ms} to increase peering headroom", RNS.LOG_DEBUG) for peer in low_acceptance_rate_peers: ar = 0 if peer.offered == 0 else round((peer.outgoing/peer.offered)*100, 2) - RNS.log(f"Acceptance rate for {RNS.prettyhexrep(peer.destination_hash)} was: {ar}% ({peer.outgoing} / {peer.offered})", RNS.LOG_DEBUG) + reachable_str = "reachable" if peer.alive else "unreachable" + RNS.log(f"Acceptance rate for {reachable_str} peer {RNS.prettyhexrep(peer.destination_hash)} was: {ar}% ({peer.outgoing}/{peer.offered}, {peer.unhandled_message_count} unhandled messages)", RNS.LOG_DEBUG) self.unpeer(peer.destination_hash) except Exception as e: From 40d0b9a5deca5fb054946dfcf37d2442bdac4469 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Wed, 29 Jan 2025 21:21:51 +0100 Subject: [PATCH 147/161] Added acceptance rate threshold to peer rotation --- LXMF/LXMRouter.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 
1d8f50e..df340d2 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -42,6 +42,7 @@ class LXMRouter: AUTOPEER_MAXDEPTH = 4 FASTEST_N_RANDOM_POOL = 2 ROTATION_HEADROOM_PCT = 10 + ROTATION_AR_MAX = 0.5 PROPAGATION_LIMIT = 256 DELIVERY_LIMIT = 1000 @@ -1867,13 +1868,16 @@ class LXMRouter: reverse=False )[0:drop_count] - ms = "" if len(low_acceptance_rate_peers) == 1 else "s" - RNS.log(f"Dropping {len(low_acceptance_rate_peers)} lowest acceptance rate peer{ms} to increase peering headroom", RNS.LOG_DEBUG) + dropped_peers = 0 for peer in low_acceptance_rate_peers: ar = 0 if peer.offered == 0 else round((peer.outgoing/peer.offered)*100, 2) - reachable_str = "reachable" if peer.alive else "unreachable" - RNS.log(f"Acceptance rate for {reachable_str} peer {RNS.prettyhexrep(peer.destination_hash)} was: {ar}% ({peer.outgoing}/{peer.offered}, {peer.unhandled_message_count} unhandled messages)", RNS.LOG_DEBUG) - self.unpeer(peer.destination_hash) + if ar < LXMRouter.ROTATION_AR_MAX*100: + reachable_str = "reachable" if peer.alive else "unreachable" + RNS.log(f"Acceptance rate for {reachable_str} peer {RNS.prettyhexrep(peer.destination_hash)} was: {ar}% ({peer.outgoing}/{peer.offered}, {peer.unhandled_message_count} unhandled messages)", RNS.LOG_DEBUG) + self.unpeer(peer.destination_hash) + + ms = "" if dropped_peers == 1 else "s" + RNS.log(f"Dropped {dropped_peers} low acceptance rate peer{ms} to increase peering headroom", RNS.LOG_DEBUG) except Exception as e: RNS.log(f"An error occurred during peer rotation: {e}", RNS.LOG_ERROR) From b7b67536400e768658dcc9cf63406ccf9baba468 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 30 Jan 2025 00:37:50 +0100 Subject: [PATCH 148/161] Fixed potential division by zero. Fixes #25. --- LXMF/LXMRouter.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index df340d2..7bb44a5 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -507,8 +507,10 @@ class LXMRouter: except Exception as e: RNS.log("Could not read LXM from message store. 
The contained exception was: "+str(e), RNS.LOG_ERROR) - et = time.time(); RNS.log(f"Indexed {len(self.propagation_entries)} messages in {RNS.prettytime(et-st)}, {math.floor(len(self.propagation_entries)/(et-st))} msgs/s", RNS.LOG_NOTICE) - st = time.time(); RNS.log("Rebuilding peer synchronisation states...", RNS.LOG_NOTICE) + et = time.time(); mps = 0 if et-st == 0 else math.floor(len(self.propagation_entries)/(et-st)) + RNS.log(f"Indexed {len(self.propagation_entries)} messages in {RNS.prettytime(et-st)}, {mps} msgs/s", RNS.LOG_NOTICE) + RNS.log("Rebuilding peer synchronisation states...", RNS.LOG_NOTICE) + st = time.time(); if os.path.isfile(self.storagepath+"/peers"): peers_file = open(self.storagepath+"/peers", "rb") @@ -1875,6 +1877,7 @@ class LXMRouter: reachable_str = "reachable" if peer.alive else "unreachable" RNS.log(f"Acceptance rate for {reachable_str} peer {RNS.prettyhexrep(peer.destination_hash)} was: {ar}% ({peer.outgoing}/{peer.offered}, {peer.unhandled_message_count} unhandled messages)", RNS.LOG_DEBUG) self.unpeer(peer.destination_hash) + dropped_peers += 1 ms = "" if dropped_peers == 1 else "s" RNS.log(f"Dropped {dropped_peers} low acceptance rate peer{ms} to increase peering headroom", RNS.LOG_DEBUG) From 9eca747757933d283922923c3b598d68a32f7902 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 30 Jan 2025 10:46:31 +0100 Subject: [PATCH 149/161] Updated peer rotation timing to align with distribution queue mapping --- LXMF/LXMRouter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 7bb44a5..a364a12 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -766,7 +766,7 @@ class LXMRouter: JOB_STORE_INTERVAL = 120 JOB_PEERSYNC_INTERVAL = 12 JOB_PEERINGEST_INTERVAL= JOB_PEERSYNC_INTERVAL - JOB_ROTATE_INTERVAL = 675 + JOB_ROTATE_INTERVAL = 56*JOB_PEERINGEST_INTERVAL def jobs(self): if not self.exit_handler_running: self.processing_count += 1 From f42ccfc4e93b9a85ed32a6ebc3b5f3ed21a24b49 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 30 Jan 2025 11:23:18 +0100 Subject: [PATCH 150/161] Automatically clean messages exceeding propagation transfer limit for peer from unhandled message queues --- LXMF/LXMPeer.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/LXMF/LXMPeer.py b/LXMF/LXMPeer.py index e2f951a..a777b86 100644 --- a/LXMF/LXMPeer.py +++ b/LXMF/LXMPeer.py @@ -250,7 +250,10 @@ class LXMPeer: lxm_size = unhandled_entry[2] next_size = cumulative_size + (lxm_size+per_message_overhead) if self.propagation_transfer_limit != None and next_size > (self.propagation_transfer_limit*1000): - pass + if lxm_size+per_message_overhead > self.propagation_transfer_limit: + RNS.log(f"Message {RNS.prettyhexrep(transient_id)} exceeds transfer limit for {self}, considering handled", RNS.LOG_DEBUG) + self.remove_unhandled_message(transient_id) + self.add_handled_message(transient_id) else: cumulative_size += (lxm_size+per_message_overhead) unhandled_ids.append(transient_id) From b94a712bb626e83755fb54694a2e6a30690957f8 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 30 Jan 2025 11:30:45 +0100 Subject: [PATCH 151/161] Automatically clean messages exceeding propagation transfer limit for peer from unhandled message queues --- LXMF/LXMPeer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LXMF/LXMPeer.py b/LXMF/LXMPeer.py index a777b86..a2b6b18 100644 --- a/LXMF/LXMPeer.py +++ b/LXMF/LXMPeer.py @@ -250,7 +250,7 @@ class LXMPeer: lxm_size = unhandled_entry[2] next_size = 
cumulative_size + (lxm_size+per_message_overhead) if self.propagation_transfer_limit != None and next_size > (self.propagation_transfer_limit*1000): - if lxm_size+per_message_overhead > self.propagation_transfer_limit: + if lxm_size+per_message_overhead > (self.propagation_transfer_limit*1000): RNS.log(f"Message {RNS.prettyhexrep(transient_id)} exceeds transfer limit for {self}, considering handled", RNS.LOG_DEBUG) self.remove_unhandled_message(transient_id) self.add_handled_message(transient_id) From 7b4780cfb7537ccc114c9a0d99da7dc6928eb113 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 30 Jan 2025 11:36:11 +0100 Subject: [PATCH 152/161] Automatically clean messages exceeding propagation transfer limit for peer from unhandled message queues --- LXMF/LXMPeer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LXMF/LXMPeer.py b/LXMF/LXMPeer.py index a2b6b18..c1294bd 100644 --- a/LXMF/LXMPeer.py +++ b/LXMF/LXMPeer.py @@ -251,9 +251,9 @@ class LXMPeer: next_size = cumulative_size + (lxm_size+per_message_overhead) if self.propagation_transfer_limit != None and next_size > (self.propagation_transfer_limit*1000): if lxm_size+per_message_overhead > (self.propagation_transfer_limit*1000): - RNS.log(f"Message {RNS.prettyhexrep(transient_id)} exceeds transfer limit for {self}, considering handled", RNS.LOG_DEBUG) self.remove_unhandled_message(transient_id) self.add_handled_message(transient_id) + RNS.log(f"Message {RNS.prettyhexrep(transient_id)} exceeds transfer limit for {self}, considering handled", RNS.LOG_DEBUG) else: cumulative_size += (lxm_size+per_message_overhead) unhandled_ids.append(transient_id) From c0a8f3be498514611ccb7e44925bf78afd5d71ac Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 30 Jan 2025 15:04:21 +0100 Subject: [PATCH 153/161] Cleanup --- LXMF/Handlers.py | 10 ++++++---- LXMF/LXMRouter.py | 5 +++-- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/LXMF/Handlers.py b/LXMF/Handlers.py index 26a5df6..9876f4c 100644 --- a/LXMF/Handlers.py +++ b/LXMF/Handlers.py @@ -47,10 +47,12 @@ class LXMFPropagationAnnounceHandler: propagation_transfer_limit = None wanted_inbound_peers = None if len(data) >= 4: - try: - wanted_inbound_peers = int(data[3]) - except: - wanted_inbound_peers = None + # TODO: Rethink, probably not necessary anymore + # try: + # wanted_inbound_peers = int(data[3]) + # except: + # wanted_inbound_peers = None + if len(data) >= 3: try: propagation_transfer_limit = float(data[2]) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index a364a12..070dc71 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -286,7 +286,7 @@ class LXMRouter: node_state, # Boolean flag signalling propagation node state int(time.time()), # Current node timebase self.propagation_per_transfer_limit, # Per-transfer limit for message propagation in kilobytes - self.get_wanted_inbound_peers(), # How many more inbound peers this node wants + None, # How many more inbound peers this node wants ] data = msgpack.packb(announce_data) @@ -895,7 +895,8 @@ class LXMRouter: threading.Thread(target=self.save_outbound_stamp_costs, daemon=True).start() def get_wanted_inbound_peers(self): - # TODO: Implement + # TODO: Implement/rethink. + # Probably not necessary anymore. 
return None def get_announce_app_data(self, destination_hash): From a6cf585109a354554fb223394db3405ed0b9510c Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Thu, 30 Jan 2025 15:11:26 +0100 Subject: [PATCH 154/161] Cleanup --- LXMF/Handlers.py | 1 + 1 file changed, 1 insertion(+) diff --git a/LXMF/Handlers.py b/LXMF/Handlers.py index 9876f4c..0705074 100644 --- a/LXMF/Handlers.py +++ b/LXMF/Handlers.py @@ -52,6 +52,7 @@ class LXMFPropagationAnnounceHandler: # wanted_inbound_peers = int(data[3]) # except: # wanted_inbound_peers = None + pass if len(data) >= 3: try: From d5540b927fd96d171a096e85efac64a3de921d37 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Fri, 31 Jan 2025 13:38:56 +0100 Subject: [PATCH 155/161] Added allow_duplicate option to message ingest API --- LXMF/LXMRouter.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index 070dc71..d631841 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -1618,7 +1618,7 @@ class LXMRouter: ### Message Routing & Delivery ######################## ####################################################### - def lxmf_delivery(self, lxmf_data, destination_type = None, phy_stats = None, ratchet_id = None, method = None, no_stamp_enforcement=False): + def lxmf_delivery(self, lxmf_data, destination_type = None, phy_stats = None, ratchet_id = None, method = None, no_stamp_enforcement=False, allow_duplicate=False): try: message = LXMessage.unpack_from_bytes(lxmf_data) if ratchet_id and not message.ratchet_id: @@ -1685,7 +1685,7 @@ class LXMRouter: RNS.log(str(self)+" ignored message from "+RNS.prettyhexrep(message.source_hash), RNS.LOG_DEBUG) return False - if self.has_message(message.hash): + if not allow_duplicate and self.has_message(message.hash): RNS.log(str(self)+" ignored already received message from "+RNS.prettyhexrep(message.source_hash), RNS.LOG_DEBUG) return False else: @@ -2107,7 +2107,7 @@ class LXMRouter: if peer != from_peer: peer.queue_unhandled_message(transient_id) - def lxmf_propagation(self, lxmf_data, signal_local_delivery=None, signal_duplicate=None, is_paper_message=False, from_peer=None): + def lxmf_propagation(self, lxmf_data, signal_local_delivery=None, signal_duplicate=None, allow_duplicate=False, is_paper_message=False, from_peer=None): no_stamp_enforcement = False if is_paper_message: no_stamp_enforcement = True @@ -2116,9 +2116,10 @@ class LXMRouter: if len(lxmf_data) >= LXMessage.LXMF_OVERHEAD: transient_id = RNS.Identity.full_hash(lxmf_data) - if not transient_id in self.propagation_entries and not transient_id in self.locally_processed_transient_ids: + if (not transient_id in self.propagation_entries and not transient_id in self.locally_processed_transient_ids) or allow_duplicate == True: received = time.time() destination_hash = lxmf_data[:LXMessage.DESTINATION_LENGTH] + RNS.log("GOT MESSAGE FOR "+RNS.prettyhexrep(destination_hash)) self.locally_processed_transient_ids[transient_id] = received @@ -2128,7 +2129,7 @@ class LXMRouter: decrypted_lxmf_data = delivery_destination.decrypt(encrypted_lxmf_data) if decrypted_lxmf_data != None: delivery_data = lxmf_data[:LXMessage.DESTINATION_LENGTH]+decrypted_lxmf_data - self.lxmf_delivery(delivery_data, delivery_destination.type, ratchet_id=delivery_destination.latest_ratchet_id, method=LXMessage.PROPAGATED, no_stamp_enforcement=no_stamp_enforcement) + self.lxmf_delivery(delivery_data, delivery_destination.type, ratchet_id=delivery_destination.latest_ratchet_id, method=LXMessage.PROPAGATED, 
no_stamp_enforcement=no_stamp_enforcement, allow_duplicate=allow_duplicate) self.locally_delivered_transient_ids[transient_id] = time.time() if signal_local_delivery != None: @@ -2166,7 +2167,7 @@ class LXMRouter: RNS.trace_exception(e) return False - def ingest_lxm_uri(self, uri, signal_local_delivery=None, signal_duplicate=None): + def ingest_lxm_uri(self, uri, signal_local_delivery=None, signal_duplicate=None, allow_duplicate=False): try: if not uri.lower().startswith(LXMessage.URI_SCHEMA+"://"): RNS.log("Cannot ingest LXM, invalid URI provided.", RNS.LOG_ERROR) @@ -2176,7 +2177,7 @@ class LXMRouter: lxmf_data = base64.urlsafe_b64decode(uri.replace(LXMessage.URI_SCHEMA+"://", "").replace("/", "")+"==") transient_id = RNS.Identity.full_hash(lxmf_data) - router_propagation_result = self.lxmf_propagation(lxmf_data, signal_local_delivery=signal_local_delivery, signal_duplicate=signal_duplicate, is_paper_message=True) + router_propagation_result = self.lxmf_propagation(lxmf_data, signal_local_delivery=signal_local_delivery, signal_duplicate=signal_duplicate, allow_duplicate=allow_duplicate, is_paper_message=True) if router_propagation_result != False: RNS.log("LXM with transient ID "+RNS.prettyhexrep(transient_id)+" was ingested.", RNS.LOG_DEBUG) return router_propagation_result From 1ef46650738f4ddc67579080d2eab60c9affcfa8 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Tue, 18 Feb 2025 20:05:19 +0100 Subject: [PATCH 156/161] Cleanup --- LXMF/LXMRouter.py | 1 - requirements.txt | 5 ++--- setup.py | 4 ++-- 3 files changed, 4 insertions(+), 6 deletions(-) diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py index d631841..d502ee6 100644 --- a/LXMF/LXMRouter.py +++ b/LXMF/LXMRouter.py @@ -2119,7 +2119,6 @@ class LXMRouter: if (not transient_id in self.propagation_entries and not transient_id in self.locally_processed_transient_ids) or allow_duplicate == True: received = time.time() destination_hash = lxmf_data[:LXMessage.DESTINATION_LENGTH] - RNS.log("GOT MESSAGE FOR "+RNS.prettyhexrep(destination_hash)) self.locally_processed_transient_ids[transient_id] = received diff --git a/requirements.txt b/requirements.txt index 6b7926a..2f4f642 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,3 +1,2 @@ -qrcode==7.4.2 -rns==0.7.8 -setuptools==70.0.0 +qrcode>=7.4.2 +rns>=0.9.1 diff --git a/setup.py b/setup.py index cabf20a..e01e9eb 100644 --- a/setup.py +++ b/setup.py @@ -25,6 +25,6 @@ setuptools.setup( 'lxmd=LXMF.Utilities.lxmd:main', ] }, - install_requires=['rns>=0.9.1'], - python_requires='>=3.7', + install_requires=["rns>=0.9.2"], + python_requires=">=3.7", ) From 570d2c68467e0614609df4dc89d295793e2a4878 Mon Sep 17 00:00:00 2001 From: Mark Qvist Date: Fri, 7 Mar 2025 11:05:50 +0100 Subject: [PATCH 157/161] Added configuration options to default config file --- LXMF/Utilities/lxmd.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py index f9a2ef6..48885b2 100644 --- a/LXMF/Utilities/lxmd.py +++ b/LXMF/Utilities/lxmd.py @@ -710,6 +710,25 @@ propagation_transfer_max_accepted_size = 256 # prioritise_destinations = 41d20c727598a3fbbdf9106133a3a0ed, d924b81822ca24e68e2effea99bcb8cf +# You can configure the maximum number of other +# propagation nodes that this node will peer +# with automatically. The default is 50. + +# max_peers = 25 + +# You can configure a list of static propagation +# node peers, that this node will always be +# peered with, by specifying a list of +# destination hashes. 
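
The max_peers ceiling described above works together with the peer rotation introduced earlier in this series: the router keeps roughly ROTATION_HEADROOM_PCT (10%) of max_peers free as headroom and, when the peer count grows into that headroom, unpeers idle, non-static peers whose message acceptance rate falls below ROTATION_AR_MAX (50%), dropping the lowest rates first. The sketch below is a simplified, self-contained restatement of that selection rule; the real LXMRouter.rotate_peers() additionally postpones rotation while many peers are untested, prefers fully synced peers as the rotation pool, and treats unresponsive peers separately:

    import math

    ROTATION_HEADROOM_PCT = 10    # percentage of max_peers kept free
    ROTATION_AR_MAX       = 0.5   # acceptance-rate threshold for dropping a peer

    def acceptance_rate(offered, outgoing):
        # Same formula as LXMPeer.acceptance_rate: accepted/offered, 0 if nothing offered yet
        return 0 if offered == 0 else outgoing / offered

    def rotation_candidates(peers, max_peers, static_peers):
        # peers maps peer_id -> (offered, outgoing, idle, alive); returns peer_ids to unpeer
        headroom = max(1, math.floor(max_peers * (ROTATION_HEADROOM_PCT / 100.0)))
        required_drops = len(peers) - (max_peers - headroom)
        if required_drops <= 0:
            return []

        candidates = []
        for peer_id, (offered, outgoing, idle, alive) in peers.items():
            if peer_id in static_peers or not idle or offered == 0:
                continue  # static peers, busy peers and peers never offered anything are kept
            ar = acceptance_rate(offered, outgoing)
            if ar < ROTATION_AR_MAX:
                candidates.append((ar, peer_id))

        # Drop the lowest acceptance rates first, never more than required_drops peers
        return [peer_id for ar, peer_id in sorted(candidates)[:required_drops]]
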

From 1ef46650738f4ddc67579080d2eab60c9affcfa8 Mon Sep 17 00:00:00 2001
From: Mark Qvist
Date: Tue, 18 Feb 2025 20:05:19 +0100
Subject: [PATCH 156/161] Cleanup

---
 LXMF/LXMRouter.py | 1 -
 requirements.txt  | 5 ++---
 setup.py          | 4 ++--
 3 files changed, 4 insertions(+), 6 deletions(-)

diff --git a/LXMF/LXMRouter.py b/LXMF/LXMRouter.py
index d631841..d502ee6 100644
--- a/LXMF/LXMRouter.py
+++ b/LXMF/LXMRouter.py
@@ -2119,7 +2119,6 @@ class LXMRouter:
                 if (not transient_id in self.propagation_entries and not transient_id in self.locally_processed_transient_ids) or allow_duplicate == True:
                     received = time.time()
                     destination_hash = lxmf_data[:LXMessage.DESTINATION_LENGTH]
-                    RNS.log("GOT MESSAGE FOR "+RNS.prettyhexrep(destination_hash))
 
                     self.locally_processed_transient_ids[transient_id] = received
 
diff --git a/requirements.txt b/requirements.txt
index 6b7926a..2f4f642 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,3 +1,2 @@
-qrcode==7.4.2
-rns==0.7.8
-setuptools==70.0.0
+qrcode>=7.4.2
+rns>=0.9.1
diff --git a/setup.py b/setup.py
index cabf20a..e01e9eb 100644
--- a/setup.py
+++ b/setup.py
@@ -25,6 +25,6 @@ setuptools.setup(
             'lxmd=LXMF.Utilities.lxmd:main',
         ]
     },
-    install_requires=['rns>=0.9.1'],
-    python_requires='>=3.7',
+    install_requires=["rns>=0.9.2"],
+    python_requires=">=3.7",
 )

From 570d2c68467e0614609df4dc89d295793e2a4878 Mon Sep 17 00:00:00 2001
From: Mark Qvist
Date: Fri, 7 Mar 2025 11:05:50 +0100
Subject: [PATCH 157/161] Added configuration options to default config file

---
 LXMF/Utilities/lxmd.py | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

diff --git a/LXMF/Utilities/lxmd.py b/LXMF/Utilities/lxmd.py
index f9a2ef6..48885b2 100644
--- a/LXMF/Utilities/lxmd.py
+++ b/LXMF/Utilities/lxmd.py
@@ -710,6 +710,25 @@ propagation_transfer_max_accepted_size = 256
 
 # prioritise_destinations = 41d20c727598a3fbbdf9106133a3a0ed, d924b81822ca24e68e2effea99bcb8cf
 
+# You can configure the maximum number of other
+# propagation nodes that this node will peer
+# with automatically. The default is 50.
+
+# max_peers = 25
+
+# You can configure a list of static propagation
+# node peers, that this node will always be
+# peered with, by specifying a list of
+# destination hashes.
+
+# static_peers = e17f833c4ddf8890dd3a79a6fea8161d, 5a2d0029b6e5ec87020abaea0d746da4
+
+# You can configure the propagation node to
+# only accept incoming propagation messages
+# from configured static peers.
+
+# from_static_only = True
+
 # By default, any destination is allowed to
 # connect and download messages, but you can
 # optionally restrict this. If you enable
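Note on the options above: together they let a propagation node cap how many peers it selects automatically, pin a fixed set of peers by destination hash, and restrict incoming propagation traffic to that set. A hypothetical lxmd configuration combining them; the [propagation] section name and the enable_node key are assumptions based on the surrounding defaults, and the hashes are simply the example values from the patch:

    [propagation]
    enable_node = yes
    max_peers = 25
    static_peers = e17f833c4ddf8890dd3a79a6fea8161d, 5a2d0029b6e5ec87020abaea0d746da4
    from_static_only = True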

From 336792c07a47ca60dcf2df1295b8065f0b458a22 Mon Sep 17 00:00:00 2001
From: Mark Qvist
Date: Thu, 13 Mar 2025 19:45:15 +0100
Subject: [PATCH 158/161] Updated dependencies

---
 setup.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/setup.py b/setup.py
index e01e9eb..19737d6 100644
--- a/setup.py
+++ b/setup.py
@@ -25,6 +25,6 @@ setuptools.setup(
             'lxmd=LXMF.Utilities.lxmd:main',
         ]
     },
-    install_requires=["rns>=0.9.2"],
+    install_requires=["rns>=0.9.3"],
     python_requires=">=3.7",
 )

From 326c0eed8f767e298eb3f09340a41314c254ddda Mon Sep 17 00:00:00 2001
From: Mark Qvist
Date: Thu, 13 Mar 2025 19:46:11 +0100
Subject: [PATCH 159/161] Updated version

---
 LXMF/_version.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/LXMF/_version.py b/LXMF/_version.py
index 22049ab..63af887 100644
--- a/LXMF/_version.py
+++ b/LXMF/_version.py
@@ -1 +1 @@
-__version__ = "0.6.2"
+__version__ = "0.6.3"

From e6021b8fed0ffcffcc6a7b9bbdb7d0d1a282ba8f Mon Sep 17 00:00:00 2001
From: Mark Qvist
Date: Tue, 15 Apr 2025 20:21:16 +0200
Subject: [PATCH 160/161] Updated license

---
 LICENSE | 16 ++++++++++++----
 1 file changed, 12 insertions(+), 4 deletions(-)

diff --git a/LICENSE b/LICENSE
index a25bd7a..eda392f 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,6 +1,6 @@
-MIT License
+Reticulum License
 
-Copyright (c) 2020 Mark Qvist / unsigned.io
+Copyright (c) 2025 Mark Qvist / unsigned.io
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
@@ -9,8 +9,16 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 copies of the Software, and to permit persons to whom the Software is
 furnished to do so, subject to the following conditions:
 
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
+- The Software shall not be used in any kind of system which includes amongst
+  its functions the ability to purposefully do harm to human beings.
+
+- The Software shall not be used, directly or indirectly, in the creation of
+  an artificial intelligence, machine learning or language model training
+  dataset, including but not limited to any use that contributes to the
+  training or development of such a model or algorithm.
+
+- The above copyright notice and this permission notice shall be included in
+  all copies or substantial portions of the Software.
 
 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,

From 1bdcf6ad534c3d34e4966d23bb04a129fd5ebc4f Mon Sep 17 00:00:00 2001
From: Mark Qvist
Date: Tue, 15 Apr 2025 20:21:54 +0200
Subject: [PATCH 161/161] Updated license

---
 LICENSE | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/LICENSE b/LICENSE
index eda392f..f5fb92d 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,6 +1,6 @@
 Reticulum License
 
-Copyright (c) 2025 Mark Qvist / unsigned.io
+Copyright (c) 2020-2025 Mark Qvist
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal