Compare commits


No commits in common. "master" and "0.5.6" have entirely different histories.

10 changed files with 2613 additions and 234 deletions

View File

@@ -12,7 +12,7 @@ Before creating a bug report on this issue tracker, you **must** read the [Contr
- The issue tracker is used by developers of this project. **Do not use it to ask general questions, or for support requests**.
- Ideas and feature requests can be made on the [Discussions](https://github.com/markqvist/Reticulum/discussions). **Only** feature requests accepted by maintainers and developers are tracked and included on the issue tracker. **Do not post feature requests here**.
- After reading the [Contribution Guidelines](https://github.com/markqvist/Reticulum/blob/master/Contributing.md), **delete this section only** (*"Read the Contribution Guidelines"*) from your bug report, **and fill in all the other sections**.
- After reading the [Contribution Guidelines](https://github.com/markqvist/Reticulum/blob/master/Contributing.md), delete this section from your bug report.
**Describe the Bug**
A clear and concise description of what the bug is.

View File

@@ -16,6 +16,10 @@ ENV PATH="/home/myuser/.local/bin:${PATH}"
################### BEGIN NomadNet ###########################################
COPY --chown=myuser:myuser requirements.txt requirements.txt
RUN pip install --user -r requirements.txt
COPY --chown=myuser:myuser . .

View File

@@ -3,7 +3,6 @@ import RNS
import LXMF
import time
import nomadnet
import threading
import RNS.vendor.umsgpack as msgpack
class PNAnnounceHandler:
@@ -30,7 +29,7 @@ class PNAnnounceHandler:
RNS.log("The contained exception was: "+str(e), RNS.LOG_DEBUG)
class Directory:
ANNOUNCE_STREAM_MAXLENGTH = 256
ANNOUNCE_STREAM_MAXLENGTH = 64
aspect_filter = "nomadnetwork.node"
@staticmethod
@@ -56,7 +55,6 @@ class Directory:
self.directory_entries = {}
self.announce_stream = []
self.app = app
self.announce_lock = threading.Lock()
self.load_from_disk()
self.pn_announce_handler = PNAnnounceHandler(self)
@@ -126,7 +124,6 @@ class Directory:
RNS.log("Could not load directory from disk. The contained exception was: "+str(e), RNS.LOG_ERROR)
def lxmf_announce_received(self, source_hash, app_data):
with self.announce_lock:
if app_data != None:
if self.app.compact_stream:
try:
@@ -146,12 +143,10 @@ class Directory:
while len(self.announce_stream) > Directory.ANNOUNCE_STREAM_MAXLENGTH:
self.announce_stream.pop()
if hasattr(self.app, "ui") and self.app.ui != None:
if hasattr(self.app.ui, "main_display"):
self.app.ui.main_display.sub_displays.network_display.directory_change_callback()
def node_announce_received(self, source_hash, app_data, associated_peer):
with self.announce_lock:
if app_data != None:
if self.app.compact_stream:
try:
@@ -181,7 +176,6 @@ class Directory:
self.app.ui.main_display.sub_displays.network_display.directory_change_callback()
def pn_announce_received(self, source_hash, app_data, associated_peer, associated_node):
with self.announce_lock:
found_node = None
for sh in self.directory_entries:
if sh == associated_node:
@@ -212,7 +206,7 @@ class Directory:
while len(self.announce_stream) > Directory.ANNOUNCE_STREAM_MAXLENGTH:
self.announce_stream.pop()
if hasattr(self.app, "ui") and hasattr(self.app.ui, "main_display"):
if hasattr(self.app.ui, "main_display"):
self.app.ui.main_display.sub_displays.network_display.directory_change_callback()
def remove_announce_with_timestamp(self, timestamp):
@@ -345,7 +339,7 @@ class Directory:
if e.hosts_node:
node_list.append(e)
node_list.sort(key = lambda e: (e.sort_rank if e.sort_rank != None else 2^32, DirectoryEntry.TRUSTED-e.trust_level, e.display_name if e.display_name != None else "_"))
node_list.sort(key = lambda e: (e.sort_rank if e.sort_rank != None else 2^32, DirectoryEntry.TRUSTED-e.trust_level, e.display_name))
return node_list
def number_of_known_nodes(self):
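The Directory.py hunks above show that, relative to 0.5.6, master serializes its announce handlers with a threading.Lock and raises ANNOUNCE_STREAM_MAXLENGTH from 64 to 256. As a rough illustration only (a standalone sketch, not the project's Directory class), the lock-guarded, length-capped stream pattern looks like this:

import time
import threading

class AnnounceStreamSketch:
    # Hypothetical stand-in for Directory.ANNOUNCE_STREAM_MAXLENGTH;
    # master raises the real value from 64 to 256 in the hunk above.
    MAXLENGTH = 256

    def __init__(self):
        self.announce_lock = threading.Lock()
        self.announce_stream = []  # newest entries kept at the front

    def announce_received(self, source_hash, app_data):
        # Announce callbacks can arrive from several handlers concurrently,
        # so every mutation of the shared stream happens under the lock.
        with self.announce_lock:
            self.announce_stream.insert(0, (time.time(), source_hash, app_data))
            while len(self.announce_stream) > AnnounceStreamSketch.MAXLENGTH:
                self.announce_stream.pop()  # drop the oldest entry from the tail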

View File

@@ -1,6 +1,4 @@
import os
import sys
import RNS
import time
import threading
@@ -161,7 +159,7 @@ class Node:
try:
if request_allowed:
RNS.log("Serving page: "+file_path, RNS.LOG_VERBOSE)
if not RNS.vendor.platformutils.is_windows() and os.access(file_path, os.X_OK):
if os.access(file_path, os.X_OK):
env_map = {}
if "PATH" in os.environ:
env_map["PATH"] = os.environ["PATH"]
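In the Node.py hunk above, master additionally requires that the platform is not Windows (via RNS.vendor.platformutils.is_windows()) before treating an executable page file as a script. A minimal standalone sketch of that decision, using sys.platform as a stand-in for the RNS helper and a hypothetical serve_page() function that is not part of Node.py:

import os
import sys
import subprocess

def serve_page(file_path, env_map=None):
    # On Windows the executable-bit check is not meaningful, so always
    # serve the file statically there (mirroring the master-side guard).
    if sys.platform != "win32" and os.access(file_path, os.X_OK):
        # Executable page: run it and return whatever it writes to stdout.
        result = subprocess.run([file_path], env=env_map, capture_output=True)
        return result.stdout
    else:
        # Static page: return the raw file contents.
        with open(file_path, "rb") as f:
            return f.read()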

View File

@@ -19,7 +19,7 @@ from datetime import datetime
import RNS.vendor.umsgpack as msgpack
from ._version import __version__
from RNS.vendor.configobj import ConfigObj
from .vendor.configobj import ConfigObj
class NomadNetworkApp:
time_format = "%Y-%m-%d %H:%M:%S"
@@ -290,20 +290,9 @@ class NomadNetworkApp:
self.directory = nomadnet.Directory(self)
static_peers = []
for static_peer in self.static_peers:
try:
dh = bytes.fromhex(static_peer)
if len(dh) != RNS.Reticulum.TRUNCATED_HASHLENGTH//8:
raise ValueError("Invalid destination length")
static_peers.append(dh)
except Exception as e:
RNS.log(f"Could not decode static peer destination hash {static_peer}: {e}", RNS.LOG_ERROR)
self.message_router = LXMF.LXMRouter(
identity = self.identity, storagepath = self.storagepath, autopeer = True,
propagation_limit = self.lxmf_max_propagation_size, delivery_limit = self.lxmf_max_incoming_size,
max_peers = self.max_peers, static_peers = static_peers,
)
self.message_router.register_delivery_callback(self.lxmf_delivery)
@@ -537,7 +526,7 @@ class NomadNetworkApp:
RNS.log("Could not autoselect a propagation node! LXMF propagation will not be available until a trusted node announces on the network, or a propagation node is manually selected.", RNS.LOG_WARNING)
else:
pn_name_str = ""
RNS.log("Selecting "+RNS.prettyhexrep(selected_node)+pn_name_str+" as default LXMF propagation node", RNS.LOG_DEBUG)
RNS.log("Selecting "+RNS.prettyhexrep(selected_node)+pn_name_str+" as default LXMF propagation node", RNS.LOG_INFO)
self.message_router.set_outbound_propagation_node(selected_node)
def get_user_selected_propagation_node(self):
@@ -931,19 +920,6 @@ class NomadNetworkApp:
else:
self.prioritised_lxmf_destinations = []
if "static_peers" in self.config["node"]:
self.static_peers = self.config["node"].as_list("static_peers")
else:
self.static_peers = []
if not "max_peers" in self.config["node"]:
self.max_peers = None
else:
value = self.config["node"].as_int("max_peers")
if value < 0:
value = 0
self.max_peers = value
if not "message_storage_limit" in self.config["node"]:
self.message_storage_limit = 2000
else:
@@ -1160,15 +1136,13 @@ announce_interval = 360
# Whether to announce when the node starts.
announce_at_start = Yes
# When Nomad Network is hosting a page-serving
# node, it can also act as an LXMF propagation
# By default, when Nomad Network is hosting a
# node, it will also act as an LXMF propagation
# node. If there is already a large amount of
# propagation nodes on the network, or you
# simply want to run a pageserving-only node,
# you can disable running a propagation node.
# Due to lots of propagation nodes being
# available, this is currently the default.
disable_propagation = Yes
# disable_propagation = False
# The maximum amount of storage to use for
# the LXMF Propagation Node message store,
@@ -1202,16 +1176,9 @@ max_transfer_size = 256
# and generally you do not need to use it.
# prioritise_destinations = 41d20c727598a3fbbdf9106133a3a0ed, d924b81822ca24e68e2effea99bcb8cf
# You can configure the maximum number of other
# propagation nodes that this node will peer
# with automatically. The default is 50.
# max_peers = 25
# You can configure a list of static propagation
# node peers, that this node will always be
# peered with, by specifying a list of
# destination hashes.
# static_peers = e17f833c4ddf8890dd3a79a6fea8161d, 5a2d0029b6e5ec87020abaea0d746da4
# Automatic rescan interval of the pages directory in minutes.
# Default: int = 0 (no rescan)
page_refresh_interval = 0
# You can specify the interval in minutes for
# rescanning the hosted pages path. By default,
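The NomadNetworkApp.py hunks above show the master-side handling of the static_peers and max_peers directives: each configured static peer is hex-decoded, validated against the truncated destination hash length, and the resulting list is passed to LXMF.LXMRouter. A sketch of that validation as a standalone helper (parse_static_peers() is hypothetical; the RNS calls it uses are the ones visible in the diff):

import RNS

def parse_static_peers(static_peers):
    parsed = []
    for static_peer in static_peers:
        try:
            dh = bytes.fromhex(static_peer)
            # RNS truncated destination hashes are TRUNCATED_HASHLENGTH bits long,
            # so the decoded value must be exactly TRUNCATED_HASHLENGTH//8 bytes.
            if len(dh) != RNS.Reticulum.TRUNCATED_HASHLENGTH//8:
                raise ValueError("Invalid destination length")
            parsed.append(dh)
        except Exception as e:
            RNS.log(f"Could not decode static peer destination hash {static_peer}: {e}", RNS.LOG_ERROR)
    return parsed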

View File

@@ -1 +1 @@
__version__ = "0.6.2"
__version__ = "0.5.6"

View File

@@ -666,9 +666,9 @@ Determines the interval in minutes for rescanning the hosted files path. By defa
<
>>>
`!disable_propagation = yes`!
`!disable_propagation = no`!
>>>>
When Nomad Network is hosting a node, it can also run an LXMF propagation node. If there is already a large amount of propagation nodes on the network, or you simply want to run a pageserving-only node, you can disable running a propagation node.
By default, when Nomad Network is hosting a node, it will also run an LXMF propagation node. If there is already a large amount of propagation nodes on the network, or you simply want to run a pageserving-only node, you can disable running a propagation node.
<
>>>
@@ -689,18 +689,6 @@ The maximum accepted transfer size per incoming propagation transfer, in kilobyt
Configures the LXMF Propagation Node to prioritise storing messages for certain destinations. If the message store reaches the specified limit, LXMF will prioritise keeping messages for destinations specified with this option. This setting is optional, and generally you do not need to use it.
<
>>>
`!max_peers = 25`!
>>>>
Configures the maximum number of other nodes the LXMF Propagation Node will automatically peer with. The default is 50, but can be lowered or increased according to available resources.
<
>>>
`!static_peers = e17f833c4ddf8890dd3a79a6fea8161d, 5a2d0029b6e5ec87020abaea0d746da4`!
>>>>
Configures the LXMF Propagation Node to always maintain propagation node peering with the specified list of destination hashes.
<
>> Printing Section
This section holds configuration directives related to printing. It is delimited by the `![printing]`! header in the configuration file. Available directives, along with example values, are as follows:
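As a worked example of the node-section directives documented in the guide text above, and assuming the newer master-side options (max_peers and static_peers do not exist on 0.5.6), a [node] section of the Nomad Network configuration file might combine them as follows, reusing the example values shown above:

[node]
announce_at_start = Yes
disable_propagation = No
max_peers = 25
static_peers = e17f833c4ddf8890dd3a79a6fea8161d, 5a2d0029b6e5ec87020abaea0d746da4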

View File

@@ -368,10 +368,6 @@ class AnnounceStreamEntry(urwid.WidgetWrap):
def timestamp(self):
return self.timestamp
class TabButton(urwid.Button):
button_left = urwid.Text("[")
button_right = urwid.Text("]")
class AnnounceStream(urwid.WidgetWrap):
def __init__(self, app, parent):
self.app = app
@@ -380,24 +376,11 @@ class AnnounceStream(urwid.WidgetWrap):
self.timeout = self.app.config["textui"]["animation_interval"]*2
self.ilb = None
self.no_content = True
self.current_tab = "nodes"
self.added_entries = []
self.widget_list = []
self.update_widget_list()
# Create tab buttons
self.tab_nodes = TabButton("Nodes", on_press=self.show_nodes_tab)
self.tab_peers = TabButton("Peers", on_press=self.show_peers_tab)
self.tab_pn = TabButton("Propagation Nodes", on_press=self.show_pn_tab)
# Create tab bar with proportional widths
self.tab_bar = urwid.Columns([
('weight', 1, self.tab_nodes),
('weight', 1, self.tab_peers),
('weight', 3, self.tab_pn),
], dividechars=1) # Add 1 character spacing between tabs
self.ilb = ExceptionHandlingListBox(
self.widget_list,
on_selection_change=self.list_selection,
@@ -406,13 +389,7 @@
#highlight_offFocus="list_off_focus"
)
# Combine tab bar and list box
self.pile = urwid.Pile([
('pack', self.tab_bar),
('weight', 1, self.ilb),
])
self.display_widget = self.pile
self.display_widget = self.ilb
super().__init__(urwid.LineBox(self.display_widget, title="Announce Stream"))
def keypress(self, size, key):
@@ -435,45 +412,28 @@
self.update_widget_list()
def update_widget_list(self):
self.widget_list = []
new_entries = []
for e in self.app.directory.announce_stream:
announce_type = e[3]
# Filter based on current tab
if self.current_tab == "nodes" and (announce_type == "node" or announce_type == True):
new_entries.append(e)
elif self.current_tab == "peers" and (announce_type == "peer" or announce_type == False):
new_entries.append(e)
elif self.current_tab == "pn" and announce_type == "pn":
new_entries.append(e)
if not e[0] in self.added_entries:
self.added_entries.insert(0, e[0])
new_entries.insert(0, e)
for e in new_entries:
nw = AnnounceStreamEntry(self.app, e, self)
nw.timestamp = e[0]
self.widget_list.append(nw)
self.widget_list.insert(0, nw)
if len(new_entries) > 0:
self.no_content = False
if self.ilb != None:
self.ilb.set_body(self.widget_list)
else:
if len(self.widget_list) == 0:
self.no_content = True
self.widget_list = [urwid.Text(f"No {self.current_tab} announces", align='center')]
if self.ilb:
if self.ilb != None:
self.ilb.set_body(self.widget_list)
def show_nodes_tab(self, button):
self.current_tab = "nodes"
self.update_widget_list()
def show_peers_tab(self, button):
self.current_tab = "peers"
self.update_widget_list()
def show_pn_tab(self, button):
self.current_tab = "pn"
self.update_widget_list()
def list_selection(self, arg1, arg2):
pass
@@ -1634,10 +1594,10 @@ class NetworkDisplay():
if self.list_display == 1:
parent = self.app.ui.main_display.sub_displays.network_display
selected_node_entry = parent.known_nodes_display.ilb.get_selected_item()
if selected_node_entry is not None:
selected_node_hash = selected_node_entry.base_widget.display_widget.source_hash
if selected_node_entry != None:
selected_node_hash = selected_node_entry._get_base_widget().display_widget.source_hash
if selected_node_hash is not None:
if selected_node_hash != None:
info_widget = KnownNodeInfo(selected_node_hash)
options = parent.left_pile.options(height_type=urwid.WEIGHT, height_amount=1)
parent.left_pile.contents[0] = (info_widget, options)
@@ -1652,15 +1612,9 @@ class NetworkDisplay():
self.announce_stream_display.rebuild_widget_list()
def reinit_lxmf_peers(self):
if self.lxmf_peers_display:
si = self.lxmf_peers_display.ilb.get_selected_position()
else:
si = None
self.lxmf_peers_display = LXMFPeers(self.app)
self.lxmf_peers_display.delegate = self
self.close_list_dialogs()
if si != None:
self.lxmf_peers_display.ilb.select_item(si)
def close_list_dialogs(self):
if self.list_display == 0:
@@ -1799,7 +1753,7 @@ class LXMFPeers(urwid.WidgetWrap):
def make_peer_widgets(self):
widget_list = []
sorted_peers = sorted(self.peer_list, key=lambda pid: (self.app.directory.pn_trust_level(pid), self.peer_list[pid].sync_transfer_rate), reverse=True)
sorted_peers = sorted(self.peer_list, key=lambda pid: (self.app.directory.pn_trust_level(pid), self.peer_list[pid].link_establishment_rate), reverse=True)
for peer_id in sorted_peers:
peer = self.peer_list[peer_id]
trust_level = self.app.directory.pn_trust_level(peer_id)
@@ -1843,16 +1797,7 @@ class LXMFPeerEntry(urwid.WidgetWrap):
style = "list_unresponsive"
focus_style = "list_focus_unresponsive"
if peer.propagation_transfer_limit:
txfer_limit = RNS.prettysize(peer.propagation_transfer_limit*1000)
else:
txfer_limit = "No"
ar = round(peer.acceptance_rate*100, 2)
peer_info_str = sym+" "+display_str+"\n "+alive_string+", last heard "+pretty_date(int(peer.last_heard))
peer_info_str += "\n "+str(peer.unhandled_message_count)+f" unhandled LXMs, {txfer_limit} sync limit\n"
peer_info_str += f" {RNS.prettyspeed(peer.sync_transfer_rate)} STR, "
peer_info_str += f"{RNS.prettyspeed(peer.link_establishment_rate)} LER, {ar}% AR\n"
widget = ListEntry(peer_info_str)
widget = ListEntry(sym+" "+display_str+"\n "+alive_string+", last heard "+pretty_date(int(peer.last_heard))+"\n "+str(len(peer.unhandled_messages))+" unhandled LXMs, "+RNS.prettysize(peer.link_establishment_rate/8, "b")+"/s LER")
self.display_widget = urwid.AttrMap(widget, style, focus_style)
self.display_widget.destination_hash = destination_hash
super().__init__(self.display_widget)
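The Network.py hunks above show master adding a tabbed announce stream: a row of TabButton widgets in an urwid.Columns is packed above the list box inside an urwid.Pile, and each button swaps the filtered widget list. A self-contained sketch of that layout pattern (the TabbedList class and its sample entries are hypothetical, not the project's AnnounceStream):

import urwid

class TabButton(urwid.Button):
    button_left = urwid.Text("[")
    button_right = urwid.Text("]")

class TabbedList:
    def __init__(self):
        # Sample data standing in for the filtered announce stream entries
        self.entries = {"nodes": ["Node A", "Node B"], "peers": ["Peer X"]}
        self.walker = urwid.SimpleFocusListWalker([])
        tab_bar = urwid.Columns([
            ('weight', 1, TabButton("Nodes", on_press=self.show_tab, user_data="nodes")),
            ('weight', 1, TabButton("Peers", on_press=self.show_tab, user_data="peers")),
        ], dividechars=1)  # one character of spacing between tabs
        pile = urwid.Pile([
            ('pack', tab_bar),                          # tab bar takes its natural height
            ('weight', 1, urwid.ListBox(self.walker)),  # list box fills the remaining space
        ])
        self.widget = urwid.LineBox(pile, title="Announce Stream")
        self.show_tab(None, "nodes")

    def show_tab(self, button, tab):
        # Replace the list contents with the entries for the selected tab
        self.walker[:] = [urwid.Text(e) for e in self.entries[tab]]

if __name__ == "__main__":
    urwid.MainLoop(TabbedList().widget).run()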

nomadnet/vendor/configobj.py (vendored, new file, 2483 changed lines)

File diff suppressed because it is too large.

View File

@@ -30,6 +30,6 @@ setuptools.setup(
entry_points= {
'console_scripts': ['nomadnet=nomadnet.nomadnet:main']
},
install_requires=["rns>=0.9.4", "lxmf>=0.6.3", "urwid>=2.6.16", "qrcode"],
python_requires=">=3.7",
install_requires=["rns>=0.8.8", "lxmf>=0.5.8", "urwid>=2.4.4", "qrcode"],
python_requires=">=3.6",
)