[ci skip] fix watch value integration test and type assertion fix for watch_dht_values

This commit is contained in:
Christien Rioux 2025-03-23 13:24:09 -04:00
parent 72b1434abc
commit 2f70f8382f
3 changed files with 106 additions and 91 deletions

View file

@ -7,7 +7,7 @@ import time
import os import os
import veilid import veilid
from veilid import ValueSubkey from veilid import ValueSubkey, Timestamp
################################################################## ##################################################################
BOGUS_KEY = veilid.TypedKey.from_value( BOGUS_KEY = veilid.TypedKey.from_value(
@ -245,8 +245,7 @@ async def test_open_writer_dht_value(api_connection: veilid.VeilidAPI):
await rc.delete_dht_record(key) await rc.delete_dht_record(key)
# @pytest.mark.skipif(os.getenv("INTEGRATION") != "1", reason="integration test requires two servers running") @pytest.mark.skipif(os.getenv("INTEGRATION") != "1", reason="integration test requires two servers running")
@pytest.mark.skip(reason = "don't work yet")
@pytest.mark.asyncio @pytest.mark.asyncio
async def test_watch_dht_values(): async def test_watch_dht_values():
@ -256,111 +255,129 @@ async def test_watch_dht_values():
if update.kind == veilid.VeilidUpdateKind.VALUE_CHANGE: if update.kind == veilid.VeilidUpdateKind.VALUE_CHANGE:
await value_change_queue.put(update) await value_change_queue.put(update)
async def null_update_callback(update: veilid.VeilidUpdate):
pass
try: try:
api = await veilid.api_connector(value_change_update_callback) api0 = await veilid.api_connector(value_change_update_callback, 0)
except veilid.VeilidConnectionError: except veilid.VeilidConnectionError:
pytest.skip("Unable to connect to veilid-server.") pytest.skip("Unable to connect to veilid-server 0.")
# Make two routing contexts, one with and one without safety try:
# So we can pretend to be a different node and get the watch updates api1 = await veilid.api_connector(null_update_callback, 1)
# Normally they would not get sent if the set comes from the same target except veilid.VeilidConnectionError:
# as the watch's target pytest.skip("Unable to connect to veilid-server 1.")
# XXX: this logic doesn't work because our node still suppresses updates
# XXX: if the value hasn't changed in the local record store
rcWatch = await api.new_routing_context()
rcSet = await (await api.new_routing_context()).with_safety(veilid.SafetySelection.unsafe())
async with rcWatch, rcSet:
# Make a DHT record
rec = await rcWatch.create_dht_record(veilid.DHTSchema.dflt(10))
# Set some subkey we care about async with api0, api1:
vd = await rcWatch.set_dht_value(rec.key, ValueSubkey(3), b"BLAH BLAH BLAH") # purge local and remote record stores to ensure we start fresh
assert vd is None await api0.debug("record purge local")
await api0.debug("record purge remote")
await api1.debug("record purge local")
await api1.debug("record purge remote")
# Make a watch on that subkey # make routing contexts
ts = await rcWatch.watch_dht_values(rec.key, [], 0, 0xFFFFFFFF) rc0 = await api0.new_routing_context()
assert ts != 0 rc1 = await api1.new_routing_context()
async with rc0, rc1:
# Reopen without closing to change routing context and not lose watch # Server 0: Make a DHT record
rec = await rcSet.open_dht_record(rec.key, rec.owner_key_pair()) rec0 = await rc0.create_dht_record(veilid.DHTSchema.dflt(10))
# Now set the subkey and trigger an update
vd = await rcSet.set_dht_value(rec.key, ValueSubkey(3), b"BLAH")
assert vd is None
# Now we should NOT get an update because the update is the same as our local copy
update = None
try:
update = await asyncio.wait_for(value_change_queue.get(), timeout=5)
except asyncio.TimeoutError:
pass
assert update is None
# Now set multiple subkeys and trigger an update # Server 0: Set some subkey we care about
vd = await asyncio.gather(*[rcSet.set_dht_value(rec.key, ValueSubkey(3), b"BLAH BLAH"), rcSet.set_dht_value(rec.key, ValueSubkey(4), b"BZORT")]) vd = await rc0.set_dht_value(rec0.key, ValueSubkey(3), b"BLAH")
assert vd == [None, None] assert vd is None
# Wait for the update # Server 0: Make a watch on all the subkeys
upd = await asyncio.wait_for(value_change_queue.get(), timeout=5) ts = await rc0.watch_dht_values(rec0.key, [], Timestamp(0), 0xFFFFFFFF)
assert ts != 0
# Verify the update came back but we don't get a new value because the sequence number is the same # Server 1: Open the subkey
assert upd.detail.key == rec.key rec1 = await rc1.open_dht_record(rec0.key, rec0.owner_key_pair())
assert upd.detail.count == 0xFFFFFFFD
assert upd.detail.subkeys == [(3, 4)]
assert upd.detail.value is None
# Reopen without closing to change routing context and not lose watch # Server 1: Now set the subkey and trigger an update
rec = await rcWatch.open_dht_record(rec.key, rec.owner_key_pair()) vd = await rc1.set_dht_value(rec1.key, ValueSubkey(3), b"BLAH")
assert vd is None
# Cancel some subkeys we don't care about # Server 0: Now we should NOT get an update because the update is the same as our local copy
still_active = await rcWatch.cancel_dht_watch(rec.key, [(ValueSubkey(0), ValueSubkey(2))]) update = None
assert still_active try:
update = await asyncio.wait_for(value_change_queue.get(), timeout=10)
except asyncio.TimeoutError:
pass
assert update is None
# Reopen without closing to change routing context and not lose watch # Server 1: Now set subkey and trigger an update
rec = await rcSet.open_dht_record(rec.key, rec.owner_key_pair()) vd = await rc1.set_dht_value(rec1.key, ValueSubkey(3), b"BLAH BLAH")
assert vd is None
# Now set multiple subkeys and trigger an update # Server 0: Wait for the update
vd = await asyncio.gather(*[rcSet.set_dht_value(rec.key, ValueSubkey(3), b"BLAH BLAH BLAH"), rcSet.set_dht_value(rec.key, ValueSubkey(5), b"BZORT BZORT")]) upd = await asyncio.wait_for(value_change_queue.get(), timeout=10)
assert vd == [None, None]
# Wait for the update, this longer timeout seems to help the flaky check below # Server 0: Verify the update came back with the first changed subkey's data
upd = await asyncio.wait_for(value_change_queue.get(), timeout=10) assert upd.detail.key == rec0.key
assert upd.detail.count == 0xFFFFFFFE
assert upd.detail.subkeys == [(3, 3)]
assert upd.detail.value.data == b"BLAH BLAH"
# Verify the update came back but we don't get a new value because the sequence number is the same # Server 1: Now set subkey and trigger an update
assert upd.detail.key == rec.key vd = await rc1.set_dht_value(rec1.key, ValueSubkey(4), b"BZORT")
assert vd is None
# This check is flaky on slow connections and often fails with different counts # Server 0: Wait for the update
assert upd.detail.count == 0xFFFFFFFC upd = await asyncio.wait_for(value_change_queue.get(), timeout=10)
assert upd.detail.subkeys == [(3, 3), (5, 5)]
assert upd.detail.value is None
# Reopen without closing to change routing context and not lose watch # Server 0: Verify the update came back with the first changed subkey's data
rec = await rcWatch.open_dht_record(rec.key, rec.owner_key_pair()) assert upd.detail.key == rec0.key
assert upd.detail.count == 0xFFFFFFFD
assert upd.detail.subkeys == [(4, 4)]
assert upd.detail.value.data == b"BZORT"
# Now cancel the update # Server 0: Cancel some subkeys we don't care about
still_active = await rcWatch.cancel_dht_watch(rec.key, [(ValueSubkey(3), ValueSubkey(9))]) still_active = await rc0.cancel_dht_watch(rec0.key, [(ValueSubkey(0), ValueSubkey(3))])
assert not still_active assert still_active
# Reopen without closing to change routing context and not lose watch # Server 1: Now set multiple subkeys and trigger an update
rec = await rcSet.open_dht_record(rec.key, rec.owner_key_pair()) vd = await asyncio.gather(*[rc1.set_dht_value(rec1.key, ValueSubkey(3), b"BLAH BLAH BLAH"), rc1.set_dht_value(rec1.key, ValueSubkey(4), b"BZORT BZORT")])
assert vd == [None, None]
# Now set multiple subkeys # Server 0: Wait for the update
vd = await asyncio.gather(*[rcSet.set_dht_value(rec.key, ValueSubkey(3), b"BLAH BLAH BLAH BLAH"), rcSet.set_dht_value(rec.key, ValueSubkey(5), b"BZORT BZORT BZORT")]) upd = await asyncio.wait_for(value_change_queue.get(), timeout=10)
assert vd == [None, None]
# Now we should NOT get an update
update = None
try:
update = await asyncio.wait_for(value_change_queue.get(), timeout=5)
except asyncio.TimeoutError:
pass
assert update is None
# Clean up # Server 0: Verify only one update came back
await rcSet.close_dht_record(rec.key) assert upd.detail.key == rec0.key
await rcSet.delete_dht_record(rec.key) assert upd.detail.count == 0xFFFFFFFC
assert upd.detail.subkeys == [(4, 4)]
assert upd.detail.value.data == b"BZORT BZORT"
# Server 0: Now we should NOT get any other update
update = None
try:
update = await asyncio.wait_for(value_change_queue.get(), timeout=10)
except asyncio.TimeoutError:
pass
assert update is None
# Now cancel the update
still_active = await rc0.cancel_dht_watch(rec0.key, [(ValueSubkey(3), ValueSubkey(9))])
assert not still_active
# Now set multiple subkeys
vd = await asyncio.gather(*[rc1.set_dht_value(rec1.key, ValueSubkey(3), b"BLAH BLAH BLAH BLAH"), rc1.set_dht_value(rec1.key, ValueSubkey(5), b"BZORT BZORT BZORT")])
assert vd == [None, None]
# Now we should NOT get an update
update = None
try:
update = await asyncio.wait_for(value_change_queue.get(), timeout=10)
except asyncio.TimeoutError:
pass
assert update is None
# Clean up
await rc1.close_dht_record(rec1.key)
await rc1.delete_dht_record(rec1.key)
await rc0.close_dht_record(rec0.key)
await rc0.delete_dht_record(rec0.key)
@pytest.mark.asyncio @pytest.mark.asyncio
@ -486,8 +503,6 @@ async def test_schema_limit_smpl(api_connection: veilid.VeilidAPI):
@pytest.mark.skipif(os.getenv("INTEGRATION") != "1", reason="integration test requires two servers running") @pytest.mark.skipif(os.getenv("INTEGRATION") != "1", reason="integration test requires two servers running")
@pytest.mark.asyncio @pytest.mark.asyncio
async def test_dht_integration_writer_reader(): async def test_dht_integration_writer_reader():

View file

@ -93,7 +93,7 @@ class RoutingContext(ABC):
self, self,
key: types.TypedKey, key: types.TypedKey,
subkeys: list[tuple[types.ValueSubkey, types.ValueSubkey]], subkeys: list[tuple[types.ValueSubkey, types.ValueSubkey]],
expiration: types.Timestamp = 0, expiration: types.Timestamp = types.Timestamp(0),
count: int = 0xFFFFFFFF, count: int = 0xFFFFFFFF,
) -> types.Timestamp: ) -> types.Timestamp:
pass pass

View file

@ -741,7 +741,7 @@ class _JsonRoutingContext(RoutingContext):
self, self,
key: TypedKey, key: TypedKey,
subkeys: list[tuple[ValueSubkey, ValueSubkey]], subkeys: list[tuple[ValueSubkey, ValueSubkey]],
expiration: Timestamp = 0, expiration: Timestamp = Timestamp(0),
count: int = 0xFFFFFFFF, count: int = 0xFFFFFFFF,
) -> Timestamp: ) -> Timestamp:
assert isinstance(key, TypedKey) assert isinstance(key, TypedKey)