# Mirror of https://gitlab.com/veilid/veilid.git
# Synced 2026-01-06 10:05:28 -05:00
# DHT Transaction Veilid Tests
|
|
from typing import Any, Awaitable, Callable, Optional, Coroutine
|
|
|
|
import pytest
|
|
import asyncio
|
|
import time
|
|
import os
|
|
|
|
import veilid
|
|
from veilid import *
|
|
from veilid.types import *
|
|
|
|
##################################################################

# A syntactically valid but never-created record key, used to exercise
# error paths without touching a real record.
BOGUS_KEY = RecordKey.from_value(
    CryptoKind.CRYPTO_KIND_VLD0, BareRecordKey.from_parts(BareOpaqueRecordKey.from_bytes(b' '), None))

# Arbitrary payloads written to record subkeys by the tests below.
TEST_MESSAGE_1 = b"BLAH BLAH BLAH"
TEST_MESSAGE_2 = b"blah blah blah blah"
|
|
|
|
@pytest.mark.asyncio
async def test_transact_dht_records_empty(api_connection: VeilidAPI):
    """Starting a transaction with no record keys must be rejected."""
    no_keys: list[RecordKey] = []
    with pytest.raises(VeilidAPIError):
        await api_connection.transact_dht_records(no_keys, None)
|
|
|
|
@pytest.mark.asyncio
async def test_transact_dht_records_unopened(api_connection: VeilidAPI):
    """A transaction over a record that was never opened must fail."""
    unopened = [BOGUS_KEY]
    with pytest.raises(VeilidAPIError):
        await api_connection.transact_dht_records(unopened, None)
|
|
|
|
@pytest.mark.asyncio
async def test_transact_dht_records_duplicate(api_connection: VeilidAPI):
    """Listing the same record key twice in one transaction must fail."""
    duplicated = [BOGUS_KEY, BOGUS_KEY]
    with pytest.raises(VeilidAPIError):
        await api_connection.transact_dht_records(duplicated, None)
|
|
|
|
@pytest.mark.asyncio
async def test_transact_dht_records_nonexistent_with_options(api_connection: VeilidAPI):
    """Even with explicit signing options, transacting on an unopened record fails."""
    for kind in await api_connection.valid_crypto_kinds():
        crypto = await api_connection.get_crypto_system(kind)
        async with crypto:
            signer = await crypto.generate_key_pair()

        options = TransactDHTRecordsOptions(default_signing_keypair=signer)
        with pytest.raises(VeilidAPIError):
            await api_connection.transact_dht_records([BOGUS_KEY], options)
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_transact_dht_records_close_out_of_order_one_of_one(api_connection: VeilidAPI):
    """Tear down a record while the transaction covering it is still open.

    Exercises the "out of order" shutdown path: the transaction context has
    not exited yet when its single record is closed and deleted.
    """
    rc = await api_connection.new_routing_context()
    async with rc:
        for kind in await api_connection.valid_crypto_kinds():
            rec = await rc.create_dht_record(kind, DHTSchema.dflt(1))

            rec_tx = await api_connection.transact_dht_records([rec.key], None)
            async with rec_tx:
                # Close and delete while rec_tx still holds the record.
                await rc.close_dht_record(rec.key)
                await rc.delete_dht_record(rec.key)
|
|
|
|
@pytest.mark.asyncio
async def test_transact_dht_records_close_out_of_order_one_of_two(api_connection: VeilidAPI):
    """Tear down two transacted records one-at-a-time (close+delete each in
    turn) while the transaction covering both is still open."""
    rc = await api_connection.new_routing_context()
    async with rc:
        for kind in await api_connection.valid_crypto_kinds():
            rec1 = await rc.create_dht_record(kind, DHTSchema.dflt(1))
            rec2 = await rc.create_dht_record(kind, DHTSchema.dflt(1))

            rec_tx = await api_connection.transact_dht_records([rec1.key, rec2.key], None)
            async with rec_tx:
                # First record fully torn down before touching the second.
                await rc.close_dht_record(rec1.key)
                await rc.delete_dht_record(rec1.key)

                await rc.close_dht_record(rec2.key)
                await rc.delete_dht_record(rec2.key)
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_transact_dht_records_close_out_of_order_two_of_two(api_connection: VeilidAPI):
    """Tear down two transacted records phase-by-phase (close both, then
    delete both) while the transaction covering both is still open."""
    rc = await api_connection.new_routing_context()
    async with rc:
        for kind in await api_connection.valid_crypto_kinds():
            rec1 = await rc.create_dht_record(kind, DHTSchema.dflt(1))
            rec2 = await rc.create_dht_record(kind, DHTSchema.dflt(1))

            rec_tx = await api_connection.transact_dht_records([rec1.key, rec2.key], None)
            async with rec_tx:
                # Close both records first...
                await rc.close_dht_record(rec1.key)
                await rc.close_dht_record(rec2.key)

                # ...then delete both.
                await rc.delete_dht_record(rec1.key)
                await rc.delete_dht_record(rec2.key)
|
|
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_transact_dht_records_get_nonexistent(api_connection: VeilidAPI):
    """A transactional get of a never-written subkey returns None."""
    rc = await api_connection.new_routing_context()
    async with rc:
        for kind in await api_connection.valid_crypto_kinds():
            rec = await rc.create_dht_record(kind, DHTSchema.dflt(1))

            tx = await api_connection.transact_dht_records([rec.key], None)
            async with tx:
                missing = await tx.get(rec.key, ValueSubkey(0))
                assert missing is None

            await rc.close_dht_record(rec.key)
            await rc.delete_dht_record(rec.key)
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_transact_dht_records_set_commit_get(api_connection: VeilidAPI):
    """A committed transactional set must be visible to a force-refresh get."""
    rc = await api_connection.new_routing_context()
    async with rc:
        for kind in await api_connection.valid_crypto_kinds():
            rec = await rc.create_dht_record(kind, DHTSchema.dflt(2))

            tx = await api_connection.transact_dht_records([rec.key], None)
            async with tx:
                # A set with no conflicting newer value returns None.
                assert await tx.set(rec.key, ValueSubkey(0), TEST_MESSAGE_1) is None
                await tx.commit()

            # Force-refresh read should observe the committed value.
            fetched = await rc.get_dht_value(rec.key, ValueSubkey(0), True)
            assert fetched is not None
            assert fetched.data == TEST_MESSAGE_1

            await rc.close_dht_record(rec.key)
            await rc.delete_dht_record(rec.key)
|
|
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_transact_dht_records_set_commit_delete_get(api_connection: VeilidAPI):
    """A committed value survives local deletion and is readable after reopening."""
    rc = await api_connection.new_routing_context()
    async with rc:
        for kind in await api_connection.valid_crypto_kinds():
            rec = await rc.create_dht_record(kind, DHTSchema.dflt(2))

            tx = await api_connection.transact_dht_records([rec.key], None)
            async with tx:
                assert await tx.set(rec.key, ValueSubkey(0), TEST_MESSAGE_1) is None
                await tx.commit()

            # Discard the local copy entirely...
            await rc.close_dht_record(rec.key)
            await rc.delete_dht_record(rec.key)

            # ...then reopen the record readonly.
            rec = await rc.open_dht_record(rec.key)

            # The committed value must still be retrievable from the network.
            fetched = await rc.get_dht_value(rec.key, ValueSubkey(0), True)
            assert fetched is not None
            assert fetched.data == TEST_MESSAGE_1

            await rc.close_dht_record(rec.key)
            await rc.delete_dht_record(rec.key)
|
|
|
|
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_transact_dht_records_set_rollback_get(api_connection: VeilidAPI):
    """A rolled-back transactional set must leave the subkey unwritten."""
    rc = await api_connection.new_routing_context()
    async with rc:
        for kind in await api_connection.valid_crypto_kinds():
            rec = await rc.create_dht_record(kind, DHTSchema.dflt(2))

            tx = await api_connection.transact_dht_records([rec.key], None)
            async with tx:
                assert await tx.set(rec.key, ValueSubkey(0), TEST_MESSAGE_1) is None
                await tx.rollback()

            # The rollback discarded the set, so nothing should come back.
            fetched = await rc.get_dht_value(rec.key, ValueSubkey(0), True)
            assert fetched is None

            await rc.close_dht_record(rec.key)
            await rc.delete_dht_record(rec.key)
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_transact_dht_records_set_drop_get(api_connection: VeilidAPI):
    """Exiting a transaction without committing (dropping it) discards its sets."""
    rc = await api_connection.new_routing_context()
    async with rc:
        for kind in await api_connection.valid_crypto_kinds():
            rec = await rc.create_dht_record(kind, DHTSchema.dflt(2))

            tx = await api_connection.transact_dht_records([rec.key], None)
            async with tx:
                assert await tx.set(rec.key, ValueSubkey(0), TEST_MESSAGE_1) is None
                # No commit: leaving the context drops the transaction.

            # The dropped set must not be visible.
            fetched = await rc.get_dht_value(rec.key, ValueSubkey(0), True)
            assert fetched is None

            await rc.close_dht_record(rec.key)
            await rc.delete_dht_record(rec.key)
|
|
|
|
@pytest.mark.asyncio
async def test_transact_dht_records_set_drop_use_dead(api_connection: VeilidAPI):
    """Set a value, drop the transaction without committing, then read.

    NOTE(review): despite the name, this test never uses the dropped ("dead")
    transaction handle after its context exits — as written it is identical to
    test_transact_dht_records_set_drop_get. Presumably it was meant to call a
    method on rec_tx after the drop and assert an error; confirm intent.
    """
    rc = await api_connection.new_routing_context()
    async with rc:
        for kind in await api_connection.valid_crypto_kinds():
            rec = await rc.create_dht_record(kind, DHTSchema.dflt(2))

            rec_tx = await api_connection.transact_dht_records([rec.key], None)
            async with rec_tx:
                vd = await rec_tx.set(rec.key, ValueSubkey(0), TEST_MESSAGE_1)
                assert vd is None
                # Transaction is dropped (no commit) when the context exits.

            # The uncommitted set must not be visible.
            vd2 = await rc.get_dht_value(rec.key, ValueSubkey(0), True)
            assert vd2 is None

            await rc.close_dht_record(rec.key)
            await rc.delete_dht_record(rec.key)
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_transact_dht_records_wrong_set(api_connection: VeilidAPI):
    """While a transaction holds a record, a direct (non-transactional) set
    through the routing context must be rejected.

    Fix: dropped the dead `vd = ...` assignment inside `pytest.raises` — when
    the expected error raises, the assignment never completes, and the name
    was never read afterwards.
    """
    rc = await api_connection.new_routing_context()
    async with rc:
        for kind in await api_connection.valid_crypto_kinds():
            rec = await rc.create_dht_record(kind, DHTSchema.dflt(2))

            rec_tx = await api_connection.transact_dht_records([rec.key], None)
            async with rec_tx:
                with pytest.raises(VeilidAPIError):
                    # Writes must go through rec_tx while the transaction is
                    # open; a routing-context set is expected to raise.
                    await rc.set_dht_value(rec.key, ValueSubkey(0), TEST_MESSAGE_1)

            await rc.close_dht_record(rec.key)
            await rc.delete_dht_record(rec.key)
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_transact_dht_records_set_commit_get_commit(api_connection: VeilidAPI):
    """A second transaction can read back a value committed by a first one."""
    rc = await api_connection.new_routing_context()
    async with rc:
        for kind in await api_connection.valid_crypto_kinds():
            rec = await rc.create_dht_record(kind, DHTSchema.dflt(2))

            # First transaction: write and commit.
            tx = await api_connection.transact_dht_records([rec.key], None)
            async with tx:
                assert await tx.set(rec.key, ValueSubkey(0), TEST_MESSAGE_1) is None
                await tx.commit()

            # Second transaction: read it back and commit the read.
            tx = await api_connection.transact_dht_records([rec.key], None)
            async with tx:
                inside = await tx.get(rec.key, ValueSubkey(0))
                assert inside is not None
                await tx.commit()

            assert inside.data == TEST_MESSAGE_1

            # A non-refreshing get should now see the committed value locally.
            cached = await rc.get_dht_value(rec.key, ValueSubkey(0), False)
            assert cached is not None and cached.data == TEST_MESSAGE_1

            await rc.close_dht_record(rec.key)
            await rc.delete_dht_record(rec.key)
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_transact_dht_records_set_commit_delete_get_rollback(api_connection: VeilidAPI):
    """Verify that rolling back a transactional get does not commit the fetched
    value into the local record store, while the value itself remains
    retrievable from the network afterwards.
    """
    rc = await api_connection.new_routing_context()
    async with rc:
        for kind in await api_connection.valid_crypto_kinds():
            rec = await rc.create_dht_record(kind, DHTSchema.dflt(2))

            # Set value transactionally
            rec_tx = await api_connection.transact_dht_records([rec.key], None)
            async with rec_tx:
                vd = await rec_tx.set(rec.key, ValueSubkey(0), TEST_MESSAGE_1)
                assert vd is None
                await rec_tx.commit()

            # Delete it locally
            await rc.close_dht_record(rec.key)
            await rc.delete_dht_record(rec.key)

            # Reopen the record readonly
            rec = await rc.open_dht_record(rec.key)

            # Get the value transactionally but do not commit locally
            rec_tx = await api_connection.transact_dht_records([rec.key], None)
            async with rec_tx:
                vd2 = await rec_tx.get(rec.key, ValueSubkey(0))
                assert vd2 is not None and vd2.data == TEST_MESSAGE_1
                await rec_tx.rollback()

            # Should not have committed the get result locally due to rollback:
            # no local sequence numbers recorded for either subkey.
            report1 = await rc.inspect_dht_record(rec.key, [], DHTReportScope.LOCAL)
            assert report1.local_seqs == [None, None]

            await rc.close_dht_record(rec.key)
            await rc.delete_dht_record(rec.key)

            # Reopen the record readonly
            rec = await rc.open_dht_record(rec.key)

            # Should get transactionally set value from online
            vd3 = await rc.get_dht_value(rec.key, ValueSubkey(0))
            assert vd3 is not None and vd3.data == TEST_MESSAGE_1

            await rc.close_dht_record(rec.key)
            await rc.delete_dht_record(rec.key)
|
|
|
|
|
|
|
|
|
|
# @pytest.mark.skipif(os.getenv("INTEGRATION") != "1", reason="integration test requires two servers running")
|
|
# @pytest.mark.asyncio
|
|
# async def test_watch_dht_values():
|
|
|
|
# value_change_queue: asyncio.Queue[VeilidUpdate] = asyncio.Queue()
|
|
|
|
# async def value_change_update_callback(update: VeilidUpdate):
|
|
# if update.kind == VeilidUpdateKind.VALUE_CHANGE:
|
|
# await value_change_queue.put(update)
|
|
|
|
# async def null_update_callback(update: VeilidUpdate):
|
|
# pass
|
|
|
|
# try:
|
|
# api0 = await api_connector(value_change_update_callback, 0)
|
|
# except VeilidConnectionError:
|
|
# pytest.skip("Unable to connect to veilid-server 0.")
|
|
# return
|
|
|
|
# try:
|
|
# api1 = await api_connector(null_update_callback, 1)
|
|
# except VeilidConnectionError:
|
|
# pytest.skip("Unable to connect to veilid-server 1.")
|
|
# return
|
|
|
|
# async with api0, api1:
|
|
# # purge local and remote record stores to ensure we start fresh
|
|
# await api0.debug("record purge local")
|
|
# await api0.debug("record purge remote")
|
|
# await api1.debug("record purge local")
|
|
# await api1.debug("record purge remote")
|
|
|
|
# # Clear the change queue if record purge cancels old watches
|
|
# while True:
|
|
# try:
|
|
# upd = await asyncio.wait_for(value_change_queue.get(), timeout=3)
|
|
# except asyncio.TimeoutError:
|
|
# break
|
|
|
|
# # make routing contexts
|
|
# rc0 = await api0.new_routing_context()
|
|
# rc1 = await api1.new_routing_context()
|
|
# async with rc0, rc1:
|
|
# for kind in await api0.valid_crypto_kinds():
|
|
|
|
# # Server 0: Make a DHT record
|
|
# rec0 = await rc0.create_dht_record(kind, DHTSchema.dflt(10))
|
|
|
|
# # Server 0: Set some subkey we care about
|
|
# vd = await rc0.set_dht_value(rec0.key, ValueSubkey(3), b"BLAH")
|
|
# assert vd is None
|
|
|
|
# await sync(rc0, [rec0])
|
|
|
|
# # Server 0: Make a watch on all the subkeys
|
|
# active = await rc0.watch_dht_values(rec0.key)
|
|
# assert active
|
|
|
|
# # Server 1: Open the subkey
|
|
# rec1 = await rc1.open_dht_record(rec0.key, rec0.owner_key_pair())
|
|
|
|
# # Server 1: Now set the subkey and trigger an update
|
|
# vd = await rc1.set_dht_value(rec1.key, ValueSubkey(3), b"BLAH")
|
|
# assert vd is None
|
|
# await sync(rc1, [rec1])
|
|
|
|
# # Server 0: Now we should NOT get an update because the update is the same as our local copy
|
|
# upd = None
|
|
# try:
|
|
# upd = await asyncio.wait_for(value_change_queue.get(), timeout=10)
|
|
# except asyncio.TimeoutError:
|
|
# pass
|
|
# assert upd is None
|
|
|
|
# # Server 1: Now set subkey and trigger an update
|
|
# vd = await rc1.set_dht_value(rec1.key, ValueSubkey(3), b"BLAH BLAH")
|
|
# assert vd is None
|
|
# await sync(rc1, [rec1])
|
|
|
|
# # Server 0: Wait for the update
|
|
# upd = await asyncio.wait_for(value_change_queue.get(), timeout=10)
|
|
|
|
# # Server 0: Verify the update came back with the first changed subkey's data
|
|
# assert isinstance(upd.detail, VeilidValueChange)
|
|
# assert upd.detail.key == rec0.key
|
|
# assert upd.detail.count == 0xFFFFFFFE
|
|
# assert upd.detail.subkeys == [(3, 3)]
|
|
# assert upd.detail.value is not None
|
|
# assert upd.detail.value.data == b"BLAH BLAH"
|
|
|
|
# # Server 1: Now set subkey and trigger an update
|
|
# vd = await rc1.set_dht_value(rec1.key, ValueSubkey(4), b"BZORT")
|
|
# assert vd is None
|
|
# await sync(rc1, [rec1])
|
|
|
|
# # Server 0: Wait for the update
|
|
# upd = await asyncio.wait_for(value_change_queue.get(), timeout=10)
|
|
|
|
# # Server 0: Verify the update came back with the first changed subkey's data
|
|
# assert isinstance(upd.detail, VeilidValueChange)
|
|
# assert upd.detail.key == rec0.key
|
|
# assert upd.detail.count == 0xFFFFFFFD
|
|
# assert upd.detail.subkeys == [(4, 4)]
|
|
# assert upd.detail.value is not None
|
|
# assert upd.detail.value.data == b"BZORT"
|
|
|
|
# # Server 0: Cancel some subkeys we don't care about
|
|
# active = await rc0.cancel_dht_watch(rec0.key, [(ValueSubkey(0), ValueSubkey(3))])
|
|
# assert active
|
|
|
|
# # Server 1: Now set multiple subkeys and trigger an update
|
|
# vd = await asyncio.gather(*[rc1.set_dht_value(rec1.key, ValueSubkey(3), b"BLAH BLAH BLAH"), rc1.set_dht_value(rec1.key, ValueSubkey(4), b"BZORT BZORT")])
|
|
# assert vd == [None, None]
|
|
# await sync(rc1, [rec1])
|
|
|
|
# # Server 0: Wait for the update
|
|
# upd = await asyncio.wait_for(value_change_queue.get(), timeout=10)
|
|
|
|
# # Server 0: Verify only one update came back
|
|
# assert isinstance(upd.detail, VeilidValueChange)
|
|
# assert upd.detail.key == rec0.key
|
|
# assert upd.detail.count == 0xFFFFFFFC
|
|
# assert upd.detail.subkeys == [(4, 4)]
|
|
# assert upd.detail.value is not None
|
|
# assert upd.detail.value.data == b"BZORT BZORT"
|
|
|
|
# # Server 0: Now we should NOT get any other update
|
|
# upd = None
|
|
# try:
|
|
# upd = await asyncio.wait_for(value_change_queue.get(), timeout=10)
|
|
# except asyncio.TimeoutError:
|
|
# pass
|
|
# if upd is not None:
|
|
# print(f"bad update: {VeilidJSONEncoder.dumps(upd)}")
|
|
# assert upd is None
|
|
|
|
# # Now cancel the update
|
|
# active = await rc0.cancel_dht_watch(rec0.key, [(ValueSubkey(3), ValueSubkey(9))])
|
|
# assert not active
|
|
|
|
# # Server 0: Wait for the cancellation update
|
|
# upd = await asyncio.wait_for(value_change_queue.get(), timeout=10)
|
|
|
|
# # Server 0: Verify only one update came back
|
|
# assert isinstance(upd.detail, VeilidValueChange)
|
|
# assert upd.detail.key == rec0.key
|
|
# assert upd.detail.count == 0
|
|
# assert upd.detail.subkeys == []
|
|
# assert upd.detail.value is None
|
|
|
|
# # Now set multiple subkeys
|
|
# vd = await asyncio.gather(*[rc1.set_dht_value(rec1.key, ValueSubkey(3), b"BLAH BLAH BLAH BLAH"), rc1.set_dht_value(rec1.key, ValueSubkey(5), b"BZORT BZORT BZORT")])
|
|
# assert vd == [None, None]
|
|
# await sync(rc1, [rec1])
|
|
|
|
# # Now we should NOT get an update
|
|
# upd = None
|
|
# try:
|
|
# upd = await asyncio.wait_for(value_change_queue.get(), timeout=10)
|
|
# except asyncio.TimeoutError:
|
|
# pass
|
|
# if upd is not None:
|
|
# print(f"bad update: {VeilidJSONEncoder.dumps(upd)}")
|
|
# assert upd is None
|
|
|
|
# # Clean up
|
|
# await rc1.close_dht_record(rec1.key)
|
|
# await rc1.delete_dht_record(rec1.key)
|
|
# await rc0.close_dht_record(rec0.key)
|
|
# await rc0.delete_dht_record(rec0.key)
|
|
|
|
|
|
# @pytest.mark.skipif(os.getenv("INTEGRATION") != "1", reason="integration test requires two servers running")
|
|
# @pytest.mark.skipif(os.getenv("STRESS") != "1", reason="stress test takes a long time")
|
|
# @pytest.mark.asyncio
|
|
# async def test_watch_many_dht_values():
|
|
|
|
# value_change_queue: asyncio.Queue[VeilidUpdate] = asyncio.Queue()
|
|
|
|
# async def value_change_update_callback(update: VeilidUpdate):
|
|
# if update.kind == VeilidUpdateKind.VALUE_CHANGE:
|
|
# await value_change_queue.put(update)
|
|
|
|
# async def null_update_callback(update: VeilidUpdate):
|
|
# pass
|
|
|
|
# try:
|
|
# api0 = await api_connector(value_change_update_callback, 0)
|
|
# except VeilidConnectionError:
|
|
# pytest.skip("Unable to connect to veilid-server 0.")
|
|
# return
|
|
|
|
# try:
|
|
# api1 = await api_connector(null_update_callback, 1)
|
|
# except VeilidConnectionError:
|
|
# pytest.skip("Unable to connect to veilid-server 1.")
|
|
# return
|
|
|
|
# async with api0, api1:
|
|
# # purge local and remote record stores to ensure we start fresh
|
|
# await api0.debug("record purge local")
|
|
# await api0.debug("record purge remote")
|
|
# await api1.debug("record purge local")
|
|
# await api1.debug("record purge remote")
|
|
|
|
# # make routing contexts
|
|
# # unsafe version for debugging
|
|
# # rc0 = await (await api0.new_routing_context()).with_safety(SafetySelection.unsafe())
|
|
# # rc1 = await (await api1.new_routing_context()).with_safety(SafetySelection.unsafe())
|
|
# # safe default version
|
|
# rc0 = await api0.new_routing_context()
|
|
# rc1 = await api1.new_routing_context()
|
|
|
|
# async with rc0, rc1:
|
|
|
|
# for kind in await api0.valid_crypto_kinds():
|
|
# COUNT = 10
|
|
# records = []
|
|
|
|
# # Make and watch all records
|
|
# for n in range(COUNT):
|
|
# print(f"making record {n}")
|
|
# # Server 0: Make a DHT record
|
|
# records.append(await rc0.create_dht_record(kind, DHTSchema.dflt(1)))
|
|
|
|
# # Server 0: Set some subkey we care about
|
|
# vd = await rc0.set_dht_value(records[n].key, ValueSubkey(0), b"BLAH")
|
|
# assert vd is None
|
|
|
|
# # Server 0: Make a watch on all the subkeys
|
|
# active = await rc0.watch_dht_values(records[n].key)
|
|
# assert active
|
|
|
|
# # Open and set all records
|
|
# missing_records = set()
|
|
# for (n, record) in enumerate(records):
|
|
# print(f"setting record {n}")
|
|
|
|
# # Server 1: Open the subkey
|
|
# _ignore = await rc1.open_dht_record(record.key, record.owner_key_pair())
|
|
|
|
# # Server 1: Now set the subkey and trigger an update
|
|
# vd = await rc1.set_dht_value(record.key, ValueSubkey(0), b"BLAH BLAH")
|
|
# assert vd is None
|
|
|
|
# missing_records.add(record.key)
|
|
|
|
# # Server 0: Now we should get an update for every change
|
|
# for n in range(len(records)):
|
|
# print(f"waiting for change {n}")
|
|
|
|
# # Server 0: Wait for the update
|
|
# try:
|
|
# upd = await asyncio.wait_for(value_change_queue.get(), timeout=10)
|
|
# assert isinstance(upd.detail, VeilidValueChange)
|
|
# missing_records.remove(upd.detail.key)
|
|
# except:
|
|
# # Dump which records didn't get updates
|
|
# for (m, record) in enumerate(records):
|
|
# if record.key not in missing_records:
|
|
# continue
|
|
# print(f"missing update for record {m}: {record}")
|
|
# info0 = await api0.debug(f"record info {record.key}")
|
|
# info1 = await api1.debug(f"record info {record.key}")
|
|
# print(f"from rc0: {info0}")
|
|
# print(f"from rc1: {info1}")
|
|
# raise
|
|
|
|
# # Clean up
|
|
# for record in records:
|
|
# await rc1.close_dht_record(record.key)
|
|
# await rc1.delete_dht_record(record.key)
|
|
# await rc0.close_dht_record(record.key)
|
|
# await rc0.delete_dht_record(record.key)
|
|
|
|
# @pytest.mark.asyncio
|
|
# async def test_inspect_dht_record(api_connection: VeilidAPI):
|
|
# rc = await api_connection.new_routing_context()
|
|
# async with rc:
|
|
# for kind in await api_connection.valid_crypto_kinds():
|
|
# rec = await rc.create_dht_record(kind, DHTSchema.dflt(2))
|
|
|
|
# vd = await rc.set_dht_value(rec.key, ValueSubkey(0), b"BLAH BLAH BLAH")
|
|
# assert vd is None
|
|
|
|
# rr = await rc.inspect_dht_record(rec.key, [], DHTReportScope.LOCAL)
|
|
# #print("rr: {}", rr.__dict__)
|
|
# assert rr.subkeys == [(0, 1)]
|
|
# assert rr.local_seqs == [0, None]
|
|
# assert rr.network_seqs == [None, None]
|
|
|
|
# await sync(rc, [rec])
|
|
|
|
# rr2 = await rc.inspect_dht_record(rec.key, [], DHTReportScope.SYNC_GET)
|
|
# #print("rr2: {}", rr2.__dict__)
|
|
# assert rr2.subkeys == [(0, 1)]
|
|
# assert rr2.local_seqs == [0, None]
|
|
# assert rr2.network_seqs == [0, None]
|
|
|
|
# await rc.close_dht_record(rec.key)
|
|
# await rc.delete_dht_record(rec.key)
|
|
|
|
|
|
|
|
|
|
# async def _run_test_schema_limit(api_connection: VeilidAPI, open_record: Callable[[RoutingContext, CryptoSystem, int], Awaitable[tuple[DHTRecordDescriptor, Optional[KeyPair]]]], count: int, test_data: bytes):
|
|
# rc = await api_connection.new_routing_context()
|
|
# async with rc:
|
|
# for kind in await api_connection.valid_crypto_kinds():
|
|
# cs = await api_connection.get_crypto_system(kind)
|
|
# async with cs:
|
|
# (desc, writer) = await open_record(rc, cs, count)
|
|
# print(f'{desc.key} {writer}')
|
|
|
|
# # write dht records on server 0
|
|
# records = []
|
|
# print(f'writing {count} subkeys')
|
|
# for n in range(count):
|
|
# await rc.set_dht_value(desc.key, ValueSubkey(n), test_data)
|
|
# print(f' {n}')
|
|
|
|
# await sync(rc, [desc])
|
|
|
|
# await rc.close_dht_record(desc.key)
|
|
|
|
# # read dht records on server 0
|
|
# print(f'reading {count} subkeys')
|
|
# desc1 = await rc.open_dht_record(desc.key)
|
|
# for n in range(count):
|
|
# vd0 = await rc.get_dht_value(desc1.key, ValueSubkey(n))
|
|
# assert vd0 is not None
|
|
# assert vd0.data == test_data
|
|
# print(f' {n}')
|
|
|
|
|
|
# @pytest.mark.skipif(os.getenv("INTEGRATION") != "1", reason="integration test requires two servers running")
|
|
# @pytest.mark.asyncio
|
|
# async def test_dht_integration_writer_reader():
|
|
|
|
# async def null_update_callback(update: VeilidUpdate):
|
|
# pass
|
|
|
|
# try:
|
|
# api0 = await api_connector(null_update_callback, 0)
|
|
# except VeilidConnectionError:
|
|
# pytest.skip("Unable to connect to veilid-server 0.")
|
|
# return
|
|
|
|
# try:
|
|
# api1 = await api_connector(null_update_callback, 1)
|
|
# except VeilidConnectionError:
|
|
# pytest.skip("Unable to connect to veilid-server 1.")
|
|
# return
|
|
|
|
# async with api0, api1:
|
|
# # purge local and remote record stores to ensure we start fresh
|
|
# await api0.debug("record purge local")
|
|
# await api0.debug("record purge remote")
|
|
# await api1.debug("record purge local")
|
|
# await api1.debug("record purge remote")
|
|
|
|
# # make routing contexts
|
|
# rc0 = await api0.new_routing_context()
|
|
# rc1 = await api1.new_routing_context()
|
|
# async with rc0, rc1:
|
|
# for kind in await api0.valid_crypto_kinds():
|
|
# COUNT = 100
|
|
# TEST_DATA = b"test data"
|
|
|
|
# # write dht records on server 0
|
|
# records = []
|
|
# schema = DHTSchema.dflt(1)
|
|
# print(f'writing {COUNT} records')
|
|
# for n in range(COUNT):
|
|
# desc = await rc0.create_dht_record(kind, schema)
|
|
# records.append(desc)
|
|
# print(f' {n}: key={desc.key} owner={desc.owner_bare_key_pair()}')
|
|
|
|
# await rc0.set_dht_value(desc.key, ValueSubkey(0), TEST_DATA)
|
|
|
|
# await sync(rc0, records)
|
|
|
|
# # read dht records on server 1
|
|
# print(f'reading {COUNT} records')
|
|
# n = 0
|
|
# for desc in records:
|
|
# print(f' {n}: key={desc.key} owner={desc.owner_key_pair()}')
|
|
# n += 1
|
|
|
|
# desc1 = await rc1.open_dht_record(desc.key)
|
|
# vd1 = await rc1.get_dht_value(desc1.key, ValueSubkey(0))
|
|
# assert vd1 is not None
|
|
# assert vd1.data == TEST_DATA
|
|
# await rc1.close_dht_record(desc1.key)
|
|
|
|
|
|
|
|
# @pytest.mark.skipif(os.getenv("STRESS") != "1", reason="stress test takes a long time")
|
|
# @pytest.mark.asyncio
|
|
# async def test_dht_write_read_local():
|
|
|
|
# async def null_update_callback(update: VeilidUpdate):
|
|
# pass
|
|
|
|
# try:
|
|
# api0 = await api_connector(null_update_callback, 0)
|
|
# except VeilidConnectionError:
|
|
# pytest.skip("Unable to connect to veilid-server 0.")
|
|
# return
|
|
|
|
# async with api0:
|
|
# # purge local and remote record stores to ensure we start fresh
|
|
# await api0.debug("record purge local")
|
|
# await api0.debug("record purge remote")
|
|
|
|
# # make routing contexts
|
|
# rc0 = await api0.new_routing_context()
|
|
# async with rc0:
|
|
# for kind in await api0.valid_crypto_kinds():
|
|
|
|
# # Previously COUNT was set to 500, which causes these tests to take
|
|
# # 10s of minutes on slow connections or debug veilid-server builds
|
|
# COUNT = 100
|
|
# TEST_DATA = b"ABCD"*1024
|
|
# TEST_DATA2 = b"ABCD"*4096
|
|
|
|
# # write dht records on server 0
|
|
# records = []
|
|
# schema = DHTSchema.dflt(2)
|
|
# print(f'writing {COUNT} records')
|
|
# for n in range(COUNT):
|
|
# desc = await rc0.create_dht_record(kind, schema)
|
|
# records.append(desc)
|
|
|
|
# await rc0.set_dht_value(desc.key, ValueSubkey(0), TEST_DATA)
|
|
# await rc0.set_dht_value(desc.key, ValueSubkey(1), TEST_DATA2)
|
|
|
|
# print(f' {n}: {desc.key} {desc.owner}:{desc.owner_secret}')
|
|
|
|
# await sync(rc0, records)
|
|
|
|
# for desc0 in records:
|
|
# await rc0.close_dht_record(desc0.key)
|
|
|
|
# await api0.debug("record purge local")
|
|
# await api0.debug("record purge remote")
|
|
|
|
# # read dht records on server 0
|
|
# print(f'reading {COUNT} records')
|
|
# n = 0
|
|
# for desc0 in records:
|
|
# desc1 = await rc0.open_dht_record(desc0.key)
|
|
|
|
# vd0 = await rc0.get_dht_value(desc1.key, ValueSubkey(0), force_refresh=True)
|
|
# assert vd0 is not None
|
|
# assert vd0.data == TEST_DATA
|
|
|
|
# vd1 = await rc0.get_dht_value(desc1.key, ValueSubkey(1), force_refresh=True)
|
|
# assert vd1 is not None
|
|
# assert vd1.data == TEST_DATA2
|
|
# await rc0.close_dht_record(desc1.key)
|
|
|
|
# print(f' {n}')
|
|
# n += 1
|
|
|
|
|
|
@pytest.mark.skipif(os.getenv("STRESS") != "1", reason="stress test takes a long time")
@pytest.mark.asyncio
async def test_dht_transaction_write_read_full_subkeys_local():
    """Stress test: transactionally write every subkey of many records in
    parallel, commit, purge the local stores, then transactionally read every
    subkey back and verify the data, timing each phase.

    Requires a live veilid-server (connects via api_connector); skipped when
    it is unreachable.
    """

    async def null_update_callback(update: VeilidUpdate):
        # This test does not consume node updates.
        pass

    try:
        api0 = await api_connector(null_update_callback, 0)
    except VeilidConnectionError:
        pytest.skip("Unable to connect to veilid-server 0.")
        return

    async with api0:
        # purge local and remote record stores to ensure we start fresh
        await api0.debug("record purge local")
        await api0.debug("record purge remote")

        # make routing contexts
        rc0 = await (await api0.new_routing_context()).with_sequencing(Sequencing.ENSURE_ORDERED)
        async with rc0:

            for kind in await api0.valid_crypto_kinds():
                print(f"kind: {kind}")
                cs = await api0.get_crypto_system(kind)
                async with cs:

                    # Number of records
                    COUNT = 8
                    # Number of subkeys per record
                    SUBKEY_COUNT = 32
                    # BareNonce to encrypt test data
                    NONCE = Nonce.from_bytes(b"A"*await cs.nonce_length())
                    # Secret to encrypt test data
                    SECRET = SharedSecret.from_value(await cs.kind(), BareSharedSecret.from_bytes(b"A"*await cs.shared_secret_length()))
                    # Max subkey size (bounded so a full record stays near 1 MiB)
                    MAX_SUBKEY_SIZE = min(32768, 1024*1024//SUBKEY_COUNT)
                    # MAX_SUBKEY_SIZE = 256

                    # write dht records on server 0
                    records : list[DHTRecordDescriptor] = []
                    subkey_data_list : list[bytes] = []
                    schema = DHTSchema.dflt(SUBKEY_COUNT)
                    print(f'writing {COUNT} records with full subkeys')
                    for n in range(COUNT):
                        desc = await rc0.create_dht_record(kind, schema)
                        print(f' {n}: {desc.key} {desc.owner}:{desc.owner_secret}')
                        records.append(desc)

                        # Make encrypted data that is consistent and hard to compress
                        subkey_data = bytes(chr(ord("A")+n)*MAX_SUBKEY_SIZE, 'ascii')
                        subkey_data = await cs.crypt_no_auth(subkey_data, NONCE, SECRET)
                        subkey_data_list.append(subkey_data)

                    start = time.time()
                    transaction = await api0.transact_dht_records([x.key for x in records], None)
                    print(f'transaction begin: {time.time()-start}')

                    # Write subkey i of every record concurrently, one subkey
                    # index at a time.
                    for i in range(SUBKEY_COUNT):
                        start = time.time()

                        init_set_futures : set[Coroutine[Any, Any, ValueData | None]] = set()

                        for n in range(COUNT):
                            key = records[n].key
                            subkey_data = subkey_data_list[n]
                            init_set_futures.add(transaction.set(key, ValueSubkey(i), subkey_data))

                        # Update each subkey for each record in parallel
                        # This ensures that each record gets its own expiration update
                        await asyncio.gather(*init_set_futures)

                        print(f'transaction set subkey {i}: {time.time()-start}')

                    start = time.time()
                    await transaction.commit()
                    print(f'transaction commit: {time.time()-start}')

                    for desc in records:
                        await rc0.close_dht_record(desc.key)

                    # Purge so the reads below must come from the network.
                    await api0.debug("record purge local")
                    await api0.debug("record purge remote")

                    # read dht records on server 0
                    print(f'reading {COUNT} records')

                    for desc in records:
                        await rc0.open_dht_record(desc.key)

                    start = time.time()
                    transaction = await api0.transact_dht_records([x.key for x in records], None)
                    print(f'transaction begin: {time.time()-start}')

                    # Read subkey i of every record concurrently and check the
                    # payload round-tripped, one subkey index at a time.
                    for i in range(SUBKEY_COUNT):
                        start = time.time()
                        subkey = ValueSubkey(i)

                        init_get_futures : set[Coroutine[Any, Any, tuple[RecordKey, ValueSubkey, bytes, ValueData | None]]] = set()

                        for n in range(COUNT):
                            key = records[n].key
                            subkey_data = subkey_data_list[n]

                            # Pair each get with the data expected for that key
                            # so results can be validated after gather().
                            async def getter(key: RecordKey, subkey: ValueSubkey, check_data: bytes):
                                return (key, subkey, check_data, await transaction.get(key, subkey))

                            init_get_futures.add(getter(key, subkey, subkey_data))

                        # Update each subkey for each record in parallel
                        # This ensures that each record gets its own expiration update
                        get_results = await asyncio.gather(*init_get_futures)
                        for key, sk, check_data, vd in get_results:
                            assert vd is not None and vd.data == check_data

                        print(f'transaction get subkey {i}: {time.time()-start}')

                    # Reads only: discard the transaction instead of committing.
                    await transaction.rollback()

                    for desc in records:
                        await rc0.close_dht_record(desc.key)
|
|
|
|
|
|
@pytest.mark.skipif(os.getenv("STRESS") != "1", reason="stress test takes a long time")
@pytest.mark.asyncio
async def test_dht_transaction_write_read_full_parallel_local():
    """Stress test: fully-parallel transactional write/read of full-subkey DHT records.

    For every supported crypto kind, this test:
      1. creates COUNT records with SUBKEY_COUNT subkeys each,
      2. sets every (record, subkey) pair concurrently inside one transaction
         and commits it,
      3. closes the records and purges the local and remote record stores,
      4. re-opens the records, reads every subkey back concurrently in a
         second transaction, and asserts each value round-trips intact,
      5. rolls back the read transaction and closes the records.

    Requires a running veilid-server (index 0); skipped if it cannot be
    reached, and only run at all when STRESS=1 is set in the environment.
    """

    # Transaction tests do not inspect updates; a no-op callback suffices.
    async def null_update_callback(update: VeilidUpdate):
        pass

    try:
        api0 = await api_connector(null_update_callback, 0)
    except VeilidConnectionError:
        pytest.skip("Unable to connect to veilid-server 0.")
        return

    async with api0:
        # purge local and remote record stores to ensure we start fresh
        await api0.debug("record purge local")
        await api0.debug("record purge remote")

        # make routing contexts
        # NOTE(review): ENSURE_ORDERED sequencing is presumably needed for
        # transaction message ordering — confirm against veilid docs.
        rc0 = await (await api0.new_routing_context()).with_sequencing(Sequencing.ENSURE_ORDERED)
        async with rc0:

            # Exercise the whole flow once per supported crypto kind.
            for kind in await api0.valid_crypto_kinds():
                print(f"kind: {kind}")
                cs = await api0.get_crypto_system(kind)
                async with cs:

                    # Number of records
                    COUNT = 48
                    # Number of subkeys per record
                    SUBKEY_COUNT = 32
                    # BareNonce to encrypt test data
                    NONCE = Nonce.from_bytes(b"A"*await cs.nonce_length())
                    # Secret to encrypt test data
                    SECRET = SharedSecret.from_value(await cs.kind(), BareSharedSecret.from_bytes(b"A"*await cs.shared_secret_length()))
                    # Max subkey size
                    MAX_SUBKEY_SIZE = min(32768, 1024*1024//SUBKEY_COUNT)
                    # MAX_SUBKEY_SIZE = 256

                    # write dht records on server 0
                    records : list[DHTRecordDescriptor] = []
                    # subkey_data_list[n] is the payload written to every
                    # subkey of records[n] and checked on readback.
                    subkey_data_list : list[bytes] = []
                    schema = DHTSchema.dflt(SUBKEY_COUNT)
                    print(f'writing {COUNT} records with full subkeys')
                    for n in range(COUNT):
                        desc = await rc0.create_dht_record(kind, schema)
                        print(f' {n}: {desc.key} {desc.owner}:{desc.owner_secret}')
                        records.append(desc)

                        # Make encrypted data that is consistent and hard to compress
                        # (deterministic per-record plaintext, then encrypted
                        # with a fixed nonce/secret so runs are repeatable).
                        subkey_data = bytes(chr(ord("A")+n%32)*MAX_SUBKEY_SIZE, 'ascii')
                        subkey_data = await cs.crypt_no_auth(subkey_data, NONCE, SECRET)
                        subkey_data_list.append(subkey_data)

                    start = time.time()
                    # Open one transaction spanning all records at once.
                    transaction = await api0.transact_dht_records([x.key for x in records], None)
                    print(f'transaction begin: {time.time()-start}')

                    init_set_futures : set[Coroutine[Any, Any, ValueData | None]] = set()

                    # Queue a set() for every (subkey, record) pair; nothing
                    # runs until the gather below awaits the coroutines.
                    for i in range(SUBKEY_COUNT):
                        for n in range(COUNT):
                            key = records[n].key
                            subkey_data = subkey_data_list[n]

                            # key/subkey/data are passed as arguments (not
                            # captured) to avoid late-binding of loop vars.
                            async def setter(key: RecordKey, subkey: ValueSubkey, data: bytes):
                                start = time.time()
                                cnt = 0
                                # Retry until the set sticks; the server may
                                # push back with TryAgain under this load.
                                while True:
                                    try:
                                        await transaction.set(key, subkey, data)
                                        break
                                    except veilid.VeilidAPIErrorTryAgain:
                                        cnt += 1
                                        print(f'  retry #{cnt} setting {key} #{subkey}')
                                        continue
                                print(f'set {key} #{subkey}: {time.time()-start}')

                            init_set_futures.add(setter(key, ValueSubkey(i), subkey_data))

                    # Update all subkeys for all records simultaneously
                    start = time.time()
                    await asyncio.gather(*init_set_futures)
                    print(f'transaction set subkeys: {time.time()-start}')

                    start = time.time()
                    await transaction.commit()
                    print(f'transaction commit: {time.time()-start}')

                    for desc in records:
                        await rc0.close_dht_record(desc.key)

                    # Purge so the readback below must fetch real state
                    # rather than hitting still-open local caches.
                    await api0.debug("record purge local")
                    await api0.debug("record purge remote")

                    # read dht records on server 0
                    print(f'reading {COUNT} records')

                    for desc in records:
                        await rc0.open_dht_record(desc.key)

                    start = time.time()
                    transaction = await api0.transact_dht_records([x.key for x in records], None)
                    print(f'transaction begin: {time.time()-start}')

                    init_get_futures : set[Coroutine[Any, Any, tuple[RecordKey, ValueSubkey, bytes, ValueData | None]]] = set()
                    for i in range(SUBKEY_COUNT):
                        subkey = ValueSubkey(i)

                        for n in range(COUNT):
                            key = records[n].key
                            subkey_data = subkey_data_list[n]

                            # Returns (key, subkey, expected, actual) so the
                            # verification loop below can report mismatches.
                            async def getter(key: RecordKey, subkey: ValueSubkey, check_data: bytes):
                                start = time.time()
                                out = (key, subkey, check_data, await transaction.get(key, subkey))
                                print(f'get {key} #{subkey}: {time.time()-start}')
                                return out

                            init_get_futures.add(getter(key, subkey, subkey_data))

                    # Update each subkey for each record in parallel
                    # This ensures that each record gets its own expiration update
                    start = time.time()
                    get_results = await asyncio.gather(*init_get_futures)
                    for key, sk, check_data, vd in get_results:
                        assert vd is not None and vd.data == check_data
                    print(f'transaction get subkeys: {time.time()-start}')

                    # Read-only transaction: rollback instead of commit.
                    await transaction.rollback()

                    for desc in records:
                        await rc0.close_dht_record(desc.key)