ci: add unittests for the benchmark actions (#1466)

Co-authored-by: Nils Hanke <nils.hanke@outlook.com>
Moritz Eckert 2023-03-23 17:04:55 +01:00 committed by GitHub
parent 01d6724bae
commit feb23ea3da
6 changed files with 786 additions and 104 deletions


@@ -14,6 +14,20 @@ BIGGER_BETTER = [
'udp_bw_mbit',
]
# List of FIO tests
FIO_TESTS = [
"read_iops",
"write_iops",
"read_bw",
"write_bw",
]
# List of KNB tests
KNB_TESTS = [
"pod2pod",
"pod2svc"
]
# Lookup for test suite -> unit
UNIT_STR = {
'iops': 'IOPS',
@@ -32,6 +46,7 @@ ALLOWED_RATIO_DELTA = {
'udp_bw_mbit': 0.7,
}
def is_bigger_better(bench_suite: str) -> bool:
return bench_suite in BIGGER_BETTER
@@ -55,69 +70,78 @@ def get_paths() -> Tuple[str, str]:
return path_prev, path_curr
def main() -> None:
"""Compare the current benchmark data against the previous.
class BenchmarkComparer:
def __init__(self, path_prev, path_curr):
self.path_prev = path_prev
self.path_curr = path_curr
Create a markdown table showing the benchmark progressions.
def compare(self) -> str:
"""Compare the current benchmark data against the previous.
Print the result to stdout.
"""
path_prev, path_curr = get_paths()
try:
with open(path_prev) as f_prev:
bench_prev = json.load(f_prev)
with open(path_curr) as f_curr:
bench_curr = json.load(f_curr)
except OSError as e:
raise ValueError('Failed reading benchmark file: {e}'.format(e=e))
Create a markdown table showing the benchmark progressions.
try:
name = bench_curr['provider']
except KeyError:
raise ValueError(
'Current benchmark record file does not contain provider.')
try:
prev_name = bench_prev['provider']
except KeyError:
raise ValueError(
'Previous benchmark record file does not contain provider.')
if name != prev_name:
raise ValueError(
'Cloud providers of previous and current benchmark data do not match.')
Print the result to stdout.
"""
try:
with open(self.path_prev) as f_prev:
bench_prev = json.load(f_prev)
with open(self.path_curr) as f_curr:
bench_curr = json.load(f_curr)
except OSError as e:
raise ValueError('Failed reading benchmark file: {e}'.format(e=e))
if 'fio' not in bench_prev.keys() or 'fio' not in bench_curr.keys():
raise ValueError('Benchmarks do not both contain fio records.')
try:
name = bench_curr['provider']
except KeyError:
raise ValueError(
'Current benchmark record file does not contain provider.')
try:
prev_name = bench_prev['provider']
except KeyError:
raise ValueError(
'Previous benchmark record file does not contain provider.')
if name != prev_name:
raise ValueError(
'Cloud providers of previous and current benchmark data do not match.')
if 'knb' not in bench_prev.keys() or 'knb' not in bench_curr.keys():
raise ValueError('Benchmarks do not both contain knb records.')
if 'fio' not in bench_prev.keys() or 'fio' not in bench_curr.keys():
raise ValueError('Benchmarks do not both contain fio records.')
md_lines = [
'# {name}'.format(name=name),
'',
'<details>',
'',
'- Commit of current benchmark: [{ch}](https://github.com/edgelesssys/constellation/commit/{ch})'.format(ch=bench_curr['metadata']['github.sha']),
'- Commit of previous benchmark: [{ch}](https://github.com/edgelesssys/constellation/commit/{ch})'.format(ch=bench_prev['metadata']['github.sha']),
'',
'| Benchmark suite | Metric | Current | Previous | Ratio |',
'|-|-|-|-|-|',
]
if 'knb' not in bench_prev.keys() or 'knb' not in bench_curr.keys():
raise ValueError('Benchmarks do not both contain knb records.')
# compare FIO results
for subtest, metrics in bench_prev['fio'].items():
for metric in metrics.keys():
md_lines.append(compare_test('fio', subtest, metric, bench_prev, bench_curr))
md_lines = [
'# {name}'.format(name=name),
'',
'<details>',
'',
'- Commit of current benchmark: [{ch}](https://github.com/edgelesssys/constellation/commit/{ch})'.format(
ch=bench_curr['metadata']['github.sha']),
'- Commit of previous benchmark: [{ch}](https://github.com/edgelesssys/constellation/commit/{ch})'.format(
ch=bench_prev['metadata']['github.sha']),
'',
'| Benchmark suite | Metric | Current | Previous | Ratio |',
'|-|-|-|-|-|',
]
# compare knb results
for subtest, metrics in bench_prev['knb'].items():
for metric in metrics.keys():
md_lines.append(compare_test('knb', subtest, metric, bench_prev, bench_curr))
# compare FIO results
for subtest in FIO_TESTS:
if subtest not in bench_prev['fio']:
raise ValueError(f'Previous benchmarks do not include the "{subtest}" test.')
for metric in bench_prev['fio'][subtest].keys():
md_lines.append(self.compare_test('fio', subtest, metric, bench_prev, bench_curr))
md_lines += ['', '</details>']
print('\n'.join(md_lines))
# compare knb results
for subtest in KNB_TESTS:
if subtest not in bench_prev['knb']:
raise ValueError(f'Previous benchmarks do not include the "{subtest}" test.')
for metric in bench_prev['knb'][subtest].keys():
md_lines.append(self.compare_test('knb', subtest, metric, bench_prev, bench_curr))
md_lines += ['', '</details>']
return '\n'.join(md_lines)
def compare_test(test, subtest, metric, bench_prev, bench_curr) -> str:
def compare_test(self, test, subtest, metric, bench_prev, bench_curr) -> str:
if subtest not in bench_curr[test]:
raise ValueError(
'Benchmark record from previous benchmark not in current.')
@@ -138,15 +162,24 @@ def compare_test(test, subtest, metric, bench_prev, bench_curr) -> str:
ratio_num = val_prev / val_curr
if ratio_num > ALLOWED_RATIO_DELTA.get(metric, 1):
set_failed()
ratio_num = round(ratio_num, 3)
emoji = PROGRESS[int(ratio_num >= 1)]
ratio = f'{ratio_num} {emoji}'
return f'| {subtest} | {metric} ({unit}) | {val_curr} | {val_prev} | {ratio} |'
def set_failed() -> None:
os.environ['COMPARISON_SUCCESS'] = str(False)
def main():
path_prev, path_curr = get_paths()
c = BenchmarkComparer(path_prev, path_curr)
output = c.compare()
print(output)
if __name__ == '__main__':
main()
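
For reference, a minimal sketch of how the refactored comparer could be driven outside the CI workflow, assuming two combined result files produced by parse.py already exist (the file paths below are placeholders, not part of this PR):

# Sketch: run the comparison locally against two existing result files.
# The paths are hypothetical; in CI, get_paths() resolves them instead.
from compare import BenchmarkComparer

comparer = BenchmarkComparer('prev/constellation-azure.json', 'curr/constellation-azure.json')
markdown_table = comparer.compare()  # returns the markdown report as a string
print(markdown_table)                # main() prints the same output to stdout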


@@ -37,60 +37,73 @@ def configure() -> Tuple[str, str, str, str | None, str, str, str, str]:
workflow = os.environ.get('GITHUB_WORKFLOW', 'N/A')
return base_path, csp, out_dir, ext_provider_name, commit_hash, commit_ref, actor, workflow
class BenchmarkParser:
def __init__(self, base_path, csp, out_dir, ext_provider_name=None, commit_hash="N/A", commit_ref="N/A", actor="N/A", workflow="N/A"):
self.base_path = base_path
self.csp = csp
self.out_dir = out_dir
self.ext_provider_name = ext_provider_name
if not self.ext_provider_name:
self.ext_provider_name = f'constellation-{csp}'
self.commit_hash = commit_hash
self.commit_ref = commit_ref
self.actor = actor
self.workflow = workflow
def main() -> None:
"""Read and parse the K-Bench tests.
Write results of the current environment to a JSON file.
"""
def parse(self) -> None:
"""Read and parse the K-Bench tests.
Write results of the current environment to a JSON file.
"""
# Expect the results in directory:
fio_path = os.path.join(
self.base_path,
f'fio-{self.ext_provider_name}.json',
)
knb_path = os.path.join(
self.base_path,
f'knb-{self.ext_provider_name}.json',
)
out_file_name = f'{self.ext_provider_name}.json'
if not os.path.exists(fio_path) or not os.path.exists(knb_path):
raise ValueError(
f'Benchmarks do not exist at {fio_path} or {knb_path}.')
# Parse subtest
knb_results = knb.evaluate(knb_path)
fio_results = fio.evaluate(fio_path)
# Get timestamp
now = datetime.now()
timestamp = now.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
combined_results = {'metadata': {
'github.sha': self.commit_hash,
'github.ref-name': self.commit_ref,
'github.actor': self.actor,
'github.workflow': self.workflow,
},
'@timestamp': str(timestamp),
'provider': self.ext_provider_name,
'fio': {},
'knb': {}}
combined_results['knb'].update(knb_results)
combined_results['fio'].update(fio_results)
# Write the compact results.
save_path = os.path.join(self.out_dir, out_file_name)
with open(save_path, 'w+') as w:
json.dump(combined_results, fp=w, sort_keys=False, indent=2)
def main():
base_path, csp, out_dir, ext_provider_name, commit_hash, commit_ref, actor, workflow = configure()
if ext_provider_name is None:
# Constellation benchmark.
ext_provider_name = f'constellation-{csp}'
# Expect the results in directory:
fio_path = os.path.join(
base_path,
f'fio-{ext_provider_name}.json',
)
knb_path = os.path.join(
base_path,
f'knb-{ext_provider_name}.json',
)
out_file_name = f'{ext_provider_name}.json'
if not os.path.exists(fio_path) or not os.path.exists(knb_path):
raise ValueError(
f'Benchmarks do not exist at {fio_path} or {knb_path}.')
# Parse subtest
knb_results = knb.evaluate(knb_path)
fio_results = fio.evaluate(fio_path)
# Get timestamp
now = datetime.now()
timestamp = now.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
combined_results = {'metadata': {
'github.sha': commit_hash,
'github.ref-name': commit_ref,
'github.actor': actor,
'github.workflow': workflow,
},
'@timestamp': str(timestamp),
'provider': ext_provider_name,
'fio': {},
'knb': {}}
combined_results['knb'].update(knb_results)
combined_results['fio'].update(fio_results)
# Write the compact results.
save_path = os.path.join(out_dir, out_file_name)
with open(save_path, 'w+') as w:
json.dump(combined_results, fp=w, sort_keys=False, indent=2)
p = BenchmarkParser(base_path, csp, out_dir, ext_provider_name, commit_hash, commit_ref, actor, workflow)
p.parse()
if __name__ == '__main__':
main()
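
A similar sketch for the parser, assuming fio-<provider>.json and knb-<provider>.json results are already present in the input directory (the directory names and CSP value are placeholders):

# Sketch: combine raw fio/knb results into a single JSON record.
# With no ext_provider_name given, the provider defaults to 'constellation-<csp>'.
from parse import BenchmarkParser

parser = BenchmarkParser(base_path='./results', csp='azure', out_dir='./out')
parser.parse()  # writes ./out/constellation-azure.json with metadata, fio and knb sections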


@@ -0,0 +1,36 @@
{
"metadata": {
"github.sha": "N/A",
"github.ref-name": "N/A",
"github.actor": "N/A",
"github.workflow": "N/A"
},
"@timestamp": "2023-03-20T12:01:30.283191Z",
"provider": "constellation-azure",
"fio": {
"read_iops": {
"iops": 2165.847
},
"write_iops": {
"iops": 219.97105
},
"read_bw": {
"bw_kbytes": 184151.0
},
"write_bw": {
"bw_kbytes": 18604.0
},
"volumesize": 400
},
"knb": {
"pod2pod": {
"tcp_bw_mbit": 943.0,
"udp_bw_mbit": 595.0
},
"pod2svc": {
"tcp_bw_mbit": 932.0,
"udp_bw_mbit": 564.0
},
"mtu": 1500
}
}


@@ -0,0 +1,391 @@
[
{
"TestName": "FIO test results",
"Status": [
{
"StatusCode": "OK",
"StatusMessage": "\nFIO version - fio-3.30\nGlobal options - ioengine=libaio verify=0 direct=1 gtod_reduce=1\n\nJobName: read_iops\n blocksize=4K filesize=2G iodepth=64 rw=randread\nread:\n IOPS=2165.846924 BW(KiB/s)=8679\n iops: min=1064 max=4184 avg=2199.766602\n bw(KiB/s): min=4256 max=16736 avg=8799.066406\n\nJobName: write_iops\n blocksize=4K filesize=2G iodepth=64 rw=randwrite\nwrite:\n IOPS=219.971054 BW(KiB/s)=896\n iops: min=56 max=360 avg=222.333328\n bw(KiB/s): min=224 max=1440 avg=890.000000\n\nJobName: read_bw\n blocksize=128K filesize=2G iodepth=64 rw=randread\nread:\n IOPS=1434.548828 BW(KiB/s)=184151\n iops: min=920 max=2168 avg=1457.500000\n bw(KiB/s): min=117760 max=277504 avg=186562.031250\n\nJobName: write_bw\n blocksize=128k filesize=2G iodepth=64 rw=randwrite\nwrite:\n IOPS=141.218124 BW(KiB/s)=18604\n iops: min=2 max=288 avg=142.800003\n bw(KiB/s): min=256 max=36864 avg=18293.966797\n\nDisk stats (read/write):\n dm-3: ios=65084/6657 merge=0/0 ticks=2133228/2168554 in_queue=4301782, util=99.246300%\n sdc: ios=65082/6623 merge=2/202 ticks=2098593/918415 in_queue=3017008, util=99.402611%\n"
}
],
"Raw": {
"size": "400Gi",
"storageClass": {
"metadata": {
"name": "encrypted-rwo",
"uid": "a6687a94-da84-4c43-8c75-31a893d5e736",
"resourceVersion": "425",
"creationTimestamp": "2023-03-09T12:51:20Z",
"labels": {
"app.kubernetes.io/managed-by": "Helm"
},
"annotations": {
"meta.helm.sh/release-name": "constellation-services",
"meta.helm.sh/release-namespace": "kube-system",
"storageclass.kubernetes.io/is-default-class": "true"
},
"managedFields": [
{
"manager": "bootstrapper",
"operation": "Update",
"apiVersion": "storage.k8s.io/v1",
"time": "2023-03-09T12:51:20Z",
"fieldsType": "FieldsV1",
"fieldsV1": {
"f:allowVolumeExpansion": {},
"f:metadata": {
"f:annotations": {
".": {},
"f:meta.helm.sh/release-name": {},
"f:meta.helm.sh/release-namespace": {},
"f:storageclass.kubernetes.io/is-default-class": {}
},
"f:labels": {
".": {},
"f:app.kubernetes.io/managed-by": {}
}
},
"f:parameters": {
".": {},
"f:skuname": {}
},
"f:provisioner": {},
"f:reclaimPolicy": {},
"f:volumeBindingMode": {}
}
}
]
},
"provisioner": "azuredisk.csi.confidential.cloud",
"parameters": {
"skuname": "StandardSSD_LRS"
},
"reclaimPolicy": "Delete",
"allowVolumeExpansion": true,
"volumeBindingMode": "Immediate"
},
"fioConfig": "[global]\nrandrepeat=0\nverify=0\nioengine=libaio\ndirect=1\ngtod_reduce=1\n[job1]\nname=read_iops\nbs=4K\niodepth=64\nsize=2G\nreadwrite=randread\ntime_based\nramp_time=2s\nruntime=15s\n[job2]\nname=write_iops\nbs=4K\niodepth=64\nsize=2G\nreadwrite=randwrite\ntime_based\nramp_time=2s\nruntime=15s\n[job3]\nname=read_bw\nbs=128K\niodepth=64\nsize=2G\nreadwrite=randread\ntime_based\nramp_time=2s\nruntime=15s\n[job4]\nname=write_bw\nbs=128k\niodepth=64\nsize=2G\nreadwrite=randwrite\ntime_based\nramp_time=2s\nruntime=15s\n",
"result": {
"fio version": "fio-3.30",
"timestamp": 1678366708,
"timestamp_ms": 1678366708715,
"time": "Thu Mar 9 12:58:28 2023",
"global options": {
"directory": "/dataset",
"randrepeat": "0",
"verify": "0",
"ioengine": "libaio",
"direct": "1",
"gtod_reduce": "1"
},
"jobs": [
{
"jobname": "read_iops",
"elapsed": 18,
"job options": {
"name": "read_iops",
"bs": "4K",
"iodepth": "64",
"size": "2G",
"rw": "randread",
"ramp_time": "2s",
"runtime": "15s"
},
"read": {
"io_bytes": 135536640,
"io_kbytes": 132360,
"bw_bytes": 8888231,
"bw": 8679,
"iops": 2165.847,
"runtime": 15249,
"total_ios": 33027,
"slat_ns": {},
"clat_ns": {},
"lat_ns": {},
"bw_min": 4256,
"bw_max": 16736,
"bw_agg": 4.563348,
"bw_mean": 8799.066,
"bw_dev": 3406.0356,
"bw_samples": 30,
"iops_min": 1064,
"iops_max": 4184,
"iops_mean": 2199.7666,
"iops_stddev": 851.5089,
"iops_samples": 30
},
"write": {
"slat_ns": {},
"clat_ns": {},
"lat_ns": {}
},
"trim": {
"slat_ns": {},
"clat_ns": {},
"lat_ns": {}
},
"sync": {
"slat_ns": {},
"clat_ns": {},
"lat_ns": {}
},
"job_runtime": 15248,
"usr_cpu": 0.209864,
"sys_cpu": 1.974029,
"ctx": 1425,
"minf": 37,
"iodepth_level": {
"\u003e=64": 100
},
"iodepth_submit": {
"4": 100
},
"iodepth_complete": {
"4": 99.99697,
"64": 0.1
},
"latency_ns": {},
"latency_us": {},
"latency_ms": {},
"latency_depth": 64,
"latency_percentile": 100
},
{
"jobname": "write_iops",
"elapsed": 18,
"job options": {
"name": "write_iops",
"bs": "4K",
"iodepth": "64",
"size": "2G",
"rw": "randwrite",
"ramp_time": "2s",
"runtime": "15s"
},
"read": {
"slat_ns": {},
"clat_ns": {},
"lat_ns": {}
},
"write": {
"io_bytes": 13955072,
"io_kbytes": 13628,
"bw_bytes": 917976,
"bw": 896,
"iops": 219.97105,
"runtime": 15202,
"total_ios": 3344,
"slat_ns": {},
"clat_ns": {},
"lat_ns": {},
"bw_min": 224,
"bw_max": 1440,
"bw_agg": 4.559427,
"bw_mean": 890,
"bw_dev": 263.00085,
"bw_samples": 30,
"iops_min": 56,
"iops_max": 360,
"iops_mean": 222.33333,
"iops_stddev": 65.6618,
"iops_samples": 30
},
"trim": {
"slat_ns": {},
"clat_ns": {},
"lat_ns": {}
},
"sync": {
"slat_ns": {},
"clat_ns": {},
"lat_ns": {}
},
"job_runtime": 15201,
"usr_cpu": 0.072364,
"sys_cpu": 0.66443,
"ctx": 2157,
"minf": 36,
"iodepth_level": {
"\u003e=64": 100
},
"iodepth_submit": {
"4": 100
},
"iodepth_complete": {
"4": 99.97011,
"64": 0.1
},
"latency_ns": {},
"latency_us": {},
"latency_ms": {},
"latency_depth": 64,
"latency_percentile": 100
},
{
"jobname": "read_bw",
"elapsed": 18,
"job options": {
"name": "read_bw",
"bs": "128K",
"iodepth": "64",
"size": "2G",
"rw": "randread",
"ramp_time": "2s",
"runtime": "15s"
},
"read": {
"io_bytes": 2875326464,
"io_kbytes": 2807936,
"bw_bytes": 188570728,
"bw": 184151,
"iops": 1434.5488,
"runtime": 15248,
"total_ios": 21874,
"slat_ns": {},
"clat_ns": {},
"lat_ns": {},
"bw_min": 117760,
"bw_max": 277504,
"bw_agg": 96.75502,
"bw_mean": 186562.03,
"bw_dev": 46217.277,
"bw_samples": 30,
"iops_min": 920,
"iops_max": 2168,
"iops_mean": 1457.5,
"iops_stddev": 361.0814,
"iops_samples": 30
},
"write": {
"slat_ns": {},
"clat_ns": {},
"lat_ns": {}
},
"trim": {
"slat_ns": {},
"clat_ns": {},
"lat_ns": {}
},
"sync": {
"slat_ns": {},
"clat_ns": {},
"lat_ns": {}
},
"job_runtime": 15247,
"usr_cpu": 0.275464,
"sys_cpu": 3.469535,
"ctx": 1933,
"minf": 37,
"iodepth_level": {
"\u003e=64": 100
},
"iodepth_submit": {
"4": 100
},
"iodepth_complete": {
"4": 99.99543,
"64": 0.1
},
"latency_ns": {},
"latency_us": {},
"latency_ms": {},
"latency_depth": 64,
"latency_percentile": 100
},
{
"jobname": "write_bw",
"elapsed": 18,
"job options": {
"name": "write_bw",
"bs": "128k",
"iodepth": "64",
"size": "2G",
"rw": "randwrite",
"ramp_time": "2s",
"runtime": "15s"
},
"read": {
"slat_ns": {},
"clat_ns": {},
"lat_ns": {}
},
"write": {
"io_bytes": 290586624,
"io_kbytes": 283776,
"bw_bytes": 19051112,
"bw": 18604,
"iops": 141.21812,
"runtime": 15253,
"total_ios": 2154,
"slat_ns": {},
"clat_ns": {},
"lat_ns": {},
"bw_min": 256,
"bw_max": 36864,
"bw_agg": 93.819565,
"bw_mean": 18293.967,
"bw_dev": 7049.828,
"bw_samples": 30,
"iops_min": 2,
"iops_max": 288,
"iops_mean": 142.8,
"iops_stddev": 55.03754,
"iops_samples": 30
},
"trim": {
"slat_ns": {},
"clat_ns": {},
"lat_ns": {}
},
"sync": {
"slat_ns": {},
"clat_ns": {},
"lat_ns": {}
},
"job_runtime": 15252,
"usr_cpu": 0.059009,
"sys_cpu": 0.445843,
"ctx": 1601,
"minf": 36,
"iodepth_level": {
"\u003e=64": 100
},
"iodepth_submit": {
"4": 100
},
"iodepth_complete": {
"4": 99.9536,
"64": 0.1
},
"latency_ns": {},
"latency_us": {},
"latency_ms": {},
"latency_depth": 64,
"latency_percentile": 100
}
],
"disk_util": [
{
"name": "dm-3",
"read_ios": 65084,
"write_ios": 6657,
"read_ticks": 2133228,
"write_ticks": 2168554,
"in_queue": 4301782,
"util": 99.2463
},
{
"name": "sdc",
"read_ios": 65082,
"write_ios": 6623,
"read_merges": 2,
"write_merges": 202,
"read_ticks": 2098593,
"write_ticks": 918415,
"in_queue": 3017008,
"util": 99.40261
}
]
}
}
}
]


@@ -0,0 +1,146 @@
{
"metadata": {
"name": "knb-21696",
"generator": "knb",
"version": "1.5.0",
"date": "2023-03-09 12:58:29",
"server-node": "e2e-test-79e6174e-worker000000",
"client-node": "e2e-test-79e6174e-worker000001",
"socket-buffer-size": "auto"
},
"data": {
"cpu": "19/01",
"kernel": "6.1.14-200.fc37.x86_64",
"k8s-version": "v1.26.2",
"mtu": "1500",
"idle": {
"bandwidth": 0,
"client": {
"cpu": {
"total": 3.90,
"user": 1.29,
"nice": 0.00,
"system": 1.58,
"iowait": 1.03,
"steal": 0.00
},
"ram": 1989
},
"server": {
"cpu": {
"total": 2.15,
"user": 0.78,
"nice": 0.00,
"system": 1.21,
"iowait": 0.16,
"steal": 0.00
},
"ram": 1950
}
},
"pod2pod": {
"tcp": {
"bandwidth": 943,
"client": {
"cpu": {
"total": 17.15,
"user": 0.52,
"nice": 0.00,
"system": 16.58,
"iowait": 0.05,
"steal": 0.00
},
"ram": 1966
},
"server": {
"cpu": {
"total": 47.35,
"user": 0.77,
"nice": 0.00,
"system": 46.53,
"iowait": 0.05,
"steal": 0.00
},
"ram": 1947
}
},
"udp": {
"bandwidth": 595,
"client": {
"cpu": {
"total": 56.64,
"user": 1.18,
"nice": 0.00,
"system": 55.38,
"iowait": 0.08,
"steal": 0.00
},
"ram": 1965
},
"server": {
"cpu": {
"total": 43.33,
"user": 1.59,
"nice": 0.00,
"system": 41.71,
"iowait": 0.03,
"steal": 0.00
},
"ram": 1947
}
}
},
"pod2svc": {
"tcp": {
"bandwidth": 932,
"client": {
"cpu": {
"total": 17.22,
"user": 0.64,
"nice": 0.00,
"system": 16.37,
"iowait": 0.21,
"steal": 0.00
},
"ram": 1968
},
"server": {
"cpu": {
"total": 47.57,
"user": 1.07,
"nice": 0.00,
"system": 46.45,
"iowait": 0.05,
"steal": 0.00
},
"ram": 1963
}
},
"udp": {
"bandwidth": 564,
"client": {
"cpu": {
"total": 53.33,
"user": 1.34,
"nice": 0.00,
"system": 51.52,
"iowait": 0.47,
"steal": 0.00
},
"ram": 1971
},
"server": {
"cpu": {
"total": 41.43,
"user": 1.33,
"nice": 0.00,
"system": 40.08,
"iowait": 0.02,
"steal": 0.00
},
"ram": 1956
}
}
}
}
}


@@ -0,0 +1,63 @@
import unittest
import os
import tempfile
import json
import parse
import compare
TEST_INPUTS = "./test-inputs"
RESULT_FNAME_AZURE = "constellation-azure.json"
class TestParse(unittest.TestCase):
def test_parse(self):
with tempfile.TemporaryDirectory() as tmpdirname:
p = parse.BenchmarkParser(TEST_INPUTS, "azure", tmpdirname)
p.parse()
result_path = os.path.join(tmpdirname, RESULT_FNAME_AZURE)
self.assertTrue(os.path.isfile(result_path))
with open(result_path) as f:
result = json.load(f)
with open(os.path.join(TEST_INPUTS, RESULT_FNAME_AZURE)) as f:
expected = json.load(f)
self.assertEqual(result['fio'], expected['fio'])
self.assertEqual(result['knb'], expected['knb'])
expected_comparison_result = '''# constellation-azure
<details>
- Commit of current benchmark: [N/A](https://github.com/edgelesssys/constellation/commit/N/A)
- Commit of previous benchmark: [N/A](https://github.com/edgelesssys/constellation/commit/N/A)
| Benchmark suite | Metric | Current | Previous | Ratio |
|-|-|-|-|-|
| read_iops | iops (IOPS) | 2165.847 | 2165.847 | 1.0 |
| write_iops | iops (IOPS) | 219.97105 | 219.97105 | 1.0 |
| read_bw | bw_kbytes (KiB/s) | 184151.0 | 184151.0 | 1.0 |
| write_bw | bw_kbytes (KiB/s) | 18604.0 | 18604.0 | 1.0 |
| pod2pod | tcp_bw_mbit (Mbit/s) | 943.0 | 943.0 | 1.0 |
| pod2pod | udp_bw_mbit (Mbit/s) | 595.0 | 595.0 | 1.0 |
| pod2svc | tcp_bw_mbit (Mbit/s) | 932.0 | 932.0 | 1.0 |
| pod2svc | udp_bw_mbit (Mbit/s) | 564.0 | 564.0 | 1.0 |
</details>'''
class TestCompare(unittest.TestCase):
def test_compare(self):
with tempfile.TemporaryDirectory() as tmpdirname:
p = parse.BenchmarkParser(TEST_INPUTS, "azure", tmpdirname)
p.parse()
result_path = os.path.join(tmpdirname, RESULT_FNAME_AZURE)
self.assertTrue(os.path.isfile(result_path))
prev_path = os.path.join(TEST_INPUTS, RESULT_FNAME_AZURE)
c = compare.BenchmarkComparer(prev_path, result_path)
output = c.compare()
self.assertEqual(output, expected_comparison_result)
if __name__ == '__main__':
unittest.main()
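
The new tests exercise both classes end to end against the JSON fixtures above. Assuming the test module follows unittest's default test*.py naming (the file name isn't shown in this diff), it can be run locally with the standard library runner:

# Sketch: run the new unit tests from the benchmark action's directory.
# python -m unittest discover -s . -v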