Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
aos
GitHub Repository: aos/firecracker
Path: blob/main/tests/integration_tests/performance/test_network_latency.py
1958 views
1
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2
# SPDX-License-Identifier: Apache-2.0
3
"""Tests the network latency of a Firecracker guest."""
4
import logging
5
import platform
6
import re
7
import pytest
8
import host_tools.network as net_tools
9
from conftest import ARTIFACTS_COLLECTION
10
from framework.artifacts import ArtifactSet
11
from framework.matrix import TestMatrix, TestContext
12
from framework.builder import MicrovmBuilder
13
from framework.stats import core, consumer, producer, types, criteria,\
14
function
15
from framework.utils import eager_map, CpuMap
16
from framework.artifacts import DEFAULT_HOST_IP
17
from framework.utils_cpuid import get_cpu_model_name
18
from integration_tests.performance.utils import handle_failure, \
19
dump_test_result
20
21
# Guest-side ping command template: echo-request count, interval (s), target IP.
PING = "ping -c {} -i {} {}"
# Baseline for the average round-trip latency, keyed by host architecture.
# "target" is the expected value and "delta" the allowed deviation.
LATENCY_AVG_BASELINES = {
    "x86_64": {
        "target": 0.250,  # milliseconds
        "delta": 0.020  # milliseconds
    }
}

# Measurement / statistic key names shared by the stats pipeline below.
PKT_LOSS = "pkt_loss"
PKT_LOSS_STAT_KEY = "value"
LATENCY = "latency"
32
33
34
def pass_criteria():
    """Build the pass criteria applied to the latency statistics.

    The "Avg" statistic must match the per-architecture baseline
    ("target" within "delta") for the current host machine.
    """
    baseline = LATENCY_AVG_BASELINES[platform.machine()]
    return {"Avg": criteria.EqualWith(baseline)}
39
40
41
def measurements():
    """Define the latency and packet-loss measurements.

    Latency is reported in milliseconds with aggregate statistics plus
    percentiles; packet loss is reported as a percentage.
    """
    latency_stats = ["Avg", "Min", "Max", "Stddev",
                     "Percentile99", "Percentile90", "Percentile50"]
    latency = types.MeasurementDef.create_measurement(
        LATENCY,
        "ms",
        [function.ValuePlaceholder(stat) for stat in latency_stats],
        pass_criteria())
    pkt_loss = types.MeasurementDef.create_measurement(
        PKT_LOSS,
        "percentage",
        [function.ValuePlaceholder(PKT_LOSS_STAT_KEY)])

    return [latency, pkt_loss]
60
61
62
def consume_ping_output(cons, raw_data, requests):
    """Consume ping output.

    Output example:
    PING 8.8.8.8 (8.8.8.8) 56(84) bytes of data.
    64 bytes from 8.8.8.8: icmp_seq=1 ttl=118 time=17.7 ms
    64 bytes from 8.8.8.8: icmp_seq=2 ttl=118 time=17.7 ms
    64 bytes from 8.8.8.8: icmp_seq=3 ttl=118 time=17.4 ms
    64 bytes from 8.8.8.8: icmp_seq=4 ttl=118 time=17.8 ms

    --- 8.8.8.8 ping statistics ---
    4 packets transmitted, 4 received, 0% packet loss, time 3005ms
    rtt min/avg/max/mdev = 17.478/17.705/17.808/0.210 ms

    :param cons: statistics consumer the parsed values are pushed into
    :param raw_data: full stdout of the ping command
    :param requests: number of echo requests that were sent
    """
    eager_map(cons.set_measurement_def, measurements())

    output = raw_data.strip().split('\n')
    assert len(output) > 2

    # Last line holds the aggregated statistics.
    # E.g: round-trip min/avg/max/stddev = 17.478/17.705/17.808/0.210 ms
    pattern_stats = "round-trip min/avg/max/stddev = (.+)/(.+)/(.+)/(.+) ms"
    stat_values = re.findall(pattern_stats, output[-1])[0]
    assert len(stat_values) == 4

    st_keys = ["Min", "Avg", "Max", "Stddev"]
    for st_name, stat_value in zip(st_keys, stat_values):
        cons.consume_stat(st_name=st_name,
                          ms_name=LATENCY,
                          value=float(stat_value))

    # Second to last line holds the packet statistics.
    # E.g: 4 packets transmitted, 4 received, 0% packet loss
    packet_stats = output[-2]
    pattern_packet = ".+ packet.+transmitted, .+ received," \
                     " (.+)% packet loss"
    pkt_loss = re.findall(pattern_packet, packet_stats)[0]
    assert len(pkt_loss) == 1
    # NOTE(review): the loss value is forwarded as the raw matched string,
    # exactly as the original did — downstream dumping may rely on the type.
    cons.consume_stat(st_name=PKT_LOSS_STAT_KEY,
                      ms_name=PKT_LOSS,
                      value=pkt_loss[0])

    # Compute percentiles from the per-request echo lines.
    seqs = output[1:requests + 1]
    pattern_time = ".+ bytes from .+: icmp_seq=.+ ttl=.+ time=(.+) ms"
    times = []
    for seq in seqs:
        time = re.findall(pattern_time, seq)
        assert len(time) == 1
        # BUGFIX: parse as float before sorting. The original appended the
        # raw match and `times.sort()` therefore ordered lexicographically
        # (e.g. "9.9" > "10.1"), yielding wrong percentile values.
        times.append(float(time[0]))

    times.sort()
    cons.consume_stat(st_name="Percentile50",
                      ms_name=LATENCY,
                      value=times[int(requests * 0.5)])
    cons.consume_stat(st_name="Percentile90",
                      ms_name=LATENCY,
                      value=times[int(requests * 0.9)])
    cons.consume_stat(st_name="Percentile99",
                      ms_name=LATENCY,
                      value=times[int(requests * 0.99)])
126
127
128
@pytest.mark.nonci
@pytest.mark.skipif(platform.machine() != "x86_64",
                    reason="This test was observed only on x86_64. Further "
                           "support need to be added for aarch64 and amd64.")
@pytest.mark.timeout(3600)
def test_network_latency(bin_cloner_path, results_file_dumper):
    """Test network latency driver for multiple artifacts."""
    logger = logging.getLogger("network_latency")

    # Gather the artifact sets the matrix will iterate over.
    vm_set = ArtifactSet(
        ARTIFACTS_COLLECTION.microvms(keyword="1vcpu_1024mb")
    )
    kernel_set = ArtifactSet(ARTIFACTS_COLLECTION.kernels())
    disk_set = ArtifactSet(ARTIFACTS_COLLECTION.disks(keyword="ubuntu"))

    # Assemble the per-run context consumed by _g2h_send_ping: builder,
    # logger, ping parameters and the results dumper.
    test_context = TestContext()
    test_context.custom = {
        'builder': MicrovmBuilder(bin_cloner_path),
        'logger': logger,
        'requests': 1000,
        'interval': 0.2,  # Seconds.
        'name': 'network_latency',
        'results_file_dumper': results_file_dumper
    }

    # Run the ping exercise once per (microvm, kernel, disk) combination.
    test_matrix = TestMatrix(
        context=test_context,
        artifact_sets=[vm_set, kernel_set, disk_set])
    test_matrix.run_test(_g2h_send_ping)
162
163
164
def _g2h_send_ping(context):
    """Send ping from guest to host and consume the latency statistics.

    Expects `context.custom` to provide: 'logger', 'builder', 'interval',
    'name', 'requests' and 'results_file_dumper'.
    """
    logger = context.custom['logger']
    vm_builder = context.custom['builder']
    interval_between_req = context.custom['interval']
    name = context.custom['name']
    file_dumper = context.custom['results_file_dumper']

    logger.info("Testing {} with microvm: \"{}\", kernel {}, disk {} "
                .format(name,
                        context.microvm.name(),
                        context.kernel.name(),
                        context.disk.name()))

    # Create a rw copy artifact.
    rw_disk = context.disk.copy()
    # Get ssh key from read-only artifact.
    ssh_key = context.disk.ssh_key()
    # Create a fresh microvm from artifacts.
    vm_instance = vm_builder.build(kernel=context.kernel,
                                   disks=[rw_disk],
                                   ssh_key=ssh_key,
                                   config=context.microvm)
    basevm = vm_instance.vm
    basevm.start()

    # Check if the needed CPU cores are available. We have the API thread, VMM
    # thread and then one thread for each configured vCPU.
    assert CpuMap.len() >= 2 + basevm.vcpus_count

    # Pin uVM threads to physical cores: core 0 for the VMM thread, core 1
    # for the API thread, cores 2.. for the vCPUs.
    current_cpu_id = 0
    assert basevm.pin_vmm(current_cpu_id), \
        "Failed to pin firecracker thread."
    current_cpu_id += 1
    assert basevm.pin_api(current_cpu_id), \
        "Failed to pin fc_api thread."
    for i in range(basevm.vcpus_count):
        current_cpu_id += 1
        # BUGFIX: the original pinned to `current_cpu_id + i` while ALSO
        # incrementing `current_cpu_id` each iteration, so with more than
        # one vCPU it skipped cores (vcpu1 landed on core 4, never 3) and
        # overran the `2 + vcpus_count` capacity asserted above.
        assert basevm.pin_vcpu(i, current_cpu_id), \
            f"Failed to pin fc_vcpu {i} thread."

    custom = {"microvm": context.microvm.name(),
              "kernel": context.kernel.name(),
              "disk": context.disk.name(),
              "cpu_model_name": get_cpu_model_name()}

    st_core = core.Core(name="network_latency", iterations=1, custom=custom)
    cons = consumer.LambdaConsumer(
        func=consume_ping_output,
        func_kwargs={"requests": context.custom['requests']}
    )
    cmd = PING.format(context.custom['requests'],
                      interval_between_req,
                      DEFAULT_HOST_IP)
    prod = producer.SSHCommand(cmd,
                               net_tools.SSHConnection(basevm.ssh_config))
    st_core.add_pipe(producer=prod, consumer=cons, tag="ping")

    # Gather results and verify pass criteria.
    try:
        result = st_core.run_exercise()
    except core.CoreException as err:
        handle_failure(file_dumper, err)
    else:
        # BUGFIX: dump only on success. The original referenced `result`
        # unconditionally, raising NameError whenever run_exercise() failed
        # and handle_failure() returned instead of re-raising.
        dump_test_result(file_dumper, result)
230
231