-
Notifications
You must be signed in to change notification settings - Fork 0
/
locustfile.py
152 lines (110 loc) · 4.21 KB
/
locustfile.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
import os
import uuid
import lorem
from locust import HttpUser, task, between
from aymurai.logging import get_logger
logger = get_logger(__name__)
class ServerStats:
    """Mutable holder for the most recently fetched server resource metrics."""

    def __init__(self):
        # CPU figures reported by the server: core count and utilisation %.
        self.n_cores: int = 0
        self.cpu_usage = 0
        # Memory figures; presumably MB, per the call sites' *_mb keys.
        self.memory_limit = 0
        self.memory_usage = 0

    @property
    def memory_percent(self):
        """Memory usage as a percentage of the limit (0 when no limit is known)."""
        if not self.memory_limit:
            return 0
        return 100 * self.memory_usage / self.memory_limit

    def update_metrics(self, n_cores, cpu_percent, memory_limit, memory_percent):
        """Overwrite all stored metrics with freshly fetched values.

        NOTE(review): despite its name, ``memory_percent`` is stored into
        ``memory_usage`` — call sites pass an absolute usage figure here.
        The keyword name is kept for compatibility with existing callers.
        """
        self.n_cores = n_cores
        self.cpu_usage = cpu_percent
        self.memory_limit = memory_limit
        self.memory_usage = memory_percent
stats = ServerStats()
def generate_text():
    """Return a random lorem-ipsum paragraph prefixed with a fresh UUID.

    The UUID prefix makes every generated payload unique — presumably to
    prevent any response caching from skewing load-test results.
    """
    body = lorem.get_paragraph(
        count=1,
        comma=(0, 20),
        word_range=(4, 12),
        sentence_range=(0, 20),
        sep=os.linesep,
    )
    return f"{uuid.uuid4()} {body}"
class DataPublic(HttpUser):
    """Locust user that load-tests the /datapublic/predict endpoint.

    After each prediction request it refreshes the shared ``stats``
    snapshot from the server and prints a one-line latency/resource summary.
    """

    wait_time = between(1, 2)

    def on_start(self):
        # Prime the shared stats so the first request already has metrics.
        self.fetch_server_stats()

    @task
    def predict_datapublic(self):
        """POST a random text payload and record success/failure by status code."""
        payload = {"text": generate_text()}
        with self.client.post(
            "/datapublic/predict", json=payload, catch_response=True
        ) as response:
            if response.status_code == 200:
                response.success()
            else:
                response.failure(f"Failed with status code: {response.status_code}")
            self.fetch_server_stats()
            print(
                "Request: predict (datapublic),"
                f" Response Time: {response.elapsed.total_seconds() * 1000}ms,"
                f" CPU Usage: {stats.cpu_usage}% ({stats.n_cores} cores),"  # noqa: E501
                f" Memory Usage: {stats.memory_usage} / {stats.memory_limit}MB ({stats.memory_percent}%)",  # noqa: E501
            )

    def fetch_server_stats(self):
        """Fetch /server/stats/summary and update the shared ``stats`` object.

        Best-effort: any failure is logged and the previous snapshot is kept.
        """
        try:
            response = self.client.get("/server/stats/summary")
            if response.status_code != 200:
                # Include the status code so failures are diagnosable.
                logger.error(
                    "Error fetching server stats: HTTP %s", response.status_code
                )
                return
            data = response.json()
            stats.update_metrics(
                n_cores=data.get("cpu_core_limit", 0),
                cpu_percent=data.get("cpu_usage_percent", 0),
                memory_limit=data.get("memory_limit_mb", 0),
                # NOTE(review): keyword is ``memory_percent`` but the value
                # is an absolute MB figure — see ServerStats.update_metrics.
                memory_percent=data.get("memory_usage_mb", 0),
            )
        except Exception as e:
            # Fix: report through the module logger instead of bare print,
            # consistent with the non-200 branch above.
            logger.error("Error fetching server stats: %s", e)
class AnonymizerUser(HttpUser):
    """Locust user that load-tests the /anonymizer/predict endpoint.

    After each prediction request it refreshes the shared ``stats``
    snapshot from the server and prints a one-line latency/resource summary.
    """

    wait_time = between(1, 2)

    def on_start(self):
        # Prime the shared stats so the first request already has metrics.
        self.fetch_server_stats()

    @task
    def predict_anonimizer(self):
        """POST a random text payload and record success/failure by status code."""
        payload = {"text": generate_text()}
        with self.client.post(
            "/anonymizer/predict", json=payload, catch_response=True
        ) as response:
            if response.status_code == 200:
                response.success()
            else:
                response.failure(f"Failed with status code: {response.status_code}")
            self.fetch_server_stats()
            print(
                "Request: predict (anonymizer),"
                f" Response Time: {response.elapsed.total_seconds() * 1000}ms,"
                f" CPU Usage: {stats.cpu_usage}% ({stats.n_cores} cores),"  # noqa: E501
                f" Memory Usage: {stats.memory_usage} / {stats.memory_limit}MB ({stats.memory_percent}%)",  # noqa: E501
            )

    def fetch_server_stats(self):
        """Fetch /server/stats/summary and update the shared ``stats`` object.

        Best-effort: any failure is logged and the previous snapshot is kept.
        """
        try:
            response = self.client.get("/server/stats/summary")
            if response.status_code != 200:
                # Include the status code so failures are diagnosable.
                logger.error(
                    "Error fetching server stats: HTTP %s", response.status_code
                )
                return
            data = response.json()
            stats.update_metrics(
                n_cores=data.get("cpu_core_limit", 0),
                cpu_percent=data.get("cpu_usage_percent", 0),
                memory_limit=data.get("memory_limit_mb", 0),
                # NOTE(review): keyword is ``memory_percent`` but the value
                # is an absolute MB figure — see ServerStats.update_metrics.
                memory_percent=data.get("memory_usage_mb", 0),
            )
        except Exception as e:
            # Fix: report through the module logger instead of bare print,
            # consistent with the non-200 branch above.
            logger.error("Error fetching server stats: %s", e)