Commit 843ec95

Revert "Revert "Revert "Revert "Revert "fix: remove mocks"""""
This reverts commit 4db4b3e.
1 parent 4db4b3e · commit 843ec95

2 files changed: +257 additions, −17 deletions

src/containers/Storage/PaginatedStorageNodesTable/getNodes.ts

Lines changed: 33 additions & 17 deletions
@@ -13,6 +13,8 @@ import {prepareSortValue} from '../../../utils/filters';
 import {getUptimeParamValue} from '../../../utils/nodes';
 import {getRequiredDataFields} from '../../../utils/tableUtils/getRequiredDataFields';
 
+import {generateNodes} from './nodes';
+
 export const getStorageNodes: FetchData<
     PreparedStorageNode,
     PreparedStorageNodeFilters,
@@ -43,23 +45,37 @@ export const getStorageNodes: FetchData<
     const sort = sortField ? prepareSortValue(sortField, sortOrder) : undefined;
 
     const dataFieldsRequired = getRequiredDataFields(columnsIds, NODES_COLUMNS_TO_DATA_FIELDS);
-
-    const response = await window.api.viewer.getNodes({
-        type,
-        storage,
-        limit,
-        offset,
-        sort,
-        filter: searchValue,
-        uptime: getUptimeParamValue(nodesUptimeFilter),
-        with: visibleEntities,
-        database,
-        node_id: nodeId,
-        group_id: groupId,
-        filter_group: filterGroup,
-        filter_group_by: filterGroupBy,
-        fieldsRequired: dataFieldsRequired,
-    });
+    let response;
+    const urlParams = new URLSearchParams(window.location.search);
+    if (urlParams.get('mocks')) {
+        // Get mock configuration from URL parameters or use defaults
+        const pdisks = parseInt(urlParams.get('pdisks') || '10', 10);
+        const vdisksPerPDisk = parseInt(urlParams.get('vdisksPerPDisk') || '2', 10);
+        const totalNodes = parseInt(urlParams.get('totalNodes') || '50', 10);
+        response = generateNodes(totalNodes, {
+            maxVdisksPerPDisk: vdisksPerPDisk,
+            maxPdisks: pdisks,
+            offset,
+            limit,
+        });
+    } else {
+        response = await window.api.viewer.getNodes({
+            type,
+            storage,
+            limit,
+            offset,
+            sort,
+            filter: searchValue,
+            uptime: getUptimeParamValue(nodesUptimeFilter),
+            with: visibleEntities,
+            database,
+            node_id: nodeId,
+            group_id: groupId,
+            filter_group: filterGroup,
+            filter_group_by: filterGroupBy,
+            fieldsRequired: dataFieldsRequired,
+        });
+    }
     const preparedResponse = prepareStorageNodesResponse(response);
     return {
         data: preparedResponse.nodes || [],
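
With this change, the paginated storage nodes table can be fed generated data instead of a live backend response. A minimal sketch of how the mock branch would be triggered, assuming the storage nodes page of the UI is already open; the host and path in the comment are illustrative only, since the branch only looks at the query string:

```ts
// Any URL that reaches getStorageNodes works, as long as the query string
// carries the parameters read via new URLSearchParams(window.location.search).
// `mocks` just has to be a non-empty value for urlParams.get('mocks') to be truthy.
const params = new URLSearchParams({
    mocks: 'true', // enables the generateNodes() branch
    totalNodes: '100', // overrides the default of 50 nodes
    pdisks: '5', // forwarded as maxPdisks
    vdisksPerPDisk: '3', // forwarded as maxVdisksPerPDisk
});

// e.g. https://<ui-host>/storage?mocks=true&totalNodes=100&pdisks=5&vdisksPerPDisk=3
console.log(`?${params.toString()}`);
```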
src/containers/Storage/PaginatedStorageNodesTable/nodes.ts

Lines changed: 224 additions & 0 deletions
@@ -0,0 +1,224 @@
import {EFlag} from '../../../types/api/enums';
import type {
    TEndpoint,
    TNodeInfo,
    TNodesInfo,
    TPoolStats,
    TSystemStateInfo,
} from '../../../types/api/nodes';
import {TPDiskState} from '../../../types/api/pdisk';
import {EVDiskState} from '../../../types/api/vdisk';

// Different disk sizes to simulate variety (in bytes)
const DISK_SIZES = [
    '68719476736', // 64 GB
    '137438953472', // 128 GB
    '274877906944', // 256 GB
    '549755813888', // 512 GB
    '1099511627776', // 1 TB
];

const getRandomDiskSize = () => DISK_SIZES[Math.floor(Math.random() * DISK_SIZES.length)];

const generatePoolStats = (count = 5): TPoolStats[] => {
    const poolNames = ['System', 'User', 'Batch', 'IO', 'IC'] as const;
    return poolNames.slice(0, count).map((Name) => ({
        Name,
        Usage: Math.random() * 0.02,
        Threads: Math.floor(Math.random() * 3) + 1,
    }));
};

const generateEndpoints = (): TEndpoint[] => [
    {Name: 'ic', Address: ':19001'},
    {Name: 'http-mon', Address: ':8765'},
    {Name: 'grpcs', Address: ':2135'},
    {Name: 'grpc', Address: ':2136'},
];

const generateSystemState = (nodeId: number): TSystemStateInfo => ({
    StartTime: '1734358137851',
    ChangeTime: '1734358421375',
    LoadAverage: [3.381347656, 2.489257813, 1.279296875],
    NumberOfCpus: 8,
    SystemState: EFlag.Green,
    NodeId: nodeId,
    Host: `localhost-${nodeId}`,
    Version: 'main.95ce0df',
    PoolStats: generatePoolStats(),
    Endpoints: generateEndpoints(),
    Roles: ['Bootstrapper', 'StateStorage', 'StateStorageBoard', 'SchemeBoard', 'Storage'],
    MemoryLimit: '2147483648',
    MaxDiskUsage: 0.002349853516,
    Location: {
        DataCenter: '1',
        Rack: '1',
        Unit: '1',
    },
    TotalSessions: 0,
    CoresUsed: 0.07583969556,
    CoresTotal: 8,
});

const generatePDisk = (nodeId: number, pdiskId: number, totalSize = '68719476736') => ({
    PDiskId: pdiskId,
    ChangeTime: '1734358142074',
    Path: `/ydb_data/pdisk${pdiskId}l3ki78no.data`,
    Guid: pdiskId.toString(),
    Category: '0',
    TotalSize: totalSize,
    AvailableSize: (Number(totalSize) * 0.9).toString(), // 90% available by default
    State: TPDiskState.Normal,
    NodeId: nodeId,
    Device: EFlag.Green,
    Realtime: EFlag.Green,
    SerialNumber: '',
    SystemSize: '213909504',
    LogUsedSize: '35651584',
    LogTotalSize: '68486692864',
    EnforcedDynamicSlotSize: '22817013760',
});

const generateVDisk = (nodeId: number, vdiskId: number, pdiskId: number) => ({
    VDiskId: {
        GroupID: vdiskId,
        GroupGeneration: 1,
        Ring: 0,
        Domain: 0,
        VDisk: 0,
    },
    ChangeTime: '1734358420919',
    PDiskId: pdiskId,
    VDiskSlotId: vdiskId,
    Guid: '1',
    Kind: '0',
    NodeId: nodeId,
    VDiskState: EVDiskState.OK,
    DiskSpace: EFlag.Green,
    SatisfactionRank: {
        FreshRank: {
            Flag: EFlag.Green,
        },
        LevelRank: {
            Flag: EFlag.Green,
        },
    },
    Replicated: true,
    ReplicationProgress: 1,
    ReplicationSecondsRemaining: 0,
    AllocatedSize: '0',
    AvailableSize: '22817013760',
    HasUnreadableBlobs: false,
    IncarnationGuid: '11528832187803248876',
    InstanceGuid: '14836434871903384493',
    FrontQueues: EFlag.Green,
    StoragePoolName: 'static',
    ReadThroughput: '0',
    WriteThroughput: '420',
});

interface NodeGeneratorOptions {
    maxVdisksPerPDisk?: number;
    maxPdisks?: number;
}

const DEFAULT_OPTIONS: NodeGeneratorOptions = {
    maxVdisksPerPDisk: 3,
    maxPdisks: 4,
};

const generateNode = (nodeId: number, options: NodeGeneratorOptions = {}): TNodeInfo => {
    const maxPdisks = options.maxPdisks ?? DEFAULT_OPTIONS.maxPdisks!;
    const maxVdisksPerPDisk = options.maxVdisksPerPDisk ?? DEFAULT_OPTIONS.maxVdisksPerPDisk!;

    // Generate a random number of pdisks up to maxPdisks
    const pdisksCount = Math.floor(Math.random() * maxPdisks) + 1;

    // For each pdisk, generate a random number of vdisks up to maxVdisksPerPDisk
    const pdiskVdisksCounts = Array.from({length: pdisksCount}, () =>
        Math.floor(Math.random() * maxVdisksPerPDisk),
    );
    const totalVdisks = pdiskVdisksCounts.reduce((sum: number, count: number) => sum + count, 0);

    return {
        NodeId: nodeId,
        UptimeSeconds: 284,
        CpuUsage: 0.00947996,
        DiskSpaceUsage: 0.234985,
        SystemState: generateSystemState(nodeId),
        PDisks: Array.from({length: pdisksCount}, (_, i) =>
            generatePDisk(nodeId, i + 1, getRandomDiskSize()),
        ),
        VDisks: Array.from({length: totalVdisks}, (_, i) => {
            // Find which pdisk this vdisk belongs to based on the distribution
            let pdiskIndex = 0;
            let vdiskCount = pdiskVdisksCounts[0];
            while (i >= vdiskCount && pdiskIndex < pdisksCount - 1) {
                pdiskIndex++;
                vdiskCount += pdiskVdisksCounts[pdiskIndex];
            }
            return generateVDisk(nodeId, i, pdiskIndex + 1);
        }),
    };
};

interface GenerateNodesOptions extends NodeGeneratorOptions {
    offset?: number;
    limit?: number;
}

// Keep a cache of generated nodes to maintain consistency between paginated requests
let cachedNodes: TNodeInfo[] | null = null;
let currentTotalNodes = 50; // Default number of nodes

export const generateNodes = (count?: number, options: GenerateNodesOptions = {}): TNodesInfo => {
    const totalNodes = count ?? currentTotalNodes;
    const {offset = 0, limit = totalNodes, maxVdisksPerPDisk, maxPdisks} = options;

    // Reset cache if total nodes count changes
    if (totalNodes !== currentTotalNodes) {
        cachedNodes = null;
        currentTotalNodes = totalNodes;
    }

    // Generate or use cached nodes
    if (!cachedNodes) {
        cachedNodes = Array.from({length: totalNodes}, (_, i) =>
            generateNode(i + 1, {maxVdisksPerPDisk, maxPdisks}),
        );
    }

    // Calculate MaximumSlotsPerDisk and MaximumDisksPerNode across all nodes
    let maxSlotsPerDisk = 0;
    let maxDisksPerNode = 0;

    cachedNodes.forEach((node) => {
        // Count pdisks per node
        if (node.PDisks) {
            maxDisksPerNode = Math.max(maxDisksPerNode, node.PDisks.length);
        }

        // Count vdisks per pdisk
        if (node.VDisks) {
            const pdiskVdiskCounts = new Map<number, number>();
            node.VDisks.forEach((vdisk) => {
                if (typeof vdisk.PDiskId === 'number') {
                    const count = (pdiskVdiskCounts.get(vdisk.PDiskId) || 0) + 1;
                    pdiskVdiskCounts.set(vdisk.PDiskId, count);
                    maxSlotsPerDisk = Math.max(maxSlotsPerDisk, count);
                }
            });
        }
    });

    // Get the requested slice of nodes
    const paginatedNodes = cachedNodes.slice(offset, offset + limit);

    return {
        TotalNodes: totalNodes.toString(),
        FoundNodes: totalNodes.toString(),
        Nodes: paginatedNodes,
        MaximumSlotsPerDisk: maxSlotsPerDisk.toString(),
        MaximumDisksPerNode: maxDisksPerNode.toString(),
    };
};
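
For reference, a small usage sketch of the generator added above; the numbers are arbitrary and only illustrate the caching and pagination behaviour (repeated calls with the same node count reuse the cached nodes, so consecutive pages stay consistent):

```ts
import {generateNodes} from './nodes';

// First page: builds and caches 100 random nodes, returns nodes 1-20.
const page1 = generateNodes(100, {maxPdisks: 4, maxVdisksPerPDisk: 2, offset: 0, limit: 20});

// Second page: same total count, so the cache is reused and nodes 21-40 are returned.
const page2 = generateNodes(100, {offset: 20, limit: 20});

console.log(page1.TotalNodes); // '100'
console.log(page1.Nodes?.length); // 20
console.log(page2.Nodes?.[0]?.NodeId); // 21
console.log(page1.MaximumDisksPerNode); // max PDisks on any generated node, as a string
```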
