Commit d1a9b3a

Author: David Roberts
[ML] Add effective max model memory limit to ML info (#55529)
The ML info endpoint returns the max_model_memory_limit setting if one is configured. However, it is still possible to create a job that cannot run anywhere in the current cluster, because no node in the cluster has enough memory to accommodate it. This change adds an extra piece of information, limits.effective_max_model_memory_limit, to the ML info response: the largest model memory limit with which a job could run in the current cluster, assuming no other jobs were running. The idea is that the ML UI will be able to warn users who try to create jobs with higher model memory limits that their jobs will not be able to start unless they add a bigger ML node to their cluster.

Relates elastic/kibana#63942
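
To make the arithmetic concrete, here is a minimal standalone Java sketch of the calculation described above and implemented in TransportMlInfoAction.calculateEffectiveMaxModelMemoryLimit below. The node memory figures and the two overhead values are made-up placeholders; the real overheads come from Job.PROCESS_MEMORY_OVERHEAD, DataFrameAnalyticsConfig.PROCESS_MEMORY_OVERHEAD and MachineLearning.NATIVE_EXECUTABLE_CODE_OVERHEAD.

import java.util.List;

// Illustrative only: mirrors the shape of the effective-limit calculation with made-up numbers.
public class EffectiveLimitSketch {

    public static void main(String[] args) {
        // Hypothetical machine memory (in bytes) advertised by each ML node: 32 GB and 64 GB.
        List<Long> mlNodeMachineMemoryBytes = List.of(34_359_738_368L, 68_719_476_736L);
        int maxMachineMemoryPercent = 30; // the xpack.ml.max_machine_memory_percent setting

        // Placeholder overheads (NOT the real constants): per-process overhead and native code overhead.
        long processOverheadBytes = 10L * 1024 * 1024;
        long nativeCodeOverheadBytes = 30L * 1024 * 1024;

        // Only the single biggest ML node matters: a job cannot be split across nodes.
        long maxMlMemory = mlNodeMachineMemoryBytes.stream()
            .mapToLong(machineMemory -> machineMemory * maxMachineMemoryPercent / 100)
            .max()
            .orElse(-1L);

        long effectiveLimitBytes = Math.max(0L, maxMlMemory - processOverheadBytes - nativeCodeOverheadBytes);
        System.out.println("effective_max_model_memory_limit ~ " + effectiveLimitBytes / (1024 * 1024) + "mb");
    }
}

With these placeholder numbers the larger node dominates, so the result is roughly 30% of 64 GB minus the two overheads.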
1 parent 4e8235f commit d1a9b3a

File tree: 4 files changed (+156, -3 lines)

docs/reference/ml/anomaly-detection/apis/get-ml-info.asciidoc

Lines changed: 4 additions & 1 deletion
@@ -113,9 +113,12 @@ This is a possible response:
     "version": "7.0.0",
     "build_hash": "99a07c016d5a73"
   },
-  "limits" : { }
+  "limits" : {
+    "effective_max_model_memory_limit": "28961mb"
+  }
 }
 ----
 // TESTRESPONSE[s/"upgrade_mode": false/"upgrade_mode": $body.upgrade_mode/]
 // TESTRESPONSE[s/"version": "7.0.0",/"version": "$body.native_code.version",/]
 // TESTRESPONSE[s/"build_hash": "99a07c016d5a73"/"build_hash": "$body.native_code.build_hash"/]
+// TESTRESPONSE[s/"effective_max_model_memory_limit": "28961mb"/"effective_max_model_memory_limit": "$body.limits.effective_max_model_memory_limit"/]

x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportMlInfoAction.java

Lines changed: 44 additions & 1 deletion
@@ -10,6 +10,8 @@
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.HandledTransportAction;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.unit.ByteSizeUnit;
@@ -21,9 +23,11 @@
 import org.elasticsearch.xpack.core.ml.MlMetadata;
 import org.elasticsearch.xpack.core.ml.action.MlInfoAction;
 import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig;
+import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsConfig;
 import org.elasticsearch.xpack.core.ml.job.config.AnalysisLimits;
 import org.elasticsearch.xpack.core.ml.job.config.CategorizationAnalyzerConfig;
 import org.elasticsearch.xpack.core.ml.job.config.Job;
+import org.elasticsearch.xpack.ml.MachineLearning;
 import org.elasticsearch.xpack.ml.process.MlControllerHolder;
 
 import java.io.IOException;
@@ -106,11 +110,50 @@ private Map<String, Object> datafeedsDefaults() {
         return anomalyDetectorsDefaults;
     }
 
+    static ByteSizeValue calculateEffectiveMaxModelMemoryLimit(int maxMachineMemoryPercent, DiscoveryNodes nodes) {
+
+        long maxMlMemory = -1;
+
+        for (DiscoveryNode node : nodes) {
+
+            Map<String, String> nodeAttributes = node.getAttributes();
+            String machineMemoryStr = nodeAttributes.get(MachineLearning.MACHINE_MEMORY_NODE_ATTR);
+            if (machineMemoryStr == null) {
+                continue;
+            }
+            long machineMemory;
+            try {
+                machineMemory = Long.parseLong(machineMemoryStr);
+            } catch (NumberFormatException e) {
+                continue;
+            }
+            maxMlMemory = Math.max(maxMlMemory, machineMemory * maxMachineMemoryPercent / 100);
+        }
+
+        if (maxMlMemory <= 0) {
+            // This implies there are currently no ML nodes in the cluster, so we
+            // have no idea what the effective limit would be if one were added
+            return null;
+        }
+
+        maxMlMemory -= Math.max(Job.PROCESS_MEMORY_OVERHEAD.getBytes(), DataFrameAnalyticsConfig.PROCESS_MEMORY_OVERHEAD.getBytes());
+        maxMlMemory -= MachineLearning.NATIVE_EXECUTABLE_CODE_OVERHEAD.getBytes();
+        return new ByteSizeValue(Math.max(0L, maxMlMemory) / 1024 / 1024, ByteSizeUnit.MB);
+    }
+
     private Map<String, Object> limits() {
         Map<String, Object> limits = new HashMap<>();
+        ByteSizeValue effectiveMaxModelMemoryLimit = calculateEffectiveMaxModelMemoryLimit(
+            clusterService.getClusterSettings().get(MachineLearning.MAX_MACHINE_MEMORY_PERCENT), clusterService.state().getNodes());
         ByteSizeValue maxModelMemoryLimit = clusterService.getClusterSettings().get(MachineLearningField.MAX_MODEL_MEMORY_LIMIT);
         if (maxModelMemoryLimit != null && maxModelMemoryLimit.getBytes() > 0) {
-            limits.put("max_model_memory_limit", maxModelMemoryLimit);
+            limits.put("max_model_memory_limit", maxModelMemoryLimit.getStringRep());
+            if (effectiveMaxModelMemoryLimit == null || effectiveMaxModelMemoryLimit.compareTo(maxModelMemoryLimit) > 0) {
+                effectiveMaxModelMemoryLimit = maxModelMemoryLimit;
+            }
+        }
+        if (effectiveMaxModelMemoryLimit != null) {
+            limits.put("effective_max_model_memory_limit", effectiveMaxModelMemoryLimit.getStringRep());
         }
         return limits;
     }
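
For context on how the new field is meant to be consumed, here is a hypothetical client-side check in the spirit of the UI warning described in the commit message. The limits map and the requested value are invented for illustration, and ByteSizeValue is assumed to be on the classpath from the Elasticsearch core library.

import org.elasticsearch.common.unit.ByteSizeValue;

import java.util.Map;

// Hypothetical consumer of the new field; values are made up for illustration.
public class EffectiveLimitCheckSketch {

    public static void main(String[] args) {
        // Pretend this is the "limits" section of a GET _ml/info response.
        Map<String, String> limits = Map.of(
            "max_model_memory_limit", "512mb",
            "effective_max_model_memory_limit", "412mb");

        // A model memory limit the user is about to configure for a new job.
        ByteSizeValue requested = ByteSizeValue.parseBytesSizeValue("1gb", "model_memory_limit");
        ByteSizeValue effective = ByteSizeValue.parseBytesSizeValue(
            limits.get("effective_max_model_memory_limit"), "effective_max_model_memory_limit");

        if (requested.compareTo(effective) > 0) {
            System.out.println("Warning: no ML node in the current cluster could run a job this large.");
        }
    }
}

The limits values are plain strings such as "512mb" because the transport action now puts getStringRep() into the response rather than the ByteSizeValue object itself.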

x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportMlInfoActionTests.java

Lines changed: 65 additions & 0 deletions
@@ -0,0 +1,65 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.ml.action;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.common.transport.TransportAddress;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsConfig;
+import org.elasticsearch.xpack.core.ml.job.config.Job;
+import org.elasticsearch.xpack.ml.MachineLearning;
+
+import java.net.InetAddress;
+import java.util.Collections;
+
+import static org.hamcrest.Matchers.lessThanOrEqualTo;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.Matchers.nullValue;
+
+public class TransportMlInfoActionTests extends ESTestCase {
+
+    public void testCalculateEffectiveMaxModelMemoryLimit() {
+
+        int mlMemoryPercent = randomIntBetween(5, 90);
+        long highestMlMachineMemory = -1;
+
+        DiscoveryNodes.Builder builder = DiscoveryNodes.builder();
+        for (int i = randomIntBetween(1, 10); i > 0; --i) {
+            String nodeName = "_node_name" + i;
+            String nodeId = "_node_id" + i;
+            TransportAddress ta = new TransportAddress(InetAddress.getLoopbackAddress(), 9300 + i);
+            if (randomBoolean()) {
+                // Not an ML node
+                builder.add(new DiscoveryNode(nodeName, nodeId, ta, Collections.emptyMap(), Collections.emptySet(), Version.CURRENT));
+            } else {
+                // ML node
+                long machineMemory = randomLongBetween(2000000000L, 100000000000L);
+                highestMlMachineMemory = Math.max(machineMemory, highestMlMachineMemory);
+                builder.add(new DiscoveryNode(nodeName, nodeId, ta,
+                    Collections.singletonMap(MachineLearning.MACHINE_MEMORY_NODE_ATTR, String.valueOf(machineMemory)),
+                    Collections.emptySet(), Version.CURRENT));
+            }
+        }
+        DiscoveryNodes nodes = builder.build();
+
+        ByteSizeValue effectiveMaxModelMemoryLimit =
+            TransportMlInfoAction.calculateEffectiveMaxModelMemoryLimit(mlMemoryPercent, nodes);
+
+        if (highestMlMachineMemory < 0) {
+            assertThat(effectiveMaxModelMemoryLimit, nullValue());
+        } else {
+            assertThat(effectiveMaxModelMemoryLimit, notNullValue());
+            assertThat(effectiveMaxModelMemoryLimit.getBytes()
+                + Math.max(Job.PROCESS_MEMORY_OVERHEAD.getBytes(), DataFrameAnalyticsConfig.PROCESS_MEMORY_OVERHEAD.getBytes())
+                + MachineLearning.NATIVE_EXECUTABLE_CODE_OVERHEAD.getBytes(),
+                lessThanOrEqualTo(highestMlMachineMemory * mlMemoryPercent / 100));
+        }
+    }
+}

x-pack/plugin/src/test/resources/rest-api-spec/test/ml/ml_info.yml

Lines changed: 43 additions & 1 deletion
@@ -15,7 +15,9 @@ teardown:
   - match: { defaults.anomaly_detectors.categorization_examples_limit: 4 }
   - match: { defaults.anomaly_detectors.model_snapshot_retention_days: 1 }
   - match: { defaults.datafeeds.scroll_size: 1000 }
-  - match: { limits: {} }
+  - is_false: limits.max_model_memory_limit
+  # We cannot assert an exact value for the next one as it will vary depending on the test machine
+  - match: { limits.effective_max_model_memory_limit: "/\\d+[kmg]?b/" }
   - match: { upgrade_mode: false }
 
   - do:
@@ -32,6 +34,8 @@ teardown:
   - match: { defaults.anomaly_detectors.model_snapshot_retention_days: 1 }
   - match: { defaults.datafeeds.scroll_size: 1000 }
   - match: { limits.max_model_memory_limit: "512mb" }
+  # We cannot assert an exact value for the next one as it will vary depending on the test machine
+  - match: { limits.effective_max_model_memory_limit: "/\\d+[kmg]?b/" }
   - match: { upgrade_mode: false }
 
   - do:
@@ -48,4 +52,42 @@ teardown:
   - match: { defaults.anomaly_detectors.model_snapshot_retention_days: 1 }
   - match: { defaults.datafeeds.scroll_size: 1000 }
   - match: { limits.max_model_memory_limit: "6gb" }
+  # We cannot assert an exact value for the next one as it will vary depending on the test machine
+  - match: { limits.effective_max_model_memory_limit: "/\\d+[kmg]?b/" }
+  - match: { upgrade_mode: false }
+
+  - do:
+      cluster.put_settings:
+        body:
+          persistent:
+            xpack.ml.max_model_memory_limit: "6gb"
+
+  - do:
+      ml.info: {}
+  - match: { defaults.anomaly_detectors.categorization_analyzer.tokenizer: "ml_classic" }
+  - match: { defaults.anomaly_detectors.model_memory_limit: "1gb" }
+  - match: { defaults.anomaly_detectors.categorization_examples_limit: 4 }
+  - match: { defaults.anomaly_detectors.model_snapshot_retention_days: 1 }
+  - match: { defaults.datafeeds.scroll_size: 1000 }
+  - match: { limits.max_model_memory_limit: "6gb" }
+  # We cannot assert an exact value for the next one as it will vary depending on the test machine
+  - match: { limits.effective_max_model_memory_limit: "/\\d+[kmg]?b/" }
+  - match: { upgrade_mode: false }
+
+  - do:
+      cluster.put_settings:
+        body:
+          persistent:
+            xpack.ml.max_model_memory_limit: "1mb"
+
+  - do:
+      ml.info: {}
+  - match: { defaults.anomaly_detectors.categorization_analyzer.tokenizer: "ml_classic" }
+  - match: { defaults.anomaly_detectors.model_memory_limit: "1mb" }
+  - match: { defaults.anomaly_detectors.categorization_examples_limit: 4 }
+  - match: { defaults.anomaly_detectors.model_snapshot_retention_days: 1 }
+  - match: { defaults.datafeeds.scroll_size: 1000 }
+  - match: { limits.max_model_memory_limit: "1mb" }
+  # This time we can assert an exact value for the next one because the hard limit is so low
+  - match: { limits.effective_max_model_memory_limit: "1mb" }
   - match: { upgrade_mode: false }
