Skip to content

Commit c791b0e

Browse files
Prabhu Joseph
authored and committed
YARN-10219. Fix YARN Native Service Placement Constraints with Node Attributes.
Contributed by Eric Yang.
1 parent 3edbe87 commit c791b0e

File tree

5 files changed

+62
-58
lines changed

5 files changed

+62
-58
lines changed

hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java

Lines changed: 45 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,7 @@
2626
import static org.apache.hadoop.yarn.service.api.records.Component
2727
.RestartPolicyEnum;
2828
import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest;
29+
import org.apache.hadoop.yarn.api.records.NodeAttributeOpCode;
2930
import org.apache.hadoop.yarn.api.records.Priority;
3031
import org.apache.hadoop.yarn.api.records.Resource;
3132
import org.apache.hadoop.yarn.api.records.ResourceSizing;
@@ -811,16 +812,12 @@ public void requestContainers(long count) {
811812
PlacementConstraint constraint = null;
812813
switch (yarnServiceConstraint.getType()) {
813814
case AFFINITY:
814-
constraint = PlacementConstraints
815-
.targetIn(yarnServiceConstraint.getScope().getValue(),
816-
targetExpressions.toArray(new TargetExpression[0]))
817-
.build();
815+
constraint = getAffinityConstraint(yarnServiceConstraint,
816+
targetExpressions);
818817
break;
819818
case ANTI_AFFINITY:
820-
constraint = PlacementConstraints
821-
.targetNotIn(yarnServiceConstraint.getScope().getValue(),
822-
targetExpressions.toArray(new TargetExpression[0]))
823-
.build();
819+
constraint = getAntiAffinityConstraint(yarnServiceConstraint,
820+
targetExpressions);
824821
break;
825822
case AFFINITY_WITH_CARDINALITY:
826823
constraint = PlacementConstraints.targetCardinality(
@@ -865,6 +862,46 @@ public void requestContainers(long count) {
865862
}
866863
}
867864

865+
private PlacementConstraint getAffinityConstraint(
866+
org.apache.hadoop.yarn.service.api.records.PlacementConstraint
867+
yarnServiceConstraint, List<TargetExpression> targetExpressions) {
868+
PlacementConstraint constraint = null;
869+
if (!yarnServiceConstraint.getTargetTags().isEmpty() ||
870+
!yarnServiceConstraint.getNodePartitions().isEmpty()) {
871+
constraint = PlacementConstraints
872+
.targetIn(yarnServiceConstraint.getScope().getValue(),
873+
targetExpressions.toArray(new TargetExpression[0]))
874+
.build();
875+
}
876+
if (!yarnServiceConstraint.getNodeAttributes().isEmpty()) {
877+
constraint = PlacementConstraints
878+
.targetNodeAttribute(yarnServiceConstraint.getScope().getValue(),
879+
NodeAttributeOpCode.EQ, targetExpressions.toArray(
880+
new TargetExpression[0])).build();
881+
}
882+
return constraint;
883+
}
884+
885+
private PlacementConstraint getAntiAffinityConstraint(
886+
org.apache.hadoop.yarn.service.api.records.PlacementConstraint
887+
yarnServiceConstraint, List<TargetExpression> targetExpressions) {
888+
PlacementConstraint constraint = null;
889+
if (!yarnServiceConstraint.getTargetTags().isEmpty() ||
890+
!yarnServiceConstraint.getNodePartitions().isEmpty()) {
891+
constraint = PlacementConstraints
892+
.targetNotIn(yarnServiceConstraint.getScope().getValue(),
893+
targetExpressions.toArray(new TargetExpression[0]))
894+
.build();
895+
}
896+
if (!yarnServiceConstraint.getNodeAttributes().isEmpty()) {
897+
constraint = PlacementConstraints
898+
.targetNodeAttribute(yarnServiceConstraint.getScope().getValue(),
899+
NodeAttributeOpCode.NE, targetExpressions.toArray(
900+
new TargetExpression[0])).build();
901+
}
902+
return constraint;
903+
}
904+
868905
private void setDesiredContainers(int n) {
869906
int delta = n - scheduler.getServiceMetrics().containersDesired.value();
870907
if (delta != 0) {

hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java

Lines changed: 0 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -354,19 +354,6 @@ private static void validatePlacementPolicy(List<Component> components,
354354
constraint.getName() == null ? "" : constraint.getName() + " ",
355355
comp.getName()));
356356
}
357-
if (constraint.getTargetTags().isEmpty()) {
358-
throw new IllegalArgumentException(String.format(
359-
RestApiErrorMessages.ERROR_PLACEMENT_POLICY_CONSTRAINT_TAGS_NULL,
360-
constraint.getName() == null ? "" : constraint.getName() + " ",
361-
comp.getName()));
362-
}
363-
for (String targetTag : constraint.getTargetTags()) {
364-
if (!comp.getName().equals(targetTag)) {
365-
throw new IllegalArgumentException(String.format(
366-
RestApiErrorMessages.ERROR_PLACEMENT_POLICY_TAG_NAME_NOT_SAME,
367-
targetTag, comp.getName(), comp.getName(), comp.getName()));
368-
}
369-
}
370357
}
371358
}
372359
}

hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -734,6 +734,7 @@ public void testComponentHealthThresholdMonitor() throws Exception {
734734
YarnConfiguration.SCHEDULER_RM_PLACEMENT_CONSTRAINTS_HANDLER);
735735
conf.setInt(YarnConfiguration.RM_MAX_COMPLETED_APPLICATIONS,
736736
YarnConfiguration.DEFAULT_RM_MAX_COMPLETED_APPLICATIONS);
737+
conf.setInt(YarnConfiguration.NM_VCORES, 1);
737738
setConf(conf);
738739
setupInternal(3);
739740
ServiceClient client = createClient(getConf());

hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/utils/TestServiceApiUtil.java

Lines changed: 3 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -554,33 +554,11 @@ public void testPlacementPolicy() throws IOException {
554554
// Set the scope
555555
pc.setScope(PlacementScope.NODE);
556556

557-
try {
558-
ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED);
559-
Assert.fail(EXCEPTION_PREFIX + "constraint with no tag(s)");
560-
} catch (IllegalArgumentException e) {
561-
assertEquals(String.format(
562-
RestApiErrorMessages.ERROR_PLACEMENT_POLICY_CONSTRAINT_TAGS_NULL,
563-
"CA1 ", "comp-a"), e.getMessage());
564-
}
565-
566-
// Set a target tag - but an invalid one
567-
pc.setTargetTags(Collections.singletonList("comp-invalid"));
568-
569-
try {
570-
ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED);
571-
Assert.fail(EXCEPTION_PREFIX + "constraint with invalid tag name");
572-
} catch (IllegalArgumentException e) {
573-
assertEquals(
574-
String.format(
575-
RestApiErrorMessages.ERROR_PLACEMENT_POLICY_TAG_NAME_NOT_SAME,
576-
"comp-invalid", "comp-a", "comp-a", "comp-a"),
577-
e.getMessage());
578-
}
579-
580-
// Set valid target tags now
557+
// Target tag is optional.
581558
pc.setTargetTags(Collections.singletonList("comp-a"));
582559

583-
// Finally it should succeed
560+
// Validation can succeed for any arbitrary target, only scheduler knows
561+
// if the target tag is valid.
584562
try {
585563
ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED);
586564
} catch (IllegalArgumentException e) {

hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md

Lines changed: 13 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -778,9 +778,6 @@ POST URL - http://localhost:8088/app/v1/services
778778
"node_partitions": [
779779
"gpu",
780780
"fast-disk"
781-
],
782-
"target_tags": [
783-
"hello"
784781
]
785782
}
786783
]
@@ -797,11 +794,12 @@ GET URL - http://localhost:8088/app/v1/services/hello-world
797794

798795
Note, for an anti-affinity component no more than 1 container will be allocated
799796
in a specific node. In this example, 3 containers have been requested by
800-
component "hello". All 3 containers were allocated because the cluster had 3 or
801-
more NMs. If the cluster had less than 3 NMs then less than 3 containers would
802-
be allocated. In cases when the number of allocated containers are less than the
803-
number of requested containers, the component and the service will be in
804-
non-STABLE state.
797+
component "hello". All 3 containers were allocated on separated centos7 nodes
798+
because the node attributes expect the containers to run on centos7 nodes.
799+
If the cluster had less than 3 NMs then less than
800+
3 containers would be allocated. In cases when the number of allocated containers
801+
are less than the number of requested containers, the component and the service
802+
will be in non-STABLE state.
805803

806804
```json
807805
{
@@ -822,16 +820,19 @@ non-STABLE state.
822820
"placement_policy": {
823821
"constraints": [
824822
{
825-
"type": "ANTI_AFFINITY",
823+
"type": "AFFINITY",
826824
"scope": "NODE",
827825
"node_attributes": {
828-
"os": ["centos6", "centos7"],
829-
"fault_domain": ["fd1", "fd2"]
826+
"os": ["centos7"]
830827
},
831828
"node_partitions": [
832829
"gpu",
833830
"fast-disk"
834-
],
831+
]
832+
},
833+
{
834+
"type": "ANTI_AFFINITY",
835+
"scope": "NODE",
835836
"target_tags": [
836837
"hello"
837838
]

0 commit comments

Comments
 (0)