
Commit 7653ebd

Revert "HDDS-1965. Compile error due to leftover ScmBlockLocationTestIngClient file (#1293)"
This reverts commit 83e452e.
1 parent 3a145e2 commit 7653ebd

File tree

1 file changed: +195 −0 lines changed
@@ -0,0 +1,195 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package org.apache.hadoop.ozone.om;

import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.client.ContainerBlockID;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.ScmInfo;
import org.apache.hadoop.hdds.scm.TestUtils;
import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
import org.apache.hadoop.hdds.scm.container.common.helpers.DeleteBlockResult;
import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
import org.apache.hadoop.ozone.common.BlockGroup;
import org.apache.hadoop.ozone.common.DeleteBlockGroupResult;
import org.apache.hadoop.util.Time;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.UUID;

import static org.apache.hadoop.hdds.protocol.proto
    .ScmBlockLocationProtocolProtos.DeleteScmBlockResult.Result;
import static org.apache.hadoop.hdds.protocol.proto
    .ScmBlockLocationProtocolProtos.DeleteScmBlockResult.Result.success;
import static org.apache.hadoop.hdds.protocol.proto
    .ScmBlockLocationProtocolProtos.DeleteScmBlockResult.Result.unknownFailure;

/**
 * This is a testing client that allows us to intercept calls from OzoneManager
 * to SCM.
 * <p>
 * TODO: OzoneManager#getScmBlockClient -- so that we can load this class up via
 * config setting into OzoneManager. Right now, we just pass this to
 * KeyDeletingService only.
 * <p>
 * TODO: Move this class to a generic test utils so we can use this class in
 * other Ozone Manager tests.
 */
public class ScmBlockLocationTestingClient implements ScmBlockLocationProtocol {
  private static final Logger LOG =
      LoggerFactory.getLogger(ScmBlockLocationTestingClient.class);
  private final String clusterID;
  private final String scmId;

  // 0 means no calls will fail, +1 means all calls will fail, +2 means every
  // second call will fail, +3 means every third and so on.
  private final int failCallsFrequency;
  private int currentCall = 0;

  /**
   * If ClusterID or SCMID is blank a per instance ID is generated.
   *
   * @param clusterID - String or blank.
   * @param scmId - String or Blank.
   * @param failCallsFrequency - Set to 0 for no failures, 1 for always to fail,
   * a positive number for that frequency of failure.
   */
  public ScmBlockLocationTestingClient(String clusterID, String scmId,
      int failCallsFrequency) {
    this.clusterID = StringUtils.isNotBlank(clusterID) ? clusterID :
        UUID.randomUUID().toString();
    this.scmId = StringUtils.isNotBlank(scmId) ? scmId :
        UUID.randomUUID().toString();
    this.failCallsFrequency = Math.abs(failCallsFrequency);
    switch (this.failCallsFrequency) {
    case 0:
      LOG.debug("Set to no failure mode, all delete block calls will " +
          "succeed.");
      break;
    case 1:
      LOG.debug("Set to all failure mode. All delete block calls to SCM" +
          " will fail.");
      break;
    default:
      LOG.debug("Set to Mix mode, every {} -th call will fail",
          this.failCallsFrequency);
    }

  }

  /**
   * Returns Fake blocks to the BlockManager so we get blocks in the Database.
   * @param size - size of the block.
   * @param type Replication Type
   * @param factor - Replication factor
   * @param owner - String owner.
   * @param excludeList list of dns/pipelines to exclude
   * @return
   * @throws IOException
   */
  @Override
  public List<AllocatedBlock> allocateBlock(long size, int num,
      HddsProtos.ReplicationType type, HddsProtos.ReplicationFactor factor,
      String owner, ExcludeList excludeList) throws IOException {
    DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails();
    Pipeline pipeline = createPipeline(datanodeDetails);
    long containerID = Time.monotonicNow();
    long localID = Time.monotonicNow();
    AllocatedBlock.Builder abb =
        new AllocatedBlock.Builder()
            .setContainerBlockID(new ContainerBlockID(containerID, localID))
            .setPipeline(pipeline);
    return Collections.singletonList(abb.build());
  }

  private Pipeline createPipeline(DatanodeDetails datanode) {
    List<DatanodeDetails> dns = new ArrayList<>();
    dns.add(datanode);
    Pipeline pipeline = Pipeline.newBuilder()
        .setState(Pipeline.PipelineState.OPEN)
        .setId(PipelineID.randomId())
        .setType(HddsProtos.ReplicationType.STAND_ALONE)
        .setFactor(HddsProtos.ReplicationFactor.ONE)
        .setNodes(dns)
        .build();
    return pipeline;
  }

  @Override
  public List<DeleteBlockGroupResult> deleteKeyBlocks(
      List<BlockGroup> keyBlocksInfoList) throws IOException {
    List<DeleteBlockGroupResult> results = new ArrayList<>();
    List<DeleteBlockResult> blockResultList = new ArrayList<>();
    Result result;
    for (BlockGroup keyBlocks : keyBlocksInfoList) {
      for (BlockID blockKey : keyBlocks.getBlockIDList()) {
        currentCall++;
        switch (this.failCallsFrequency) {
        case 0:
          result = success;
          break;
        case 1:
          result = unknownFailure;
          break;
        default:
          if (currentCall % this.failCallsFrequency == 0) {
            result = unknownFailure;
          } else {
            result = success;
          }
        }
        blockResultList.add(new DeleteBlockResult(blockKey, result));
      }
      results.add(new DeleteBlockGroupResult(keyBlocks.getGroupID(),
          blockResultList));
    }
    return results;
  }

  @Override
  public ScmInfo getScmInfo() throws IOException {
    ScmInfo.Builder builder =
        new ScmInfo.Builder()
            .setClusterId(clusterID)
            .setScmId(scmId);
    return builder.build();
  }

  @Override
  public List<DatanodeDetails> sortDatanodes(List<String> nodes,
      String clientMachine) throws IOException {
    return null;
  }

  @Override
  public void close() throws IOException {

  }
}
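
For context, a minimal usage sketch of the restored fake client, not part of the commit itself: it builds the client in "no failure" mode and asks it for a fake block, showing that no real SCM is needed. The example class name, the owner string, and the no-argument ExcludeList constructor are assumptions for illustration; Ozone's real tests instead hand this client to KeyDeletingService, per the class javadoc.

package org.apache.hadoop.ozone.om;

import java.util.List;

import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;

/**
 * Illustrative only -- not part of this commit. Exercises the fake SCM
 * client directly: failCallsFrequency = 0 means every delete call reports
 * success, and allocateBlock fabricates a single-node STAND_ALONE pipeline.
 */
public final class ScmBlockLocationTestingClientExample {
  public static void main(String[] args) throws Exception {
    // Blank cluster and SCM IDs => the client generates per-instance UUIDs.
    ScmBlockLocationTestingClient scmClient =
        new ScmBlockLocationTestingClient("", "", 0);

    // Ask for one fake block; container and local IDs come from
    // Time.monotonicNow(), so no real SCM is contacted.
    List<AllocatedBlock> blocks = scmClient.allocateBlock(1024L, 1,
        HddsProtos.ReplicationType.STAND_ALONE,
        HddsProtos.ReplicationFactor.ONE,
        "test-owner", new ExcludeList());
    System.out.println("Fake blocks allocated: " + blocks.size());

    scmClient.close();
  }
}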
