HDDS-11544. Improve work with arrays.

Some other meaningful refactoring.

ivanzlenko committed Oct 8, 2024
1 parent b3afaec commit 5c24a20
Showing 16 changed files with 190 additions and 213 deletions.
@@ -18,22 +18,12 @@
  */
 package org.apache.hadoop.hdds.security.x509.certificate.utils;
 
-import java.io.IOException;
-import java.io.StringReader;
-import java.io.StringWriter;
-import java.net.InetAddress;
-import java.security.KeyPair;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Optional;
-
+import com.google.common.base.Preconditions;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.commons.validator.routines.DomainValidator;
 import org.apache.hadoop.hdds.security.SecurityConfig;
 import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
 import org.apache.hadoop.hdds.security.x509.exception.CertificateException;
-
-import com.google.common.base.Preconditions;
 import org.apache.hadoop.ozone.OzoneSecurityUtil;
 import org.bouncycastle.asn1.ASN1EncodableVector;
 import org.bouncycastle.asn1.ASN1Object;
@@ -65,6 +55,15 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.IOException;
+import java.io.StringReader;
+import java.io.StringWriter;
+import java.net.InetAddress;
+import java.security.KeyPair;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Optional;
+
 import static org.apache.hadoop.hdds.security.exception.SCMSecurityException.ErrorCode.INVALID_CSR;
 import static org.apache.hadoop.hdds.security.x509.exception.CertificateException.ErrorCode.CSR_ERROR;
 
@@ -390,7 +389,7 @@ private Optional<Extension> getSubjectAltNameExtension() throws
     if (altNames != null) {
       return Optional.of(new Extension(Extension.subjectAlternativeName,
           false, new DEROctetString(new GeneralNames(
-              altNames.toArray(new GeneralName[altNames.size()])))));
+              altNames.toArray(new GeneralName[0])))));
     }
     return Optional.empty();
   }
@@ -414,12 +413,10 @@ private Extensions createExtensions() throws IOException {
 
     // Add subject alternate name extension
     Optional<Extension> san = getSubjectAltNameExtension();
-    if (san.isPresent()) {
-      extensions.add(san.get());
-    }
+    san.ifPresent(extensions::add);
 
     return new Extensions(
-        extensions.toArray(new Extension[extensions.size()]));
+        extensions.toArray(new Extension[0]));
   }
 
   public CertificateSignRequest build() throws SCMSecurityException {
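The toArray changes in this file follow the widely recommended empty-array idiom: passing new T[0] lets the collection allocate the result array itself, which modern JVMs handle at least as fast as a presized array (the analysis is often attributed to Aleksey Shipilev's "Arrays of Wisdom of the Ancients"), and it avoids calling size() separately from the copy, which matters for concurrent collections whose size can change in between. A minimal sketch of the two variants, with hypothetical names:

    import java.util.List;

    class ToArrayIdiom {
      // Preferred: the list allocates a correctly sized array itself, and the
      // JIT can skip zero-filling an array it knows will be fully overwritten.
      static String[] viaEmptyArray(List<String> names) {
        return names.toArray(new String[0]);
      }

      // Former style: size() is read separately from the copy, so a concurrent
      // modification between the two calls can leave null padding or force a
      // second allocation inside toArray.
      static String[] viaPresizedArray(List<String> names) {
        return names.toArray(new String[names.size()]);
      }
    }

The same file also replaces the isPresent()/get() pair with Optional.ifPresent, which expresses "add if present" in a single call.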
@@ -25,6 +25,7 @@
 import org.apache.ratis.util.MemoizedSupplier;
 
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.List;
 import java.util.function.Supplier;
 
@@ -50,7 +51,7 @@ public static HddsPolicyProvider getInstance() {
   }
 
   private static final List<Service> DN_SERVICES =
-      Arrays.asList(
+      Collections.singletonList(
           new Service(
               OZONE_SECURITY_RECONFIGURE_PROTOCOL_ACL,
               ReconfigureProtocol.class)
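For a one-element list, Collections.singletonList is the better fit than Arrays.asList: it returns a smaller, truly immutable list, while Arrays.asList wraps a varargs array that is fixed-size but still mutable through set(). A small sketch with a placeholder element:

    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;

    public class SingleElementList {
      public static void main(String[] args) {
        List<String> singleton = Collections.singletonList("acl-entry");
        List<String> fromAsList = Arrays.asList("acl-entry");

        fromAsList.set(0, "changed");    // allowed: fixed-size but mutable
        try {
          singleton.set(0, "changed");   // throws: singletonList is immutable
        } catch (UnsupportedOperationException e) {
          System.out.println("singletonList rejects mutation");
        }
      }
    }

For a constant like DN_SERVICES, the immutability is a feature: no caller can accidentally swap out the ACL service entry.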
@@ -18,31 +18,8 @@
 
 package org.apache.hadoop.ozone.container.common.transport.server.ratis;
 
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Objects;
-import java.util.Optional;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.CompletionException;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Semaphore;
-import java.util.concurrent.ThreadFactory;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.function.Consumer;
-import java.util.stream.Collectors;
-
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
@@ -69,21 +46,18 @@
 import org.apache.hadoop.ozone.container.keyvalue.impl.KeyValueStreamDataChannel;
 import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController;
 import org.apache.hadoop.util.Time;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
 import org.apache.ratis.proto.RaftProtos;
-import org.apache.ratis.proto.RaftProtos.StateMachineEntryProto;
 import org.apache.ratis.proto.RaftProtos.LogEntryProto;
 import org.apache.ratis.proto.RaftProtos.RaftPeerRole;
 import org.apache.ratis.proto.RaftProtos.RoleInfoProto;
+import org.apache.ratis.proto.RaftProtos.StateMachineEntryProto;
 import org.apache.ratis.proto.RaftProtos.StateMachineLogEntryProto;
 import org.apache.ratis.protocol.Message;
 import org.apache.ratis.protocol.RaftClientRequest;
 import org.apache.ratis.protocol.RaftGroupId;
 import org.apache.ratis.protocol.RaftGroupMemberId;
-import org.apache.ratis.protocol.RaftPeerId;
 import org.apache.ratis.protocol.RaftPeer;
+import org.apache.ratis.protocol.RaftPeerId;
 import org.apache.ratis.protocol.exceptions.StateMachineException;
 import org.apache.ratis.server.RaftServer;
 import org.apache.ratis.server.protocol.TermIndex;
@@ -98,13 +72,37 @@
 import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
 import org.apache.ratis.thirdparty.com.google.protobuf.InvalidProtocolBufferException;
 import org.apache.ratis.thirdparty.com.google.protobuf.TextFormat;
+import org.apache.ratis.util.JavaUtils;
 import org.apache.ratis.util.LifeCycle;
 import org.apache.ratis.util.TaskQueue;
 import org.apache.ratis.util.function.CheckedSupplier;
-import org.apache.ratis.util.JavaUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.CompletionException;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.function.Consumer;
+
 /**
  * A {@link StateMachine} for containers,
  * which is responsible for handling different types of container requests.
@@ -818,11 +816,9 @@ private ByteString readStateMachineData(
    */
   @Override
   public CompletableFuture<Void> flush(long index) {
-    List<CompletableFuture<ContainerCommandResponseProto>> futureList =
-        writeChunkFutureMap.entrySet().stream().filter(x -> x.getKey() <= index)
-            .map(Map.Entry::getValue).collect(Collectors.toList());
     return CompletableFuture.allOf(
-        futureList.toArray(new CompletableFuture[futureList.size()]));
+        writeChunkFutureMap.entrySet().stream().filter(x -> x.getKey() <= index)
+            .map(Map.Entry::getValue).toArray(CompletableFuture[]::new));
   }
 
   /**
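The rewritten flush() drops the intermediate List entirely: the futures that match the flush index flow straight from the stream into the array that CompletableFuture.allOf expects, which is also why the java.util.stream.Collectors import could be removed above. A self-contained sketch of the pattern, with the map name and element types simplified from the real class:

    import java.util.Map;
    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.ConcurrentHashMap;

    class FlushPattern {
      private final Map<Long, CompletableFuture<Void>> writeChunkFutureMap =
          new ConcurrentHashMap<>();

      // Wait for every pending write whose log index is <= the flush index.
      CompletableFuture<Void> flush(long index) {
        return CompletableFuture.allOf(
            writeChunkFutureMap.entrySet().stream()
                .filter(e -> e.getKey() <= index)
                .map(Map.Entry::getValue)
                .toArray(CompletableFuture[]::new));
      }
    }

Stream.toArray(CompletableFuture[]::new) performs one pass and one allocation, instead of collecting into a list and copying it into an array afterwards.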
@@ -18,11 +18,13 @@
 package org.apache.hadoop.ozone.container.ec.reconstruction;
 
 import com.google.common.collect.ImmutableList;
+import jakarta.annotation.Nonnull;
 import org.apache.commons.collections.map.SingletonMap;
 import org.apache.hadoop.hdds.client.ECReplicationConfig;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
 import org.apache.hadoop.hdds.scm.XceiverClientSpi;
 import org.apache.hadoop.hdds.scm.client.ClientTrustManager;
@@ -34,8 +36,6 @@
 import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State;
-import jakarta.annotation.Nonnull;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -44,7 +44,6 @@
 import java.util.List;
 import java.util.Objects;
 import java.util.Set;
-import java.util.stream.Collectors;
 
 /**
  * This class wraps necessary container-level rpc calls
@@ -93,14 +92,11 @@ public BlockData[] listBlock(long containerId, DatanodeDetails dn,
       try {
         return BlockData.getFromProtoBuf(i);
       } catch (IOException e) {
-        LOG.debug("Failed while converting to protobuf BlockData. Returning"
-            + " null for listBlock from DN: " + dn,
-            e);
+        LOG.debug("Failed while converting to protobuf BlockData. Returning null for listBlock from DN: {}", dn, e);
         // TODO: revisit here.
         return null;
       }
-    }).collect(Collectors.toList())
-        .toArray(new BlockData[blockDataList.size()]);
+    }).toArray(BlockData[]::new);
   } finally {
     this.xceiverClientManager.releaseClient(xceiverClient, false);
   }
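Two things change in listBlock: the stream now produces the BlockData[] directly via toArray(BlockData[]::new), and the log call switches from string concatenation to SLF4J parameterized logging. With a {} placeholder the message is only rendered when DEBUG is actually enabled, and a Throwable passed as the final argument still gets its stack trace printed even though it has no matching placeholder. A minimal sketch, with hypothetical names:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class LogSketch {
      private static final Logger LOG = LoggerFactory.getLogger(LogSketch.class);

      void onConversionFailure(String datanode, Exception e) {
        // Old style: the string is built even when DEBUG is disabled.
        // LOG.debug("Failed ... from DN: " + datanode, e);

        // New style: formatting is deferred; 'e' is the trailing throwable,
        // so its stack trace is still written to the log.
        LOG.debug("Failed while converting BlockData from DN: {}", datanode, e);
      }
    }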
@@ -19,9 +19,9 @@
 
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.hdds.annotation.InterfaceAudience;
-import org.apache.ozone.erasurecode.rawcoder.RawErasureCoderFactory;
 import org.apache.ozone.erasurecode.rawcoder.NativeRSRawErasureCoderFactory;
 import org.apache.ozone.erasurecode.rawcoder.NativeXORRawErasureCoderFactory;
+import org.apache.ozone.erasurecode.rawcoder.RawErasureCoderFactory;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -31,7 +31,6 @@
 import java.util.Map;
 import java.util.ServiceLoader;
 import java.util.Set;
-import java.util.stream.Collectors;
 
 /**
  * This class registers all coder implementations.
@@ -108,8 +107,8 @@ void updateCoders(Iterable<RawErasureCoderFactory> coderFactories) {
       String codecName = entry.getKey();
       List<RawErasureCoderFactory> coders = entry.getValue();
       coderNameMap.put(codecName, coders.stream().
-          map(RawErasureCoderFactory::getCoderName).
-          collect(Collectors.toList()).toArray(new String[0]));
+          map(RawErasureCoderFactory::getCoderName)
+          .toArray(String[]::new));
     }
   }
 
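As in the state machine's flush(), collect(Collectors.toList()).toArray(...) materializes a List only to copy it again; Stream.toArray with an array constructor reference builds the String[] in one pass. A sketch with placeholder data, where String::trim stands in for RawErasureCoderFactory::getCoderName:

    import java.util.Arrays;
    import java.util.List;

    public class CoderNames {
      public static void main(String[] args) {
        List<String> coders = Arrays.asList("rs-native ", " rs-java", "xor-native");

        // One pass, one allocation; no intermediate List.
        String[] names = coders.stream()
            .map(String::trim)
            .toArray(String[]::new);

        System.out.println(Arrays.toString(names));
      }
    }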
@@ -17,6 +17,20 @@
  */
 package org.apache.hadoop.hdds.utils;
 
+import org.apache.commons.fileupload.FileItemIterator;
+import org.apache.commons.fileupload.FileItemStream;
+import org.apache.commons.fileupload.servlet.ServletFileUpload;
+import org.apache.commons.fileupload.util.Streams;
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.hdds.server.OzoneAdmins;
+import org.apache.hadoop.hdds.utils.db.DBCheckpoint;
+import org.apache.hadoop.hdds.utils.db.DBStore;
+import org.apache.hadoop.ozone.lock.BootstrapStateHandler;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import javax.servlet.ServletException;
 import javax.servlet.http.HttpServlet;
 import javax.servlet.http.HttpServletRequest;
@@ -36,27 +50,11 @@
 import java.util.Objects;
 import java.util.stream.Collectors;
 
-import org.apache.commons.fileupload.FileItemIterator;
-import org.apache.commons.fileupload.FileItemStream;
-import org.apache.commons.fileupload.servlet.ServletFileUpload;
-import org.apache.commons.fileupload.util.Streams;
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.hdds.server.OzoneAdmins;
-import org.apache.hadoop.hdds.utils.db.DBCheckpoint;
-import org.apache.hadoop.hdds.utils.db.DBStore;
-
-import org.apache.commons.lang3.StringUtils;
-
 import static org.apache.hadoop.hdds.utils.HddsServerUtil.writeDBCheckpointToStream;
 import static org.apache.hadoop.ozone.OzoneConsts.OZONE_DB_CHECKPOINT_REQUEST_FLUSH;
 import static org.apache.hadoop.ozone.OzoneConsts.OZONE_DB_CHECKPOINT_REQUEST_TO_EXCLUDE_SST;
 import static org.apache.hadoop.ozone.OzoneConsts.ROCKSDB_SST_SUFFIX;
 
-import org.apache.hadoop.ozone.lock.BootstrapStateHandler;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 /**
  * Provides the current checkpoint Snapshot of the OM/SCM DB. (tar)
 */
@@ -287,7 +285,7 @@ private static String[] parseFormDataParameters(HttpServletRequest request) {
       LOG.warn("Exception occured during form data parsing {}", e.getMessage());
     }
 
-    return sstParam.size() == 0 ? null : sstParam.toArray(new String[0]);
+    return sstParam.isEmpty() ? null : sstParam.toArray(new String[0]);
   }
 
   /**
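For the list used here, isEmpty() and size() == 0 behave identically; the win is mainly readability, plus isEmpty() is the safer habit because size() is not O(1) for every collection. A tiny illustration with one such collection:

    import java.util.concurrent.ConcurrentLinkedQueue;

    public class EmptyCheck {
      public static void main(String[] args) {
        ConcurrentLinkedQueue<String> queue = new ConcurrentLinkedQueue<>();
        // isEmpty() inspects only the head; size() walks the entire queue.
        System.out.println(queue.isEmpty());   // true
        queue.add("x");
        System.out.println(queue.isEmpty());   // false
      }
    }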
@@ -691,7 +691,7 @@ private void checkIterationMoveResults() {
         moveSelectionToFutureMap.values();
     if (!futures.isEmpty()) {
       CompletableFuture<Void> allFuturesResult = CompletableFuture.allOf(
-          futures.toArray(new CompletableFuture[futures.size()]));
+          futures.toArray(new CompletableFuture[0]));
       try {
         allFuturesResult.get(config.getMoveTimeout().toMillis(),
             TimeUnit.MILLISECONDS);
@@ -346,8 +346,7 @@ protected List<DatanodeDetails> chooseDatanodesInternalLegacy(
       return chooseNodes(null, chosenNodes, mutableFavoredNodes,
           mutableUsedNodes, favorIndex, nodesRequired, mapSizeRequired);
     } else {
-      List<DatanodeDetails> mutableExcludedNodes = new ArrayList<>();
-      mutableExcludedNodes.addAll(excludedNodes);
+      List<DatanodeDetails> mutableExcludedNodes = new ArrayList<>(excludedNodes);
       // choose node to meet replication requirement
       // case 1: one excluded node, choose one on the same rack as the excluded
       // node, choose others on different racks.
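Constructing the list directly from excludedNodes sizes the backing array once, whereas the create-then-addAll version starts at the default capacity and may have to grow while copying. A small sketch with placeholder node names:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    public class CopyConstructor {
      public static void main(String[] args) {
        List<String> excludedNodes = Arrays.asList("dn-1", "dn-2", "dn-3");

        // Old: default capacity first, then addAll copies into it.
        List<String> twoSteps = new ArrayList<>();
        twoSteps.addAll(excludedNodes);

        // New: capacity is taken from the source collection up front.
        List<String> oneStep = new ArrayList<>(excludedNodes);

        System.out.println(twoSteps.equals(oneStep));   // true
      }
    }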
… (remaining changed files not shown)
