diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java index 0a38e660489..52f435dc826 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java @@ -166,8 +166,8 @@ private synchronized void connectToDatanode(DatanodeDetails dn) // port. int port = dn.getPort(DatanodeDetails.Port.Name.STANDALONE).getValue(); if (port == 0) { - port = config.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT); + port = config.getInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, + OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT); } // Add credential context to the client call diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java index aff0aa966a7..58a2153352a 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java @@ -83,8 +83,8 @@ public static XceiverClientRatis newXceiverClientRatis( org.apache.hadoop.hdds.scm.pipeline.Pipeline pipeline, ConfigurationSource ozoneConf, ClientTrustManager trustManager) { final String rpcType = ozoneConf - .get(ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY, - ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT); + .get(ScmConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_KEY, + ScmConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_DEFAULT); final RetryPolicy retryPolicy = RatisHelper.createRetryPolicy(ozoneConf); final GrpcTlsConfig tlsConfig = RatisHelper.createTlsClientConfig(new SecurityConfig(ozoneConf), trustManager); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java index ed897f898c0..e324a63d3ba 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java @@ -326,65 +326,65 @@ private static void addDeprecatedKeys() { new DeprecationDelta("hdds.datanode.replication.work.dir", OZONE_CONTAINER_COPY_WORKDIR), new DeprecationDelta("dfs.container.chunk.write.sync", - OzoneConfigKeys.DFS_CONTAINER_CHUNK_WRITE_SYNC_KEY), + OzoneConfigKeys.HDDS_CONTAINER_CHUNK_WRITE_SYNC_KEY), new DeprecationDelta("dfs.container.ipc", - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT), + OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT), new DeprecationDelta("dfs.container.ipc.random.port", - OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT), + OzoneConfigKeys.HDDS_CONTAINER_IPC_RANDOM_PORT), new DeprecationDelta("dfs.container.ratis.admin.port", - OzoneConfigKeys.DFS_CONTAINER_RATIS_ADMIN_PORT), + OzoneConfigKeys.HDDS_CONTAINER_RATIS_ADMIN_PORT), new DeprecationDelta("dfs.container.ratis.datanode.storage.dir", - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR), + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR), new DeprecationDelta("dfs.container.ratis.datastream.enabled", - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED), + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED), new DeprecationDelta("dfs.container.ratis.datastream.port", - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_PORT), + 
OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_PORT), new DeprecationDelta("dfs.container.ratis.datastream.random.port", - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT), + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT), new DeprecationDelta("dfs.container.ratis.enabled", - ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY), + ScmConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_KEY), new DeprecationDelta("dfs.container.ratis.ipc", - OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT), + OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT), new DeprecationDelta("dfs.container.ratis.ipc.random.port", - OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT), + OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT), new DeprecationDelta("dfs.container.ratis.leader.pending.bytes.limit", - ScmConfigKeys.DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT), + ScmConfigKeys.HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT), new DeprecationDelta("dfs.container.ratis.log.appender.queue.byte-limit", - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT), + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT), new DeprecationDelta("dfs.container.ratis.log.appender.queue.num-elements", - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS), + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS), new DeprecationDelta("dfs.container.ratis.log.purge.gap", - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP), + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_PURGE_GAP), new DeprecationDelta("dfs.container.ratis.log.queue.byte-limit", - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT), + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT), new DeprecationDelta("dfs.container.ratis.log.queue.num-elements", - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS), + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS), new DeprecationDelta("dfs.container.ratis.num.container.op.executors", - ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY), + ScmConfigKeys.HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY), new DeprecationDelta("dfs.container.ratis.num.write.chunk.threads.per.volume", - ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME), + ScmConfigKeys.HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME), new DeprecationDelta("dfs.container.ratis.replication.level", - ScmConfigKeys.DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY), + ScmConfigKeys.HDDS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY), new DeprecationDelta("dfs.container.ratis.rpc.type", - ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY), + ScmConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_KEY), new DeprecationDelta("dfs.container.ratis.segment.preallocated.size", - ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY), + ScmConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY), new DeprecationDelta("dfs.container.ratis.segment.size", - ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY), + ScmConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_SIZE_KEY), new DeprecationDelta("dfs.container.ratis.server.port", - OzoneConfigKeys.DFS_CONTAINER_RATIS_SERVER_PORT), + OzoneConfigKeys.HDDS_CONTAINER_RATIS_SERVER_PORT), new DeprecationDelta("dfs.container.ratis.statemachinedata.sync.retries", - ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES), + ScmConfigKeys.HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES), new DeprecationDelta("dfs.container.ratis.statemachinedata.sync.timeout", - ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT), + 
ScmConfigKeys.HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT), new DeprecationDelta("dfs.container.ratis.statemachine.max.pending.apply-transactions", - ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS), + ScmConfigKeys.HDDS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS), new DeprecationDelta("dfs.ratis.leader.election.minimum.timeout.duration", - ScmConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY), + ScmConfigKeys.HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY), new DeprecationDelta("dfs.ratis.server.retry-cache.timeout.duration", - ScmConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY), + ScmConfigKeys.HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY), new DeprecationDelta("dfs.ratis.snapshot.threshold", - ScmConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY) + ScmConfigKeys.HDDS_RATIS_SNAPSHOT_THRESHOLD_KEY) }); } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java index cb7f6f8a3b3..bcea4d0193b 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java @@ -234,8 +234,8 @@ public static RaftClient newRaftClient(RpcType rpcType, Pipeline pipeline, private static RpcType getRpcType(ConfigurationSource conf) { return SupportedRpcType.valueOfIgnoreCase(conf.get( - ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY, - ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT)); + ScmConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_KEY, + ScmConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_DEFAULT)); } public static BiFunction newRaftClient( diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java index e093a45af03..d8fdbc1063a 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java @@ -41,95 +41,95 @@ public final class ScmConfigKeys { public static final String OZONE_SCM_DB_DIRS_PERMISSIONS = "ozone.scm.db.dirs.permissions"; - public static final String DFS_CONTAINER_RATIS_ENABLED_KEY + public static final String HDDS_CONTAINER_RATIS_ENABLED_KEY = "hdds.container.ratis.enabled"; - public static final boolean DFS_CONTAINER_RATIS_ENABLED_DEFAULT + public static final boolean HDDS_CONTAINER_RATIS_ENABLED_DEFAULT = false; - public static final String DFS_CONTAINER_RATIS_RPC_TYPE_KEY + public static final String HDDS_CONTAINER_RATIS_RPC_TYPE_KEY = "hdds.container.ratis.rpc.type"; - public static final String DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT + public static final String HDDS_CONTAINER_RATIS_RPC_TYPE_DEFAULT = "GRPC"; public static final String - DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME + HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME = "hdds.container.ratis.num.write.chunk.threads.per.volume"; public static final int - DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT + HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT = 10; - public static final String DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY + public static final String HDDS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY = "hdds.container.ratis.replication.level"; public static final ReplicationLevel - DFS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT = ReplicationLevel.MAJORITY; - public static 
final String DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY + HDDS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT = ReplicationLevel.MAJORITY; + public static final String HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY = "hdds.container.ratis.num.container.op.executors"; - public static final int DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT + public static final int HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT = 10; - public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY = + public static final String HDDS_CONTAINER_RATIS_SEGMENT_SIZE_KEY = "hdds.container.ratis.segment.size"; - public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT = + public static final String HDDS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT = "64MB"; - public static final String DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY = + public static final String HDDS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY = "hdds.container.ratis.segment.preallocated.size"; public static final String - DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT = "4MB"; + HDDS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT = "4MB"; public static final String - DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT = + HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT = "hdds.container.ratis.statemachinedata.sync.timeout"; public static final TimeDuration - DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT = + HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT = TimeDuration.valueOf(10, TimeUnit.SECONDS); public static final String - DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES = + HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES = "hdds.container.ratis.statemachinedata.sync.retries"; public static final String - DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS = + HDDS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS = "hdds.container.ratis.statemachine.max.pending.apply-transactions"; // The default value of maximum number of pending state machine apply // transactions is kept same as default snapshot threshold. 
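Aside (illustrative, not part of the patch): the DeprecationDelta table registered in OzoneConfiguration.addDeprecatedKeys() earlier in this diff is what keeps existing ozone-site.xml files working after the rename. A minimal sketch of the expected behaviour, assuming the deltas feed Hadoop's standard Configuration key-deprecation mechanism; imports follow the packages of the files touched in this patch:

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.hdds.scm.ScmConfigKeys;

    public final class DeprecatedKeySketch {
      public static void main(String[] args) {
        OzoneConfiguration conf = new OzoneConfiguration();
        // Legacy property name, e.g. carried over from an old ozone-site.xml.
        conf.set("dfs.container.ratis.rpc.type", "NETTY");

        // Reading through the renamed constant ("hdds.container.ratis.rpc.type")
        // should resolve to the value set under the deprecated dfs.* name.
        String rpcType = conf.get(
            ScmConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_KEY,
            ScmConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_DEFAULT); // default "GRPC"
        System.out.println(rpcType); // expected: NETTY, with a deprecation warning logged
      }
    }
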
public static final int - DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS_DEFAULT = + HDDS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS_DEFAULT = 100000; - public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS = + public static final String HDDS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS = "hdds.container.ratis.log.queue.num-elements"; - public static final int DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT = + public static final int HDDS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT = 1024; - public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT = + public static final String HDDS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT = "hdds.container.ratis.log.queue.byte-limit"; - public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT = + public static final String HDDS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT = "4GB"; public static final String - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS = + HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS = "hdds.container.ratis.log.appender.queue.num-elements"; public static final int - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT = 1; - public static final String DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT = + HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT = 1; + public static final String HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT = "hdds.container.ratis.log.appender.queue.byte-limit"; public static final String - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT = "32MB"; - public static final String DFS_CONTAINER_RATIS_LOG_PURGE_GAP = + HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT = "32MB"; + public static final String HDDS_CONTAINER_RATIS_LOG_PURGE_GAP = "hdds.container.ratis.log.purge.gap"; // TODO: Set to 1024 once RATIS issue around purge is fixed. 
- public static final int DFS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT = + public static final int HDDS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT = 1000000; - public static final String DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT = + public static final String HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT = "hdds.container.ratis.leader.pending.bytes.limit"; public static final String - DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT = "1GB"; + HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT = "1GB"; - public static final String DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY = + public static final String HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY = "hdds.ratis.server.retry-cache.timeout.duration"; public static final TimeDuration - DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT = + HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT = TimeDuration.valueOf(600000, TimeUnit.MILLISECONDS); public static final String - DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY = + HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY = "hdds.ratis.leader.election.minimum.timeout.duration"; public static final TimeDuration - DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT = + HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT = TimeDuration.valueOf(5, TimeUnit.SECONDS); - public static final String DFS_RATIS_SNAPSHOT_THRESHOLD_KEY = + public static final String HDDS_RATIS_SNAPSHOT_THRESHOLD_KEY = "hdds.ratis.snapshot.threshold"; - public static final long DFS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT = 100000; + public static final long HDDS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT = 100000; // TODO : this is copied from OzoneConsts, may need to move to a better place public static final String OZONE_SCM_CHUNK_SIZE_KEY = "ozone.scm.chunk.size"; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java index 30071100093..0080686575b 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java @@ -36,9 +36,9 @@ @InterfaceAudience.Public @InterfaceStability.Unstable public final class OzoneConfigKeys { - public static final String DFS_CONTAINER_IPC_PORT = + public static final String HDDS_CONTAINER_IPC_PORT = "hdds.container.ipc.port"; - public static final int DFS_CONTAINER_IPC_PORT_DEFAULT = 9859; + public static final int HDDS_CONTAINER_IPC_PORT_DEFAULT = 9859; public static final String OZONE_METADATA_DIRS = "ozone.metadata.dirs"; @@ -56,52 +56,52 @@ public final class OzoneConfigKeys { * so that a mini cluster is able to launch multiple containers on a node. * * When set to false (default), the container port will be specified as - * {@link #DFS_CONTAINER_IPC_PORT} and the default value will be specified - * as {@link #DFS_CONTAINER_IPC_PORT_DEFAULT}. + * {@link #HDDS_CONTAINER_IPC_PORT} and the default value will be specified + * as {@link #HDDS_CONTAINER_IPC_PORT_DEFAULT}. 
*/ - public static final String DFS_CONTAINER_IPC_RANDOM_PORT = + public static final String HDDS_CONTAINER_IPC_RANDOM_PORT = "hdds.container.ipc.random.port"; - public static final boolean DFS_CONTAINER_IPC_RANDOM_PORT_DEFAULT = + public static final boolean HDDS_CONTAINER_IPC_RANDOM_PORT_DEFAULT = false; - public static final String DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT = + public static final String HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT = "hdds.container.ratis.datastream.random.port"; public static final boolean - DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT_DEFAULT = + HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT_DEFAULT = false; - public static final String DFS_CONTAINER_CHUNK_WRITE_SYNC_KEY = + public static final String HDDS_CONTAINER_CHUNK_WRITE_SYNC_KEY = "hdds.container.chunk.write.sync"; - public static final boolean DFS_CONTAINER_CHUNK_WRITE_SYNC_DEFAULT = false; + public static final boolean HDDS_CONTAINER_CHUNK_WRITE_SYNC_DEFAULT = false; /** * Ratis Port where containers listen to. */ - public static final String DFS_CONTAINER_RATIS_IPC_PORT = + public static final String HDDS_CONTAINER_RATIS_IPC_PORT = "hdds.container.ratis.ipc.port"; - public static final int DFS_CONTAINER_RATIS_IPC_PORT_DEFAULT = 9858; + public static final int HDDS_CONTAINER_RATIS_IPC_PORT_DEFAULT = 9858; /** * Ratis Port where containers listen to admin requests. */ - public static final String DFS_CONTAINER_RATIS_ADMIN_PORT = + public static final String HDDS_CONTAINER_RATIS_ADMIN_PORT = "hdds.container.ratis.admin.port"; - public static final int DFS_CONTAINER_RATIS_ADMIN_PORT_DEFAULT = 9857; + public static final int HDDS_CONTAINER_RATIS_ADMIN_PORT_DEFAULT = 9857; /** * Ratis Port where containers listen to server-to-server requests. */ - public static final String DFS_CONTAINER_RATIS_SERVER_PORT = + public static final String HDDS_CONTAINER_RATIS_SERVER_PORT = "hdds.container.ratis.server.port"; - public static final int DFS_CONTAINER_RATIS_SERVER_PORT_DEFAULT = 9856; + public static final int HDDS_CONTAINER_RATIS_SERVER_PORT_DEFAULT = 9856; /** * Ratis Port where containers listen to datastream requests. */ - public static final String DFS_CONTAINER_RATIS_DATASTREAM_ENABLED + public static final String HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED = "hdds.container.ratis.datastream.enabled"; - public static final boolean DFS_CONTAINER_RATIS_DATASTREAM_ENABLED_DEFAULT + public static final boolean HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED_DEFAULT = false; - public static final String DFS_CONTAINER_RATIS_DATASTREAM_PORT + public static final String HDDS_CONTAINER_RATIS_DATASTREAM_PORT = "hdds.container.ratis.datastream.port"; - public static final int DFS_CONTAINER_RATIS_DATASTREAM_PORT_DEFAULT + public static final int HDDS_CONTAINER_RATIS_DATASTREAM_PORT_DEFAULT = 9855; /** @@ -133,9 +133,9 @@ public final class OzoneConfigKeys { * When set to true, allocate a random free port for ozone container, so that * a mini cluster is able to launch multiple containers on a node. 
*/ - public static final String DFS_CONTAINER_RATIS_IPC_RANDOM_PORT = + public static final String HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT = "hdds.container.ratis.ipc.random.port"; - public static final boolean DFS_CONTAINER_RATIS_IPC_RANDOM_PORT_DEFAULT = + public static final boolean HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT_DEFAULT = false; public static final String OZONE_METADATA_STORE_ROCKSDB_STATISTICS = @@ -325,97 +325,97 @@ public final class OzoneConfigKeys { public static final int OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL_DEFAULT = 10; - public static final String DFS_CONTAINER_RATIS_ENABLED_KEY - = ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY; - public static final boolean DFS_CONTAINER_RATIS_ENABLED_DEFAULT - = ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT; - public static final String DFS_CONTAINER_RATIS_RPC_TYPE_KEY - = ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY; - public static final String DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT - = ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT; + public static final String HDDS_CONTAINER_RATIS_ENABLED_KEY + = ScmConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_KEY; + public static final boolean HDDS_CONTAINER_RATIS_ENABLED_DEFAULT + = ScmConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_DEFAULT; + public static final String HDDS_CONTAINER_RATIS_RPC_TYPE_KEY + = ScmConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_KEY; + public static final String HDDS_CONTAINER_RATIS_RPC_TYPE_DEFAULT + = ScmConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_DEFAULT; public static final String - DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_KEY - = ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME; + HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_KEY + = ScmConfigKeys.HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME; public static final int - DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT + HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT = ScmConfigKeys. 
- DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT; - public static final String DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY - = ScmConfigKeys.DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY; + HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT; + public static final String HDDS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY + = ScmConfigKeys.HDDS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY; public static final ReplicationLevel - DFS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT - = ScmConfigKeys.DFS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT; - public static final String DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY - = ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY; - public static final int DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT - = ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT; - public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY - = ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY; - public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT - = ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT; - public static final String DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY - = ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY; + HDDS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT + = ScmConfigKeys.HDDS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT; + public static final String HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY + = ScmConfigKeys.HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY; + public static final int HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT + = ScmConfigKeys.HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT; + public static final String HDDS_CONTAINER_RATIS_SEGMENT_SIZE_KEY + = ScmConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_SIZE_KEY; + public static final String HDDS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT + = ScmConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT; + public static final String HDDS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY + = ScmConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY; public static final String - DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT = - ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT; + HDDS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT = + ScmConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT; // config settings to enable stateMachineData write timeout public static final String - DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT = - ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT; + HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT = + ScmConfigKeys.HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT; public static final TimeDuration - DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT = - ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT; + HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT = + ScmConfigKeys.HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT; - public static final String DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR = + public static final String HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR = "hdds.container.ratis.datanode.storage.dir"; - public static final String DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY = - ScmConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY; + public static final String HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY = + ScmConfigKeys.HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY; public static final TimeDuration - 
DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT = - ScmConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT; + HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT = + ScmConfigKeys.HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT; public static final String - DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES = - ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES; - public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS; - public static final int DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT; - public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT; - public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT; + HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES = + ScmConfigKeys.HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES; + public static final String HDDS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS = + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS; + public static final int HDDS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT = + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT; + public static final String HDDS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT = + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT; + public static final String HDDS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT = + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT; public static final String - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS; + HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS = + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS; public static final int - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT; - public static final String DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT; + HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT = + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT; + public static final String HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT = + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT; public static final String - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT; - public static final String DFS_CONTAINER_RATIS_LOG_PURGE_GAP = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP; - public static final int DFS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT; - public static final String DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT = - ScmConfigKeys.DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT; + HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT = + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT; + public static final String HDDS_CONTAINER_RATIS_LOG_PURGE_GAP = + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_PURGE_GAP; + public static final int HDDS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT = + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT; + public static final String HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT = 
+ ScmConfigKeys.HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT; public static final String - DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT = - ScmConfigKeys.DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT; + HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT = + ScmConfigKeys.HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT; public static final String - DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY = - ScmConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY; + HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY = + ScmConfigKeys.HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY; public static final TimeDuration - DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT = - ScmConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT; - public static final String DFS_RATIS_SNAPSHOT_THRESHOLD_KEY = - ScmConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY; - public static final long DFS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT = - ScmConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT; + HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT = + ScmConfigKeys.HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT; + public static final String HDDS_RATIS_SNAPSHOT_THRESHOLD_KEY = + ScmConfigKeys.HDDS_RATIS_SNAPSHOT_THRESHOLD_KEY; + public static final long HDDS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT = + ScmConfigKeys.HDDS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT; public static final String HDDS_DATANODE_PLUGINS_KEY = "hdds.datanode.plugins"; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java index 009e6396e0d..346b05ebb4c 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java @@ -99,11 +99,11 @@ public XceiverServerGrpc(DatanodeDetails datanodeDetails, this.id = datanodeDetails.getUuid(); this.datanodeDetails = datanodeDetails; - this.port = conf.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT); + this.port = conf.getInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, + OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT); - if (conf.getBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, - OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT_DEFAULT)) { + if (conf.getBoolean(OzoneConfigKeys.HDDS_CONTAINER_IPC_RANDOM_PORT, + OzoneConfigKeys.HDDS_CONTAINER_IPC_RANDOM_PORT_DEFAULT)) { this.port = 0; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java index e3c2913ec5a..fdbe8c981cb 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java @@ -219,8 +219,8 @@ public ContainerStateMachine(RaftGroupId gid, this.writeChunkFutureMap = new ConcurrentHashMap<>(); applyTransactionCompletionMap = new ConcurrentHashMap<>(); long 
pendingRequestsBytesLimit = (long)conf.getStorageSize( - OzoneConfigKeys.DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT, - OzoneConfigKeys.DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT, StorageUnit.BYTES); // cache with FIFO eviction, and if element not found, this needs // to be obtained from disk for slow follower @@ -238,13 +238,13 @@ public ContainerStateMachine(RaftGroupId gid, this.container2BCSIDMap = new ConcurrentHashMap<>(); final int numContainerOpExecutors = conf.getInt( - OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY, - OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT); int maxPendingApplyTransactions = conf.getInt( ScmConfigKeys. - DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS, + HDDS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS, ScmConfigKeys. - DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS_DEFAULT); + HDDS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS_DEFAULT); applyTransactionSemaphore = new Semaphore(maxPendingApplyTransactions); stateMachineHealthy = new AtomicBoolean(true); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java index fcc611ea3f1..53ae98f50c0 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java @@ -110,12 +110,12 @@ import org.slf4j.LoggerFactory; import static org.apache.hadoop.hdds.DatanodeVersion.SEPARATE_RATIS_PORTS_AVAILABLE; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_SIZE_KEY; import static org.apache.ratis.util.Preconditions.assertTrue; /** @@ -189,8 +189,8 @@ private XceiverServerRatis(DatanodeDetails dd, ratisServerConfig = 
conf.getObject(DatanodeRatisServerConfig.class); assignPorts(); this.streamEnable = conf.getBoolean( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED, - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED_DEFAULT); RaftProperties serverProperties = newRaftProperties(); this.context = context; this.dispatcher = dispatcher; @@ -217,17 +217,17 @@ private XceiverServerRatis(DatanodeDetails dd, private void assignPorts() { clientPort = determinePort( - OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT, - OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT_DEFAULT); if (DatanodeVersion.fromProtoValue(datanodeDetails.getInitialVersion()) .compareTo(SEPARATE_RATIS_PORTS_AVAILABLE) >= 0) { adminPort = determinePort( - OzoneConfigKeys.DFS_CONTAINER_RATIS_ADMIN_PORT, - OzoneConfigKeys.DFS_CONTAINER_RATIS_ADMIN_PORT_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_ADMIN_PORT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_ADMIN_PORT_DEFAULT); serverPort = determinePort( - OzoneConfigKeys.DFS_CONTAINER_RATIS_SERVER_PORT, - OzoneConfigKeys.DFS_CONTAINER_RATIS_SERVER_PORT_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_SERVER_PORT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_SERVER_PORT_DEFAULT); } else { adminPort = clientPort; serverPort = clientPort; @@ -236,8 +236,8 @@ private void assignPorts() { private int determinePort(String key, int defaultValue) { boolean randomPort = conf.getBoolean( - OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, - OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT_DEFAULT); return randomPort ? 0 : conf.getInt(key, defaultValue); } @@ -249,14 +249,14 @@ private ContainerStateMachine getStateMachine(RaftGroupId gid) { private void setUpRatisStream(RaftProperties properties) { // set the datastream config if (conf.getBoolean( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, OzoneConfigKeys. - DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT_DEFAULT)) { + HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT_DEFAULT)) { dataStreamPort = 0; } else { dataStreamPort = conf.getInt( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_PORT, - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_PORT_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_PORT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_PORT_DEFAULT); } RatisHelper.enableNettyStreaming(properties); NettyConfigKeys.DataStream.setPort(properties, dataStreamPort); @@ -327,8 +327,8 @@ public RaftProperties newRaftProperties() { } long snapshotThreshold = - conf.getLong(OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY, - OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT); + conf.getLong(OzoneConfigKeys.HDDS_RATIS_SNAPSHOT_THRESHOLD_KEY, + OzoneConfigKeys.HDDS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT); RaftServerConfigKeys.Snapshot. setAutoTriggerEnabled(properties, true); RaftServerConfigKeys.Snapshot. 
@@ -338,11 +338,11 @@ public RaftProperties newRaftProperties() { setPendingRequestsLimits(properties); int logQueueNumElements = - conf.getInt(OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS, - OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT); + conf.getInt(OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT); final long logQueueByteLimit = (long) conf.getStorageSize( - OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT, - OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT, StorageUnit.BYTES); RaftServerConfigKeys.Log.setQueueElementLimit( properties, logQueueNumElements); @@ -353,8 +353,8 @@ public RaftProperties newRaftProperties() { false); int purgeGap = conf.getInt( - OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP, - OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_PURGE_GAP, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT); RaftServerConfigKeys.Log.setPurgeGap(properties, purgeGap); //Set the number of Snapshots Retained. @@ -375,12 +375,12 @@ private void setRatisLeaderElectionTimeout(RaftProperties properties) { long duration; TimeUnit leaderElectionMinTimeoutUnit = OzoneConfigKeys. - DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT + HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT .getUnit(); duration = conf.getTimeDuration( - OzoneConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, + OzoneConfigKeys.HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, OzoneConfigKeys. - DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT + HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT .getDuration(), leaderElectionMinTimeoutUnit); final TimeDuration leaderElectionMinTimeout = TimeDuration.valueOf(duration, leaderElectionMinTimeoutUnit); @@ -396,11 +396,11 @@ private void setTimeoutForRetryCache(RaftProperties properties) { TimeUnit timeUnit; long duration; timeUnit = - OzoneConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT + OzoneConfigKeys.HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT .getUnit(); duration = conf.getTimeDuration( - OzoneConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY, - OzoneConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT + OzoneConfigKeys.HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY, + OzoneConfigKeys.HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT .getDuration(), timeUnit); final TimeDuration retryCacheTimeout = TimeDuration.valueOf(duration, timeUnit); @@ -410,8 +410,8 @@ private void setTimeoutForRetryCache(RaftProperties properties) { private long setRaftSegmentPreallocatedSize(RaftProperties properties) { final long raftSegmentPreallocatedSize = (long) conf.getStorageSize( - OzoneConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY, - OzoneConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT, StorageUnit.BYTES); RaftServerConfigKeys.Log.setPreallocatedSize(properties, SizeInBytes.valueOf(raftSegmentPreallocatedSize)); @@ -420,23 +420,23 @@ private long setRaftSegmentPreallocatedSize(RaftProperties properties) { private void 
setRaftSegmentAndWriteBufferSize(RaftProperties properties) { final int logAppenderQueueNumElements = conf.getInt( - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS, - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT); + HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS, + HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT); final int logAppenderQueueByteLimit = (int) conf.getStorageSize( - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT, - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT, + HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT, + HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT, StorageUnit.BYTES); final long raftSegmentSize = (long) conf.getStorageSize( - DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY, - DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT, + HDDS_CONTAINER_RATIS_SEGMENT_SIZE_KEY, + HDDS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT, StorageUnit.BYTES); final long raftSegmentBufferSize = logAppenderQueueByteLimit + 8; assertTrue(raftSegmentBufferSize <= raftSegmentSize, - () -> DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT + " = " + () -> HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT + " = " + logAppenderQueueByteLimit - + " must be <= (" + DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY + " - 8" + + " must be <= (" + HDDS_CONTAINER_RATIS_SEGMENT_SIZE_KEY + " - 8" + " = " + (raftSegmentSize - 8) + ")"); RaftServerConfigKeys.Log.Appender.setBufferElementLimit(properties, @@ -454,11 +454,11 @@ private void setStateMachineDataConfigurations(RaftProperties properties) { RaftServerConfigKeys.Log.StateMachineData.setSync(properties, true); TimeUnit timeUnit = OzoneConfigKeys. - DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT.getUnit(); + HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT.getUnit(); long duration = conf.getTimeDuration( - OzoneConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT, OzoneConfigKeys. 
- DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT + HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT .getDuration(), timeUnit); final TimeDuration dataSyncTimeout = TimeDuration.valueOf(duration, timeUnit); @@ -479,7 +479,7 @@ private void setStateMachineDataConfigurations(RaftProperties properties) { int syncTimeoutRetryDefault = (int) nodeFailureTimeoutMs / dataSyncTimeout.toIntExact(TimeUnit.MILLISECONDS); int numSyncRetries = conf.getInt( - OzoneConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES, syncTimeoutRetryDefault); RaftServerConfigKeys.Log.StateMachineData.setSyncTimeoutRetry(properties, numSyncRetries); @@ -507,8 +507,8 @@ private void setStateMachineDataConfigurations(RaftProperties properties) { private RpcType setRpcType(RaftProperties properties) { final String rpcType = conf.get( - OzoneConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY, - OzoneConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_KEY, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_DEFAULT); final RpcType rpc = SupportedRpcType.valueOfIgnoreCase(rpcType); RatisHelper.setRpcType(properties, rpc); return rpc; @@ -517,8 +517,8 @@ private RpcType setRpcType(RaftProperties properties) { private void setPendingRequestsLimits(RaftProperties properties) { long pendingRequestsBytesLimit = (long) conf.getStorageSize( - OzoneConfigKeys.DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT, - OzoneConfigKeys.DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT, StorageUnit.BYTES); final int pendingRequestsMegaBytesLimit = HddsUtils.roundupMb(pendingRequestsBytesLimit); @@ -990,9 +990,9 @@ private static List createChunkExecutors( // TODO create single pool with N threads if using non-incremental chunks final int threadCountPerDisk = conf.getInt( OzoneConfigKeys - .DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_KEY, + .HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_KEY, OzoneConfigKeys - .DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT); + .HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT); final int numberOfDisks = HddsServerUtil.getDatanodeStorageDirs(conf).size(); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java index c9d6672ee88..e35c6345683 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java @@ -164,9 +164,9 @@ public KeyValueHandler(ConfigurationSource config, // Requests. 
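Aside (illustrative, not part of the patch): both createChunkExecutors() above and the KeyValueHandler constructor below size their write-chunk thread pools from the renamed per-volume key. A hedged sketch of that calculation; only the two config reads mirror code in this diff, the pool construction itself is an assumption:

    // Per-volume thread count (default 10) times the number of datanode data dirs.
    final int threadCountPerDisk = conf.getInt(
        OzoneConfigKeys.HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_KEY,
        OzoneConfigKeys.HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT);
    final int numberOfDisks =
        HddsServerUtil.getDatanodeStorageDirs(conf).size();
    // Assumed sizing: one fixed pool covering all volumes.
    java.util.concurrent.ExecutorService writeChunkExecutor =
        java.util.concurrent.Executors.newFixedThreadPool(
            threadCountPerDisk * numberOfDisks);
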
final int threadCountPerDisk = conf.getInt( OzoneConfigKeys - .DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_KEY, + .HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_KEY, OzoneConfigKeys - .DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT); + .HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT); final int numberOfDisks = HddsServerUtil.getDatanodeStorageDirs(conf).size(); containerCreationLocks = Striped.lazyWeakLock( diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java index 1267ed78689..288a2d3e331 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java @@ -51,8 +51,8 @@ private ChunkManagerFactory() { public static ChunkManager createChunkManager(ConfigurationSource conf, BlockManager manager, VolumeSet volSet) { boolean sync = - conf.getBoolean(OzoneConfigKeys.DFS_CONTAINER_CHUNK_WRITE_SYNC_KEY, - OzoneConfigKeys.DFS_CONTAINER_CHUNK_WRITE_SYNC_DEFAULT); + conf.getBoolean(OzoneConfigKeys.HDDS_CONTAINER_CHUNK_WRITE_SYNC_KEY, + OzoneConfigKeys.HDDS_CONTAINER_CHUNK_WRITE_SYNC_DEFAULT); boolean persist = conf.getBoolean(HDDS_CONTAINER_PERSISTDATA, HDDS_CONTAINER_PERSISTDATA_DEFAULT); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java index 33bc4a85166..c63f82025e0 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java @@ -344,7 +344,7 @@ public static ContainerController getEmptyContainerController() { public static XceiverServerRatis newXceiverServerRatis( DatanodeDetails dn, OzoneConfiguration conf) throws IOException { - conf.setInt(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT, + conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT, dn.getPort(DatanodeDetails.Port.Name.RATIS).getValue()); return XceiverServerRatis.newXceiverServerRatis(dn, conf, diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java index 7917a4ce55c..21775245efb 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java @@ -155,8 +155,8 @@ public static HddsProtos.ReplicationFactor getReplicationFactor( private static boolean isUseRatis(ConfigurationSource c) { return c.getBoolean( - ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY, - ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT); + ScmConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_KEY, + ScmConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_DEFAULT); } } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java index 5738f5c1106..e1e1ee9172a 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java @@ -82,12 +82,12 @@ void setUp() throws Exception { conf = SCMTestUtils.getConf(testRoot); conf.setTimeDuration(OZONE_SCM_HEARTBEAT_RPC_TIMEOUT, 500, TimeUnit.MILLISECONDS); - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, true); - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED, + conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); + conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_IPC_RANDOM_PORT, true); + conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, true); conf.setBoolean( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); serverAddresses = new ArrayList<>(); scmServers = new ArrayList<>(); mockServers = new ArrayList<>(); @@ -200,7 +200,7 @@ public void testDatanodeStateContext() throws IOException, DatanodeDetails datanodeDetails = getNewDatanodeDetails(); DatanodeDetails.Port port = DatanodeDetails.newPort( DatanodeDetails.Port.Name.STANDALONE, - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT); datanodeDetails.setPort(port); ContainerUtils.writeDatanodeDetailsTo(datanodeDetails, idPath, conf); try (DatanodeStateMachine stateMachine = @@ -327,7 +327,7 @@ public void testDatanodeStateMachineWithIdWriteFail() throws Exception { DatanodeDetails datanodeDetails = getNewDatanodeDetails(); DatanodeDetails.Port port = DatanodeDetails.newPort( DatanodeDetails.Port.Name.STANDALONE, - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT); datanodeDetails.setPort(port); try (DatanodeStateMachine stateMachine = diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestDatanodeConfiguration.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestDatanodeConfiguration.java index 565853c22dd..657afc38874 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestDatanodeConfiguration.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestDatanodeConfiguration.java @@ -178,7 +178,7 @@ public void isCreatedWitDefaultValues() { public void testConf() throws Exception { final OzoneConfiguration conf = new OzoneConfiguration(); final String dir = "dummy/dir"; - conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); + conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); final DatanodeRatisServerConfig ratisConf = conf.getObject( DatanodeRatisServerConfig.class); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestPeriodicVolumeChecker.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestPeriodicVolumeChecker.java index 3859cd47c9b..46b8cc6772e 100644 --- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestPeriodicVolumeChecker.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestPeriodicVolumeChecker.java @@ -59,7 +59,7 @@ public class TestPeriodicVolumeChecker { public void setup() throws IOException { conf = new OzoneConfiguration(); conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, folder.toString()); - conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, + conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, Files.createDirectory(folder.resolve("VolumeCheckerDir")).toString()); } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java index 1159d4277c7..68e687fefad 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java @@ -82,7 +82,7 @@ public void setup() throws Exception { volumes.add(volume1); volumes.add(volume2); conf.set(DFSConfigKeysLegacy.DFS_DATANODE_DATA_DIR_KEY, dataDirKey); - conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, + conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dataDirKey); initializeVolumeSet(); } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java index e3c610bfe47..eb1f7979f8b 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java @@ -228,7 +228,7 @@ private OzoneConfiguration getConfWithDataNodeDirs(int numDirs) { for (int i = 0; i < numDirs; ++i) { metaDirs.add(new File(dir, randomAlphanumeric(10)).toString()); } - ozoneConf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, + ozoneConf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, String.join(",", metaDirs)); final List dbDirs = new ArrayList<>(); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java index 7f38eab785b..8fd7b6280b6 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java @@ -378,7 +378,7 @@ public void testMultipleContainerReader(ContainerTestVersionInfo versionInfo) BlockUtils.shutdownCache(conf); conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, datanodeDirs.toString()); - conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, + conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, datanodeDirs.toString()); MutableVolumeSet volumeSets = new MutableVolumeSet(datanodeId.toString(), clusterId, conf, null, diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java index 497418dcdcb..07804c2a20b 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java @@ -165,7 +165,7 @@ public void testBuildNodeReport(ContainerTestVersionInfo versionInfo) throws Exception { initTest(versionInfo); String path = folder.toString(); - conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, + conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, String.join(",", path + "/ratis1", path + "/ratis2", path + "ratis3")); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java index 383e76dcc72..23b7da26346 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java @@ -99,9 +99,9 @@ private void initTests(Boolean enable) throws Exception { conf.setBoolean(DatanodeConfiguration.CONTAINER_SCHEMA_V3_ENABLED, schemaV3Enabled); conf.setBoolean( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED, true); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, true); conf.setBoolean( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); setup(); } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java index 70d394e73b3..4fae3686c93 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java @@ -382,14 +382,14 @@ public static int getLogWarnInterval(ConfigurationSource conf) { * @return port number. 
*/ public static int getContainerPort(ConfigurationSource conf) { - return conf.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT); + return conf.getInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, + OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT); } public static Collection getOzoneDatanodeRatisDirectory( ConfigurationSource conf) { Collection rawLocations = conf.getTrimmedStringCollection( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR); if (rawLocations.isEmpty()) { rawLocations = new ArrayList<>(1); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java index b241ac0f2d2..f3a303cad73 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java @@ -186,7 +186,7 @@ public void testContainerPlacementCapacity() throws IOException, testDir.getAbsolutePath()); conf.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY, SCMContainerPlacementCapacity.class, PlacementPolicy.class); - conf.setBoolean(ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY, true); + conf.setBoolean(ScmConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_KEY, true); SCMNodeManager scmNodeManager = createNodeManager(conf); containerManager = createContainerManager(); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java index 58f65df8fd8..c74e274d3d7 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java @@ -151,10 +151,10 @@ public void testGetVersionTask() throws Exception { try (EndpointStateMachine rpcEndPoint = createEndpoint(ozoneConf, serverAddress, 1000)) { ozoneConf.setBoolean( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, true); ozoneConf.setBoolean( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); OzoneContainer ozoneContainer = new OzoneContainer(dnDetails, ozoneConf, ContainerTestUtils.getMockContext(dnDetails, ozoneConf)); rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION); @@ -179,9 +179,9 @@ public void testGetVersionTask() throws Exception { */ @Test public void testDeletedContainersClearedOnStartup() throws Exception { - ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, + ozoneConf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_IPC_RANDOM_PORT, true); - ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, + ozoneConf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); ozoneConf.setFromObject(new ReplicationConfig().setPort(0)); try (EndpointStateMachine rpcEndPoint = createEndpoint(ozoneConf, @@ -217,12 +217,12 @@ public void testDeletedContainersClearedOnStartup() throws Exception { @Test public void testCheckVersionResponse() throws Exception { - ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, + 
ozoneConf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_IPC_RANDOM_PORT, true); - ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, + ozoneConf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); ozoneConf.setBoolean( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); ozoneConf.setFromObject(new ReplicationConfig().setPort(0)); try (EndpointStateMachine rpcEndPoint = createEndpoint(ozoneConf, serverAddress, 1000)) { @@ -267,7 +267,7 @@ public void testCheckVersionResponse() throws Exception { */ @Test public void testDnLayoutVersionFile() throws Exception { - ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, + ozoneConf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); try (EndpointStateMachine rpcEndPoint = createEndpoint(ozoneConf, serverAddress, 1000)) { @@ -579,7 +579,7 @@ private StateContext heartbeatTaskHelper( // Mini Ozone cluster will not come up if the port is not true, since // Ratis will exit if the server port cannot be bound. We can remove this // hard coding once we fix the Ratis default behaviour. - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); + conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); // Create a datanode state machine for stateConext used by endpoint task try (DatanodeStateMachine stateMachine = new DatanodeStateMachine( diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java index 6a5550e9fbd..499d58b1ff2 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java @@ -98,8 +98,8 @@ public ContainerOperationClient(OzoneConfiguration conf) throws IOException { containerSizeB = (int) conf.getStorageSize(OZONE_SCM_CONTAINER_SIZE, OZONE_SCM_CONTAINER_SIZE_DEFAULT, StorageUnit.BYTES); boolean useRatis = conf.getBoolean( - ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY, - ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT); + ScmConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_KEY, + ScmConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_DEFAULT); if (useRatis) { replicationFactor = HddsProtos.ReplicationFactor.THREE; replicationType = HddsProtos.ReplicationType.RATIS; diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java index 6469a631768..f7f49fec3d1 100644 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java @@ -261,10 +261,10 @@ protected void initializeConfiguration() throws IOException { TimeUnit.SECONDS); conf.setInt( OzoneConfigKeys - .DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_KEY, + .HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_KEY, 4); conf.setInt( - OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY, 2); conf.setInt(OzoneConfigKeys.OZONE_CONTAINER_CACHE_SIZE, 2); 
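For reference, the chaos-cluster keys renamed in the hunk above are ordinary OzoneConfiguration settings; a minimal sketch of the same tuning applied on its own (the class name is illustrative, and the values 4 and 2 simply mirror the hunk):

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConfigKeys;

public final class ChaosRatisTuningSketch {
  private ChaosRatisTuningSketch() {
  }

  /** Apply the write-chunk thread and container-op executor tuning under the new HDDS_* keys. */
  public static OzoneConfiguration tune(OzoneConfiguration conf) {
    // Same settings as MiniOzoneChaosCluster#initializeConfiguration above,
    // expressed against the renamed constants.
    conf.setInt(
        OzoneConfigKeys.HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_KEY, 4);
    conf.setInt(
        OzoneConfigKeys.HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY, 2);
    conf.setInt(OzoneConfigKeys.OZONE_CONTAINER_CACHE_SIZE, 2);
    return conf;
  }
}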
ReplicationManagerConfiguration replicationConf = @@ -273,8 +273,8 @@ protected void initializeConfiguration() throws IOException { replicationConf.setEventTimeout(Duration.ofSeconds(20)); replicationConf.setDatanodeTimeoutOffset(0); conf.setFromObject(replicationConf); - conf.setInt(OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY, 100); - conf.setInt(OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP, 100); + conf.setInt(OzoneConfigKeys.HDDS_RATIS_SNAPSHOT_THRESHOLD_KEY, 100); + conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_PURGE_GAP, 100); conf.setInt(OMConfigKeys.OZONE_OM_RATIS_LOG_PURGE_GAP, 100); conf.setInt(OMConfigKeys. diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithStreaming.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithStreaming.java index 6ec6a32d4fb..059f7b3e03d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithStreaming.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithStreaming.java @@ -45,7 +45,7 @@ import org.slf4j.LoggerFactory; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_DATASTREAM_AUTO_THRESHOLD; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_DATASTREAM_ENABLED; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME; @@ -83,7 +83,7 @@ public static void init() throws Exception { final int blockSize = 2 * maxFlushSize; final BucketLayout layout = BucketLayout.FILE_SYSTEM_OPTIMIZED; - CONF.setBoolean(DFS_CONTAINER_RATIS_DATASTREAM_ENABLED, true); + CONF.setBoolean(HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, true); CONF.setBoolean(OZONE_FS_DATASTREAM_ENABLED, true); CONF.set(OZONE_FS_DATASTREAM_AUTO_THRESHOLD, AUTO_THRESHOLD + "B"); CONF.setBoolean(OZONE_OM_RATIS_ENABLE_KEY, true); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestRatisPipelineLeader.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestRatisPipelineLeader.java index 6f0bd40dde0..2829ba234ca 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestRatisPipelineLeader.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestRatisPipelineLeader.java @@ -32,7 +32,7 @@ import org.apache.hadoop.ozone.container.common.transport.server.ratis.XceiverServerRatis; import org.apache.ozone.test.GenericTestUtils; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; @@ -132,7 +132,7 @@ public void testLeaderIdAfterLeaderChange() throws Exception { dnToStop.get().stop(); // wait long enough based on leader election min timeout Thread.sleep(4000 * conf.getTimeDuration( - DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, + HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, 5, TimeUnit.SECONDS)); 
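The wait in the hunk above is derived from the leader-election minimum timeout, now read under the HDDS_-prefixed key; a small sketch of that lookup (the helper name is illustrative, the 5-second fallback matches the test):

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;

import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY;

public final class LeaderElectionTimeoutSketch {
  private LeaderElectionTimeoutSketch() {
  }

  /** Resolve the minimum leader-election timeout in seconds, defaulting to 5 s. */
  public static long minTimeoutSeconds(OzoneConfiguration conf) {
    // getTimeDuration accepts values with units (e.g. "1s", "500ms") and
    // converts them to the requested TimeUnit.
    return conf.getTimeDuration(
        HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, 5, TimeUnit.SECONDS);
  }
}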
GenericTestUtils.waitFor(() -> { try { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java index 5338cb8a0cc..c084a72a3c7 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java @@ -44,11 +44,11 @@ public interface RatisTestHelper { Logger LOG = LoggerFactory.getLogger(RatisTestHelper.class); static void initRatisConf(RpcType rpc, OzoneConfiguration conf) { - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY, true); - conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY, rpc.name()); + conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_KEY, true); + conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_KEY, rpc.name()); conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 1, TimeUnit.SECONDS); conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 30, TimeUnit.SECONDS); - LOG.info("{} = {}", OzoneConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY, + LOG.info("{} = {}", OzoneConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_KEY, rpc.name()); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java index 020f8623c4e..275061ef784 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java @@ -48,7 +48,7 @@ import static org.apache.hadoop.hdds.protocol.DatanodeDetails.Port; import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotEquals; @@ -68,7 +68,7 @@ static void setup(@TempDir File testDir) { conf = new OzoneConfiguration(); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath()); conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 1); - conf.setBoolean(DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); + conf.setBoolean(HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); conf.set(ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL, "1s"); } @@ -114,13 +114,13 @@ void testContainerRandomPort(@TempDir File tempDir) throws IOException { // Each instance of SM will create an ozone container // that bounds to a random port. 
- ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, true); - ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, + ozoneConf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_IPC_RANDOM_PORT, true); + ozoneConf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED, + conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, true); ozoneConf.setBoolean( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); List stateMachines = new ArrayList<>(); try { @@ -168,7 +168,7 @@ void testContainerRandomPort(@TempDir File tempDir) throws IOException { } // Turn off the random port flag and test again - ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, false); + ozoneConf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_IPC_RANDOM_PORT, false); try ( DatanodeStateMachine sm1 = new DatanodeStateMachine( randomDatanodeDetails(), ozoneConf); @@ -182,8 +182,8 @@ void testContainerRandomPort(@TempDir File tempDir) throws IOException { assertFalse(ports.add(sm2.getContainer().getReadChannel().getIPCPort())); assertFalse(ports.add(sm3.getContainer().getReadChannel().getIPCPort())); assertEquals(ports.iterator().next().intValue(), - conf.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT)); + conf.getInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, + OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT)); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/UniformDatanodesFactory.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/UniformDatanodesFactory.java index 6cc6bcb8e95..8f79605ab05 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/UniformDatanodesFactory.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/UniformDatanodesFactory.java @@ -39,12 +39,12 @@ import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_DU_RESERVED; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_REST_HTTP_ADDRESS_KEY; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_IPC_PORT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_ADMIN_PORT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_PORT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_SERVER_PORT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_ADMIN_PORT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_PORT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_SERVER_PORT; import static org.apache.ozone.test.GenericTestUtils.PortAllocator.anyHostWithFreePort; import static org.apache.ozone.test.GenericTestUtils.PortAllocator.getFreePort; @@ -96,7 +96,7 @@ 
public OzoneConfiguration apply(OzoneConfiguration conf) throws IOException { Path ratisDir = baseDir.resolve("ratis"); Files.createDirectories(ratisDir); - dnConf.set(DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, ratisDir.toString()); + dnConf.set(HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, ratisDir.toString()); if (layoutVersion != null) { DatanodeLayoutStorage layoutStorage = new DatanodeLayoutStorage( @@ -111,11 +111,11 @@ private void configureDatanodePorts(ConfigurationTarget conf) { conf.set(HDDS_REST_HTTP_ADDRESS_KEY, anyHostWithFreePort()); conf.set(HDDS_DATANODE_HTTP_ADDRESS_KEY, anyHostWithFreePort()); conf.set(HDDS_DATANODE_CLIENT_ADDRESS_KEY, anyHostWithFreePort()); - conf.setInt(DFS_CONTAINER_IPC_PORT, getFreePort()); - conf.setInt(DFS_CONTAINER_RATIS_IPC_PORT, getFreePort()); - conf.setInt(DFS_CONTAINER_RATIS_ADMIN_PORT, getFreePort()); - conf.setInt(DFS_CONTAINER_RATIS_SERVER_PORT, getFreePort()); - conf.setInt(DFS_CONTAINER_RATIS_DATASTREAM_PORT, getFreePort()); + conf.setInt(HDDS_CONTAINER_IPC_PORT, getFreePort()); + conf.setInt(HDDS_CONTAINER_RATIS_IPC_PORT, getFreePort()); + conf.setInt(HDDS_CONTAINER_RATIS_ADMIN_PORT, getFreePort()); + conf.setInt(HDDS_CONTAINER_RATIS_SERVER_PORT, getFreePort()); + conf.setInt(HDDS_CONTAINER_RATIS_DATASTREAM_PORT, getFreePort()); conf.setFromObject(new ReplicationServer.ReplicationConfig().setPort(getFreePort())); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java index 563904922e9..3f1c31edfe7 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java @@ -103,7 +103,7 @@ public void setup() throws Exception { conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS); conf.setQuietMode(false); OzoneManager.setTestSecureOmFlag(true); - conf.setLong(OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY, 1); + conf.setLong(OzoneConfigKeys.HDDS_RATIS_SNAPSHOT_THRESHOLD_KEY, 1); conf.set(OzoneConfigKeys.OZONE_SCM_CLOSE_CONTAINER_WAIT_DURATION, "2s"); conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_SCRUB_INTERVAL, "2s"); conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT, "5s"); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java index 4588a86a48c..b6eaca8e80d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java @@ -168,7 +168,7 @@ public static void init() throws Exception { raftClientConfig.setRpcWatchRequestTimeout(Duration.ofSeconds(20)); conf.setFromObject(raftClientConfig); - conf.setLong(OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY, 1); + conf.setLong(OzoneConfigKeys.HDDS_RATIS_SNAPSHOT_THRESHOLD_KEY, 1); conf.setQuietMode(false); cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(10) @@ -309,9 +309,9 @@ public void testContainerStateMachineFailures() throws Exception { // restart the hdds datanode, container should not in the regular set OzoneConfiguration config 
= dn.getConf(); final String dir = config.get(OzoneConfigKeys. - DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR) + HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR) + UUID.randomUUID(); - config.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); + config.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); int index = cluster.getHddsDatanodeIndex(dn.getDatanodeDetails()); cluster.restartHddsDatanode(dn.getDatanodeDetails(), false); ozoneContainer = cluster.getHddsDatanodes().get(index) @@ -373,9 +373,9 @@ public void testUnhealthyContainer() throws Exception { OzoneConfiguration config = dn.getConf(); final String dir = config.get(OzoneConfigKeys. - DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR) + HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR) + UUID.randomUUID(); - config.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); + config.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); int index = cluster.getHddsDatanodeIndex(dn.getDatanodeDetails()); // restart the hdds datanode and see if the container is listed in the // in the missing container set and not in the regular set diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java index bf41df6c787..229059d84ad 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java @@ -106,7 +106,7 @@ public void setup() throws Exception { conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS); conf.setQuietMode(false); OzoneManager.setTestSecureOmFlag(true); - conf.setLong(OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY, 1); + conf.setLong(OzoneConfigKeys.HDDS_RATIS_SNAPSHOT_THRESHOLD_KEY, 1); // conf.set(HADOOP_SECURITY_AUTHENTICATION, KERBEROS.toString()); conf.set(OzoneConfigKeys.OZONE_SCM_CLOSE_CONTAINER_WAIT_DURATION, "2s"); conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_SCRUB_INTERVAL, "2s"); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java index be27dab58ed..d48df574a94 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java @@ -123,7 +123,7 @@ public void setup() throws Exception { .setStreamBufferMaxSize(MAX_FLUSH_SIZE) .applyTo(conf); - conf.setLong(OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY, 1); + conf.setLong(OzoneConfigKeys.HDDS_RATIS_SNAPSHOT_THRESHOLD_KEY, 1); conf.setQuietMode(false); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(3) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java index 4e050879295..5c0910ecdc2 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java +++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java @@ -111,7 +111,7 @@ private void init() throws Exception { conf.setFromObject(ratisClientConfig); conf.setTimeDuration( - OzoneConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, + OzoneConfigKeys.HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, 1, TimeUnit.SECONDS); conf.setBoolean( OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, true); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java index a385edd0275..b4ad49a3ed5 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java @@ -107,7 +107,7 @@ private void init() throws Exception { conf.setFromObject(ratisClientConfig); conf.setTimeDuration( - OzoneConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, + OzoneConfigKeys.HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, 1, TimeUnit.SECONDS); conf.setBoolean( OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, true); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java index ed00686bd8a..34f85d8e992 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java @@ -105,7 +105,7 @@ private void startCluster(int datanodes) throws Exception { conf.setFromObject(ratisClientConfig); conf.setTimeDuration( - OzoneConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, + OzoneConfigKeys.HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, 1, TimeUnit.SECONDS); conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, 2); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java index 8c35d5011a5..0fd31bb4b72 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java @@ -183,10 +183,10 @@ static void runContainerStateMachineMetrics( static XceiverServerRatis newXceiverServerRatis( DatanodeDetails dn, OzoneConfiguration conf) throws IOException { - conf.setInt(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT, + conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT, dn.getPort(DatanodeDetails.Port.Name.RATIS).getValue()); final String dir = TEST_DIR + dn.getUuid(); - conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); + conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); final ContainerDispatcher dispatcher = new TestContainerDispatcher(); return 
XceiverServerRatis.newXceiverServerRatis(dn, conf, dispatcher, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java index d4900bb4878..51943a2e8d2 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java @@ -78,7 +78,7 @@ public void testContainerMetrics() throws Exception { Pipeline pipeline = MockPipeline .createSingleNodePipeline(); OzoneConfiguration conf = new OzoneConfiguration(); - conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, + conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, pipeline.getFirstNode() .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()); conf.setInt(DFSConfigKeysLegacy.DFS_METRICS_PERCENTILES_INTERVALS_KEY, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java index 7a64ddc5d5e..1b8bae0d03a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java @@ -68,7 +68,7 @@ public void testCreateOzoneContainer( Pipeline pipeline = MockPipeline.createSingleNodePipeline(); conf.set(OZONE_METADATA_DIRS, ozoneMetaDir.getPath()); conf.set(HDDS_DATANODE_DIR_KEY, hddsNodeDir.getPath()); - conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, + conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, pipeline.getFirstNode() .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()); @@ -99,7 +99,7 @@ void testOzoneContainerStart( Pipeline pipeline = MockPipeline.createSingleNodePipeline(); conf.set(OZONE_METADATA_DIRS, ozoneMetaDir.getPath()); conf.set(HDDS_DATANODE_DIR_KEY, hddsNodeDir.getPath()); - conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, + conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, pipeline.getFirstNode() .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java index 715b0678a17..4f24f8e6c32 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java @@ -129,10 +129,10 @@ void testCreateOzoneContainer(boolean requireToken, boolean hasToken, try { Pipeline pipeline = MockPipeline.createSingleNodePipeline(); conf.set(HDDS_DATANODE_DIR_KEY, tempFolder.toString()); - conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, pipeline + conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, pipeline .getFirstNode().getPort(DatanodeDetails.Port.Name.STANDALONE) .getValue()); - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, false); + conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_IPC_RANDOM_PORT, false); DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails(); container = 
new OzoneContainer(dn, conf, ContainerTestUtils diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java index 3c89bb12ee7..c05f55bd4a7 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java @@ -89,7 +89,7 @@ public class TestContainerServer { public static void setup() { DefaultMetricsSystem.setMiniClusterMode(true); CONF.set(HddsConfigKeys.HDDS_METADATA_DIR_NAME, TEST_DIR); - CONF.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED, false); + CONF.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, false); DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails(); caClient = new DNCertificateClient(new SecurityConfig(CONF), null, dn, null, null, null); @@ -104,7 +104,7 @@ public static void tearDown() throws Exception { public void testClientServer() throws Exception { DatanodeDetails datanodeDetails = randomDatanodeDetails(); runTestClientServer(1, (pipeline, conf) -> conf - .setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, + .setInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, pipeline.getFirstNode() .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()), XceiverClientGrpc::new, @@ -121,10 +121,10 @@ public void testClientServerRatisGrpc() throws Exception { static XceiverServerRatis newXceiverServerRatis( DatanodeDetails dn, OzoneConfiguration conf) throws IOException { - conf.setInt(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT, + conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT, dn.getPort(DatanodeDetails.Port.Name.RATIS).getValue()); final String dir = TEST_DIR + dn.getUuid(); - conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); + conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); final ContainerDispatcher dispatcher = new TestContainerDispatcher(); return XceiverServerRatis.newXceiverServerRatis(dn, conf, dispatcher, @@ -216,7 +216,7 @@ public void testClientServerWithContainerDispatcher() throws Exception { HddsDispatcher hddsDispatcher = createDispatcher(dd, UUID.randomUUID(), CONF); runTestClientServer(1, (pipeline, conf) -> conf - .setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, + .setInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, pipeline.getFirstNode() .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()), XceiverClientGrpc::new, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java index 53420c0e220..e0522ac6e91 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java @@ -158,7 +158,7 @@ public void testClientServer() throws Exception { HddsDispatcher hddsDispatcher = createDispatcher(dd, UUID.randomUUID(), CONF); runTestClientServer(1, (pipeline, conf) -> conf - .setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, + .setInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, pipeline.getFirstNode() .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()), 
XceiverClientGrpc::new, @@ -201,14 +201,14 @@ public void testClientServerRatisGrpc() throws Exception { static XceiverServerRatis newXceiverServerRatis( DatanodeDetails dn, OzoneConfiguration conf) throws IOException { - conf.setInt(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT, + conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT, dn.getPort(DatanodeDetails.Port.Name.RATIS).getValue()); - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED, + conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, true); conf.setBoolean( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); final String dir = TEST_DIR + dn.getUuid(); - conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); + conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); final ContainerDispatcher dispatcher = createDispatcher(dn, UUID.randomUUID(), conf); return XceiverServerRatis.newXceiverServerRatis(dn, conf, dispatcher, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java index 8e95e6cb18a..7c82633f113 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java @@ -73,7 +73,7 @@ public void destroy() throws Exception { public void testRatisLogParsing() throws Exception { OzoneConfiguration conf = cluster.getHddsDatanodes().get(0).getConf(); String path = - conf.get(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR); + conf.get(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR); UUID pid = cluster.getStorageContainerManager().getPipelineManager() .getPipelines().get(0).getId().getId(); File pipelineDir = new File(path, pid.toString()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java index 0324d030afa..c1d55accfd7 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java @@ -51,7 +51,7 @@ public static void init() throws Exception { .build(); conf.setQuietMode(false); // enable ratis for Scm. 
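// Illustrative aside: the flag set just below is the same one ContainerOperationClient
// (see its hunk earlier in this patch) reads when choosing RATIS/THREE replication:
//   boolean useRatis = conf.getBoolean(
//       ScmConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_KEY,
//       ScmConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_DEFAULT);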
- conf.setBoolean(ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY, true); + conf.setBoolean(ScmConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_KEY, true); cluster.waitForClusterToBeReady(); } diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java index 1614f81087b..3d426ed0349 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java @@ -190,8 +190,8 @@ public BasicOzoneClientAdapterImpl(String omHost, int omPort, OzoneFSUtils.validateBucketLayout(bucket.getName(), resolvedBucketLayout); this.configuredDnPort = conf.getInt( - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, + OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT); this.config = conf; } diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java index 7a80878549b..ce27fce8c1e 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java @@ -198,8 +198,8 @@ public BasicRootedOzoneClientAdapterImpl(String omHost, int omPort, proxy = objectStore.getClientProxy(); this.configuredDnPort = conf.getInt( - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, + OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT); // Fetches the bucket layout to be used by OFS. 
initDefaultFsBucketLayout(conf); diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java index 1d3850b12ac..47be6aeb6df 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java @@ -106,8 +106,8 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.EC; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_DEFAULT; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED_DEFAULT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_DATASTREAM_AUTO_THRESHOLD; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_DATASTREAM_AUTO_THRESHOLD_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION; @@ -196,8 +196,8 @@ public void init() { OZONE_SCM_CHUNK_SIZE_DEFAULT, StorageUnit.BYTES); datastreamEnabled = ozoneConfiguration.getBoolean( - DFS_CONTAINER_RATIS_DATASTREAM_ENABLED, - DFS_CONTAINER_RATIS_DATASTREAM_ENABLED_DEFAULT); + HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, + HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED_DEFAULT); datastreamMinLength = (long) ozoneConfiguration.getStorageSize( OZONE_FS_DATASTREAM_AUTO_THRESHOLD, OZONE_FS_DATASTREAM_AUTO_THRESHOLD_DEFAULT, StorageUnit.BYTES); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java index 775d5a19769..28ce32e7470 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java @@ -68,7 +68,7 @@ public static void setUp() throws Exception { REST.setClient(client); OzoneConfiguration conf = new OzoneConfiguration(); - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED, + conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, true); REST.setOzoneConfiguration(conf); REST.init(); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java index f92496249e2..d988b430230 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java @@ -81,7 +81,7 @@ public static void setUp() throws Exception { REST.setClient(client); OzoneConfiguration conf = new OzoneConfiguration(); - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED, + conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, true); conf.setStorageSize(OZONE_FS_DATASTREAM_AUTO_THRESHOLD, 1, StorageUnit.BYTES);
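Taken together, the S3 gateway changes above keep the datastream write path behind a single boolean; a compact sketch of the check as it now reads (the class name is illustrative, the key and default constants are the ones imported in ObjectEndpoint):

import org.apache.hadoop.hdds.conf.OzoneConfiguration;

import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED;
import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED_DEFAULT;

public final class DatastreamFlagSketch {
  private DatastreamFlagSketch() {
  }

  /** Mirror of the ObjectEndpoint#init() read: is the Ratis datastream path enabled? */
  public static boolean datastreamEnabled(OzoneConfiguration conf) {
    return conf.getBoolean(
        HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED,
        HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED_DEFAULT);
  }
}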