diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java index c62d9773639fc..2828f6ea41ca0 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java @@ -41,8 +41,7 @@ */ public final class Pipeline { - private static final Logger LOG = LoggerFactory - .getLogger(Pipeline.class); + private static final Logger LOG = LoggerFactory.getLogger(Pipeline.class); private final PipelineID id; private final ReplicationType type; private final ReplicationFactor factor; diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java index 045997fd05584..b179ca5395695 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java @@ -193,10 +193,12 @@ public List getLocationInfoList() { .setPipeline(streamEntry.getPipeline()).build(); locationInfoList.add(info); } - LOG.debug( - "block written " + streamEntry.getBlockID() + ", length " + length - + " bcsID " + streamEntry.getBlockID() - .getBlockCommitSequenceId()); + if (LOG.isDebugEnabled()) { + LOG.debug( + "block written " + streamEntry.getBlockID() + ", length " + length + + " bcsID " + streamEntry.getBlockID() + .getBlockCommitSequenceId()); + } } return locationInfoList; } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java index fa1672a1fa7d0..ecbb3290a7dc6 100644 --- 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java @@ -97,8 +97,10 @@ private synchronized void initialize(String keyName, long keyLength = 0; for (int i = 0; i < blockInfos.size(); i++) { OmKeyLocationInfo omKeyLocationInfo = blockInfos.get(i); - LOG.debug("Adding stream for accessing {}. The stream will be " + - "initialized later.", omKeyLocationInfo); + if (LOG.isDebugEnabled()) { + LOG.debug("Adding stream for accessing {}. The stream will be " + + "initialized later.", omKeyLocationInfo); + } addStream(omKeyLocationInfo, xceiverClientManager, verifyChecksum); diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index d0dd124171f58..06351ab2c3d0b 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -439,10 +439,14 @@ public Token getDelegationToken(Text renewer) ozoneManagerClient.getDelegationToken(renewer); if (token != null) { token.setService(dtService); - LOG.debug("Created token {} for dtService {}", token, dtService); + if (LOG.isDebugEnabled()) { + LOG.debug("Created token {} for dtService {}", token, dtService); + } } else { - LOG.debug("Cannot get ozone delegation token for renewer {} to access " + - "service {}", renewer, dtService); + if (LOG.isDebugEnabled()) { + LOG.debug("Cannot get ozone delegation token for renewer {} to " + + "access service {}", renewer, dtService); + } } return token; } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretManagerImpl.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretManagerImpl.java index 2fdf543f31bec..fb5665820628c 100644 --- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretManagerImpl.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretManagerImpl.java @@ -75,7 +75,9 @@ public S3SecretValue getS3Secret(String kerberosID) throws IOException { } finally { omMetadataManager.getLock().releaseLock(S3_SECRET_LOCK, kerberosID); } - LOG.trace("Secret for accessKey:{}, proto:{}", kerberosID, result); + if (LOG.isTraceEnabled()) { + LOG.trace("Secret for accessKey:{}, proto:{}", kerberosID, result); + } return result; } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProvider.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProvider.java index 62d8fdc2613a1..32684de5b73f2 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProvider.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProvider.java @@ -214,8 +214,10 @@ private Text computeDelegationTokenService() { @Override public void performFailover(OzoneManagerProtocolPB currentProxy) { int newProxyIndex = incrementProxyIndex(); - LOG.debug("Failing over OM proxy to index: {}, nodeId: {}", - newProxyIndex, omNodeIDList.get(newProxyIndex)); + if (LOG.isDebugEnabled()) { + LOG.debug("Failing over OM proxy to index: {}, nodeId: {}", + newProxyIndex, omNodeIDList.get(newProxyIndex)); + } } /** diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OMRatisHelper.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OMRatisHelper.java index bc64d6c5a1fd5..c1930c85d03f5 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OMRatisHelper.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OMRatisHelper.java @@ -61,7 +61,9 @@ private OMRatisHelper() { */ public static RaftClient newRaftClient(RpcType rpcType, String omId, RaftGroup group, 
RetryPolicy retryPolicy, Configuration conf) { - LOG.trace("newRaftClient: {}, leader={}, group={}", rpcType, omId, group); + if (LOG.isTraceEnabled()) { + LOG.trace("newRaftClient: {}, leader={}, group={}", rpcType, omId, group); + } final RaftProperties properties = new RaftProperties(); RaftConfigKeys.Rpc.setType(properties, rpcType); diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java index c6a99ac2d9cfd..a97a26c90b15b 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java @@ -168,8 +168,10 @@ private boolean lock(Resource resource, String resourceName, throw new RuntimeException(errorMessage); } else { lockFn.accept(resourceName); - LOG.debug("Acquired {} {} lock on resource {}", lockType, resource.name, - resourceName); + if (LOG.isDebugEnabled()) { + LOG.debug("Acquired {} {} lock on resource {}", lockType, resource.name, + resourceName); + } lockSet.set(resource.setLock(lockSet.get())); return true; } @@ -264,8 +266,10 @@ public boolean acquireMultiUserLock(String firstUser, String secondUser) { throw ex; } } - LOG.debug("Acquired Write {} lock on resource {} and {}", resource.name, - firstUser, secondUser); + if (LOG.isDebugEnabled()) { + LOG.debug("Acquired Write {} lock on resource {} and {}", resource.name, + firstUser, secondUser); + } lockSet.set(resource.setLock(lockSet.get())); return true; } @@ -300,8 +304,10 @@ public void releaseMultiUserLock(String firstUser, String secondUser) { manager.writeUnlock(firstUser); manager.writeUnlock(secondUser); } - LOG.debug("Release Write {} lock on resource {} and {}", resource.name, - firstUser, secondUser); + if (LOG.isDebugEnabled()) { + LOG.debug("Release Write {} lock on resource {} and {}", resource.name, + firstUser, secondUser); 
+ } lockSet.set(resource.clearLock(lockSet.get())); } @@ -352,8 +358,10 @@ private void unlock(Resource resource, String resourceName, // locks, as some locks support acquiring lock again. lockFn.accept(resourceName); // clear lock - LOG.debug("Release {} {}, lock on resource {}", lockType, resource.name, - resourceName); + if (LOG.isDebugEnabled()) { + LOG.debug("Release {} {}, lock on resource {}", lockType, resource.name, + resourceName); + } lockSet.set(resource.clearLock(lockSet.get())); } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneBlockTokenSecretManager.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneBlockTokenSecretManager.java index b3f607a9c3610..5cc782336a85a 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneBlockTokenSecretManager.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneBlockTokenSecretManager.java @@ -89,7 +89,7 @@ public Token generateToken(String user, if (LOG.isTraceEnabled()) { long expiryTime = tokenIdentifier.getExpiryDate(); String tokenId = tokenIdentifier.toString(); - LOG.trace("Issued delegation token -> expiryTime:{},tokenId:{}", + LOG.trace("Issued delegation token -> expiryTime:{}, tokenId:{}", expiryTime, tokenId); } // Pass blockId as service. 
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java index 7e03095cdc45c..0de8ac63c3f04 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java @@ -289,8 +289,10 @@ public OzoneTokenIdentifier cancelToken(Token token, String canceller) throws IOException { OzoneTokenIdentifier id = OzoneTokenIdentifier.readProtoBuf( token.getIdentifier()); - LOG.debug("Token cancellation requested for identifier: {}", - formatTokenId(id)); + if (LOG.isDebugEnabled()) { + LOG.debug("Token cancellation requested for identifier: {}", + formatTokenId(id)); + } if (id.getUser() == null) { throw new InvalidToken("Token with no owner " + formatTokenId(id)); diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSelector.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSelector.java index dd2ab1fa2e507..68afaaf52b81a 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSelector.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSelector.java @@ -43,9 +43,13 @@ public OzoneDelegationTokenSelector() { @Override public Token selectToken(Text service, Collection> tokens) { - LOG.trace("Getting token for service {}", service); + if (LOG.isTraceEnabled()) { + LOG.trace("Getting token for service {}", service); + } Token token = getSelectedTokens(service, tokens); - LOG.debug("Got tokens: {} for service {}", token, service); + if (LOG.isDebugEnabled()) { + LOG.debug("Got tokens: {} for service {}", token, service); + } return token; } diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneSecretManager.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneSecretManager.java index 78f0565b81dc7..06fc071f32dde 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneSecretManager.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneSecretManager.java @@ -110,8 +110,10 @@ public byte[] createPassword(byte[] identifier, PrivateKey privateKey) @Override public byte[] createPassword(T identifier) { - logger.debug("Creating password for identifier: {}, currentKey: {}", - formatTokenId(identifier), currentKey.getKeyId()); + if (logger.isDebugEnabled()) { + logger.debug("Creating password for identifier: {}, currentKey: {}", + formatTokenId(identifier), currentKey.getKeyId()); + } byte[] password = null; try { password = createPassword(identifier.getBytes(), diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java index 540445642f555..d64eae4e6e4c8 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java @@ -570,8 +570,10 @@ public boolean checkAccess(OzoneObj ozObject, RequestContext context) } boolean hasAccess = OzoneAclUtil.checkAclRights(bucketInfo.getAcls(), context); - LOG.debug("user:{} has access rights for bucket:{} :{} ", - context.getClientUgi(), ozObject.getBucketName(), hasAccess); + if (LOG.isDebugEnabled()) { + LOG.debug("user:{} has access rights for bucket:{} :{} ", + context.getClientUgi(), ozObject.getBucketName(), hasAccess); + } return hasAccess; } catch (IOException ex) { if(ex instanceof OMException) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index f3ae9b1cd73c9..20b7fdfec534f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -1661,8 +1661,10 @@ public boolean checkAccess(OzoneObj ozObject, RequestContext context) if (keyInfo == null) { // the key does not exist, but it is a parent "dir" of some key // let access be determined based on volume/bucket/prefix ACL - LOG.debug("key:{} is non-existent parent, permit access to user:{}", - keyName, context.getClientUgi()); + if (LOG.isDebugEnabled()) { + LOG.debug("key:{} is non-existent parent, permit access to user:{}", + keyName, context.getClientUgi()); + } return true; } } catch (OMException e) { @@ -1678,8 +1680,10 @@ public boolean checkAccess(OzoneObj ozObject, RequestContext context) boolean hasAccess = OzoneAclUtil.checkAclRight( keyInfo.getAcls(), context); - LOG.debug("user:{} has access rights for key:{} :{} ", - context.getClientUgi(), ozObject.getKeyName(), hasAccess); + if (LOG.isDebugEnabled()) { + LOG.debug("user:{} has access rights for key:{} :{} ", + context.getClientUgi(), ozObject.getKeyName(), hasAccess); + } return hasAccess; } catch (IOException ex) { if(ex instanceof OMException) { @@ -1766,10 +1770,11 @@ public OzoneFileStatus getFileStatus(OmKeyArgs args) throws IOException { if (keys.iterator().hasNext()) { return new OzoneFileStatus(keyName); } - - LOG.debug("Unable to get file status for the key: volume:" + volumeName + - " bucket:" + bucketName + " key:" + keyName + " with error no " + - "such file exists:"); + if (LOG.isDebugEnabled()) { + LOG.debug("Unable to get file status for the key: volume: {}, bucket:" + + " {}, key: {}, with error: No such file exists.", volumeName, + bucketName, keyName); + } throw new OMException("Unable to get file status: volume: " + volumeName + " bucket: " + 
bucketName + " key: " + keyName, FILE_NOT_FOUND); @@ -2132,8 +2137,10 @@ private void sortDatanodeInPipeline(OmKeyInfo keyInfo, String clientMachine) { List sortedNodes = scmClient.getBlockClient() .sortDatanodes(nodeList, clientMachine); k.getPipeline().setNodesInOrder(sortedNodes); - LOG.debug("Sort datanodes {} for client {}, return {}", nodes, - clientMachine, sortedNodes); + if (LOG.isDebugEnabled()) { + LOG.debug("Sort datanodes {} for client {}, return {}", nodes, + clientMachine, sortedNodes); + } } catch (IOException e) { LOG.warn("Unable to sort datanodes based on distance to " + "client, volume=" + keyInfo.getVolumeName() + diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OpenKeyCleanupService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OpenKeyCleanupService.java index fa4be651dae63..79bc39f498464 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OpenKeyCleanupService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OpenKeyCleanupService.java @@ -88,7 +88,9 @@ public BackgroundTaskResult call() throws Exception { if (result.isSuccess()) { try { keyManager.deleteExpiredOpenKey(result.getObjectKey()); - LOG.debug("Key {} deleted from OM DB", result.getObjectKey()); + if (LOG.isDebugEnabled()) { + LOG.debug("Key {} deleted from OM DB", result.getObjectKey()); + } deletedSize += 1; } catch (IOException e) { LOG.warn("Failed to delete hanging-open key {}", diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index a6503d73140a3..0cd087eee2364 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -734,10 +734,12 @@ private static void loginOMUser(OzoneConfiguration 
conf) if (SecurityUtil.getAuthenticationMethod(conf).equals( AuthenticationMethod.KERBEROS)) { - LOG.debug("Ozone security is enabled. Attempting login for OM user. " - + "Principal: {},keytab: {}", conf.get( - OZONE_OM_KERBEROS_PRINCIPAL_KEY), - conf.get(OZONE_OM_KERBEROS_KEYTAB_FILE_KEY)); + if (LOG.isDebugEnabled()) { + LOG.debug("Ozone security is enabled. Attempting login for OM user. " + + "Principal: {}, keytab: {}", conf.get( + OZONE_OM_KERBEROS_PRINCIPAL_KEY), + conf.get(OZONE_OM_KERBEROS_KEYTAB_FILE_KEY)); + } UserGroupInformation.setConfiguration(conf); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java index 0eafff9dcbd93..c89b32ee7347e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java @@ -139,7 +139,10 @@ public boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException { OMPrefixAclOpResult omPrefixAclOpResult = removeAcl(obj, acl, prefixInfo); if (!omPrefixAclOpResult.isOperationsResult()) { - LOG.debug("acl {} does not exist for prefix path {} ", acl, prefixPath); + if (LOG.isDebugEnabled()) { + LOG.debug("acl {} does not exist for prefix path {} ", + acl, prefixPath); + } return false; } @@ -236,8 +239,10 @@ public boolean checkAccess(OzoneObj ozObject, RequestContext context) if (lastNode != null && lastNode.getValue() != null) { boolean hasAccess = OzoneAclUtil.checkAclRights(lastNode.getValue(). 
getAcls(), context); - LOG.debug("user:{} has access rights for ozObj:{} ::{} ", - context.getClientUgi(), ozObject, hasAccess); + if (LOG.isDebugEnabled()) { + LOG.debug("user:{} has access rights for ozObj:{} ::{} ", + context.getClientUgi(), ozObject, hasAccess); + } return hasAccess; } else { return true; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java index 04cf09e5ef9ee..7375eb89b26d0 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java @@ -108,7 +108,7 @@ private UserVolumeInfo delVolumeFromOwnerList(String volume, String owner) if (volumeList != null) { prevVolList.addAll(volumeList.getVolumeNamesList()); } else { - LOG.debug("volume:{} not found for user:{}"); + LOG.debug("volume:{} not found for user:{}", volume, owner); throw new OMException(ResultCodes.USER_NOT_FOUND); } @@ -503,7 +503,9 @@ public boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException { try { volumeArgs.addAcl(acl); } catch (OMException ex) { - LOG.debug("Add acl failed.", ex); + if (LOG.isDebugEnabled()) { + LOG.debug("Add acl failed.", ex); + } return false; } metadataManager.getVolumeTable().put(dbVolumeKey, volumeArgs); @@ -553,7 +555,9 @@ public boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException { try { volumeArgs.removeAcl(acl); } catch (OMException ex) { - LOG.debug("Remove acl failed.", ex); + if (LOG.isDebugEnabled()) { + LOG.debug("Remove acl failed.", ex); + } return false; } metadataManager.getVolumeTable().put(dbVolumeKey, volumeArgs); @@ -685,8 +689,10 @@ public boolean checkAccess(OzoneObj ozObject, RequestContext context) Preconditions.checkState(volume.equals(volumeArgs.getVolume())); boolean hasAccess = volumeArgs.getAclMap().hasAccess( 
context.getAclRights(), context.getClientUgi()); - LOG.debug("user:{} has access rights for volume:{} :{} ", - context.getClientUgi(), ozObject.getVolumeName(), hasAccess); + if (LOG.isDebugEnabled()) { + LOG.debug("user:{} has access rights for volume:{} :{} ", + context.getClientUgi(), ozObject.getVolumeName(), hasAccess); + } return hasAccess; } catch (IOException ex) { LOG.error("Check access operation failed for volume:{}", volume, ex); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java index b4f5b8d98fc94..e5cadffc40090 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java @@ -148,9 +148,11 @@ private void flushTransactions() { flushedTransactionCount.addAndGet(flushedTransactionsSize); flushIterations.incrementAndGet(); - LOG.debug("Sync Iteration {} flushed transactions in this " + - "iteration{}", flushIterations.get(), - flushedTransactionsSize); + if (LOG.isDebugEnabled()) { + LOG.debug("Sync Iteration {} flushed transactions in this " + + "iteration{}", flushIterations.get(), + flushedTransactionsSize); + } long lastRatisTransactionIndex = readyBuffer.stream().map(DoubleBufferEntry::getTrxLogIndex) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisClient.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisClient.java index 2cbef50cb0492..6f97f56241b01 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisClient.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisClient.java @@ -99,8 +99,10 @@ public static 
OzoneManagerRatisClient newOzoneManagerRatisClient( } public void connect() { - LOG.debug("Connecting to OM Ratis Server GroupId:{} OM:{}", - raftGroup.getGroupId().getUuid().toString(), omNodeID); + if (LOG.isDebugEnabled()) { + LOG.debug("Connecting to OM Ratis Server GroupId:{} OM:{}", + raftGroup.getGroupId().getUuid().toString(), omNodeID); + } // TODO : XceiverClient ratis should pass the config value of // maxOutstandingRequests so as to set the upper bound on max no of async @@ -147,8 +149,7 @@ private OzoneManagerProtocolProtos.Status parseErrorStatus(String message) { if (message.contains(STATUS_CODE)) { String errorCode = message.substring(message.indexOf(STATUS_CODE) + STATUS_CODE.length()); - LOG.debug("Parsing error message for error code " + - errorCode); + LOG.debug("Parsing error message for error code {}", errorCode); return OzoneManagerProtocolProtos.Status.valueOf(errorCode.trim()); } else { return OzoneManagerProtocolProtos.Status.INTERNAL_ERROR; @@ -166,25 +167,27 @@ private CompletableFuture sendCommandAsync(OMRequest request) { CompletableFuture raftClientReply = sendRequestAsync(request); - return raftClientReply.whenComplete((reply, e) -> LOG.debug( - "received reply {} for request: cmdType={} traceID={} " + - "exception: {}", reply, request.getCmdType(), - request.getTraceID(), e)) - .thenApply(reply -> { - try { - Preconditions.checkNotNull(reply); - if (!reply.isSuccess()) { - RaftException exception = reply.getException(); - Preconditions.checkNotNull(exception, "Raft reply failure " + - "but no exception propagated."); - throw new CompletionException(exception); - } - return OMRatisHelper.getOMResponseFromRaftClientReply(reply); - - } catch (InvalidProtocolBufferException e) { - throw new CompletionException(e); - } - }); + return raftClientReply.whenComplete((reply, e) -> { + if (LOG.isDebugEnabled()) { + LOG.debug("received reply {} for request: cmdType={} traceID={} " + + "exception: {}", reply, request.getCmdType(), + 
request.getTraceID(), e); + } + }).thenApply(reply -> { + try { + Preconditions.checkNotNull(reply); + if (!reply.isSuccess()) { + RaftException exception = reply.getException(); + Preconditions.checkNotNull(exception, "Raft reply failure " + + "but no exception propagated."); + throw new CompletionException(exception); + } + return OMRatisHelper.getOMResponseFromRaftClientReply(reply); + + } catch (InvalidProtocolBufferException e) { + throw new CompletionException(e); + } + }); } /** @@ -198,7 +201,9 @@ private CompletableFuture sendRequestAsync( OMRequest request) { boolean isReadOnlyRequest = OmUtils.isReadOnly(request); ByteString byteString = OMRatisHelper.convertRequestToByteString(request); - LOG.debug("sendOMRequestAsync {} {}", isReadOnlyRequest, request); + if (LOG.isDebugEnabled()) { + LOG.debug("sendOMRequestAsync {} {}", isReadOnlyRequest, request); + } return isReadOnlyRequest ? raftClient.sendReadOnlyAsync(() -> byteString) : raftClient.sendAsync(() -> byteString); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java index 69a7ae93a81aa..7cab9d2738ab6 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java @@ -169,8 +169,10 @@ private OMResponse processReply(OMRequest omRequest, RaftClientReply reply) omResponse.setMessage(stateMachineException.getCause().getMessage()); omResponse.setStatus(parseErrorStatus( stateMachineException.getCause().getMessage())); - LOG.debug("Error while executing ratis request. " + - "stateMachineException: ", stateMachineException); + if (LOG.isDebugEnabled()) { + LOG.debug("Error while executing ratis request. 
" + + "stateMachineException: ", stateMachineException); + } return omResponse.build(); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketSetAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketSetAclRequest.java index 46db75df17cf7..b97de955a51ad 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketSetAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketSetAclRequest.java @@ -103,7 +103,9 @@ OMClientResponse onFailure(OMResponse.Builder omResponse, void onComplete(boolean operationResult, IOException exception, OMMetrics omMetrics) { if (operationResult) { - LOG.debug("Set acl: {} for path: {} success!", getAcls(), getPath()); + if (LOG.isDebugEnabled()) { + LOG.debug("Set acl: {} for path: {} success!", getAcls(), getPath()); + } } else { omMetrics.incNumBucketUpdateFails(); if (exception == null) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeSetAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeSetAclRequest.java index 01b5edc8d5545..a5abbcca012af 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeSetAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeSetAclRequest.java @@ -96,8 +96,10 @@ OMClientResponse onFailure(OMResponse.Builder omResponse, @Override void onComplete(IOException ex) { if (ex == null) { - LOG.debug("Set acls: {} to volume: {} success!", - getAcls(), getVolumeName()); + if (LOG.isDebugEnabled()) { + LOG.debug("Set acls: {} to volume: {} success!", + getAcls(), getVolumeName()); + } } else { LOG.error("Set acls {} to volume {} failed!", getAcls(), getVolumeName(), 
ex); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerHARequestHandlerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerHARequestHandlerImpl.java index 66f489233417d..2d305d7831a33 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerHARequestHandlerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerHARequestHandlerImpl.java @@ -48,7 +48,9 @@ public OzoneManagerHARequestHandlerImpl(OzoneManager om, @Override public OMResponse handleApplyTransaction(OMRequest omRequest, long transactionLogIndex) { - LOG.debug("Received OMRequest: {}, ", omRequest); + if (LOG.isDebugEnabled()) { + LOG.debug("Received OMRequest: {}, ", omRequest); + } Type cmdType = omRequest.getCmdType(); switch (cmdType) { case CreateVolume: diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java index d4c029b8b3b99..ff2c966983f48 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java @@ -225,7 +225,9 @@ private OMResponse submitRequestDirectlyToOM(OMRequest request) { } try { omClientResponse.getFlushFuture().get(); - LOG.trace("Future for {} is completed", request); + if (LOG.isTraceEnabled()) { + LOG.trace("Future for {} is completed", request); + } } catch (ExecutionException | InterruptedException ex) { // terminate OM. As if we are in this stage means, while getting // response from flush future, we got an exception. 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java index 01e59b4fea8b5..ef96e0cc27ec4 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java @@ -149,7 +149,9 @@ public OzoneManagerRequestHandler(OzoneManager om) { @SuppressWarnings("methodlength") @Override public OMResponse handle(OMRequest request) { - LOG.debug("Received OMRequest: {}, ", request); + if (LOG.isDebugEnabled()) { + LOG.debug("Received OMRequest: {}, ", request); + } Type cmdType = request.getCmdType(); OMResponse.Builder responseBuilder = OMResponse.newBuilder() .setCmdType(cmdType) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneNativeAuthorizer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneNativeAuthorizer.java index 5acd37e09c8c8..0b7c51a40640d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneNativeAuthorizer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneNativeAuthorizer.java @@ -79,20 +79,20 @@ public boolean checkAccess(IOzoneObj ozObject, RequestContext context) switch (objInfo.getResourceType()) { case VOLUME: - LOG.trace("Checking access for volume:" + objInfo); + LOG.trace("Checking access for volume: {}", objInfo); return volumeManager.checkAccess(objInfo, context); case BUCKET: - LOG.trace("Checking access for bucket:" + objInfo); + LOG.trace("Checking access for bucket: {}", objInfo); return (bucketManager.checkAccess(objInfo, context) && volumeManager.checkAccess(objInfo, context)); case KEY: - LOG.trace("Checking access for Key:" + objInfo); + 
LOG.trace("Checking access for Key: {}", objInfo); return (keyManager.checkAccess(objInfo, context) && prefixManager.checkAccess(objInfo, context) && bucketManager.checkAccess(objInfo, context) && volumeManager.checkAccess(objInfo, context)); case PREFIX: - LOG.trace("Checking access for Prefix:" + objInfo); + LOG.trace("Checking access for Prefix: {}", objInfo); return (prefixManager.checkAccess(objInfo, context) && bucketManager.checkAccess(objInfo, context) && volumeManager.checkAccess(objInfo, context)); diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java index 4147c8ff4e3e9..298fd2e693737 100644 --- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java +++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java @@ -425,7 +425,9 @@ private boolean innerDelete(Path f, boolean recursive) throws IOException { DeleteIterator iterator = new DeleteIterator(f, recursive); return iterator.iterate(); } catch (FileNotFoundException e) { - LOG.debug("Couldn't delete {} - does not exist", f); + if (LOG.isDebugEnabled()) { + LOG.debug("Couldn't delete {} - does not exist", f); + } return false; } } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AWSV4AuthParser.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AWSV4AuthParser.java index 9b65b387a7928..82ffa0c5c4303 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AWSV4AuthParser.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AWSV4AuthParser.java @@ -110,10 +110,14 @@ public void parse() throws Exception { canonicalRequest = buildCanonicalRequest(); strToSign.append(hash(canonicalRequest)); - LOG.debug("canonicalRequest:[{}]", canonicalRequest); + if (LOG.isDebugEnabled()) { + LOG.debug("canonicalRequest:[{}]", 
canonicalRequest); + } - headerMap.keySet().forEach(k -> LOG.trace("Header:{},value:{}", k, - headerMap.get(k))); + if (LOG.isTraceEnabled()) { + headerMap.keySet().forEach(k -> LOG.trace("Header:{},value:{}", k, + headerMap.get(k))); + } LOG.debug("StringToSign:[{}]", strToSign); stringToSign = strToSign.toString(); diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientProducer.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientProducer.java index abaca03908240..d42c005e58316 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientProducer.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientProducer.java @@ -86,8 +86,9 @@ private OzoneClient getClient(OzoneConfiguration config) throws IOException { identifier.setSignature(v4RequestParser.getSignature()); identifier.setAwsAccessId(v4RequestParser.getAwsAccessId()); identifier.setOwner(new Text(v4RequestParser.getAwsAccessId())); - - LOG.trace("Adding token for service:{}", omService); + if (LOG.isTraceEnabled()) { + LOG.trace("Adding token for service:{}", omService); + } Token token = new Token(identifier.getBytes(), identifier.getSignature().getBytes(UTF_8), identifier.getKind(), diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/OS3ExceptionMapper.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/OS3ExceptionMapper.java index 43f335ede6f5e..588dafae86a6d 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/OS3ExceptionMapper.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/OS3ExceptionMapper.java @@ -42,7 +42,9 @@ public class OS3ExceptionMapper implements ExceptionMapper { @Override public Response toResponse(OS3Exception exception) { - LOG.debug("Returning exception. 
ex: {}", exception.toString()); + if (LOG.isDebugEnabled()) { + LOG.debug("Returning exception. ex: {}", exception.toString()); + } exception.setRequestId(requestIdentifier.getRequestId()); return Response.status(exception.getHttpCode()) .entity(exception.toXml()).build();