Skip to content

Commit

Permalink
HDDS-10630. Add missing parent directories deleted between initiate and complete MPU (apache#6496)
Browse files Browse the repository at this point in the history
  • Loading branch information
SaketaChalamchala authored Apr 12, 2024
1 parent 0c59c18 commit c1b27a8
Show file tree
Hide file tree
Showing 5 changed files with 190 additions and 11 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@

import java.io.IOException;
import java.nio.file.InvalidPathException;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
Expand All @@ -31,6 +32,10 @@
import java.util.function.BiFunction;

import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.ozone.om.OzoneConfigUtil;
import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequestWithFSO;
import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
import org.apache.hadoop.ozone.protocolPB.OMPBHelper;
import org.apache.ratis.server.protocol.TermIndex;
import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
Expand All @@ -41,6 +46,7 @@
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.helpers.KeyValueUtil;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
Expand Down Expand Up @@ -176,11 +182,72 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn
OmBucketInfo omBucketInfo = getBucketInfo(omMetadataManager,
volumeName, bucketName);

String ozoneKey = omMetadataManager.getOzoneKey(
volumeName, bucketName, keyName);

String dbOzoneKey =
getDBOzoneKey(omMetadataManager, volumeName, bucketName, keyName);
List<OmDirectoryInfo> missingParentInfos;
OMFileRequest.OMPathInfoWithFSO pathInfoFSO = OMFileRequest
.verifyDirectoryKeysInPath(omMetadataManager, volumeName, bucketName,
keyName, Paths.get(keyName));
missingParentInfos = OMDirectoryCreateRequestWithFSO
.getAllMissingParentDirInfo(ozoneManager, keyArgs, omBucketInfo,
pathInfoFSO, trxnLogIndex);

if (missingParentInfos != null) {
final long volumeId = omMetadataManager.getVolumeId(volumeName);
final long bucketId = omMetadataManager.getBucketId(volumeName,
bucketName);

// add all missing parents to directory table
addMissingParentsToCache(omBucketInfo, missingParentInfos,
omMetadataManager, volumeId, bucketId, trxnLogIndex);

String multipartOpenKey = omMetadataManager
.getMultipartKey(volumeId, bucketId,
pathInfoFSO.getLastKnownParentId(),
pathInfoFSO.getLeafNodeName(),
keyArgs.getMultipartUploadID());

if (getOmKeyInfoFromOpenKeyTable(multipartOpenKey,
keyName, omMetadataManager) == null) {

final ReplicationConfig replicationConfig = OzoneConfigUtil
.resolveReplicationConfigPreference(keyArgs.getType(),
keyArgs.getFactor(), keyArgs.getEcReplicationConfig(),
omBucketInfo != null ?
omBucketInfo.getDefaultReplicationConfig() :
null, ozoneManager);

OmMultipartKeyInfo multipartKeyInfoFromArgs =
new OmMultipartKeyInfo.Builder()
.setUploadID(keyArgs.getMultipartUploadID())
.setCreationTime(keyArgs.getModificationTime())
.setReplicationConfig(replicationConfig)
.setObjectID(pathInfoFSO.getLeafNodeObjectId())
.setUpdateID(trxnLogIndex)
.setParentID(pathInfoFSO.getLastKnownParentId())
.build();

OmKeyInfo keyInfoFromArgs = new OmKeyInfo.Builder()
.setVolumeName(volumeName)
.setBucketName(bucketName)
.setKeyName(keyName)
.setCreationTime(keyArgs.getModificationTime())
.setModificationTime(keyArgs.getModificationTime())
.setReplicationConfig(replicationConfig)
.setOmKeyLocationInfos(Collections.singletonList(
new OmKeyLocationInfoGroup(0, new ArrayList<>(), true)))
.setAcls(getAclsForKey(keyArgs, omBucketInfo, pathInfoFSO,
ozoneManager.getPrefixManager()))
.setObjectID(pathInfoFSO.getLeafNodeObjectId())
.setUpdateID(trxnLogIndex)
.setFileEncryptionInfo(keyArgs.hasFileEncryptionInfo() ?
OMPBHelper.convert(keyArgs.getFileEncryptionInfo()) : null)
.setParentObjectID(pathInfoFSO.getLastKnownParentId())
.build();

// Add missing multi part info to open key table
addMultiPartToCache(omMetadataManager, multipartOpenKey,
pathInfoFSO, keyInfoFromArgs, trxnLogIndex);
}
}

String dbMultipartOpenKey =
getDBMultipartOpenKey(volumeName, bucketName, keyName, uploadID,
Expand All @@ -189,6 +256,12 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn
OmMultipartKeyInfo multipartKeyInfo = omMetadataManager
.getMultipartInfoTable().get(multipartKey);

String ozoneKey = omMetadataManager.getOzoneKey(
volumeName, bucketName, keyName);

String dbOzoneKey =
getDBOzoneKey(omMetadataManager, volumeName, bucketName, keyName);

// Check for directory exists with same name for the LEGACY_FS,
// if it exists throw error.
checkDirectoryAlreadyExists(ozoneManager, omBucketInfo, keyName,
Expand Down Expand Up @@ -284,7 +357,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn
omClientResponse =
getOmClientResponse(multipartKey, omResponse, dbMultipartOpenKey,
omKeyInfo, allKeyInfoToRemove, omBucketInfo,
volumeId, bucketId);
volumeId, bucketId, missingParentInfos, multipartKeyInfo);

result = Result.SUCCESS;
} else {
Expand Down Expand Up @@ -325,7 +398,8 @@ protected OMClientResponse getOmClientResponse(String multipartKey,
OMResponse.Builder omResponse, String dbMultipartOpenKey,
OmKeyInfo omKeyInfo, List<OmKeyInfo> allKeyInfoToRemove,
OmBucketInfo omBucketInfo,
long volumeId, long bucketId) {
long volumeId, long bucketId, List<OmDirectoryInfo> missingParentInfos,
OmMultipartKeyInfo multipartKeyInfo) {

return new S3MultipartUploadCompleteResponse(omResponse.build(),
multipartKey, dbMultipartOpenKey, omKeyInfo, allKeyInfoToRemove,
Expand Down Expand Up @@ -464,6 +538,22 @@ protected String getDBOzoneKey(OMMetadataManager omMetadataManager,
return omMetadataManager.getOzoneKey(volumeName, bucketName, keyName);
}

/**
 * Hook invoked while completing an MPU to stage any parent directories of
 * the key that are missing from the directory table.
 * <p>
 * Base implementation is intentionally a no-op: non-FSO bucket layouts have
 * no directory hierarchy to maintain. The FSO request subclass overrides
 * this to validate quota and populate the directory-table cache.
 *
 * @param omBucketInfo        bucket that owns the key
 * @param missingParentInfos  parent directories absent from the directory table
 * @param omMetadataManager   OM metadata manager holding the table caches
 * @param volumeId            numeric id of the volume
 * @param bucketId            numeric id of the bucket
 * @param transactionLogIndex transaction index used as the cache epoch
 * @throws IOException declared for overriding implementations
 */
protected void addMissingParentsToCache(OmBucketInfo omBucketInfo,
    List<OmDirectoryInfo> missingParentInfos,
    OMMetadataManager omMetadataManager,
    long volumeId, long bucketId, long transactionLogIndex
) throws IOException {
  // FSO is disabled. Do nothing.
}

/**
 * Hook invoked while completing an MPU to re-stage the multipart open-key
 * entry in the open-key cache when it has gone missing.
 * <p>
 * Base implementation is intentionally a no-op: only the FSO request
 * subclass needs to repopulate the open file table cache.
 *
 * @param omMetadataManager   OM metadata manager holding the table caches
 * @param multipartOpenKey    DB key of the multipart entry in the open key table
 * @param pathInfoFSO         resolved FSO path info for the key
 * @param omKeyInfo           key info to stage in the cache
 * @param transactionLogIndex transaction index used as the cache epoch
 * @throws IOException declared for overriding implementations
 */
protected void addMultiPartToCache(
    OMMetadataManager omMetadataManager, String multipartOpenKey,
    OMFileRequest.OMPathInfoWithFSO pathInfoFSO, OmKeyInfo omKeyInfo,
    long transactionLogIndex
) throws IOException {
  // FSO is disabled. Do nothing.
}

protected OmKeyInfo getOmKeyInfoFromKeyTable(String dbOzoneKey,
String keyName, OMMetadataManager omMetadataManager) throws IOException {
return omMetadataManager.getKeyTable(getBucketLayout()).get(dbOzoneKey);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,9 @@
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
import org.apache.hadoop.ozone.om.response.s3.multipart.S3MultipartUploadCompleteResponse;
Expand Down Expand Up @@ -74,6 +76,37 @@ protected void checkDirectoryAlreadyExists(OzoneManager ozoneManager,
}
}

/**
 * FSO implementation: charges the bucket namespace quota for the parent
 * directories that disappeared between initiate and complete of the MPU,
 * then stages those directories in the directory-table cache so the
 * response can persist them.
 *
 * @throws IOException if the bucket namespace quota would be exceeded
 */
@Override
protected void addMissingParentsToCache(OmBucketInfo omBucketInfo,
    List<OmDirectoryInfo> missingParentInfos,
    OMMetadataManager omMetadataManager, long volumeId, long bucketId,
    long transactionLogIndex) throws IOException {

  final int newDirCount = missingParentInfos.size();

  // Make sure the bucket can absorb the recreated directories, then
  // account for them in the used-namespace counter.
  checkBucketQuotaInNamespace(omBucketInfo, newDirCount);
  omBucketInfo.incrUsedNamespace(newDirCount);

  // Stage every missing parent directory in the directory table cache.
  OMFileRequest.addDirectoryTableCacheEntries(omMetadataManager,
      volumeId, bucketId, transactionLogIndex, missingParentInfos, null);
}

/**
 * FSO implementation: stages the recreated multipart open-key entry in the
 * open file table cache so the complete-MPU response can persist it.
 *
 * @param omMetadataManager   OM metadata manager holding the table caches
 * @param multipartOpenKey    DB key of the multipart entry in the open key table
 * @param pathInfoFSO         resolved FSO path info (supplies the leaf node name)
 * @param omKeyInfo           key info to stage in the cache
 * @param transactionLogIndex transaction index used as the cache epoch
 * @throws IOException on metadata manager errors
 */
@Override
protected void addMultiPartToCache(
    OMMetadataManager omMetadataManager, String multipartOpenKey,
    OMFileRequest.OMPathInfoWithFSO pathInfoFSO, OmKeyInfo omKeyInfo,
    long transactionLogIndex
) throws IOException {

  // Add multi part to cache
  OMFileRequest.addOpenFileTableCacheEntry(omMetadataManager,
      multipartOpenKey, omKeyInfo, pathInfoFSO.getLeafNodeName(),
      transactionLogIndex);

}


@Override
protected OmKeyInfo getOmKeyInfoFromKeyTable(String dbOzoneFileKey,
String keyName, OMMetadataManager omMetadataManager) throws IOException {
Expand Down Expand Up @@ -147,11 +180,13 @@ protected OMClientResponse getOmClientResponse(String multipartKey,
OzoneManagerProtocolProtos.OMResponse.Builder omResponse,
String dbMultipartOpenKey, OmKeyInfo omKeyInfo,
List<OmKeyInfo> allKeyInfoToRemove, OmBucketInfo omBucketInfo,
long volumeId, long bucketId) {
long volumeId, long bucketId, List<OmDirectoryInfo> missingParentInfos,
OmMultipartKeyInfo multipartKeyInfo) {

return new S3MultipartUploadCompleteResponseWithFSO(omResponse.build(),
multipartKey, dbMultipartOpenKey, omKeyInfo, allKeyInfoToRemove,
getBucketLayout(), omBucketInfo, volumeId, bucketId);
getBucketLayout(), omBucketInfo, volumeId, bucketId,
missingParentInfos, multipartKeyInfo);
}

@Override
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -129,4 +129,12 @@ protected String addToKeyTable(OMMetadataManager omMetadataManager,
protected OmKeyInfo getOmKeyInfo() {
return omKeyInfo;
}

/**
 * @return the bucket info captured when this response was built; field is
 *     declared on this class outside the visible hunk.
 */
protected OmBucketInfo getOmBucketInfo() {
  return omBucketInfo;
}

/**
 * @return the multipart DB key this response operates on; field is declared
 *     on this class outside the visible hunk.
 */
protected String getMultiPartKey() {
  return multipartKey;
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,9 @@
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
Expand Down Expand Up @@ -52,6 +54,10 @@ public class S3MultipartUploadCompleteResponseWithFSO
private long volumeId;
private long bucketId;

private List<OmDirectoryInfo> missingParentInfos;

private OmMultipartKeyInfo multipartKeyInfo;

@SuppressWarnings("checkstyle:ParameterNumber")
public S3MultipartUploadCompleteResponseWithFSO(
@Nonnull OMResponse omResponse,
Expand All @@ -61,11 +67,15 @@ public S3MultipartUploadCompleteResponseWithFSO(
@Nonnull List<OmKeyInfo> allKeyInfoToRemove,
@Nonnull BucketLayout bucketLayout,
OmBucketInfo omBucketInfo,
@Nonnull long volumeId, @Nonnull long bucketId) {
@Nonnull long volumeId, @Nonnull long bucketId,
List<OmDirectoryInfo> missingParentInfos,
OmMultipartKeyInfo multipartKeyInfo) {
super(omResponse, multipartKey, multipartOpenKey, omKeyInfo,
allKeyInfoToRemove, bucketLayout, omBucketInfo);
this.volumeId = volumeId;
this.bucketId = bucketId;
this.missingParentInfos = missingParentInfos;
this.multipartKeyInfo = multipartKeyInfo;
}

/**
Expand All @@ -78,6 +88,39 @@ public S3MultipartUploadCompleteResponseWithFSO(
checkStatusNotOK();
}

/**
 * Persists, in one RocksDB batch, everything the complete-MPU request
 * staged in caches: any parent directories that went missing between
 * initiate and complete, the bucket's updated namespace usage, and (when
 * present in the open file table view) the recreated multipart open-key
 * entry. Parents are written BEFORE delegating to the superclass so the
 * committed key never references a non-existent parent in the same batch.
 *
 * @param omMetadataManager OM metadata manager providing the tables
 * @param batchOperation    batch to accumulate the writes into
 * @throws IOException on table read/write failure
 */
@Override
public void addToDBBatch(OMMetadataManager omMetadataManager,
    BatchOperation batchOperation) throws IOException {
  // missingParentInfos is null when no parents were missing (or for the
  // mock-response path); in that case only the superclass work runs.
  if (missingParentInfos != null) {
    // Create missing parent directory entries.
    for (OmDirectoryInfo parentDirInfo : missingParentInfos) {
      final String parentKey = omMetadataManager.getOzonePathKey(
          volumeId, bucketId, parentDirInfo.getParentObjectID(),
          parentDirInfo.getName());
      omMetadataManager.getDirectoryTable().putWithBatch(batchOperation,
          parentKey, parentDirInfo);
    }

    // namespace quota changes for parent directory
    String bucketKey = omMetadataManager.getBucketKey(
        getOmBucketInfo().getVolumeName(),
        getOmBucketInfo().getBucketName());
    omMetadataManager.getBucketTable().putWithBatch(batchOperation,
        bucketKey, getOmBucketInfo());

    // NOTE(review): a non-null read here presumably means the request
    // staged the entry in the open-file-table cache and it still needs to
    // be written to the DB batch — confirm against the request-side
    // addMultiPartToCache logic.
    if (OMFileRequest.getOmKeyInfoFromFileTable(true,
        omMetadataManager, getMultiPartKey(), getOmKeyInfo().getKeyName())
        != null) {
      // Add multi part to open key table.
      OMFileRequest.addToOpenFileTableForMultipart(omMetadataManager,
          batchOperation,
          getOmKeyInfo(), multipartKeyInfo.getUploadID(), volumeId,
          bucketId);
    }
  }
  // Superclass commits the completed key / cleans up multipart entries.
  super.addToDBBatch(omMetadataManager, batchOperation);
}

@Override
protected String addToKeyTable(OMMetadataManager omMetadataManager,
BatchOperation batchOperation) throws IOException {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -306,6 +306,8 @@ public S3MultipartUploadCompleteResponse createS3CompleteMPUResponseFSO(

String multipartKey = omMetadataManager
.getMultipartKey(volumeName, bucketName, keyName, multipartUploadID);
OmMultipartKeyInfo multipartKeyInfo = omMetadataManager
.getMultipartInfoTable().get(multipartKey);

final long volumeId = omMetadataManager.getVolumeId(volumeName);
final long bucketId = omMetadataManager.getBucketId(volumeName,
Expand All @@ -324,7 +326,8 @@ public S3MultipartUploadCompleteResponse createS3CompleteMPUResponseFSO(

return new S3MultipartUploadCompleteResponseWithFSO(omResponse,
multipartKey, multipartOpenKey, omKeyInfo, allKeyInfoToRemove,
getBucketLayout(), omBucketInfo, volumeId, bucketId);
getBucketLayout(), omBucketInfo, volumeId, bucketId, null,
multipartKeyInfo);
}

protected S3InitiateMultipartUploadResponse getS3InitiateMultipartUploadResp(
Expand Down

0 comments on commit c1b27a8

Please sign in to comment.