HDDS-11554. OMDBDefinition should be singleton. #7292

Merged: 3 commits, Oct 10, 2024
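The diff below turns OMDBDefinition into a shared singleton and caches its column family names. A minimal caller-side sketch of the change, assuming only the OMDBDefinition API shown in the diff (the wrapper class and main method are illustrative, not part of the patch):

import java.util.List;

import org.apache.hadoop.ozone.om.codec.OMDBDefinition;

// Hypothetical demo class; only the OMDBDefinition calls below come from this PR.
public final class OmDbSingletonSketch {
  public static void main(String[] args) {
    // Old pattern: each caller constructed a fresh definition and re-derived the table names:
    //   new OMDBDefinition().getColumnFamilies().stream()
    //       .map(DBColumnFamilyDefinition::getName)
    //       .collect(Collectors.toList());
    // New pattern: one shared, immutable instance with a memoized name list.
    final OMDBDefinition definition = OMDBDefinition.get();
    final List<String> names = definition.getColumnFamilyNames();
    System.out.println(names.size() + " column families: " + names);
  }
}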
@@ -20,6 +20,7 @@

import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.server.ServerUtils;
import org.apache.ratis.util.MemoizedSupplier;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@@ -28,6 +29,9 @@
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;

/**
* Simple interface to provide information to create a DBStore..
@@ -55,6 +59,16 @@ default File getDBLocation(ConfigurationSource conf) {
getLocationConfigKey(), getName());
}

static List<String> getColumnFamilyNames(Iterable<DBColumnFamilyDefinition<?, ?>> columnFamilies) {
return Collections.unmodifiableList(StreamSupport.stream(columnFamilies.spliterator(), false)
.map(DBColumnFamilyDefinition::getName)
.collect(Collectors.toList()));
}

default List<String> getColumnFamilyNames() {
return getColumnFamilyNames(getColumnFamilies());
}

/**
* @return The column families present in the DB.
*/
@@ -109,9 +123,17 @@ interface WithMapInterface extends DBDefinition {
*/
abstract class WithMap implements WithMapInterface {
private final Map<String, DBColumnFamilyDefinition<?, ?>> map;
private final Supplier<List<String>> columnFamilyNames;

protected WithMap(Map<String, DBColumnFamilyDefinition<?, ?>> map) {
this.map = map;
this.columnFamilyNames = MemoizedSupplier.valueOf(
() -> DBDefinition.getColumnFamilyNames(getColumnFamilies()));
}

@Override
public final List<String> getColumnFamilyNames() {
return columnFamilyNames.get();
}

@Override
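DBDefinition.WithMap now derives the name list through Ratis' MemoizedSupplier: the lambda runs on the first getColumnFamilyNames() call and every later call returns the same cached, unmodifiable list. A standalone sketch of that behaviour, assuming MemoizedSupplier.valueOf caches the first computed value as its name suggests (the demo class and placeholder table names are illustrative):

import java.util.Arrays;
import java.util.List;
import java.util.function.Supplier;

import org.apache.ratis.util.MemoizedSupplier;

public final class MemoizedNamesSketch {
  public static void main(String[] args) {
    final Supplier<List<String>> names = MemoizedSupplier.valueOf(() -> {
      System.out.println("computing column family names ...");
      return Arrays.asList("keyTable", "deletedTable"); // placeholder names, not the full OM schema
    });
    names.get(); // runs the lambda once and caches the result
    names.get(); // served from the cache; the lambda does not run again
  }
}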
@@ -50,7 +50,7 @@
/**
* Class defines the structure and types of the om.db.
*/
public class OMDBDefinition extends DBDefinition.WithMap {
public final class OMDBDefinition extends DBDefinition.WithMap {

public static final DBColumnFamilyDefinition<String, RepeatedOmKeyInfo>
DELETED_TABLE =
@@ -284,7 +284,13 @@ public class OMDBDefinition extends DBDefinition.WithMap {
USER_TABLE,
VOLUME_TABLE);

public OMDBDefinition() {
private static final OMDBDefinition INSTANCE = new OMDBDefinition();

public static OMDBDefinition get() {
return INSTANCE;
}

private OMDBDefinition() {
super(COLUMN_FAMILIES);
}

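Making OMDBDefinition final, hiding its constructor, and building the instance in a static field is the eager-initialization singleton idiom: class initialization is thread-safe per the JLS, so no locking is needed, and sharing one instance is safe because the definition is immutable. A generic sketch of the idiom (hypothetical class, for illustration only):

public final class Registry {
  // Created exactly once, when the class is initialized; safely published by the JVM.
  private static final Registry INSTANCE = new Registry();

  private Registry() {
    // private constructor: no external instantiation, no second instance
  }

  public static Registry get() {
    return INSTANCE;
  }
}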
@@ -40,7 +40,6 @@
import org.apache.hadoop.hdds.tracing.TracingUtil;
import org.apache.hadoop.hdds.utils.TransactionInfo;
import org.apache.hadoop.hdds.utils.db.BatchOperation;
import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.S3SecretManager;
import org.apache.hadoop.ozone.om.codec.OMDBDefinition;
@@ -478,10 +477,7 @@ private void addCleanupEntry(Entry entry, Map<String, List<Long>> cleanupEpochs)
if (cleanupTableInfo != null) {
final List<String> cleanupTables;
if (cleanupTableInfo.cleanupAll()) {
cleanupTables = new OMDBDefinition().getColumnFamilies()
.stream()
.map(DBColumnFamilyDefinition::getName)
.collect(Collectors.toList());
cleanupTables = OMDBDefinition.get().getColumnFamilyNames();
} else {
cleanupTables = Arrays.asList(cleanupTableInfo.cleanupTables());
}
@@ -439,8 +439,7 @@ public static Status exceptionToResponseStatus(Exception exception) {
*/
public static TransactionInfo getTrxnInfoFromCheckpoint(
OzoneConfiguration conf, Path dbPath) throws Exception {
return HAUtils
.getTrxnInfoFromCheckpoint(conf, dbPath, new OMDBDefinition());
return HAUtils.getTrxnInfoFromCheckpoint(conf, dbPath, OMDBDefinition.get());
}

/**
@@ -45,11 +45,10 @@ public void testDBDefinition() throws Exception {
OzoneConfiguration configuration = new OzoneConfiguration();
File metaDir = folder.toFile();
DBStore store = OmMetadataManagerImpl.loadDB(configuration, metaDir);
OMDBDefinition dbDef = new OMDBDefinition();

// Get list of tables from DB Definitions
final Collection<DBColumnFamilyDefinition<?, ?>> columnFamilyDefinitions
= dbDef.getColumnFamilies();
= OMDBDefinition.get().getColumnFamilies();
final int countOmDefTables = columnFamilyDefinitions.size();
ArrayList<String> missingDBDefTables = new ArrayList<>();

@@ -49,14 +49,12 @@ public class OMDBUpdatesHandler extends ManagedWriteBatch.Handler {
private OMMetadataManager omMetadataManager;
private List<OMDBUpdateEvent> omdbUpdateEvents = new ArrayList<>();
private Map<String, Map<Object, OMDBUpdateEvent>> omdbLatestUpdateEvents = new HashMap<>();
private OMDBDefinition omdbDefinition;
private OmUpdateEventValidator omUpdateEventValidator;
private final OMDBDefinition omdbDefinition = OMDBDefinition.get();
private final OmUpdateEventValidator omUpdateEventValidator = new OmUpdateEventValidator(omdbDefinition);

public OMDBUpdatesHandler(OMMetadataManager metadataManager) {
omMetadataManager = metadataManager;
tablesNames = metadataManager.getStore().getTableNames();
omdbDefinition = new OMDBDefinition();
omUpdateEventValidator = new OmUpdateEventValidator(omdbDefinition);
}

@Override
@@ -68,7 +68,7 @@ public class TestOMDBUpdatesHandler {

private OMMetadataManager omMetadataManager;
private OMMetadataManager reconOmMetadataManager;
private OMDBDefinition omdbDefinition = new OMDBDefinition();
private final OMDBDefinition omdbDefinition = OMDBDefinition.get();
private Random random = new Random();

private OzoneConfiguration createNewTestPath(String folderName)
@@ -53,7 +53,7 @@
public class TestOmUpdateEventValidator {

private OmUpdateEventValidator eventValidator;
private OMDBDefinition omdbDefinition;
private final OMDBDefinition omdbDefinition = OMDBDefinition.get();
private OMMetadataManager omMetadataManager;
private Logger logger;
@TempDir
@@ -63,11 +63,10 @@ public class TestOmUpdateEventValidator {
public void setUp() throws IOException {
omMetadataManager = initializeNewOmMetadataManager(
temporaryFolder.toFile());
omdbDefinition = new OMDBDefinition();
eventValidator = new OmUpdateEventValidator(omdbDefinition);
// Create a mock logger
logger = mock(Logger.class);
eventValidator.setLogger(logger);
OmUpdateEventValidator.setLogger(logger);
}

@Test
@@ -20,7 +20,10 @@

import java.nio.file.Path;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicReference;

import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition;
@@ -48,28 +51,23 @@ public final class DBDefinitionFactory {
private DBDefinitionFactory() {
}

private static HashMap<String, DBDefinition> dbMap;

private static String dnDBSchemaVersion;
private static final AtomicReference<String> DATANODE_DB_SCHEMA_VERSION = new AtomicReference<>();
private static final Map<String, DBDefinition> DB_MAP;

static {
dbMap = new HashMap<>();
Arrays.asList(
new SCMDBDefinition(),
new OMDBDefinition(),
new ReconSCMDBDefinition()
).forEach(dbDefinition -> dbMap.put(dbDefinition.getName(), dbDefinition));
final Map<String, DBDefinition> map = new HashMap<>();
Arrays.asList(new SCMDBDefinition(), OMDBDefinition.get(), new ReconSCMDBDefinition())
.forEach(dbDefinition -> map.put(dbDefinition.getName(), dbDefinition));
DB_MAP = Collections.unmodifiableMap(map);
}

public static DBDefinition getDefinition(String dbName) {
// OM snapshot DB name starts with this prefix.
if (!dbName.equals(OM_DB_NAME) && dbName.startsWith(OM_DB_NAME)) {
dbName = OM_DB_NAME;
}
if (dbMap.containsKey(dbName)) {
return dbMap.get(dbName);
}
return getReconDBDefinition(dbName);
final DBDefinition definition = DB_MAP.get(dbName);
return definition != null ? definition : getReconDBDefinition(dbName);
}

public static DBDefinition getDefinition(Path dbPath,
@@ -83,7 +81,7 @@ public static DBDefinition getDefinition(Path dbPath,
}
String dbName = fileName.toString();
if (dbName.endsWith(OzoneConsts.CONTAINER_DB_SUFFIX)) {
switch (dnDBSchemaVersion) {
switch (DATANODE_DB_SCHEMA_VERSION.get()) {
case "V1":
return new DatanodeSchemaOneDBDefinition(
dbPath.toAbsolutePath().toString(), config);
@@ -102,12 +100,12 @@ private static DBDefinition getReconDBDefinition(String dbName) {
if (dbName.startsWith(RECON_CONTAINER_KEY_DB)) {
return new ReconDBDefinition(dbName);
} else if (dbName.startsWith(RECON_OM_SNAPSHOT_DB)) {
return new OMDBDefinition();
return OMDBDefinition.get();
}
return null;
}

public static void setDnDBSchemaVersion(String dnDBSchemaVersion) {
DBDefinitionFactory.dnDBSchemaVersion = dnDBSchemaVersion;
DATANODE_DB_SCHEMA_VERSION.set(dnDBSchemaVersion);
}
}
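DBDefinitionFactory now builds its name-to-definition map once in a static initializer and exposes it as an unmodifiable map, and the datanode schema version lives in an AtomicReference so the debug tools can set and read it across threads without races. A compact sketch of that structure, using plain JDK types with placeholder values (the real map holds DBDefinition instances keyed by DB name):

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicReference;

public final class FactorySketch {
  private static final AtomicReference<String> SCHEMA_VERSION = new AtomicReference<>("V2");
  private static final Map<String, String> DB_MAP; // placeholder value type for the sketch

  static {
    final Map<String, String> map = new HashMap<>();
    map.put("om.db", "OMDBDefinition");   // placeholder entries
    map.put("scm.db", "SCMDBDefinition");
    DB_MAP = Collections.unmodifiableMap(map); // built once, never mutated afterwards
  }

  static String getDefinition(String dbName) {
    final String definition = DB_MAP.get(dbName); // single lookup instead of containsKey + get
    return definition != null ? definition : "fallback for " + dbName;
  }

  public static void main(String[] args) {
    SCHEMA_VERSION.set("V3"); // may be called from a different thread than the readers
    System.out.println(getDefinition("om.db") + ", datanode schema " + SCHEMA_VERSION.get());
  }
}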
@@ -18,6 +18,7 @@

package org.apache.hadoop.ozone.debug;

import java.nio.file.Path;
import java.nio.file.Paths;

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -43,8 +44,7 @@ public class TestDBDefinitionFactory {

@Test
public void testGetDefinition() {
DBDefinition definition =
DBDefinitionFactory.getDefinition(new OMDBDefinition().getName());
DBDefinition definition = DBDefinitionFactory.getDefinition(OMDBDefinition.get().getName());
assertInstanceOf(OMDBDefinition.class, definition);

definition = DBDefinitionFactory.getDefinition(
@@ -62,20 +62,19 @@ definition = DBDefinitionFactory.getDefinition(
definition = DBDefinitionFactory.getDefinition(
RECON_CONTAINER_KEY_DB + "_1");
assertInstanceOf(ReconDBDefinition.class, definition);

DBDefinitionFactory.setDnDBSchemaVersion("V2");
definition =
DBDefinitionFactory.getDefinition(Paths.get("/tmp/test-container.db"),
new OzoneConfiguration());
final Path dbPath = Paths.get("/tmp/test-container.db");
final OzoneConfiguration conf = new OzoneConfiguration();
definition = DBDefinitionFactory.getDefinition(dbPath, conf);
assertInstanceOf(DatanodeSchemaTwoDBDefinition.class, definition);

DBDefinitionFactory.setDnDBSchemaVersion("V1");
definition =
DBDefinitionFactory.getDefinition(Paths.get("/tmp/test-container.db"),
new OzoneConfiguration());
definition = DBDefinitionFactory.getDefinition(dbPath, conf);
assertInstanceOf(DatanodeSchemaOneDBDefinition.class, definition);

DBDefinitionFactory.setDnDBSchemaVersion("V3");
definition =
DBDefinitionFactory.getDefinition(Paths.get("/tmp/test-container.db"),
new OzoneConfiguration());
definition = DBDefinitionFactory.getDefinition(dbPath, conf);
assertInstanceOf(DatanodeSchemaThreeDBDefinition.class, definition);
}
}