diff --git a/fe/fe-core/src/main/java/org/apache/doris/backup/BackupJob.java b/fe/fe-core/src/main/java/org/apache/doris/backup/BackupJob.java
index b2f2ae56f7df96..947d7f06109612 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/backup/BackupJob.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/backup/BackupJob.java
@@ -77,6 +77,8 @@
 import java.util.Comparator;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
 import java.util.stream.Collectors;
 import java.util.zip.GZIPInputStream;
@@ -132,6 +134,14 @@ public enum BackupJobState {
     @SerializedName("prop")
     private Map<String, String> properties = Maps.newHashMap();
 
+    // Record table IDs that were dropped during backup
+    @SerializedName("dt")
+    private Set<Long> droppedTables = ConcurrentHashMap.newKeySet();
+
+    // Record partition IDs that were dropped during backup (tableId -> set of partitionIds)
+    @SerializedName("dp")
+    private Map<Long, Set<Long>> droppedPartitionsByTable = Maps.newConcurrentMap();
+
     private long commitSeq = 0;
 
     public BackupJob() {
@@ -235,6 +245,39 @@ private synchronized boolean tryNewTabletSnapshotTask(SnapshotTask task) {
         return true;
     }
 
+    private boolean handleTabletMissing(SnapshotTask task) {
+        LOG.info("handleTabletMissing task: {}", task);
+        Table table = env.getInternalCatalog().getTableByTableId(task.getTableId());
+        if (table == null) {
+            // Table was dropped (including the case where the whole database was dropped)
+            droppedTables.add(task.getTableId());
+            LOG.info("table {} marked as dropped during backup. {}", task.getTableId(), this);
+            return true;
+        }
+
+        if (!(table instanceof OlapTable)) {
+            return false;
+        }
+
+        OlapTable olapTable = (OlapTable) table;
+        olapTable.readLock();
+        try {
+            Partition partition = olapTable.getPartition(task.getPartitionId());
+            if (partition == null) {
+                // Partition was dropped or truncated (partition ID changed)
+                droppedPartitionsByTable.computeIfAbsent(task.getTableId(), k -> ConcurrentHashMap.newKeySet())
+                        .add(task.getPartitionId());
+                LOG.info("partition {} from table {} marked as dropped during backup (dropped or truncated). {}",
+                        task.getPartitionId(), task.getTableId(), this);
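+                // A TRUNCATE assigns the partition a new ID, so looking up the stale
+                // ID and getting null covers both the dropped and the truncated case.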
{}", + task.getPartitionId(), task.getTableId(), this); + return true; + } + + // If partition still exists, tablet missing is caused by other reasons + return false; + } finally { + olapTable.readUnlock(); + } + } public synchronized boolean finishTabletSnapshotTask(SnapshotTask task, TFinishTaskRequest request) { Preconditions.checkState(task.getJobId() == jobId); @@ -249,11 +292,21 @@ public synchronized boolean finishTabletSnapshotTask(SnapshotTask task, TFinishT cancelInternal(); } - if (request.getTaskStatus().getStatusCode() == TStatusCode.TABLET_MISSING - && !tryNewTabletSnapshotTask(task)) { - status = new Status(ErrCode.NOT_FOUND, - "make snapshot failed, failed to ge tablet, table will be dropped or truncated"); - cancelInternal(); + if (request.getTaskStatus().getStatusCode() == TStatusCode.TABLET_MISSING) { + if (handleTabletMissing(task)) { + // Successfully handled drop case, remove from task queue + taskProgress.remove(task.getSignature()); + taskErrMsg.remove(task.getSignature()); + Long oldValue = unfinishedTaskIds.remove(task.getSignature()); + return oldValue != null; + } else { + // Not caused by drop, follow original logic + if (!tryNewTabletSnapshotTask(task)) { + status = new Status(ErrCode.NOT_FOUND, + "make snapshot failed, failed to get tablet, table will be dropped or truncated"); + cancelInternal(); + } + } } if (request.getTaskStatus().getStatusCode() == TStatusCode.NOT_IMPLEMENTED_ERROR) { @@ -498,13 +551,18 @@ private void prepareAndSendSnapshotTask() { List copiedTables = Lists.newArrayList(); List copiedResources = Lists.newArrayList(); AgentBatchTask batchTask = new AgentBatchTask(Config.backup_restore_batch_task_num_per_rpc); + // Track if we have any valid tables for backup + boolean hasValidTables = false; for (TableRef tableRef : tableRefs) { String tblName = tableRef.getName().getTbl(); Table tbl = db.getTableNullable(tblName); if (tbl == null) { - status = new Status(ErrCode.NOT_FOUND, "table " + tblName + " does not exist"); - return; + // Table was dropped, skip it and continue with other tables + LOG.info("table {} does not exist, it was dropped during backup preparation, skip it. {}", + tblName, this); + continue; } + hasValidTables = true; tbl.readLock(); try { switch (tbl.getType()) { @@ -538,7 +596,11 @@ private void prepareAndSendSnapshotTask() { return; } } - + // If no valid tables found, cancel the job + if (!hasValidTables) { + status = new Status(ErrCode.NOT_FOUND, "no valid tables found for backup"); + return; + } // Limit the max num of tablets involved in a backup job, to avoid OOM. if (unfinishedTaskIds.size() > Config.max_backup_tablets_per_job) { String msg = String.format("the num involved tablets %d exceeds the limit %d, " @@ -825,6 +887,43 @@ private void waitingAllUploadingFinished() { } } + private void cleanupDroppedTablesAndPartitions() { + if (backupMeta == null) { + return; + } + + // Remove dropped partitions first (before removing tables) + for (Map.Entry> entry : droppedPartitionsByTable.entrySet()) { + Long tableId = entry.getKey(); + Set droppedPartitionIds = entry.getValue(); + + Table table = backupMeta.getTable(tableId); + if (table instanceof OlapTable) { + OlapTable olapTable = (OlapTable) table; + + // Directly get partitions by ID instead of iterating all partitions + for (Long droppedPartitionId : droppedPartitionIds) { + Partition partition = olapTable.getPartition(droppedPartitionId); + if (partition != null) { + LOG.info("remove dropped partition {} from table {} (id: {}) in backup meta. 
{}", + partition.getName(), table.getName(), tableId, this); + olapTable.dropPartitionAndReserveTablet(partition.getName()); + } + } + } + } + + // Remove dropped tables after processing partitions + for (Long tableId : droppedTables) { + Table removedTable = backupMeta.getTable(tableId); + if (removedTable != null) { + LOG.info("remove dropped table {} (id: {}) from backup meta. {}", + removedTable.getName(), tableId, this); + backupMeta.removeTable(tableId); + } + } + } + private void saveMetaInfo(boolean replay) { String createTimeStr = TimeUtils.longToTimeString(createTime, TimeUtils.getDatetimeFormatWithHyphenWithTimeZone()); @@ -846,7 +945,10 @@ private void saveMetaInfo(boolean replay) { return; } - // 2. save meta info file + // 2. Clean up dropped tables and partitions from backup metadata + cleanupDroppedTablesAndPartitions(); + + // 3. save meta info file File metaInfoFile = new File(jobDir, Repository.FILE_META_INFO); if (!metaInfoFile.createNewFile()) { status = new Status(ErrCode.COMMON_ERROR, @@ -856,7 +958,7 @@ private void saveMetaInfo(boolean replay) { backupMeta.writeToFile(metaInfoFile); localMetaInfoFilePath = metaInfoFile.getAbsolutePath(); - // 3. save job info file + // 4. save job info file Map tableCommitSeqMap = Maps.newHashMap(); // iterate properties, convert key, value from string to long // key is "${TABLE_COMMIT_SEQ_PREFIX}{tableId}", only need tableId to long @@ -869,8 +971,21 @@ private void saveMetaInfo(boolean replay) { tableCommitSeqMap.put(tableId, commitSeq); } } + // Filter out snapshot infos for dropped tables and partitions + Map filteredSnapshotInfos = Maps.newHashMap(); + for (Map.Entry entry : snapshotInfos.entrySet()) { + SnapshotInfo info = entry.getValue(); + boolean isDroppedTable = droppedTables.contains(info.getTblId()); + boolean isDroppedPartition = droppedPartitionsByTable.getOrDefault(info.getTblId(), + Collections.emptySet()).contains(info.getPartitionId()); + + if (!isDroppedTable && !isDroppedPartition) { + filteredSnapshotInfos.put(entry.getKey(), info); + } + } + jobInfo = BackupJobInfo.fromCatalog(createTime, label, dbName, dbId, - getContent(), backupMeta, snapshotInfos, tableCommitSeqMap); + getContent(), backupMeta, filteredSnapshotInfos, tableCommitSeqMap); if (LOG.isDebugEnabled()) { LOG.debug("job info: {}. 
{}", jobInfo, this); } @@ -903,6 +1018,10 @@ private void saveMetaInfo(boolean replay) { snapshotInfos.clear(); + // Clean up temporary records to reduce editlog size + droppedPartitionsByTable.clear(); + droppedTables.clear(); + // log env.getEditLog().logBackupJob(this); LOG.info("finished to save meta the backup job info file to local.[{}], [{}] {}", diff --git a/fe/fe-core/src/main/java/org/apache/doris/backup/BackupMeta.java b/fe/fe-core/src/main/java/org/apache/doris/backup/BackupMeta.java index 0f1a043bdada3b..850d6f92e83c0f 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/backup/BackupMeta.java +++ b/fe/fe-core/src/main/java/org/apache/doris/backup/BackupMeta.java @@ -86,6 +86,15 @@ public Table getTable(Long tblId) { return tblIdMap.get(tblId); } + public boolean removeTable(Long tableId) { + Table removedTable = tblIdMap.remove(tableId); + if (removedTable != null) { + tblNameMap.remove(removedTable.getName()); + return true; + } + return false; + } + public static BackupMeta fromFile(String filePath, int metaVersion) throws IOException { return fromInputStream(new FileInputStream(filePath), metaVersion); } diff --git a/fe/fe-core/src/test/java/org/apache/doris/backup/BackupJobTest.java b/fe/fe-core/src/test/java/org/apache/doris/backup/BackupJobTest.java index f067c8f00be964..13469f04df2e23 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/backup/BackupJobTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/backup/BackupJobTest.java @@ -25,6 +25,7 @@ import org.apache.doris.catalog.Env; import org.apache.doris.catalog.FsBroker; import org.apache.doris.catalog.OlapTable; +import org.apache.doris.catalog.Table; import org.apache.doris.common.AnalysisException; import org.apache.doris.common.Config; import org.apache.doris.common.FeConstants; @@ -76,6 +77,7 @@ public class BackupJobTest { private BackupJob job; private Database db; + private OlapTable table2; private long dbId = 1; private long tblId = 2; @@ -85,6 +87,12 @@ public class BackupJobTest { private long backendId = 10000; private long version = 6; + private long tblId2 = 3; + private long partId2 = 4; + private long idxId2 = 5; + private long tabletId2 = 6; + private String table2Name = "testTable2"; + private long repoId = 20000; private AtomicLong id = new AtomicLong(50000); @@ -157,6 +165,10 @@ public void setUp() { Deencapsulation.setField(env, "backupHandler", backupHandler); db = UnitTestUtil.createDb(dbId, tblId, partId, idxId, tabletId, backendId, version); + + // Create second table in setUp to avoid Env initialization issues + table2 = UnitTestUtil.createTable(db, tblId2, table2Name, partId2, idxId2, tabletId2, backendId, version); + catalog = Deencapsulation.newInstance(InternalCatalog.class); new Expectations(env) { { @@ -168,13 +180,26 @@ public void setUp() { minTimes = 0; result = db; + catalog.getTableByTableId(anyLong); + minTimes = 0; + result = new Delegate
+
                 Env.getCurrentEnvJournalVersion();
                 minTimes = 0;
                 result = FeConstants.meta_version;
 
                 env.getNextId();
                 minTimes = 0;
-                result = id.getAndIncrement();
+                result = new Delegate<Long>() {
+                    public Long getNextId() {
+                        return id.getAndIncrement();
+                    }
+                };
 
                 env.getEditLog();
                 minTimes = 0;
@@ -214,6 +239,7 @@ Status getBrokerAddress(Long beId, Env env, List<FsBroker> brokerAddrs) {
             }
         };
 
+        // Only include the first table to ensure the other tests are not affected
         List<TableRef> tableRefs = Lists.newArrayList();
         tableRefs.add(new TableRef(
                 new TableName(InternalCatalog.INTERNAL_CATALOG_NAME, UnitTestUtil.DB_NAME, UnitTestUtil.TABLE_NAME),
@@ -222,9 +248,20 @@ Status getBrokerAddress(Long beId, Env env, List<FsBroker> brokerAddrs) {
                 env, repo.getId(), 0);
     }
 
+    /**
+     * Test the normal backup job execution flow.
+     *
+     * Scenario: back up a single table with all content.
+     * Expected results:
+     * 1. The job progresses through all states:
+     *    PENDING -> SNAPSHOTING -> UPLOAD_SNAPSHOT -> UPLOADING -> SAVE_META -> UPLOAD_INFO -> FINISHED
+     * 2. The backup meta contains the correct table information
+     * 3. Snapshot and upload tasks are created and executed successfully
+     * 4. Meta files are saved and uploaded correctly
+     * 5. The job completes successfully with OK status
+     */
     @Test
     public void testRunNormal() {
-        // 1.pending
+        // 1. pending
         Assert.assertEquals(BackupJobState.PENDING, job.getState());
         job.run();
         Assert.assertEquals(Status.OK, job.getStatus());
@@ -345,9 +382,18 @@ public void testRunNormal() {
         Assert.assertEquals(BackupJobState.FINISHED, job.getState());
     }
 
+    /**
+     * Test backup job execution with a non-existent table.
+     *
+     * Scenario: attempt to back up a table that does not exist.
+     * Expected results:
+     * 1. The job fails with the NOT_FOUND error code
+     * 2. The job state is CANCELLED
+     * 3. No backup tasks are created
+     */
     @Test
     public void testRunAbnormal() {
-        // 1.pending
+        // 1. pending
         AgentTaskQueue.clearAllTasks();
 
         List<TableRef> tableRefs = Lists.newArrayList();
@@ -361,6 +407,188 @@ public void testRunAbnormal() {
         Assert.assertEquals(BackupJobState.CANCELLED, job.getState());
     }
 
+    /**
+     * Test backup job execution with a mix of existing and non-existent tables.
+     *
+     * Scenario: back up two tables - one existing table and one non-existent table.
+     * Expected results:
+     * 1. The job succeeds and proceeds to the SNAPSHOTING state
+     * 2. The backup meta contains only the existing table
+     * 3. Snapshot tasks are created only for the existing table
+     * 4. The non-existent table is skipped without causing job failure
+     */
+    @Test
+    public void testRunAbnormalWithMixedTables() {
+        // Back up two tables: one normal table and one non-existent table.
+        // Verify the backup succeeds, backs up the normal table, and skips the non-existent one.
+        AgentTaskQueue.clearAllTasks();
+
+        List<TableRef> tableRefs = Lists.newArrayList();
+        // Add the normal table
+        tableRefs.add(new TableRef(
+                new TableName(InternalCatalog.INTERNAL_CATALOG_NAME, UnitTestUtil.DB_NAME, UnitTestUtil.TABLE_NAME),
+                null));
+        // Add the non-existent table
+        tableRefs.add(
+                new TableRef(new TableName(InternalCatalog.INTERNAL_CATALOG_NAME, UnitTestUtil.DB_NAME, "unknown_tbl"),
+                        null));
+
+        job = new BackupJob("label", dbId, UnitTestUtil.DB_NAME, tableRefs, 13600 * 1000, BackupStmt.BackupContent.ALL,
+                env, repo.getId(), 0);
+
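+        // The job is expected to skip "unknown_tbl" silently instead of failing.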
+        // 1. pending
+        Assert.assertEquals(BackupJobState.PENDING, job.getState());
+        job.run();
+        Assert.assertEquals(Status.OK, job.getStatus());
+        Assert.assertEquals(BackupJobState.SNAPSHOTING, job.getState());
+
+        // Verify that the backup meta contains only the normal table
+        BackupMeta backupMeta = job.getBackupMeta();
+        Assert.assertEquals(1, backupMeta.getTables().size());
+        OlapTable backupTbl = (OlapTable) backupMeta.getTable(UnitTestUtil.TABLE_NAME);
+        Assert.assertNotNull(backupTbl);
+        Assert.assertNull(backupMeta.getTable("unknown_tbl"));
+
+        // Verify that snapshot tasks are created only for the normal table
+        Assert.assertEquals(1, AgentTaskQueue.getTaskNum());
+        AgentTask task = AgentTaskQueue.getTask(backendId, TTaskType.MAKE_SNAPSHOT, id.get() - 1);
+        Assert.assertTrue(task instanceof SnapshotTask);
+        SnapshotTask snapshotTask = (SnapshotTask) task;
+        Assert.assertEquals(tblId, snapshotTask.getTableId());
+        Assert.assertEquals(dbId, snapshotTask.getDbId());
+        Assert.assertEquals(partId, snapshotTask.getPartitionId());
+        Assert.assertEquals(idxId, snapshotTask.getIndexId());
+        Assert.assertEquals(tabletId, snapshotTask.getTabletId());
+    }
+
+    /**
+     * Test backup job execution when a table is dropped during the SNAPSHOTING phase.
+     *
+     * Scenario: start a backup with two normal tables, then drop one table during the SNAPSHOTING phase.
+     * Expected results:
+     * 1. The job starts with two tables and creates snapshot tasks for both
+     * 2. When one table is dropped during SNAPSHOTING, it is marked as dropped
+     * 3. The backup continues successfully with only the remaining table
+     * 4. The final backup meta contains only the non-dropped table
+     * 5. The job completes successfully in the FINISHED state
+     */
+    @Test
+    public void testRunWithTableDroppedDuringSnapshoting() {
+        try {
+            AgentTaskQueue.clearAllTasks();
+
+            List<TableRef> tableRefs = Lists.newArrayList();
+            tableRefs.add(new TableRef(
+                    new TableName(InternalCatalog.INTERNAL_CATALOG_NAME, UnitTestUtil.DB_NAME, UnitTestUtil.TABLE_NAME),
+                    null));
+            tableRefs.add(new TableRef(
+                    new TableName(InternalCatalog.INTERNAL_CATALOG_NAME, UnitTestUtil.DB_NAME, table2Name),
+                    null));
+
+            job = new BackupJob("label", dbId, UnitTestUtil.DB_NAME, tableRefs, 13600 * 1000,
+                    BackupStmt.BackupContent.ALL, env, repo.getId(), 0);
+
+            // 1. pending - should create snapshot tasks for both tables
+            Assert.assertEquals(BackupJobState.PENDING, job.getState());
+            job.run();
+            Assert.assertEquals(Status.OK, job.getStatus());
+            Assert.assertEquals(BackupJobState.SNAPSHOTING, job.getState());
+
+            // Verify that the backup meta initially contains both tables
+            BackupMeta backupMeta = job.getBackupMeta();
+            Assert.assertEquals(2, backupMeta.getTables().size());
+            Assert.assertNotNull(backupMeta.getTable(UnitTestUtil.TABLE_NAME));
+            Assert.assertNotNull(backupMeta.getTable(table2Name));
+
+            // Verify that snapshot tasks are created for both tables
+            Assert.assertEquals(2, AgentTaskQueue.getTaskNum());
+
+            // 2. Simulate dropping the second table during the SNAPSHOTING phase
+            db.unregisterTable(table2Name);
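+            // From this point on, getTableByTableId(tblId2) resolves to null in the
+            // mocked catalog, driving handleTabletMissing down the dropped-table path.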
+
+            // 3. Finish the snapshot tasks
+            SnapshotTask taskForDroppedTable = null;
+            SnapshotTask taskForExistingTable = null;
+            long taskTabletId1 = AgentTaskQueue.getTask(backendId, TTaskType.MAKE_SNAPSHOT, id.get() - 2)
+                    .getTabletId();
+            if (taskTabletId1 == tabletId) {
+                taskForExistingTable = (SnapshotTask) AgentTaskQueue.getTask(backendId, TTaskType.MAKE_SNAPSHOT,
+                        id.get() - 2);
+                taskForDroppedTable = (SnapshotTask) AgentTaskQueue.getTask(backendId, TTaskType.MAKE_SNAPSHOT,
+                        id.get() - 1);
+            } else {
+                taskForDroppedTable = (SnapshotTask) AgentTaskQueue.getTask(backendId, TTaskType.MAKE_SNAPSHOT,
+                        id.get() - 2);
+                taskForExistingTable = (SnapshotTask) AgentTaskQueue.getTask(backendId, TTaskType.MAKE_SNAPSHOT,
+                        id.get() - 1);
+            }
+
+            TBackend tBackend = new TBackend("", 0, 1);
+
+            // Finish the task for the dropped table
+            TStatus taskStatusMissing = new TStatus(TStatusCode.TABLET_MISSING);
+            taskStatusMissing.setErrorMsgs(Lists.newArrayList("Tablet missing"));
+            TFinishTaskRequest requestMissing = new TFinishTaskRequest(tBackend, TTaskType.MAKE_SNAPSHOT,
+                    taskForDroppedTable.getSignature(), taskStatusMissing);
+            Assert.assertTrue(job.finishTabletSnapshotTask(taskForDroppedTable, requestMissing));
+
+            // Finish the task for the existing table
+            String snapshotPath = "/path/to/snapshot";
+            List<String> snapshotFiles = Lists.newArrayList("1.dat", "1.idx", "1.hdr");
+            TStatus taskStatusOK = new TStatus(TStatusCode.OK);
+            TFinishTaskRequest requestOK = new TFinishTaskRequest(tBackend, TTaskType.MAKE_SNAPSHOT,
+                    taskForExistingTable.getSignature(), taskStatusOK);
+            requestOK.setSnapshotFiles(snapshotFiles);
+            requestOK.setSnapshotPath(snapshotPath);
+            Assert.assertTrue(job.finishTabletSnapshotTask(taskForExistingTable, requestOK));
+
+            // 4. Continue the backup process
+            job.run();
+            Assert.assertEquals(Status.OK, job.getStatus());
+            Assert.assertEquals(BackupJobState.UPLOAD_SNAPSHOT, job.getState());
+
+            AgentTaskQueue.clearAllTasks();
+            job.run(); // UPLOAD_SNAPSHOT -> UPLOADING
+            Assert.assertEquals(1, AgentTaskQueue.getTaskNum());
+            UploadTask upTask = (UploadTask) AgentTaskQueue.getTask(backendId, TTaskType.UPLOAD, id.get() - 1);
+
+            // Finish the upload task
+            Map<Long, List<String>> tabletFileMap = Maps.newHashMap();
+            List<String> tabletFiles = Lists.newArrayList();
+            tabletFiles.add("1.dat.4f158689243a3d6030352fec3cfd3798");
+            tabletFiles.add("1.idx.4f158689243a3d6030352fec3cfd3798");
+            tabletFiles.add("1.hdr.4f158689243a3d6030352fec3cfd3798");
+            tabletFileMap.put(taskForExistingTable.getTabletId(), tabletFiles);
+            TFinishTaskRequest requestUpload = new TFinishTaskRequest(tBackend, TTaskType.UPLOAD,
+                    upTask.getSignature(), taskStatusOK);
+            requestUpload.setTabletFiles(tabletFileMap);
+            Assert.assertTrue(job.finishSnapshotUploadTask(upTask, requestUpload));
+
+            job.run(); // UPLOADING -> SAVE_META
+            Assert.assertEquals(BackupJobState.SAVE_META, job.getState());
+
+            job.run(); // SAVE_META -> UPLOAD_INFO
+            Assert.assertEquals(BackupJobState.UPLOAD_INFO, job.getState());
+
+            job.run(); // UPLOAD_INFO -> FINISHED
+            Assert.assertEquals(BackupJobState.FINISHED, job.getState());
+
+        } catch (Throwable e) {
+            e.printStackTrace();
+            Assert.fail(e.getMessage());
+        } finally {
+            // Clean up: re-register the second table if it was removed
+            if (db.getTableNullable(table2Name) == null && table2 != null) {
+                db.registerTable(table2);
+            }
+        }
+    }
+
+    /**
+     * Test backup job serialization and deserialization.
+     *
+     * Scenario: write the backup job to a file and read it back.
+     * Expected results:
+     * 1. The backup job is successfully written to the file
+     * 2. The backup job is successfully read back from the file
+     * 3. All job properties are preserved across serialization/deserialization
+     * 4. Temporary files are cleaned up
+     */
     @Test
     public void testSerialization() throws IOException, AnalysisException {
         // 1. Write objects to file
diff --git a/fe/fe-core/src/test/java/org/apache/doris/common/util/UnitTestUtil.java b/fe/fe-core/src/test/java/org/apache/doris/common/util/UnitTestUtil.java
index d88d28c140aadb..e73ed0238a7636 100644
--- a/fe/fe-core/src/test/java/org/apache/doris/common/util/UnitTestUtil.java
+++ b/fe/fe-core/src/test/java/org/apache/doris/common/util/UnitTestUtil.java
@@ -61,8 +61,14 @@ public class UnitTestUtil {
 
     public static Database createDb(long dbId, long tableId, long partitionId, long indexId,
                                     long tabletId, long backendId, long version) {
-        // Catalog.getCurrentInvertedIndex().clear();
+        Database db = new Database(dbId, DB_NAME);
+        createTable(db, tableId, TABLE_NAME, partitionId, indexId, tabletId, backendId, version);
+
+        return db;
+    }
+
+    public static OlapTable createTable(Database db, long tableId, String tableName, long partitionId, long indexId,
+                                        long tabletId, long backendId, long version) {
         // replica
         long replicaId = 0;
         Replica replica1 = new Replica(replicaId, backendId, ReplicaState.NORMAL, version, 0);
@@ -74,7 +80,7 @@ public static Database createDb(long dbId, long tableId, long partitionId, long
 
         // index
         MaterializedIndex index = new MaterializedIndex(indexId, IndexState.NORMAL);
-        TabletMeta tabletMeta = new TabletMeta(dbId, tableId, partitionId, indexId, 0, TStorageMedium.HDD);
+        TabletMeta tabletMeta = new TabletMeta(db.getId(), tableId, partitionId, indexId, 0, TStorageMedium.HDD);
         index.addTablet(tablet, tabletMeta);
         tablet.addReplica(replica1);
@@ -110,17 +116,15 @@ public static Database createDb(long dbId, long tableId, long partitionId, long
         partitionInfo.setIsInMemory(partitionId, false);
         partitionInfo.setIsMutable(partitionId, true);
         partitionInfo.setTabletType(partitionId, TTabletType.TABLET_TYPE_DISK);
-        OlapTable table = new OlapTable(tableId, TABLE_NAME, columns,
+        OlapTable table = new OlapTable(tableId, tableName, columns,
                 KeysType.AGG_KEYS, partitionInfo, distributionInfo);
         Deencapsulation.setField(table, "baseIndexId", indexId);
         table.addPartition(partition);
-        table.setIndexMeta(indexId, TABLE_NAME, columns, 0, SCHEMA_HASH, (short) 1, TStorageType.COLUMN,
+        table.setIndexMeta(indexId, tableName, columns, 0, SCHEMA_HASH, (short) 1, TStorageType.COLUMN,
                 KeysType.AGG_KEYS);
-        // db
-        Database db = new Database(dbId, DB_NAME);
         db.registerTable(table);
-        return db;
+        return table;
     }
 
     public static Backend createBackend(long id, String host, int heartPort, int bePort, int httpPort) {