@@ -1190,6 +1190,15 @@ public class ConfigOptions {
+ "The `table.datalake.format` can be pre-defined before enabling `table.datalake.enabled`. This allows the data lake feature to be dynamically enabled on the table without requiring table recreation. "
+ "If `table.datalake.format` is not explicitly set during table creation, the table will default to the format specified by the `datalake.format` configuration in the Fluss cluster.");

public static final ConfigOption<Duration> TABLE_DATALAKE_FRESHNESS =
key("table.datalake.freshness")
.durationType()
.defaultValue(Duration.ofMinutes(3))
.withDescription(
"It defines the maximum amount of time that the datalake table's content should lag behind updates to the Fluss table. "
+ "Based on this target freshness, the Fluss service automatically moves data from the Fluss table and updates to the datalake table, so that the data in the datalake table is kept up to date within this target. "
+ "If the data does not need to be as fresh, you can specify a longer target freshness time to reduce costs.");

public static final ConfigOption<MergeEngineType> TABLE_MERGE_ENGINE =
key("table.merge-engine")
.enumType(MergeEngineType.class)
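The freshness option composes with the two datalake options referenced in the description above. A minimal sketch of declaring them together, assuming a plain properties map in place of whatever client API actually carries table properties (only the property keys come from ConfigOptions; the format value and surrounding code are illustrative):

import java.util.LinkedHashMap;
import java.util.Map;

// Sketch only: the three keys mirror ConfigOptions; the client plumbing is assumed.
public class DatalakeTablePropertiesExample {
    public static void main(String[] args) {
        Map<String, String> tableProperties = new LinkedHashMap<>();
        // Pre-defining the format lets tiering be toggled on later without
        // recreating the table.
        tableProperties.put("table.datalake.format", "paimon"); // assumed format value
        tableProperties.put("table.datalake.enabled", "true");
        // The lake copy may lag at most 30 seconds behind the Fluss table;
        // a longer target (e.g. "10min") trades freshness for lower cost.
        tableProperties.put("table.datalake.freshness", "30s");
        tableProperties.forEach((key, value) -> System.out.println(key + " = " + value));
    }
}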
@@ -24,6 +24,7 @@
import com.alibaba.fluss.metadata.MergeEngineType;
import com.alibaba.fluss.utils.AutoPartitionStrategy;

import java.time.Duration;
import java.util.Optional;

/**
@@ -84,6 +85,14 @@ public Optional<DataLakeFormat> getDataLakeFormat() {
return config.getOptional(ConfigOptions.TABLE_DATALAKE_FORMAT);
}

/**
* Gets the data lake freshness of the table. It defines the maximum amount of time that the
* datalake table's content should lag behind updates to the Fluss table.
*/
public Duration getDataLakeFreshness() {
return config.get(ConfigOptions.TABLE_DATALAKE_FRESHNESS);
}

/** Gets the optional merge engine type of the table. */
public Optional<MergeEngineType> getMergeEngineType() {
return config.getOptional(ConfigOptions.TABLE_MERGE_ENGINE);
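The getter hands downstream components the lag budget for a table. A self-contained sketch of how a caller might turn that budget into a "tiering due" check (the class and method names here are assumptions; only getDataLakeFreshness() and its Duration semantics come from this PR):

import java.time.Duration;

// Minimal sketch, not Fluss code: decide whether a lake table is due for
// tiering given the freshness target obtained via getDataLakeFreshness().
final class TieringDueCheck {

    static boolean isTieringDue(long lastLakeSnapshotMillis, Duration freshness, long nowMillis) {
        // The lake copy may lag at most `freshness` behind the Fluss table;
        // once that budget is spent, the next tiering round is due.
        return nowMillis - lastLakeSnapshotMillis >= freshness.toMillis();
    }

    public static void main(String[] args) {
        Duration freshness = Duration.ofMinutes(3); // the option's default
        long lastSnapshot = System.currentTimeMillis() - Duration.ofMinutes(5).toMillis();
        System.out.println(isTieringDue(lastSnapshot, freshness, System.currentTimeMillis()));
        // prints true: 5 minutes of lag exceeds the 3-minute target
    }
}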
@@ -0,0 +1,25 @@
/*
* Copyright (c) 2025 Alibaba Group Holding Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package com.alibaba.fluss.exception;

/** Exception thrown when the tiering epoch is invalid. */
public class FencedTieringEpochException extends ApiException {

public FencedTieringEpochException(String message) {
super(message);
}
}
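Epoch fencing rejects requests that carry a stale epoch so that only the latest tiering round can proceed. A hypothetical usage sketch (the validator class and its parameters are assumptions; only FencedTieringEpochException is from this PR):

// Hypothetical sketch; only FencedTieringEpochException comes from the PR.
final class TieringEpochValidator {

    static void validateTieringEpoch(long requestEpoch, long currentEpoch) {
        if (requestEpoch != currentEpoch) {
            // A stale epoch means the caller was fenced by a newer tiering
            // round and must not be allowed to commit its work.
            throw new FencedTieringEpochException(
                    "Tiering epoch " + requestEpoch
                            + " is fenced; current tiering epoch is " + currentEpoch);
        }
    }
}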
@@ -26,6 +26,7 @@
import com.alibaba.fluss.exception.DatabaseNotExistException;
import com.alibaba.fluss.exception.DuplicateSequenceException;
import com.alibaba.fluss.exception.FencedLeaderEpochException;
import com.alibaba.fluss.exception.FencedTieringEpochException;
import com.alibaba.fluss.exception.InvalidColumnProjectionException;
import com.alibaba.fluss.exception.InvalidConfigException;
import com.alibaba.fluss.exception.InvalidCoordinatorException;
@@ -199,7 +200,9 @@ public enum Errors {
SECURITY_DISABLED_EXCEPTION(47, "Security is disabled.", SecurityDisabledException::new),
AUTHORIZATION_EXCEPTION(48, "Authorization failed", AuthorizationException::new),
BUCKET_MAX_NUM_EXCEPTION(
49, "Exceed the maximum number of buckets", TooManyBucketsException::new);
49, "Exceed the maximum number of buckets", TooManyBucketsException::new),
FENCED_TIERING_EPOCH_EXCEPTION(
50, "The tiering epoch is invalid.", FencedTieringEpochException::new);

private static final Logger LOG = LoggerFactory.getLogger(Errors.class);

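Each enum constant binds a numeric wire code and a default message to an exception constructor reference, which is how FencedTieringEpochException::new slots in as entry 50. A standalone illustration of that pattern (invented names; not the Fluss Errors API itself):

import java.util.function.Function;

// Standalone illustration of the code -> exception mapping pattern; only the
// code/message pair mirrors the diff, the rest is invented for the example.
enum ErrorMappingSketch {
    FENCED_TIERING_EPOCH(50, "The tiering epoch is invalid.", IllegalStateException::new);

    private final int code;
    private final String defaultMessage;
    private final Function<String, ? extends RuntimeException> builder;

    ErrorMappingSketch(
            int code, String defaultMessage, Function<String, ? extends RuntimeException> builder) {
        this.code = code;
        this.defaultMessage = defaultMessage;
        this.builder = builder;
    }

    int code() {
        return code;
    }

    // Rebuild the exception on the receiving side from the code carried in a
    // response, falling back to the default message when none is supplied.
    RuntimeException exception(String messageFromResponse) {
        return builder.apply(messageFromResponse != null ? messageFromResponse : defaultMessage);
    }
}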
@@ -124,6 +124,7 @@ public class CoordinatorEventProcessor implements EventProcessor {
private final MetadataManager metadataManager;
private final TableManager tableManager;
private final AutoPartitionManager autoPartitionManager;
private final LakeTableTieringManager lakeTableTieringManager;
private final TableChangeWatcher tableChangeWatcher;
private final CoordinatorChannelManager coordinatorChannelManager;
private final TabletServerChangeWatcher tabletServerChangeWatcher;
@@ -151,6 +152,7 @@ public CoordinatorEventProcessor(
ServerMetadataCache serverMetadataCache,
CoordinatorChannelManager coordinatorChannelManager,
AutoPartitionManager autoPartitionManager,
LakeTableTieringManager lakeTableTieringManager,
CoordinatorMetricGroup coordinatorMetricGroup,
Configuration conf,
ExecutorService ioExecutor) {
@@ -160,6 +162,7 @@ public CoordinatorEventProcessor(
coordinatorChannelManager,
new CoordinatorContext(),
autoPartitionManager,
lakeTableTieringManager,
coordinatorMetricGroup,
conf,
ioExecutor);
@@ -171,6 +174,7 @@ public CoordinatorEventProcessor(
CoordinatorChannelManager coordinatorChannelManager,
CoordinatorContext coordinatorContext,
AutoPartitionManager autoPartitionManager,
LakeTableTieringManager lakeTableTieringManager,
CoordinatorMetricGroup coordinatorMetricGroup,
Configuration conf,
ExecutorService ioExecutor) {
@@ -211,6 +215,7 @@ public CoordinatorEventProcessor(
ioExecutor,
zooKeeperClient);
this.autoPartitionManager = autoPartitionManager;
this.lakeTableTieringManager = lakeTableTieringManager;
this.coordinatorMetricGroup = coordinatorMetricGroup;
this.internalListenerName = conf.getString(ConfigOptions.INTERNAL_LISTENER_NAME);
registerMetrics();
@@ -326,13 +331,18 @@ private void initCoordinatorContext() throws Exception {

// load all tables
List<TableInfo> autoPartitionTables = new ArrayList<>();
List<Tuple2<TableInfo, Long>> lakeTables = new ArrayList<>();
for (String database : metadataManager.listDatabases()) {
for (String tableName : metadataManager.listTables(database)) {
TablePath tablePath = TablePath.of(database, tableName);
TableInfo tableInfo = metadataManager.getTable(tablePath);
coordinatorContext.putTablePath(tableInfo.getTableId(), tablePath);
coordinatorContext.putTableInfo(tableInfo);

if (tableInfo.getTableConfig().isDataLakeEnabled()) {
// always set to current time,
// todo: should get from the last lake snapshot
lakeTables.add(Tuple2.of(tableInfo, System.currentTimeMillis()));
}
if (tableInfo.isPartitioned()) {
Map<String, Long> partitions =
zooKeeperClient.getPartitionNameAndIds(tablePath);
@@ -351,6 +361,7 @@ private void initCoordinatorContext() throws Exception {
}
}
autoPartitionManager.initAutoPartitionTables(autoPartitionTables);
lakeTableTieringManager.initWithLakeTables(lakeTables);

// load all assignment
loadTableAssignment();
@@ -550,6 +561,9 @@ private void processCreateTable(CreateTableEvent createTableEvent) {
if (createTableEvent.isAutoPartitionTable()) {
autoPartitionManager.addAutoPartitionTable(tableInfo, true);
}
if (tableInfo.getTableConfig().isDataLakeEnabled()) {
lakeTableTieringManager.addNewLakeTable(tableInfo);
}
}

private void processCreatePartition(CreatePartitionEvent createPartitionEvent) {
@@ -585,6 +599,9 @@ private void processDropTable(DropTableEvent dropTableEvent) {
if (dropTableEvent.isAutoPartitionTable()) {
autoPartitionManager.removeAutoPartitionTable(dropTableEvent.getTableId());
}
if (dropTableEvent.isDataLakeEnabled()) {
lakeTableTieringManager.removeLakeTable(dropTableEvent.getTableId());
}
}

private void processDropPartition(DropPartitionEvent dropPartitionEvent) {
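The processor touches the tiering manager at exactly four points visible above: bulk initialization when the coordinator context loads, registration on table creation, deregistration on drop, and (in CoordinatorServer below) close on shutdown. A sketch of that surface with simplified signatures (the PR passes TableInfo and Tuple2 rather than bare table ids, and its real internals are not shown in this diff):

import java.util.HashMap;
import java.util.Map;

// Sketch of the surface implied by the call sites above; signatures are
// simplified and the internals are assumptions, not the PR's implementation.
final class LakeTableTieringManagerSketch implements AutoCloseable {

    // tableId -> last known lake snapshot time. The diff seeds this with
    // System.currentTimeMillis() and carries a TODO to read the real
    // last-snapshot time instead.
    private final Map<Long, Long> lastLakeSnapshotTimes = new HashMap<>();

    /** Bulk registration when the coordinator loads existing tables. */
    synchronized void initWithLakeTables(Map<Long, Long> tablesWithSnapshotTime) {
        lastLakeSnapshotTimes.putAll(tablesWithSnapshotTime);
    }

    /** A freshly created lake table has no snapshot yet; start the clock now. */
    synchronized void addNewLakeTable(long tableId) {
        lastLakeSnapshotTimes.put(tableId, System.currentTimeMillis());
    }

    /** Dropped tables must no longer be scheduled for tiering. */
    synchronized void removeLakeTable(long tableId) {
        lastLakeSnapshotTimes.remove(tableId);
    }

    @Override
    public synchronized void close() {
        lastLakeSnapshotTimes.clear();
    }
}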
@@ -127,6 +127,9 @@ public class CoordinatorServer extends ServerBase {
@GuardedBy("lock")
private AutoPartitionManager autoPartitionManager;

@GuardedBy("lock")
private LakeTableTieringManager lakeTableTieringManager;

@GuardedBy("lock")
private ExecutorService ioExecutor;

@@ -172,6 +175,8 @@ protected void startServices() throws Exception {
authorizer.startup();
}

this.lakeTableTieringManager = new LakeTableTieringManager();

MetadataManager metadataManager = new MetadataManager(zkClient, conf);
this.coordinatorService =
new CoordinatorService(
@@ -221,6 +226,7 @@ protected void startServices() throws Exception {
metadataCache,
coordinatorChannelManager,
autoPartitionManager,
lakeTableTieringManager,
serverMetricGroup,
conf,
ioExecutor);
@@ -366,6 +372,14 @@ CompletableFuture<Void> stopServices() {
exception = ExceptionUtils.firstOrSuppressed(t, exception);
}

try {
if (lakeTableTieringManager != null) {
lakeTableTieringManager.close();
}
} catch (Throwable t) {
exception = ExceptionUtils.firstOrSuppressed(t, exception);
}

try {
if (zkClient != null) {
zkClient.close();
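The close sequence above aggregates failures with the first-or-suppressed pattern: the first throwable becomes the primary error and later ones are attached as suppressed, so every component still gets its close() call. A minimal sketch of the helper, assuming Flink-style ExceptionUtils semantics rather than quoting Fluss's exact implementation:

// Minimal sketch of the first-or-suppressed aggregation pattern, assuming
// Flink-style semantics; not necessarily Fluss's exact implementation.
final class ShutdownExceptionUtil {

    static Throwable firstOrSuppressed(Throwable newThrowable, Throwable previous) {
        if (previous == null) {
            return newThrowable; // the first failure becomes the primary error
        }
        previous.addSuppressed(newThrowable); // later failures ride along
        return previous;
    }
}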