api/src/main/java/org/apache/iceberg/BatchScanAdapter.java (4 changes: 2 additions & 2 deletions)
@@ -25,11 +25,11 @@
import org.apache.iceberg.metrics.MetricsReporter;

/** An adapter that allows using {@link TableScan} as {@link BatchScan}. */
-class BatchScanAdapter implements BatchScan {
+public class BatchScanAdapter implements BatchScan {

private final TableScan scan;

-BatchScanAdapter(TableScan scan) {
+public BatchScanAdapter(TableScan scan) {
this.scan = scan;
}

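Note: widening the class and its constructor to public is what allows code outside the org.apache.iceberg package, such as RESTTable in core's org.apache.iceberg.rest, to construct the adapter. A minimal sketch of that usage, with the table wiring assumed rather than taken from this change:

import org.apache.iceberg.BatchScan;
import org.apache.iceberg.BatchScanAdapter;
import org.apache.iceberg.Table;
import org.apache.iceberg.TableScan;

class BatchScanAdapterSketch {
  // Wrap a classic TableScan so callers can drive it through the BatchScan API.
  static BatchScan asBatchScan(Table table) {
    TableScan scan = table.newScan();
    return new BatchScanAdapter(scan); // now reachable from other packages
  }
}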
core/src/main/java/org/apache/iceberg/rest/RESTTable.java (7 changes: 7 additions & 0 deletions)
@@ -22,6 +22,8 @@
import java.util.Set;
import java.util.function.Supplier;
import org.apache.iceberg.BaseTable;
+import org.apache.iceberg.BatchScan;
+import org.apache.iceberg.BatchScanAdapter;
import org.apache.iceberg.ImmutableTableScanContext;
import org.apache.iceberg.TableOperations;
import org.apache.iceberg.TableScan;
@@ -67,4 +69,9 @@ public TableScan newScan() {
resourcePaths,
supportedEndpoints);
}

+@Override
+public BatchScan newBatchScan() {
+  return new BatchScanAdapter(newScan());
+}
}
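With this override, a client holding a RESTTable can plan files through the BatchScan API, which the adapter delegates to the regular TableScan. A consumer-side sketch mirroring the test added below; the catalog loading and the printDataFileLocations helper are assumptions, only newBatchScan() and planFiles() come from the change:

import java.io.IOException;
import org.apache.iceberg.ScanTask;
import org.apache.iceberg.Table;
import org.apache.iceberg.io.CloseableIterable;

class RestBatchScanSketch {
  // 'table' is assumed to be a RESTTable obtained from a REST catalog.
  static void printDataFileLocations(Table table) throws IOException {
    try (CloseableIterable<ScanTask> tasks = table.newBatchScan().planFiles()) {
      for (ScanTask task : tasks) {
        if (task.isFileScanTask()) {
          System.out.println(task.asFileScanTask().file().location());
        }
      }
    }
  }
}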
@@ -30,7 +30,6 @@
import static org.apache.iceberg.TestBase.SPEC;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatThrownBy;
-import static org.assertj.core.api.Assumptions.assumeThat;

import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
@@ -58,6 +57,7 @@
import org.apache.iceberg.MetadataTableUtils;
import org.apache.iceberg.PartitionSpec;
import org.apache.iceberg.Scan;
+import org.apache.iceberg.ScanTask;
import org.apache.iceberg.Table;
import org.apache.iceberg.TableScan;
import org.apache.iceberg.catalog.Namespace;
@@ -348,6 +348,25 @@ void scanPlanningWithAllTasksInSingleResponse(
}
}

+@ParameterizedTest
+@EnumSource(PlanningMode.class)
+void scanPlanningWithBatchScan(
+    Function<TestPlanningBehavior.Builder, TestPlanningBehavior.Builder> planMode)
+    throws IOException {
+  configurePlanningBehavior(planMode);
+  Table table = restTableFor(scanPlanningCatalog(), "batch_scan_table");
+  setParserContext(table);
+
+  // Verify actual data file is returned with correct count
+  try (CloseableIterable<ScanTask> iterable = table.newBatchScan().planFiles()) {
+    List<ScanTask> tasks = Lists.newArrayList(iterable);
+
+    assertThat(tasks).hasSize(1);
+    assertThat(tasks.get(0).asFileScanTask().file().location()).isEqualTo(FILE_A.location());
+    assertThat(tasks.get(0).asFileScanTask().deletes()).isEmpty();
+  }
+}

@Test
public void nestedPlanTaskPagination() throws IOException {
// Configure: synchronous planning with very small pages (creates nested plan task structure)
@@ -409,11 +428,7 @@ public void iterableCloseTriggersCancel() throws IOException {

@ParameterizedTest
@EnumSource(MetadataTableType.class)
-public void metadataTablesWithRemotePlanning(MetadataTableType type) throws IOException {
-  assumeThat(type)
-      .as("POSITION_DELETES table does not implement newScan() method")
-      .isNotEqualTo(MetadataTableType.POSITION_DELETES);
-
+public void metadataTablesWithRemotePlanning(MetadataTableType type) {
configurePlanningBehavior(TestPlanningBehavior.Builder::synchronous);
RESTTable table = restTableFor(scanPlanningCatalog(), "metadata_tables_test");
table.newAppend().appendFile(FILE_B).commit();
@@ -424,7 +439,12 @@ public void metadataTablesWithRemotePlanning(MetadataTableType type) throws IOException {
// tasks, this test just verifies that metadata tables can be scanned with RESTTable.
Table metadataTableInstance = MetadataTableUtils.createMetadataTableInstance(table, type);
assertThat(metadataTableInstance).isNotNull();
-assertThat(metadataTableInstance.newScan().planFiles()).isNotEmpty();
+if (type.equals(MetadataTableType.POSITION_DELETES)) {
+  // Position deletes table only uses batch scan
+  assertThat(metadataTableInstance.newBatchScan().planFiles()).isNotEmpty();
+} else {
+  assertThat(metadataTableInstance.newScan().planFiles()).isNotEmpty();
+}
}

@ParameterizedTest