SNOW-902709 Limit the max allowed number of chunks in blob
1 parent 17d029e, commit c6dd85c
Showing 7 changed files with 456 additions and 48 deletions.
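This commit caps how many table chunks may be packed into a single blob (MAX_CHUNKS_IN_BLOB) and into a single blob-registration request (MAX_CHUNKS_IN_REGISTRATION_REQUEST), so a client ingesting into many tables at once cuts its uploads into several smaller blobs and requests. A minimal sketch of the cutting idea, assuming a hypothetical BlobCutter helper over a generic chunk list (the names are illustrative, not the SDK's actual internals):

import java.util.ArrayList;
import java.util.List;

// Hypothetical sketch: partition pending chunks into blob-sized groups so
// that no blob carries more than the configured maximum number of chunks.
final class BlobCutter {
  static <T> List<List<T>> cutIntoBlobs(List<T> pendingChunks, int maxChunksInBlob) {
    List<List<T>> blobs = new ArrayList<>();
    for (int i = 0; i < pendingChunks.size(); i += maxChunksInBlob) {
      // Each sublist becomes one blob with at most maxChunksInBlob chunks in it
      blobs.add(new ArrayList<>(
          pendingChunks.subList(i, Math.min(i + maxChunksInBlob, pendingChunks.size()))));
    }
    return blobs;
  }
}

With MAX_CHUNKS_IN_BLOB set to 2, as in the test below, data buffered for 100 tables is cut into on the order of 50 blobs per flush cycle, and the resulting blob metadata is likewise spread over several registration requests.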
src/test/java/net/snowflake/ingest/streaming/internal/ManyTablesIT.java (99 additions, 0 deletions)
@@ -0,0 +1,99 @@
package net.snowflake.ingest.streaming.internal;

import static net.snowflake.ingest.utils.Constants.ROLE;

import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.Collections;
import java.util.Map;
import java.util.Properties;
import net.snowflake.ingest.TestUtils;
import net.snowflake.ingest.streaming.OpenChannelRequest;
import net.snowflake.ingest.streaming.SnowflakeStreamingIngestChannel;
import net.snowflake.ingest.streaming.SnowflakeStreamingIngestClient;
import net.snowflake.ingest.streaming.SnowflakeStreamingIngestClientFactory;
import net.snowflake.ingest.utils.Constants;
import net.snowflake.ingest.utils.ParameterProvider;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;

/**
 * Verifies that ingestion works when we ingest into a large number of tables from the same client
 * and blobs and registration requests have to be cut so that they don't contain a large number of
 * chunks.
 */
public class ManyTablesIT {

  private static final int TABLES_COUNT = 100;
  private static final int TOTAL_ROWS_COUNT = 20_000;
  private String dbName;
  private SnowflakeStreamingIngestClient client;
  private Connection connection;
  private SnowflakeStreamingIngestChannel[] channels;
  private String[] offsetTokensPerChannel;

  @Before
  public void setUp() throws Exception {
    Properties props = TestUtils.getProperties(Constants.BdecVersion.THREE, false);
    // Low limits force blobs and registration requests to be cut into multiple pieces
    props.put(ParameterProvider.MAX_CHUNKS_IN_BLOB, 2);
    props.put(ParameterProvider.MAX_CHUNKS_IN_REGISTRATION_REQUEST, 3);
    if (props.getProperty(ROLE).equals("DEFAULT_ROLE")) {
      props.setProperty(ROLE, "ACCOUNTADMIN");
    }
    client = SnowflakeStreamingIngestClientFactory.builder("client1").setProperties(props).build();
    connection = TestUtils.getConnection(true);
    dbName = String.format("sdk_it_many_tables_db_%d", System.nanoTime());

    channels = new SnowflakeStreamingIngestChannel[TABLES_COUNT];
    offsetTokensPerChannel = new String[TABLES_COUNT];
    connection.createStatement().execute(String.format("create database %s;", dbName));

    // One table and one channel per index, all owned by the same client
    String[] tableNames = new String[TABLES_COUNT];
    for (int i = 0; i < tableNames.length; i++) {
      tableNames[i] = String.format("table_%d", i);
      connection.createStatement().execute(String.format("create table table_%d(c int);", i));
      channels[i] =
          client.openChannel(
              OpenChannelRequest.builder(String.format("channel-%d", i))
                  .setDBName(dbName)
                  .setSchemaName("public")
                  .setTableName(tableNames[i])
                  .setOnErrorOption(OpenChannelRequest.OnErrorOption.ABORT)
                  .build());
    }
  }

  @After
  public void tearDown() throws Exception {
    connection.createStatement().execute(String.format("drop database %s;", dbName));
    client.close();
    connection.close();
  }

  @Test
  public void testIngestionIntoManyTables() throws InterruptedException, SQLException {
    // Distribute rows round-robin across all channels, remembering the last
    // offset token handed to each channel
    for (int i = 0; i < TOTAL_ROWS_COUNT; i++) {
      Map<String, Object> row = Collections.singletonMap("c", i);
      String offset = String.valueOf(i);
      int channelId = i % channels.length;
      channels[channelId].insertRow(row, offset);
      offsetTokensPerChannel[channelId] = offset;
    }

    for (int i = 0; i < channels.length; i++) {
      TestUtils.waitForOffset(channels[i], offsetTokensPerChannel[i]);
    }

    // Sum the ingested row counts reported by SHOW TABLES across the database
    int totalRowsCount = 0;
    ResultSet rs =
        connection
            .createStatement()
            .executeQuery(String.format("show tables in database %s;", dbName));
    while (rs.next()) {
      totalRowsCount += rs.getInt("rows");
    }
    Assert.assertEquals(TOTAL_ROWS_COUNT, totalRowsCount);
  }
}
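For reference, a client outside of tests would presumably opt into the same limits by setting these parameters on the properties used to build it; a minimal sketch, assuming props already carries the usual connection settings:

// Assumed: props already contains account, user, key, role, etc.
props.put(ParameterProvider.MAX_CHUNKS_IN_BLOB, 2);
props.put(ParameterProvider.MAX_CHUNKS_IN_REGISTRATION_REQUEST, 3);
SnowflakeStreamingIngestClient client =
    SnowflakeStreamingIngestClientFactory.builder("my_client").setProperties(props).build();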