diff --git a/src/main/java/com/rockset/jdbc/RocksetStatement.java b/src/main/java/com/rockset/jdbc/RocksetStatement.java
index ecf3fcd1..8e4dd2b5 100644
--- a/src/main/java/com/rockset/jdbc/RocksetStatement.java
+++ b/src/main/java/com/rockset/jdbc/RocksetStatement.java
@@ -185,6 +185,11 @@ private static String getQueryIdFromQueryResponse(QueryResponse response) {
   protected boolean executeWithParams(String sql, List params) throws SQLException {
     clearCurrentResults();
     checkOpen();
+    final String schema = this.connection.get().getSchema();
+
+    final String sqlWithWorkspace = schema.equals(RocksetConnection.DEFAULT_SCHEMA)
+        ? sql
+        : String.format("OPTION(default_workspace='%s')\n %s", schema, sql);
 
     ResultSet resultSet = null;
     try {
@@ -192,12 +197,12 @@ protected boolean executeWithParams(String sql, List params) thr
       // because rockset queries do not yet have a client-side timeout.
       QueryResponse resp =
           connection()
-              .startQuery(sql, this.fetchSize.get(), params, getStatementSessionProperties());
+              .startQuery(sqlWithWorkspace, this.fetchSize.get(), params, getStatementSessionProperties());
 
       // store resuts in memory
       resultSet =
           new RocksetResultSet(
-              sql,
+              sqlWithWorkspace,
               resp,
               this.maxRows.get(),
               RocksetResultSetPaginationParams.builder()
@@ -210,7 +215,7 @@ protected boolean executeWithParams(String sql, List params) thr
       this.currentResult.set(resultSet);
       return true;
     } catch (RuntimeException e) {
-      String msg = "Error executing query '" + sql + "'" + " error = " + e.getMessage();
+      String msg = "Error executing query '" + sqlWithWorkspace + "'" + " error = " + e.getMessage();
       RocksetDriver.log(msg);
       throw new SQLException(msg, e);
     } catch (Exception e) {
diff --git a/src/test/java/com/rockset/client/TestWorkspace.java b/src/test/java/com/rockset/client/TestWorkspace.java
index 469ee75d..acfd523d 100644
--- a/src/test/java/com/rockset/client/TestWorkspace.java
+++ b/src/test/java/com/rockset/client/TestWorkspace.java
@@ -1,6 +1,8 @@
 package com.rockset.client;
 
 import com.rockset.client.model.*;
+
+import java.sql.Time;
 import java.util.concurrent.Callable;
 import java.util.concurrent.TimeUnit;
 import org.apache.commons.lang3.RandomStringUtils;
@@ -60,7 +62,8 @@ public void testDeleteWorkspace() throws Exception {
 
     // wait for collection to go away
     Awaitility.await("Waiting for collection to be cleaned up ")
-        .atMost(60, TimeUnit.SECONDS)
+        .atMost(120, TimeUnit.SECONDS)
+        .pollInterval(1, TimeUnit.SECONDS)
         .until(
             (Callable) () -> {
@@ -70,7 +73,6 @@ public void testDeleteWorkspace() throws Exception {
               } catch (Exception e) {
                 return true; // collection deleted
               }
-              Thread.sleep(1000);
               return false;
             });
 
diff --git a/src/test/java/com/rockset/jdbc/TestSchema.java b/src/test/java/com/rockset/jdbc/TestSchema.java
index 5fa90197..3a6580fc 100644
--- a/src/test/java/com/rockset/jdbc/TestSchema.java
+++ b/src/test/java/com/rockset/jdbc/TestSchema.java
@@ -4,14 +4,19 @@
 
 import com.fasterxml.jackson.databind.JsonNode;
 import com.fasterxml.jackson.databind.ObjectMapper;
+import com.rockset.client.ApiException;
 import com.rockset.client.RocksetClient;
 import com.rockset.client.model.Collection;
 import com.rockset.client.model.CreateCollectionRequest;
 import com.rockset.client.model.CreateCollectionResponse;
+import com.rockset.client.model.CreateWorkspaceRequest;
 import com.rockset.client.model.DeleteCollectionResponse;
+import com.rockset.client.model.DeleteWorkspaceResponse;
+import com.rockset.client.model.ListWorkspacesResponse;
 import com.rockset.client.model.QueryRequest;
 import 
com.rockset.client.model.QueryRequestSql; import com.rockset.client.model.QueryResponse; + import java.io.File; import java.io.IOException; import java.sql.Connection; @@ -27,6 +32,11 @@ import java.util.List; import java.util.Properties; import java.util.Set; +import java.util.concurrent.Callable; +import java.util.concurrent.TimeUnit; + +import com.rockset.client.model.Workspace; +import io.swagger.annotations.Api; import okhttp3.MediaType; import okhttp3.MultipartBody; import okhttp3.OkHttpClient; @@ -34,8 +44,15 @@ import okhttp3.RequestBody; import okhttp3.Response; import org.apache.commons.lang3.RandomStringUtils; +import org.awaitility.Awaitility; +import org.awaitility.core.ConditionTimeoutException; +import org.hamcrest.Description; +import org.hamcrest.Matcher; +import org.hamcrest.Matchers; import org.testng.Assert; +import org.testng.annotations.AfterSuite; import org.testng.annotations.BeforeSuite; +import org.testng.annotations.DataProvider; import org.testng.annotations.Test; // @@ -44,428 +61,504 @@ // public class TestSchema { - // JDBC driver name and database URL - private static final String JDBC_DRIVER = FirstExample.JDBC_DRIVER; - private static final String DB_URL = FirstExample.DB_URL; + // JDBC driver name and database URL + private static final String JDBC_DRIVER = FirstExample.JDBC_DRIVER; + private static final String DB_URL = FirstExample.DB_URL; + + // store credentials for accessing rockset service + private Properties property; + + // A rockset client to directly interact with Rockset service + private RocksetClient testClient; - // store credentials for accessing rockset service - private Properties property; + // A private object to upload files to rockset + OkHttpClient client = new OkHttpClient(); - // A rockset client to directly interact with Rockset service - private RocksetClient testClient; + private static final String TEST_WORKSPACE_PREFIX = "jdbc_test_workspace_"; - // A private object to upload files to rockset - OkHttpClient client = new OkHttpClient(); + private static final String testWorkspace = TEST_WORKSPACE_PREFIX + RandomStringUtils.randomAlphanumeric(8); + final ObjectMapper mapper = new ObjectMapper(); - final ObjectMapper mapper = new ObjectMapper(); + @BeforeSuite + public void setUp() throws Exception { + String apiKey = System.getenv("ROCKSET_APIKEY"); + String apiServer = System.getenv("ROCKSET_APISERVER"); + if (apiKey == null || apiServer == null) { + throw new Exception( + "To run unit tests, please set ROCKSET_APIKEY and ROCKSET_APISERVER " + + "environment variables."); + } + property = new Properties(); + property.setProperty("apiKey", apiKey); + + if (apiServer.toLowerCase().contains("https://")) apiServer = apiServer.replace("https://", ""); + property.setProperty("apiServer", apiServer); + + // create the Rockset test client + testClient = + new RocksetClient(property.getProperty("apiKey"), property.getProperty("apiServer")); + // Register JDBC driver + Class.forName(JDBC_DRIVER); + // Clean up any previous test workspaces in case finalization didn't properly run before + // This will also prevent these tests from running concurrently on a single org, but that should be fine + deleteAllJDBCTestHarnessWorkspaces(); + } - @BeforeSuite - public void setUp() throws Exception { - String apiKey = System.getenv("ROCKSET_APIKEY"); - String apiServer = System.getenv("ROCKSET_APISERVER"); - if (apiKey == null || apiServer == null) { - throw new Exception( - "To run unit tests, please set ROCKSET_APIKEY and ROCKSET_APISERVER " - + 
"environment variables."); + + @AfterSuite + public void cleanUp() throws Exception { + // Ensure any jdbc workspaces are removed + deleteAllJDBCTestHarnessWorkspaces(); } - property = new Properties(); - property.setProperty("apiKey", apiKey); - - if (apiServer.toLowerCase().contains("https://")) apiServer = apiServer.replace("https://", ""); - property.setProperty("apiServer", apiServer); - - // create the Rockset test client - testClient = - new RocksetClient(property.getProperty("apiKey"), property.getProperty("apiServer")); - // Register JDBC driver - Class.forName(JDBC_DRIVER); - } - - // - // Check catalog names. - // This call should return one record with with one column - // TABLE_CATALOG - // - @Test - public void testGetTables() throws Exception { - System.out.println("testGetTables"); - - // create 3 collections - int numCollections = 3; - List colls = generateCollectionNames(numCollections); - createCollections(colls); - - // wait for all leaves to be ready to serve - waitCollections(colls); - - // now check these 3 collections are returned via the JDBC call - Connection conn = null; - Statement stmt = null; - try { - conn = DriverManager.getConnection(DB_URL, property); - - DatabaseMetaData meta = conn.getMetaData(); - ResultSet rs = - meta.getTables( - RocksetConnection.DEFAULT_CATALOG, RocksetConnection.DEFAULT_SCHEMA, "*", null); - int colCatIndex = rs.findColumn("TABLE_CAT"); - int colSchemaIndex = rs.findColumn("TABLE_SCHEM"); - int colNameIndex = rs.findColumn("TABLE_NAME"); - int colTypeIndex = rs.findColumn("TABLE_TYPE"); - int colRemarksIndex = rs.findColumn("REMARKS"); - - int found = 0; - while (rs.next()) { - String value = rs.getString(colSchemaIndex); - Assert.assertTrue(value.equals(RocksetConnection.DEFAULT_SCHEMA)); - value = rs.getString(colCatIndex); - Assert.assertTrue(value.equals(RocksetConnection.DEFAULT_CATALOG)); - value = rs.getString(colTypeIndex); - Assert.assertTrue(value.equals("TABLE")); - final String v1 = rs.getString(colNameIndex); - if (colls.stream().anyMatch(str -> str.equals(v1))) { - found++; + + // + // Check catalog names. 
+ // This call should return one record with with one column + // TABLE_CATALOG + // + @Test(dataProvider = "schemas") + public void testGetTables(String schema) throws Exception { + System.out.println("testGetTables"); + + // create 3 collections + int numCollections = 3; + List colls = generateCollectionNames(numCollections); + createCollections(colls, schema); + + // wait for all leaves to be ready to serve + waitCollections(colls, schema); + + // now check these 3 collections are returned via the JDBC call + Connection conn = null; + Statement stmt = null; + try { + conn = DriverManager.getConnection(DB_URL, property); + + DatabaseMetaData meta = conn.getMetaData(); + ResultSet rs = + meta.getTables( + RocksetConnection.DEFAULT_CATALOG, schema, "*", null); + int colCatIndex = rs.findColumn("TABLE_CAT"); + int colSchemaIndex = rs.findColumn("TABLE_SCHEM"); + int colNameIndex = rs.findColumn("TABLE_NAME"); + int colTypeIndex = rs.findColumn("TABLE_TYPE"); + int colRemarksIndex = rs.findColumn("REMARKS"); + + int found = 0; + while (rs.next()) { + String value = rs.getString(colSchemaIndex); + Assert.assertTrue(value.equals(schema)); + value = rs.getString(colCatIndex); + Assert.assertTrue(value.equals(RocksetConnection.DEFAULT_CATALOG)); + value = rs.getString(colTypeIndex); + Assert.assertTrue(value.equals("TABLE")); + final String v1 = rs.getString(colNameIndex); + if (colls.stream().anyMatch(str -> str.equals(v1))) { + found++; + } + } + Assert.assertEquals(numCollections, found); + } finally { + cleanup(colls, stmt, conn, schema); } - } - Assert.assertEquals(numCollections, found); - } finally { - cleanup(colls, stmt, conn); } - } - - @Test - public void testDateTimeTimestamp() throws Exception { - System.out.println("testDateTimeTimestamp"); - Connection conn = null; - Statement stmt = null; - ResultSet rs = null; - // create 1 collection - int numCollections = 1; - List colls = generateCollectionNames(numCollections); - final String collectionName = colls.get(0); - - try { - // create collection - createCollections(colls); - - // wait for all leaves to be ready to serve - waitCollections(colls); - - String csvParams = - "{ \"csv\": { \"columnNames\": [\"c1\", \"c2\", \"c3\"], " - + "\"columnTypes\": [\"DATE\", \"TIME\", \"TIMESTAMP\"] } }"; - - uploadFile(collectionName, "src/test/resources/basic.csv", csvParams); - waitNumberDocs(collectionName, 1); - - conn = DriverManager.getConnection(DB_URL, property); - - // Execute a query - System.out.println("Creating statement 1..."); - stmt = conn.createStatement(); - String sql = "select c1, c2, c3 from " + collectionName; - rs = stmt.executeQuery(sql); - - // Extract data from result set - while (rs.next()) { - // Retrieve by column name - Date c1 = rs.getDate("c1"); - Time c2 = rs.getTime("c2"); - String c3 = rs.getString("c3"); - - // Display values - System.out.println("c1: " + c1.toString() + " c2: " + c2.toString() + " c3: " + c3); - } - } finally { - cleanup(colls, stmt, conn); + + @Test(dataProvider = "schemas") + public void testDateTimeTimestamp(String schema) throws Exception { + System.out.println("testDateTimeTimestamp"); + Connection conn = null; + Statement stmt = null; + ResultSet rs = null; + // create 1 collection + int numCollections = 1; + List colls = generateCollectionNames(numCollections); + final String collectionName = colls.get(0); + + try { + // create collection + createCollections(colls, schema); + + // wait for all leaves to be ready to serve + waitCollections(colls, schema); + + String csvParams = + "{ 
\"csv\": { \"columnNames\": [\"c1\", \"c2\", \"c3\"], " + + "\"columnTypes\": [\"DATE\", \"TIME\", \"TIMESTAMP\"] } }"; + + uploadFile(collectionName, "src/test/resources/basic.csv", csvParams, schema); + waitNumberDocs(collectionName, 1, schema); + + conn = DriverManager.getConnection(DB_URL, property); + conn.setSchema(schema); + + // Execute a query + System.out.println("Creating statement 1..."); + stmt = conn.createStatement(); + String sql = "select c1, c2, c3 from " + collectionName; + rs = stmt.executeQuery(sql); + + // Extract data from result set + while (rs.next()) { + // Retrieve by column name + Date c1 = rs.getDate("c1"); + Time c2 = rs.getTime("c2"); + String c3 = rs.getString("c3"); + + // Display values + System.out.println("c1: " + c1.toString() + " c2: " + c2.toString() + " c3: " + c3); + } + } finally { + cleanup(colls, stmt, conn, schema); + } } - } - - @Test - public void testGetColumns() throws Exception { - System.out.println("testGetTableColumns"); - Connection conn = null; - Statement stmt = null; - - // create 1 collection - int numCollections = 1; - List colls = generateCollectionNames(numCollections); - final String collectionName = colls.get(0); - - try { - // create collection - createCollections(colls); - - // wait for all leaves to be ready to serve - waitCollections(colls); - - // upload one file to collection and wait for it to be visible - uploadFile(collectionName, "src/test/resources/basic.json", null); - waitNumberDocs(collectionName, 1); - - // there should be 5 columns in this test file - // a, name, nested, _id, _event_time, _meta - final int numColumns = 6; - - conn = DriverManager.getConnection(DB_URL, property); - - DatabaseMetaData meta = conn.getMetaData(); - ResultSet rs = - meta.getColumns( - RocksetConnection.DEFAULT_CATALOG, - RocksetConnection.DEFAULT_SCHEMA, - collectionName, - null); - int colCatIndex = rs.findColumn("TABLE_CAT"); - int colSchemaIndex = rs.findColumn("TABLE_SCHEM"); - int colNameIndex = rs.findColumn("TABLE_NAME"); - int c1 = rs.findColumn("COLUMN_NAME"); - c1 = rs.findColumn("DATA_TYPE"); - c1 = rs.findColumn("TYPE_NAME"); - c1 = rs.findColumn("COLUMN_SIZE"); - c1 = rs.findColumn("BUFFER_LENGTH"); - c1 = rs.findColumn("DECIMAL_DIGITS"); - c1 = rs.findColumn("NUM_PREC_RADIX"); - c1 = rs.findColumn("NULLABLE"); - c1 = rs.findColumn("REMARKS"); - c1 = rs.findColumn("COLUMN_DEF"); - c1 = rs.findColumn("SQL_DATA_TYPE"); - c1 = rs.findColumn("SQL_DATETIME_SUB"); - c1 = rs.findColumn("CHAR_OCTET_LENGTH"); - c1 = rs.findColumn("ORDINAL_POSITION"); - c1 = rs.findColumn("IS_NULLABLE"); - c1 = rs.findColumn("SCOPE_CATALOG"); - c1 = rs.findColumn("SCOPE_SCHEMA"); - c1 = rs.findColumn("SCOPE_TABLE"); - c1 = rs.findColumn("SOURCE_DATA_TYPE"); - c1 = rs.findColumn("IS_AUTOINCREMENT"); - c1 = rs.findColumn("IS_GENERATEDCOLUMN"); - - int found = 0; - while (rs.next()) { - String value = rs.getString(colSchemaIndex); - Assert.assertTrue(value.equals(RocksetConnection.DEFAULT_SCHEMA)); - value = rs.getString(colCatIndex); - Assert.assertTrue(value.equals(RocksetConnection.DEFAULT_CATALOG)); - value = rs.getString(colNameIndex); - Assert.assertTrue(value.equals(collectionName)); - found++; - } - Assert.assertEquals(found, numColumns); - } finally { - cleanup(colls, stmt, conn); + + @Test(dataProvider = "schemas") + public void testGetColumns(String schema) throws Exception { + System.out.println("testGetTableColumns"); + Connection conn = null; + Statement stmt = null; + + // create 1 collection + int numCollections = 1; + List colls = 
generateCollectionNames(numCollections); + final String collectionName = colls.get(0); + + try { + // create collection + createCollections(colls, schema); + + // wait for all leaves to be ready to serve + waitCollections(colls, schema); + + // upload one file to collection and wait for it to be visible + uploadFile(collectionName, "src/test/resources/basic.json", null, schema); + waitNumberDocs(collectionName, 1, schema); + + // there should be 5 columns in this test file + // a, name, nested, _id, _event_time, _meta + final int numColumns = 6; + + conn = DriverManager.getConnection(DB_URL, property); + conn.setSchema(schema); + + DatabaseMetaData meta = conn.getMetaData(); + ResultSet rs = + meta.getColumns( + RocksetConnection.DEFAULT_CATALOG, + schema, + collectionName, + null); + int colCatIndex = rs.findColumn("TABLE_CAT"); + int colSchemaIndex = rs.findColumn("TABLE_SCHEM"); + int colNameIndex = rs.findColumn("TABLE_NAME"); + int c1 = rs.findColumn("COLUMN_NAME"); + c1 = rs.findColumn("DATA_TYPE"); + c1 = rs.findColumn("TYPE_NAME"); + c1 = rs.findColumn("COLUMN_SIZE"); + c1 = rs.findColumn("BUFFER_LENGTH"); + c1 = rs.findColumn("DECIMAL_DIGITS"); + c1 = rs.findColumn("NUM_PREC_RADIX"); + c1 = rs.findColumn("NULLABLE"); + c1 = rs.findColumn("REMARKS"); + c1 = rs.findColumn("COLUMN_DEF"); + c1 = rs.findColumn("SQL_DATA_TYPE"); + c1 = rs.findColumn("SQL_DATETIME_SUB"); + c1 = rs.findColumn("CHAR_OCTET_LENGTH"); + c1 = rs.findColumn("ORDINAL_POSITION"); + c1 = rs.findColumn("IS_NULLABLE"); + c1 = rs.findColumn("SCOPE_CATALOG"); + c1 = rs.findColumn("SCOPE_SCHEMA"); + c1 = rs.findColumn("SCOPE_TABLE"); + c1 = rs.findColumn("SOURCE_DATA_TYPE"); + c1 = rs.findColumn("IS_AUTOINCREMENT"); + c1 = rs.findColumn("IS_GENERATEDCOLUMN"); + + int found = 0; + while (rs.next()) { + String value = rs.getString(colSchemaIndex); + Assert.assertTrue(value.equals(schema)); + value = rs.getString(colCatIndex); + Assert.assertTrue(value.equals(RocksetConnection.DEFAULT_CATALOG)); + value = rs.getString(colNameIndex); + Assert.assertTrue(value.equals(collectionName)); + found++; + } + Assert.assertEquals(found, numColumns); + } finally { + cleanup(colls, stmt, conn, schema); + } } - } - private void validatePagination(Statement stmt, int pageSize, String query, Set cities) - throws Exception { - stmt.setFetchSize(pageSize); - stmt.setFetchDirection(ResultSet.FETCH_FORWARD); - ResultSet rs = stmt.executeQuery(query); + private void validatePagination(Statement stmt, int pageSize, String query, Set cities) + throws Exception { + stmt.setFetchSize(pageSize); + stmt.setFetchDirection(ResultSet.FETCH_FORWARD); + ResultSet rs = stmt.executeQuery(query); + + while (rs.next()) { + cities.add(rs.getString("city")); + } - while (rs.next()) { - cities.add(rs.getString("city")); + System.out.println("Verifying for page size " + pageSize); + Assert.assertEquals(cities.size(), 11); } - System.out.println("Verifying for page size " + pageSize); - Assert.assertEquals(cities.size(), 11); - } + @Test(dataProvider = "schemas") + public void testPagination(String schema) throws Exception { + System.out.println("testPagination"); + Connection conn = null; + Statement stmt = null; - @Test - public void testPagination() throws Exception { - System.out.println("testPagination"); - Connection conn = null; - Statement stmt = null; + Set cities = new HashSet<>(); - Set cities = new HashSet<>(); + // create 1 collection + int numCollections = 1; + List colls = generateCollectionNames(numCollections); + final String collectionName = 
colls.get(0); - // create 1 collection - int numCollections = 1; - List colls = generateCollectionNames(numCollections); - final String collectionName = colls.get(0); + try { + // create collection + createCollections(colls, schema); - try { - // create collection - createCollections(colls); + // wait for all leaves to be ready to serve + waitCollections(colls, schema); - // wait for all leaves to be ready to serve - waitCollections(colls); + // upload one file to collection and wait for it to be visible + uploadFile(collectionName, "src/test/resources/pagination_data.json", null, schema); + waitNumberDocs(collectionName, 1, schema); - // upload one file to collection and wait for it to be visible - uploadFile(collectionName, "src/test/resources/pagination_data.json", null); - waitNumberDocs(collectionName, 1); + conn = DriverManager.getConnection(DB_URL, property); + conn.setSchema(schema); - conn = DriverManager.getConnection(DB_URL, property); + String query = String.format("SELECT city FROM %s", collectionName); + stmt = conn.createStatement(); - String query = String.format("SELECT city FROM %s", collectionName); - stmt = conn.createStatement(); + for (int pageSize = 0; pageSize <= 12; ++pageSize) { + validatePagination(stmt, pageSize, query, cities); + } - for (int pageSize = 0; pageSize <= 12; ++pageSize) { - validatePagination(stmt, pageSize, query, cities); - } + } catch (SQLException e) { + System.out.println("Exception: " + e); + } finally { + cleanup(colls, stmt, conn, schema); + } + } - } catch (SQLException e) { - System.out.println("Exception: " + e); - } finally { - cleanup(colls, stmt, conn); + // + // Invoked by all unit tests at the end to cleanup its mess + // + private void cleanup(List colls, Statement stmt, Connection conn, String workspace) { + try { + if (colls != null) { + deleteCollections(colls, workspace); + } + } catch (Exception e) { + // nothing we can do + } + try { + if (stmt != null) { + stmt.close(); + } + } catch (SQLException se2) { + // nothing we can do + } + try { + if (conn != null) { + conn.close(); + } + } catch (SQLException se) { + se.printStackTrace(); + } } - } - - // - // Invoked by all unit tests at the end to cleanup its mess - // - private void cleanup(List colls, Statement stmt, Connection conn) { - try { - if (colls != null) { - deleteCollections(colls); - } - } catch (Exception e) { - // nothing we can do + + // + // Generate a list of collection names + // + private List generateCollectionNames(int num) { + List names = new ArrayList(num); + String prefix = "jdbctestcollection" + RandomStringUtils.randomAlphanumeric(5); + for (int i = 0; i < num; i++) { + names.add(prefix + i); + } + return names; } - try { - if (stmt != null) { - stmt.close(); - } - } catch (SQLException se2) { - // nothing we can do + + // + // Wait for all collections to be ready + // + private void waitCollections(List names, String workspace) throws Exception { + for (String name : names) { + String sql = "describe " + workspace + "." + name + ";"; + while (true) { + try { + QueryRequestSql qs = new QueryRequestSql().query(sql); + testClient.queries.query(new QueryRequest().sql(qs)); + break; + } catch (Exception e) { + System.out.println(String.format("Waiting for collection %s to be describable", name)); + Thread.sleep(1000); + } + } + } } - try { - if (conn != null) { - conn.close(); - } - } catch (SQLException se) { - se.printStackTrace(); + + // + // Create the list of collections. 
Fail if any of the collection + // already exists + // + private void createCollections(List names, String workspace) throws Exception { + createWorkspaceIfNonExistent(workspace); + + for (String name : names) { + CreateCollectionRequest request = new CreateCollectionRequest().name(name); + CreateCollectionResponse response = testClient.collections.create(workspace, request); + + Assert.assertEquals(response.getData().getName(), name); + Assert.assertEquals(response.getData().getStatus(), Collection.StatusEnum.CREATED); + } } - } - - // - // Generate a list of collection names - // - private List generateCollectionNames(int num) { - List names = new ArrayList(num); - String prefix = "jdbctestcollection" + RandomStringUtils.randomAlphanumeric(5); - for (int i = 0; i < num; i++) { - names.add(prefix + i); + + private void createWorkspaceIfNonExistent(String workspace) throws Exception { + ListWorkspacesResponse workspaces = testClient.workspaces.list(); + if (workspaces.getData().stream() + .map(ws -> ws.getName()) + .noneMatch(ws -> ws.equals(workspace))) { + CreateWorkspaceRequest request = new CreateWorkspaceRequest().name(workspace); + testClient.workspaces.create(request); + } } - return names; - } - - // - // Wait for all collections to be ready - // - private void waitCollections(List names) throws Exception { - for (String name : names) { - String sql = "describe \"" + name + "\";"; - while (true) { - try { - QueryRequestSql qs = new QueryRequestSql().query(sql); - testClient.queries.query(new QueryRequest().sql(qs)); - break; - } catch (Exception e) { - System.out.println(String.format("Waiting for collection %s to be describable", name)); - Thread.sleep(1000); + + private void deleteAllJDBCTestHarnessWorkspaces() throws Exception { + List collections = testClient.collections.list().getData(); + + for (Collection collection : collections) { + if (collection.getWorkspace().startsWith(TEST_WORKSPACE_PREFIX)) { + DeleteCollectionResponse resp = retryOnApiException(() -> testClient.collections.delete(collection.getWorkspace(), collection.getName())); + System.out.println("Deleted workspace " + resp.getData().getName()); + } + } + + Set workspaces = new HashSet<>(); + testClient.workspaces.list().getData().stream() + .map(Workspace::getName) + .filter(ws -> ws.startsWith(TEST_WORKSPACE_PREFIX)) + .forEach(workspaces::add); + + for (String ws : workspaces) { + DeleteWorkspaceResponse resp = retryOnApiException(() -> testClient.workspaces.delete(ws)); + System.out.println("Deleted workspace " + resp.getData().getName()); + // Collection deletion is async, wait for this to finish before workspace can be deleted } - } } - } - - // - // Create the list of collections. 
Fail if any of the collection - // already exists - // - private void createCollections(List names) throws Exception { - for (String name : names) { - CreateCollectionRequest request = new CreateCollectionRequest().name(name); - CreateCollectionResponse response = testClient.collections.create("commons", request); - - Assert.assertEquals(response.getData().getName(), name); - Assert.assertEquals(response.getData().getStatus(), Collection.StatusEnum.CREATED); + + // + // Delete all specified collections + // + private void deleteCollections(List names, String workspace) throws Exception { + for (String name : names) { + DeleteCollectionResponse deleteCollectionResponse = + testClient.collections.delete(workspace, name); + Assert.assertEquals(deleteCollectionResponse.getData().getName(), name); + // Assert.assertEquals(deleteCollectionResponse.getData().getStatus(), + // Collection.StatusEnum.DELETED); + } } - } - - // - // Delete all specified collections - // - private void deleteCollections(List names) throws Exception { - for (String name : names) { - DeleteCollectionResponse deleteCollectionResponse = - testClient.collections.delete("commons", name); - Assert.assertEquals(deleteCollectionResponse.getData().getName(), name); - // Assert.assertEquals(deleteCollectionResponse.getData().getStatus(), - // Collection.StatusEnum.DELETED); + + // + // Upload the specified file to the specified collection + // + void uploadFile(String collectionName, String path, String params, String workspace) throws IOException { + final File file = new File(path); + final MediaType mt = MediaType.parse("text/json; charset=utf-8"); + + // create multipart request + MultipartBody.Builder multipartBuilder = + new MultipartBody.Builder() + .setType(MultipartBody.FORM) + .addFormDataPart("file", file.getName(), RequestBody.create(mt, file)) + .addFormDataPart("size", String.valueOf(file.length())); + + if (params != null) { + multipartBuilder.addFormDataPart("params", params); + } + + RequestBody body = multipartBuilder.build(); + + // send the file upload request + String url = + String.format( + "https://%s/v1/orgs/self/ws/%s/collections/%s/uploads", + property.getProperty("apiServer"), workspace, collectionName); + System.out.println("Uploading test file to " + url); + + Request request = + new Request.Builder() + .url(url) + .addHeader("Authorization", String.format("ApiKey %s", property.getProperty("apiKey"))) + .post(body) + .build(); + + Response response = null; + try { + response = client.newCall(request).execute(); + assertEquals(response.code(), 200); + JsonNode jsonObject = mapper.readTree(response.body().string()); + JsonNode jsonData = jsonObject.get("data"); + assertEquals(jsonData.get("file_name").asText(), file.getName()); + } finally { + if (response != null) { + response.close(); + } + } } - } - - // - // Upload the specified file to the specified collection - // - void uploadFile(String collectionName, String path, String params) throws IOException { - final File file = new File(path); - final MediaType mt = MediaType.parse("text/json; charset=utf-8"); - - // create multipart request - MultipartBody.Builder multipartBuilder = - new MultipartBody.Builder() - .setType(MultipartBody.FORM) - .addFormDataPart("file", file.getName(), RequestBody.create(mt, file)) - .addFormDataPart("size", String.valueOf(file.length())); - - if (params != null) { - multipartBuilder.addFormDataPart("params", params); + + // + // Wait for the specified docs to appear in the collection + // + private void 
waitNumberDocs(String collectionName, int expectedDocs, String workspace) throws Exception { + String sql = " OPTION(default_workspace='" + workspace + "') \n select count(*) from \"" + collectionName + "\";"; + int found = 0; + while (found < expectedDocs) { + try { + QueryRequestSql qs = new QueryRequestSql().query(sql); + QueryResponse resp = testClient.queries.query(new QueryRequest().sql(qs)); + + RocksetResultSet res = new RocksetResultSet(sql, resp, Integer.MAX_VALUE, null); + if (res.next()) { + found = res.getInt("?COUNT"); + } + } catch (Exception e) { + System.out.println("Exception in query " + sql + "exception " + e.getMessage()); + } + System.out.println( + String.format( + "Collection %s found %d docs, waiting for %d", collectionName, found, expectedDocs)); + Thread.sleep(1000); + } } - RequestBody body = multipartBuilder.build(); - - // send the file upload request - String url = - String.format( - "https://%s/v1/orgs/self/ws/commons/collections/%s/uploads", - property.getProperty("apiServer"), collectionName); - System.out.println("Uploading test file to " + url); - - Request request = - new Request.Builder() - .url(url) - .addHeader("Authorization", String.format("ApiKey %s", property.getProperty("apiKey"))) - .post(body) - .build(); - - Response response = null; - try { - response = client.newCall(request).execute(); - assertEquals(response.code(), 200); - JsonNode jsonObject = mapper.readTree(response.body().string()); - JsonNode jsonData = jsonObject.get("data"); - assertEquals(jsonData.get("file_name").asText(), file.getName()); - } finally { - if (response != null) { - response.close(); - } + @DataProvider(name = "schemas") + public Object[][] schemas() { + return new Object[][]{ + // commons is default + {"commons"}, + {testWorkspace}, + }; + } - } - - // - // Wait for the specified docs to appear in the collection - // - private void waitNumberDocs(String collectionName, int expectedDocs) throws Exception { - String sql = "select count(*) from \"" + collectionName + "\";"; - int found = 0; - while (found < expectedDocs) { - try { - QueryRequestSql qs = new QueryRequestSql().query(sql); - QueryResponse resp = testClient.queries.query(new QueryRequest().sql(qs)); - - RocksetResultSet res = new RocksetResultSet(sql, resp, Integer.MAX_VALUE, null); - if (res.next()) { - found = res.getInt("?COUNT"); - } - } catch (Exception e) { - System.out.println("Exception in query " + sql + "exception " + e.getMessage()); - } - System.out.println( - String.format( - "Collection %s found %d docs, waiting for %d", collectionName, found, expectedDocs)); - Thread.sleep(1000); + + private static T retryOnApiException(Callable request) { + return Awaitility + .await() + .atMost(5, TimeUnit.MINUTES) + .pollInterval(1, TimeUnit.SECONDS) + .until(() -> { + try { + return request.call(); + } catch (ApiException e) { + System.out.println("Encountered error, retrying request: " + e.getMessage()); + return null; + } + }, Matchers.notNullValue()); } - } }
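
Reviewer note: below is a minimal usage sketch (not part of the patch) of how the new schema-to-workspace mapping in RocksetStatement.executeWithParams() is expected to be exercised from client code. It reuses FirstExample.JDBC_DRIVER / FirstExample.DB_URL and the ROCKSET_APIKEY / ROCKSET_APISERVER environment variables exactly as the tests above do; the "sales" workspace name and "my_collection" collection are placeholders.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;
import java.util.Properties;

public class WorkspaceQueryExample {
  public static void main(String[] args) throws Exception {
    Properties props = new Properties();
    props.setProperty("apiKey", System.getenv("ROCKSET_APIKEY"));
    props.setProperty("apiServer", System.getenv("ROCKSET_APISERVER"));

    // Register the JDBC driver, as the tests do.
    Class.forName(FirstExample.JDBC_DRIVER);

    try (Connection conn = DriverManager.getConnection(FirstExample.DB_URL, props)) {
      // Selecting any schema other than RocksetConnection.DEFAULT_SCHEMA ("commons", per the
      // data provider above) should cause executeWithParams() to prepend
      // OPTION(default_workspace='sales') to the SQL before it is sent to Rockset.
      conn.setSchema("sales");

      try (Statement stmt = conn.createStatement();
           ResultSet rs = stmt.executeQuery("SELECT COUNT(*) FROM my_collection")) {
        while (rs.next()) {
          System.out.println("count = " + rs.getInt(1));
        }
      }
    }
  }
}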