diff --git a/fhir-bucket/pom.xml b/fhir-bucket/pom.xml
index 315887d44f3..0d507c18c10 100644
--- a/fhir-bucket/pom.xml
+++ b/fhir-bucket/pom.xml
@@ -58,10 +58,6 @@
org.apache.derby
derby
-
- com.ibm.db2
- jcc
-
org.postgresql
postgresql
diff --git a/fhir-bucket/src/main/java/com/ibm/fhir/bucket/api/Constants.java b/fhir-bucket/src/main/java/com/ibm/fhir/bucket/api/Constants.java
index 094c69924b9..64c40c1222c 100644
--- a/fhir-bucket/src/main/java/com/ibm/fhir/bucket/api/Constants.java
+++ b/fhir-bucket/src/main/java/com/ibm/fhir/bucket/api/Constants.java
@@ -1,5 +1,5 @@
/*
- * (C) Copyright IBM Corp. 2020
+ * (C) Copyright IBM Corp. 2020, 2022
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -56,6 +56,6 @@ public class Constants {
public static final int IMPORT_RETRY_TIMES = 1;
public static final int COS_REQUEST_TIMEOUT = 10000;
// Batch writing to DB can take long time which can make the idle COS/S3 client connection timeout, so set the client socket timeout
- // to 120 seconds which is the default DB2 timeout.
+ // to 120 seconds which is the default Liberty transaction timeout.
public static final int COS_SOCKET_TIMEOUT = 120000;
}
diff --git a/fhir-bucket/src/main/java/com/ibm/fhir/bucket/app/Main.java b/fhir-bucket/src/main/java/com/ibm/fhir/bucket/app/Main.java
index 0668a44cf0f..37ddb43e949 100644
--- a/fhir-bucket/src/main/java/com/ibm/fhir/bucket/app/Main.java
+++ b/fhir-bucket/src/main/java/com/ibm/fhir/bucket/app/Main.java
@@ -681,7 +681,7 @@ public void setupDerbyRepository() {
}
/**
- * Set up the connection pool and transaction provider for connecting to a DB2
+ * Set up the connection pool and transaction provider for connecting to a PostgreSQL
* database
*/
public void setupPostgresRepository() {
diff --git a/fhir-bucket/src/main/java/com/ibm/fhir/bucket/persistence/MergeResourceTypes.java b/fhir-bucket/src/main/java/com/ibm/fhir/bucket/persistence/MergeResourceTypes.java
index d1da59eb82a..1ebfb276d6b 100644
--- a/fhir-bucket/src/main/java/com/ibm/fhir/bucket/persistence/MergeResourceTypes.java
+++ b/fhir-bucket/src/main/java/com/ibm/fhir/bucket/persistence/MergeResourceTypes.java
@@ -1,5 +1,5 @@
/*
- * (C) Copyright IBM Corp. 2020, 2021
+ * (C) Copyright IBM Corp. 2020, 2022
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -22,7 +22,7 @@
/**
* DAO to encapsulate all the SQL/DML used to retrieve and persist data
* in the schema.
- * Supports: Db2 and Derby
+ * Supports: Derby
* Does not support: PostgreSQL
*/
public class MergeResourceTypes implements IDatabaseStatement {
diff --git a/fhir-bucket/src/main/java/com/ibm/fhir/bucket/persistence/MergeResources.java b/fhir-bucket/src/main/java/com/ibm/fhir/bucket/persistence/MergeResources.java
index 2ef27ff2cd4..ac3d38243f0 100644
--- a/fhir-bucket/src/main/java/com/ibm/fhir/bucket/persistence/MergeResources.java
+++ b/fhir-bucket/src/main/java/com/ibm/fhir/bucket/persistence/MergeResources.java
@@ -1,5 +1,5 @@
/*
- * (C) Copyright IBM Corp. 2020, 2021
+ * (C) Copyright IBM Corp. 2020, 2022
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -38,7 +38,7 @@ public MergeResources(Collection resources) {
@Override
public void run(IDatabaseTranslator translator, Connection c) {
- // Support for PostgreSQL as well as Derby/Db2
+ // Support for PostgreSQL as well as Derby
final String currentTimestamp = translator.currentTimestampString();
final String dual = translator.dualTableName();
final String source = dual == null ? "(SELECT 1)" : dual;
diff --git a/fhir-config/src/test/java/com/ibm/fhir/config/test/FHIRConfigHelperTest.java b/fhir-config/src/test/java/com/ibm/fhir/config/test/FHIRConfigHelperTest.java
index 8d9f5f2a71d..104ebb986e5 100644
--- a/fhir-config/src/test/java/com/ibm/fhir/config/test/FHIRConfigHelperTest.java
+++ b/fhir-config/src/test/java/com/ibm/fhir/config/test/FHIRConfigHelperTest.java
@@ -357,7 +357,7 @@ public void testDefaultDatasourceLookup() throws Exception {
PropertyGroup dsPG = FHIRConfigHelper.getPropertyGroup(dsPropertyName);
assertNotNull(dsPG);
String type = dsPG.getStringProperty("type");
- assertEquals("db2", type);
+ assertEquals("postgresql", type);
}
/**
diff --git a/fhir-config/src/test/resources/config/default/fhir-server-config.json b/fhir-config/src/test/resources/config/default/fhir-server-config.json
index f9ededaa955..89bde4d6bab 100644
--- a/fhir-config/src/test/resources/config/default/fhir-server-config.json
+++ b/fhir-config/src/test/resources/config/default/fhir-server-config.json
@@ -26,7 +26,7 @@
"persistence": {
"datasources": {
"default": {
- "type": "db2"
+ "type": "postgresql"
}
},
"payload": {
diff --git a/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/api/IConnectionProvider.java b/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/api/IConnectionProvider.java
index 3cb5bb9dc5d..651b4f8462c 100644
--- a/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/api/IConnectionProvider.java
+++ b/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/api/IConnectionProvider.java
@@ -1,5 +1,5 @@
/*
- * (C) Copyright IBM Corp. 2019
+ * (C) Copyright IBM Corp. 2019, 2022
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -33,7 +33,7 @@ public interface IConnectionProvider {
/**
* Get the translator associated with this connection provider. Supports
* interpretation of SQLExceptions and the ability to tweak
- * SQL statements to handle differences between DB2 and Derby
+ * SQL statements to handle differences between databases
* @return
*/
public IDatabaseTranslator getTranslator();
diff --git a/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/api/IDatabaseAdapter.java b/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/api/IDatabaseAdapter.java
index 27d87d62597..46d8b6a2f1a 100644
--- a/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/api/IDatabaseAdapter.java
+++ b/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/api/IDatabaseAdapter.java
@@ -22,10 +22,7 @@
/**
* Abstraction of the SQL to use for a given database. This allows us to hide as
- * much as possible the differences in syntax and support between DB2 and Derby
- * (which is used for unit-testing). Derby is pretty close to DB2 in most cases,
- * but notably does not support partitioning, variables or SPL stored
- * procedures.
+ * much as possible the differences in syntax and support between databases.
*/
public interface IDatabaseAdapter {
/**
diff --git a/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/api/IDatabaseTranslator.java b/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/api/IDatabaseTranslator.java
index 19c726175c4..f0eb9c7c9d3 100644
--- a/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/api/IDatabaseTranslator.java
+++ b/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/api/IDatabaseTranslator.java
@@ -1,5 +1,5 @@
/*
- * (C) Copyright IBM Corp. 2019, 2020
+ * (C) Copyright IBM Corp. 2019, 2022
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -13,9 +13,7 @@
import com.ibm.fhir.database.utils.model.DbType;
/**
- * Lets us adjust DDL/DML/SQL statements to match the target database. This
- * is needed because DB2 and Derby have a few differences, and we need to
- * tweak the SQL in order to support all the unit tests we want/need
+ * Lets us adjust DDL/DML/SQL statements to match the target database.
*/
public interface IDatabaseTranslator {
diff --git a/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/common/CommonDatabaseAdapter.java b/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/common/CommonDatabaseAdapter.java
index 40cde9f3997..6eea2a4bfbe 100644
--- a/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/common/CommonDatabaseAdapter.java
+++ b/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/common/CommonDatabaseAdapter.java
@@ -50,7 +50,9 @@
import com.ibm.fhir.database.utils.tenant.UpdateTenantStatusDAO;
/**
- * Provides schema control functions common to our supported databases (DB2 and Derby)
+ * Provides schema control functions common to our supported databases:
+ * PostgreSQL
+ * Derby (for unit tests, not production)
*/
public abstract class CommonDatabaseAdapter implements IDatabaseAdapter, IDatabaseTypeAdapter {
private static final Logger logger = Logger.getLogger(CommonDatabaseAdapter.class.getName());
diff --git a/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/common/DropForeignKeyConstraint.java b/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/common/DropForeignKeyConstraint.java
index afb07107b18..0ef2240223e 100644
--- a/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/common/DropForeignKeyConstraint.java
+++ b/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/common/DropForeignKeyConstraint.java
@@ -1,5 +1,5 @@
/*
- * (C) Copyright IBM Corp. 2019, 2020
+ * (C) Copyright IBM Corp. 2019, 2022
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -41,7 +41,7 @@ public DropForeignKeyConstraint(String schemaName, String tableName, String... c
public void run(IDatabaseTranslator translator, Connection c) {
String qTableName = DataDefinitionUtil.getQualifiedName(schemaName, tableName);
- // Need to account for the syntax differences betweeb Db2/Derby and PostgreSQL
+ // Need to account for the syntax differences between Derby and PostgreSQL
// DB2: ALTER TABLE tbl DROP FOREIGN KEY fkConstraintName
// PostgreSQL: ALTER TABLE tbl DROP CONSTRAINT fkConstraintName
for (String constraintName : constraintNames) {
diff --git a/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/common/GetSequenceNextValueDAO.java b/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/common/GetSequenceNextValueDAO.java
index a3d3fe306cb..d9da4ce8f81 100644
--- a/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/common/GetSequenceNextValueDAO.java
+++ b/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/common/GetSequenceNextValueDAO.java
@@ -1,5 +1,5 @@
/*
- * (C) Copyright IBM Corp. 2020
+ * (C) Copyright IBM Corp. 2020, 2022
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -45,7 +45,7 @@ public GetSequenceNextValueDAO(String schemaName, String sequenceName) {
public Long run(IDatabaseTranslator translator, Connection c) {
// you can't get the current value before calling next value in a given session,
// so we simply bump the sequence number. The translator is used to support
- // our different database flavors (e.g. Derby, DB2 and PostgreSQL)
+ // our different database flavors (e.g. Derby and PostgreSQL)
final String SQL = translator.selectSequenceNextValue(schemaName, sequenceName);
try (PreparedStatement ps = c.prepareStatement(SQL)) {
diff --git a/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/common/PlainSchemaAdapter.java b/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/common/PlainSchemaAdapter.java
index 189a50cbd38..85ec97f9251 100644
--- a/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/common/PlainSchemaAdapter.java
+++ b/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/common/PlainSchemaAdapter.java
@@ -28,7 +28,7 @@
/**
* Adapter to build the plain version of the FHIR schema. Uses
* the IDatabaseAdapter to hide the specifics of a particular
- * database flavor (like Db2, PostgreSQL, Derby etc).
+ * database flavor (like PostgreSQL, Derby etc).
*/
public class PlainSchemaAdapter implements ISchemaAdapter {
diff --git a/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/derby/DerbyAdapter.java b/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/derby/DerbyAdapter.java
index 15f1058642a..406baaed95a 100644
--- a/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/derby/DerbyAdapter.java
+++ b/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/derby/DerbyAdapter.java
@@ -284,8 +284,7 @@ public String varbinaryClause(int size) {
@Override
public String blobClause(long size, int inlineSize) {
- // Derby doesn't support the INLINE feature (which greatly helps with
- // performance on DB2)
+ // Derby doesn't support the INLINE feature
return "BLOB(" + size + ")";
}
diff --git a/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/model/BaseObject.java b/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/model/BaseObject.java
index 709518f2d27..df0a02934d1 100644
--- a/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/model/BaseObject.java
+++ b/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/model/BaseObject.java
@@ -206,9 +206,9 @@ public ITaskGroup collect(final ITaskCollector tc, final ISchemaAdapter target,
@Override
public void applyTx(ISchemaAdapter target, SchemaApplyContext context, ITransactionProvider tp, IVersionHistoryService vhs) {
// Wrap the apply operation in its own transaction, as this is likely
- // being executed from a thread-pool. DB2 has some issues with deadlocks
- // on its catalog tables (SQLCODE=-911, SQLSTATE=40001, SQLERRMC=2) when
- // applying schema changes in parallel, so we need a little retry loop.
+ // being executed from a thread-pool. The retry loop is required to
+ // cover any deadlocks we might encounter caused by issuing DDL in
+ // parallel.
int remainingAttempts = 10;
while (remainingAttempts-- > 0) {
try (ITransaction tx = tp.getTransaction()) {
diff --git a/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/model/DatabaseObject.java b/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/model/DatabaseObject.java
index 3c2707443ac..d560d6b2f25 100644
--- a/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/model/DatabaseObject.java
+++ b/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/model/DatabaseObject.java
@@ -144,9 +144,7 @@ public String toString() {
@Override
public void applyTx(ISchemaAdapter target, SchemaApplyContext context, ITransactionProvider tp, IVersionHistoryService vhs) {
// Wrap the apply operation in its own transaction, as this is likely
- // being executed from a thread-pool. DB2 has some issues with deadlocks
- // on its catalog tables (SQLCODE=-911, SQLSTATE=40001, SQLERRMC=2) when
- // applying schema changes in parallel, so we need a little retry loop.
+ // being executed from a thread-pool. The retry loop below covers transient
+ // deadlocks that can occur when schema changes are applied in parallel.
int remainingAttempts = 10;
while (remainingAttempts-- > 0) {
try (ITransaction tx = tp.getTransaction()) {
diff --git a/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/model/Table.java b/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/model/Table.java
index 9c4428b486d..e0ea950803e 100644
--- a/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/model/Table.java
+++ b/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/model/Table.java
@@ -41,9 +41,6 @@ public class Table extends BaseObject {
// All the FK constraints used by this table
private final List fkConstraints = new ArrayList<>();
- // enable access control
- private final SessionVariableDef accessControlVar;
-
private final Tablespace tablespace;
// The column to use when making this table multi-tenant (if supported by the the target)
@@ -90,7 +87,7 @@ public class Table extends BaseObject {
public Table(String schemaName, String name, int version, String tenantColumnName,
Collection columns, PrimaryKeyDef pk,
IdentityDef identity, Collection indexes, Collection fkConstraints,
- SessionVariableDef accessControlVar, Tablespace tablespace, List dependencies, Map tags,
+ Tablespace tablespace, List dependencies, Map tags,
Collection privileges, List migrations, List withs, List checkConstraints,
DistributionType distributionType, String distributionColumnName, boolean create) {
super(schemaName, name, DatabaseObjectType.TABLE, version, migrations);
@@ -100,7 +97,6 @@ public Table(String schemaName, String name, int version, String tenantColumnNam
this.identity = identity;
this.indexes.addAll(indexes);
this.fkConstraints.addAll(fkConstraints);
- this.accessControlVar = accessControlVar;
this.tablespace = tablespace;
this.withs = withs;
this.checkConstraints.addAll(checkConstraints);
@@ -179,18 +175,6 @@ public void apply(ISchemaAdapter target, SchemaApplyContext context) {
fkc.apply(getSchemaName(), getObjectName(), this.tenantColumnName, target, this.distributionType);
}
}
-
- // Apply tenant access control if required
- if (this.accessControlVar != null) {
- // The accessControlVar represents a DB2 session variable. Programs must set this value
- // for the current tenant when executing any SQL (both reads and writes) on
- // tables with this access control enabled
- final String variableName = accessControlVar.getQualifiedName();
- final String tenantPermission = getObjectName() + "_TENANT";
- final String predicate = getQualifiedName() + ".MT_ID = " + variableName;
- target.createOrReplacePermission(getSchemaName(), tenantPermission, getObjectName(), predicate);
- target.activateRowAccessControl(getSchemaName(), getObjectName());
- }
}
@Override
@@ -201,17 +185,6 @@ public void apply(Integer priorVersion, ISchemaAdapter target, SchemaApplyContex
for (Migration step : migrations) {
step.migrateFrom(priorVersion).stream().forEachOrdered(target::runStatement);
}
- // Re-apply tenant access control if required
- if (this.accessControlVar != null && this.create) {
- // The accessControlVar represents a DB2 session variable. Programs must set this value
- // for the current tenant when executing any SQL (both reads and writes) on
- // tables with this access control enabled
- final String variableName = accessControlVar.getQualifiedName();
- final String tenantPermission = getObjectName() + "_TENANT";
- final String predicate = getQualifiedName() + ".MT_ID = " + variableName;
- target.createOrReplacePermission(getSchemaName(), tenantPermission, getObjectName(), predicate);
- target.activateRowAccessControl(getSchemaName(), getObjectName());
- }
}
}
@@ -225,12 +198,6 @@ protected void grantGroupPrivileges(ISchemaAdapter target, Set group,
@Override
public void drop(ISchemaAdapter target) {
- if (this.accessControlVar != null) {
- target.deactivateRowAccessControl(getSchemaName(), getObjectName());
-
- final String tenantPermission = getObjectName() + "_TENANT";
- target.dropPermission(getSchemaName(), tenantPermission);
- }
target.dropTable(getSchemaName(), getObjectName());
}
@@ -280,9 +247,6 @@ public static class Builder extends VersionedSchemaObject {
// The tablespace to use for this table [optional]
private Tablespace tablespace;
- // The variable to use for access control (when set)
- private SessionVariableDef accessControlVar;
-
// Privileges to be granted on this table
private List privileges = new ArrayList<>();
@@ -864,7 +828,7 @@ public Table build(IDataModel dataModel) {
// Our schema objects are immutable by design, so all initialization takes place
// through the constructor
return new Table(getSchemaName(), getObjectName(), this.version, this.tenantColumnName, buildColumns(), this.primaryKey, this.identity, this.indexes.values(),
- enabledFKConstraints, this.accessControlVar, this.tablespace, allDependencies, tags, privileges, migrations, withs, checkConstraints, distributionType,
+ enabledFKConstraints, this.tablespace, allDependencies, tags, privileges, migrations, withs, checkConstraints, distributionType,
distributionColumnName, create);
}
@@ -932,18 +896,6 @@ protected List buildColumns() {
return result;
}
- /**
- * Switch on access control for this table
- */
- public Builder enableAccessControl(SessionVariableDef var) {
- this.accessControlVar = var;
-
- // Add the session variable as a dependency for this table
- this.dependencies.add(var);
-
- return this;
- }
-
/**
* @param tagName
* @param tagValue
@@ -970,9 +922,7 @@ public Builder addPrivileges(Collection gps) {
}
/**
- * Setter to configure this table for multitenancy. Multitenancy support depends on the target
- * ...which in this case means DB2 supports it (using partitioning) but Derby does not...so for
- * Derby, we don't create the extra column or FK relationships back to the TENANTS table.
+ * Setter to configure this table for multitenancy when supported by the target database type
* @return
*/
public Builder setTenantColumnName(String name) {
diff --git a/fhir-flow/pom.xml b/fhir-flow/pom.xml
index 8ac6a9cf5f9..8df076f6e03 100644
--- a/fhir-flow/pom.xml
+++ b/fhir-flow/pom.xml
@@ -63,10 +63,6 @@
org.apache.derby
derby
-
- com.ibm.db2
- jcc
-
org.postgresql
postgresql
diff --git a/fhir-install/pom.xml b/fhir-install/pom.xml
index a546748deab..7c96024691a 100644
--- a/fhir-install/pom.xml
+++ b/fhir-install/pom.xml
@@ -47,10 +47,6 @@
org.apache.derby
derbytools
-
- com.ibm.db2
- jcc
-
org.postgresql
postgresql
diff --git a/fhir-parent/pom.xml b/fhir-parent/pom.xml
index 94dd46c61c7..395d35f66b6 100644
--- a/fhir-parent/pom.xml
+++ b/fhir-parent/pom.xml
@@ -172,11 +172,6 @@
22.0.0.4
zip
-
- com.ibm.db2
- jcc
- 11.5.7.0
-
jakarta.ws.rs
jakarta.ws.rs-api
diff --git a/fhir-persistence-blob-app/pom.xml b/fhir-persistence-blob-app/pom.xml
index 5a3150d7bba..a06c6792b90 100644
--- a/fhir-persistence-blob-app/pom.xml
+++ b/fhir-persistence-blob-app/pom.xml
@@ -113,11 +113,6 @@
derbytools
true
-
- com.ibm.db2
- jcc
- true
-
org.postgresql
postgresql
diff --git a/fhir-persistence-cassandra-app/pom.xml b/fhir-persistence-cassandra-app/pom.xml
index 81eee7bcdb1..c5fcb62b6e1 100644
--- a/fhir-persistence-cassandra-app/pom.xml
+++ b/fhir-persistence-cassandra-app/pom.xml
@@ -102,11 +102,6 @@
derbytools
true
-
- com.ibm.db2
- jcc
- true
-
org.postgresql
postgresql
diff --git a/fhir-persistence-jdbc/pom.xml b/fhir-persistence-jdbc/pom.xml
index e7ce084fc90..b0941c2207c 100644
--- a/fhir-persistence-jdbc/pom.xml
+++ b/fhir-persistence-jdbc/pom.xml
@@ -28,11 +28,6 @@
derby
provided
-
- com.ibm.db2
- jcc
- provided
-
org.postgresql
postgresql
diff --git a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/FHIRPersistenceJDBCCache.java b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/FHIRPersistenceJDBCCache.java
index 613310ebf09..bf8050681dd 100644
--- a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/FHIRPersistenceJDBCCache.java
+++ b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/FHIRPersistenceJDBCCache.java
@@ -1,12 +1,12 @@
/*
- * (C) Copyright IBM Corp. 2020, 2021
+ * (C) Copyright IBM Corp. 2020, 2022
*
* SPDX-License-Identifier: Apache-2.0
*/
package com.ibm.fhir.persistence.jdbc;
-import com.ibm.fhir.persistence.jdbc.dao.api.ICommonTokenValuesCache;
+import com.ibm.fhir.persistence.jdbc.dao.api.ICommonValuesCache;
import com.ibm.fhir.persistence.jdbc.dao.api.IIdNameCache;
import com.ibm.fhir.persistence.jdbc.dao.api.ILogicalResourceIdentCache;
import com.ibm.fhir.persistence.jdbc.dao.api.INameIdCache;
@@ -30,10 +30,10 @@ public interface FHIRPersistenceJDBCCache {
void clearNeedToPrefill();
/**
- * Getter for the common token values cache
+ * Getter for the common values cache
* @return
*/
- ICommonTokenValuesCache getResourceReferenceCache();
+ ICommonValuesCache getCommonValuesCache();
/**
* Getter for the cache handling lookups for logical_resource_id values
diff --git a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/FHIRResourceDAOFactory.java b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/FHIRResourceDAOFactory.java
index 64422d45e21..ac7591a141f 100644
--- a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/FHIRResourceDAOFactory.java
+++ b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/FHIRResourceDAOFactory.java
@@ -11,26 +11,22 @@
import javax.transaction.TransactionSynchronizationRegistry;
import com.ibm.fhir.database.utils.api.IDatabaseTranslator;
-import com.ibm.fhir.database.utils.citus.CitusTranslator;
import com.ibm.fhir.database.utils.common.DatabaseTranslatorFactory;
import com.ibm.fhir.database.utils.derby.DerbyTranslator;
import com.ibm.fhir.database.utils.postgres.PostgresTranslator;
import com.ibm.fhir.persistence.exception.FHIRPersistenceException;
import com.ibm.fhir.persistence.jdbc.citus.CitusResourceDAO;
-import com.ibm.fhir.persistence.jdbc.citus.CitusResourceReferenceDAO;
import com.ibm.fhir.persistence.jdbc.connection.FHIRDbFlavor;
import com.ibm.fhir.persistence.jdbc.dao.ReindexResourceDAO;
import com.ibm.fhir.persistence.jdbc.dao.api.FhirSequenceDAO;
-import com.ibm.fhir.persistence.jdbc.dao.api.IResourceReferenceDAO;
import com.ibm.fhir.persistence.jdbc.dao.api.ParameterDAO;
import com.ibm.fhir.persistence.jdbc.dao.api.ResourceDAO;
-import com.ibm.fhir.persistence.jdbc.dao.impl.ResourceReferenceDAO;
+import com.ibm.fhir.persistence.jdbc.dao.impl.CommonValuesDAO;
+import com.ibm.fhir.persistence.jdbc.derby.DerbyCommonValuesDAO;
import com.ibm.fhir.persistence.jdbc.derby.DerbyResourceDAO;
-import com.ibm.fhir.persistence.jdbc.derby.DerbyResourceReferenceDAO;
import com.ibm.fhir.persistence.jdbc.impl.ParameterTransactionDataImpl;
import com.ibm.fhir.persistence.jdbc.postgres.PostgresReindexResourceDAO;
import com.ibm.fhir.persistence.jdbc.postgres.PostgresResourceDAO;
-import com.ibm.fhir.persistence.jdbc.postgres.PostgresResourceReferenceDAO;
/**
* Factory for constructing ResourceDAO implementations specific to a
@@ -57,16 +53,15 @@ public static ResourceDAO getResourceDAO(Connection connection, String adminSche
throws IllegalArgumentException, FHIRPersistenceException {
final ResourceDAO resourceDAO;
- IResourceReferenceDAO rrd = getResourceReferenceDAO(connection, adminSchemaName, schemaName, flavor, cache);
switch (flavor.getType()) {
case DERBY:
- resourceDAO = new DerbyResourceDAO(connection, schemaName, flavor, trxSynchRegistry, cache, rrd, ptdi);
+ resourceDAO = new DerbyResourceDAO(connection, schemaName, flavor, trxSynchRegistry, cache, ptdi);
break;
case POSTGRESQL:
- resourceDAO = new PostgresResourceDAO(connection, schemaName, flavor, trxSynchRegistry, cache, rrd, ptdi, shardKey);
+ resourceDAO = new PostgresResourceDAO(connection, schemaName, flavor, trxSynchRegistry, cache, ptdi, shardKey);
break;
case CITUS:
- resourceDAO = new CitusResourceDAO(connection, schemaName, flavor, trxSynchRegistry, cache, rrd, ptdi, shardKey);
+ resourceDAO = new CitusResourceDAO(connection, schemaName, flavor, trxSynchRegistry, cache, ptdi, shardKey);
break;
default:
throw new IllegalArgumentException("Unsupported database type: " + flavor.getType().name());
@@ -89,17 +84,16 @@ public static ReindexResourceDAO getReindexResourceDAO(Connection connection, St
final IDatabaseTranslator translator;
final ReindexResourceDAO result;
- IResourceReferenceDAO rrd = getResourceReferenceDAO(connection, adminSchemaName, schemaName, flavor, cache);
switch (flavor.getType()) {
case DERBY:
translator = new DerbyTranslator();
- result = new ReindexResourceDAO(connection, translator, parameterDao, schemaName, flavor, cache, rrd);
+ result = new ReindexResourceDAO(connection, translator, parameterDao, schemaName, flavor, cache);
break;
case POSTGRESQL:
case CITUS:
translator = new PostgresTranslator();
- result = new PostgresReindexResourceDAO(connection, translator, parameterDao, schemaName, flavor, cache, rrd);
+ result = new PostgresReindexResourceDAO(connection, translator, parameterDao, schemaName, flavor, cache);
break;
default:
throw new IllegalArgumentException("Unsupported database type: " + flavor.getType().name());
@@ -133,16 +127,15 @@ public static ResourceDAO getResourceDAO(Connection connection, String adminSche
FHIRPersistenceJDBCCache cache, Short shardKey) throws IllegalArgumentException, FHIRPersistenceException {
final ResourceDAO resourceDAO;
- IResourceReferenceDAO rrd = getResourceReferenceDAO(connection, adminSchemaName, schemaName, flavor, cache);
switch (flavor.getType()) {
case DERBY:
- resourceDAO = new DerbyResourceDAO(connection, schemaName, flavor, cache, rrd);
+ resourceDAO = new DerbyResourceDAO(connection, schemaName, flavor, cache);
break;
case POSTGRESQL:
- resourceDAO = new PostgresResourceDAO(connection, schemaName, flavor, cache, rrd, shardKey);
+ resourceDAO = new PostgresResourceDAO(connection, schemaName, flavor, cache, shardKey);
break;
case CITUS:
- resourceDAO = new CitusResourceDAO(connection, schemaName, flavor, cache, rrd, shardKey);
+ resourceDAO = new CitusResourceDAO(connection, schemaName, flavor, cache, shardKey);
break;
default:
throw new IllegalArgumentException("Unsupported database type: " + flavor.getType().name());
@@ -151,27 +144,23 @@ public static ResourceDAO getResourceDAO(Connection connection, String adminSche
}
/**
- * Get a standalone DAO to handle the inserts of the common token values and
- * resource token refs just prior to the transaction commit
+ * Get a standalone DAO to handle the fetch of records from common_token_values
+ * and common_canonical_values
* @param connection
* @param schemaName
* @param flavor
- * @param cache
* @return
*/
- public static ResourceReferenceDAO getResourceReferenceDAO(Connection connection, String adminSchemaName, String schemaName, FHIRDbFlavor flavor,
- FHIRPersistenceJDBCCache cache) {
+ public static CommonValuesDAO getCommonValuesDAO(Connection connection, String adminSchemaName, String schemaName, FHIRDbFlavor flavor) {
- final ResourceReferenceDAO rrd;
+ final CommonValuesDAO rrd;
switch (flavor.getType()) {
case DERBY:
- rrd = new DerbyResourceReferenceDAO(new DerbyTranslator(), connection, schemaName, cache.getResourceReferenceCache(), cache.getParameterNameCache(), cache.getLogicalResourceIdentCache());
+ rrd = new DerbyCommonValuesDAO(new DerbyTranslator(), connection, schemaName);
break;
case POSTGRESQL:
- rrd = new PostgresResourceReferenceDAO(new PostgresTranslator(), connection, schemaName, cache.getResourceReferenceCache(), cache.getParameterNameCache(), cache.getLogicalResourceIdentCache());
- break;
case CITUS:
- rrd = new CitusResourceReferenceDAO(new CitusTranslator(), connection, schemaName, cache.getResourceReferenceCache(), cache.getParameterNameCache(), cache.getLogicalResourceIdentCache());
+ rrd = new CommonValuesDAO(new PostgresTranslator(), connection, schemaName);
break;
default:
throw new IllegalArgumentException("Unsupported database type: " + flavor.getType().name());
diff --git a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/cache/CommonTokenValuesCacheImpl.java b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/cache/CommonValuesCacheImpl.java
similarity index 97%
rename from fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/cache/CommonTokenValuesCacheImpl.java
rename to fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/cache/CommonValuesCacheImpl.java
index ff328e24c18..e1e4986a3ff 100644
--- a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/cache/CommonTokenValuesCacheImpl.java
+++ b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/cache/CommonValuesCacheImpl.java
@@ -1,5 +1,5 @@
/*
- * (C) Copyright IBM Corp. 2020, 2021
+ * (C) Copyright IBM Corp. 2020, 2022
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -14,7 +14,7 @@
import java.util.Map;
import java.util.Set;
-import com.ibm.fhir.persistence.jdbc.dao.api.ICommonTokenValuesCache;
+import com.ibm.fhir.persistence.jdbc.dao.api.ICommonValuesCache;
import com.ibm.fhir.persistence.jdbc.dao.impl.ResourceProfileRec;
import com.ibm.fhir.persistence.jdbc.dao.impl.ResourceTokenValueRec;
import com.ibm.fhir.persistence.jdbc.dto.CommonTokenValue;
@@ -22,9 +22,9 @@
/**
* Implementation of a cache used for lookups of entities related
- * to local and external resource references
+ * to tokens, canonicals and code systems
*/
-public class CommonTokenValuesCacheImpl implements ICommonTokenValuesCache {
+public class CommonValuesCacheImpl implements ICommonValuesCache {
// We use LinkedHashMap for the local map because we also need to maintain order
// of insertion to make sure we have correct LRU behavior when updating the shared cache
@@ -50,7 +50,7 @@ public class CommonTokenValuesCacheImpl implements ICommonTokenValuesCache {
* @param tokenValueCacheSize
* @param canonicalCacheSize
*/
- public CommonTokenValuesCacheImpl(int codeSystemCacheSize, int tokenValueCacheSize, int canonicalCacheSize) {
+ public CommonValuesCacheImpl(int codeSystemCacheSize, int tokenValueCacheSize, int canonicalCacheSize) {
// LRU cache for quick lookup of code-systems and token-values
codeSystemsCache = new LRUCache<>(codeSystemCacheSize);
diff --git a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/cache/FHIRPersistenceJDBCCacheImpl.java b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/cache/FHIRPersistenceJDBCCacheImpl.java
index 5dfe120aa76..0dbcb2f69f8 100644
--- a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/cache/FHIRPersistenceJDBCCacheImpl.java
+++ b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/cache/FHIRPersistenceJDBCCacheImpl.java
@@ -10,7 +10,7 @@
import java.util.logging.Logger;
import com.ibm.fhir.persistence.jdbc.FHIRPersistenceJDBCCache;
-import com.ibm.fhir.persistence.jdbc.dao.api.ICommonTokenValuesCache;
+import com.ibm.fhir.persistence.jdbc.dao.api.ICommonValuesCache;
import com.ibm.fhir.persistence.jdbc.dao.api.IIdNameCache;
import com.ibm.fhir.persistence.jdbc.dao.api.ILogicalResourceIdentCache;
import com.ibm.fhir.persistence.jdbc.dao.api.INameIdCache;
@@ -27,7 +27,7 @@ public class FHIRPersistenceJDBCCacheImpl implements FHIRPersistenceJDBCCache {
private final INameIdCache parameterNameCache;
- private final ICommonTokenValuesCache resourceReferenceCache;
+ private final ICommonValuesCache commonValuesCache;
private final ILogicalResourceIdentCache logicalResourceIdentCache;
@@ -39,23 +39,23 @@ public class FHIRPersistenceJDBCCacheImpl implements FHIRPersistenceJDBCCache {
* @param resourceTypeCache
* @param resourceTypeNameCache
* @param parameterNameCache
- * @param resourceReferenceCache
+ * @param commonValuesCache
* @param logicalResourceIdentCache
*/
public FHIRPersistenceJDBCCacheImpl(INameIdCache resourceTypeCache, IIdNameCache resourceTypeNameCache,
- INameIdCache parameterNameCache, ICommonTokenValuesCache resourceReferenceCache, ILogicalResourceIdentCache logicalResourceIdentCache) {
+ INameIdCache parameterNameCache, ICommonValuesCache commonValuesCache, ILogicalResourceIdentCache logicalResourceIdentCache) {
this.resourceTypeCache = resourceTypeCache;
this.resourceTypeNameCache = resourceTypeNameCache;
this.parameterNameCache = parameterNameCache;
- this.resourceReferenceCache = resourceReferenceCache;
+ this.commonValuesCache = commonValuesCache;
this.logicalResourceIdentCache = logicalResourceIdentCache;
}
/**
* @return the resourceReferenceCache
*/
- public ICommonTokenValuesCache getResourceReferenceCache() {
- return resourceReferenceCache;
+ public ICommonValuesCache getCommonValuesCache() {
+ return commonValuesCache;
}
/**
@@ -92,7 +92,7 @@ public void transactionCommitted() {
resourceTypeCache.updateSharedMaps();
resourceTypeNameCache.updateSharedMaps();
parameterNameCache.updateSharedMaps();
- resourceReferenceCache.updateSharedMaps();
+ commonValuesCache.updateSharedMaps();
logicalResourceIdentCache.updateSharedMaps();
}
@@ -102,7 +102,7 @@ public void transactionRolledBack() {
resourceTypeCache.clearLocalMaps();
resourceTypeNameCache.clearLocalMaps();
parameterNameCache.clearLocalMaps();
- resourceReferenceCache.clearLocalMaps();
+ commonValuesCache.clearLocalMaps();
logicalResourceIdentCache.clearLocalMaps();
}
diff --git a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/cache/FHIRPersistenceJDBCCacheUtil.java b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/cache/FHIRPersistenceJDBCCacheUtil.java
index 111006639d3..b041d06f964 100644
--- a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/cache/FHIRPersistenceJDBCCacheUtil.java
+++ b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/cache/FHIRPersistenceJDBCCacheUtil.java
@@ -11,7 +11,7 @@
import com.ibm.fhir.persistence.exception.FHIRPersistenceException;
import com.ibm.fhir.persistence.jdbc.FHIRPersistenceJDBCCache;
-import com.ibm.fhir.persistence.jdbc.dao.api.ICommonTokenValuesCache;
+import com.ibm.fhir.persistence.jdbc.dao.api.ICommonValuesCache;
import com.ibm.fhir.persistence.jdbc.dao.api.ILogicalResourceIdentCache;
import com.ibm.fhir.persistence.jdbc.dao.api.ParameterDAO;
import com.ibm.fhir.persistence.jdbc.dao.api.ResourceDAO;
@@ -26,7 +26,7 @@ public class FHIRPersistenceJDBCCacheUtil {
* @return
*/
public static FHIRPersistenceJDBCCache create(int codeSystemCacheSize, int tokenValueCacheSize, int canonicalCacheSize, int logicalResourceIdentCacheSize) {
- ICommonTokenValuesCache rrc = new CommonTokenValuesCacheImpl(codeSystemCacheSize, tokenValueCacheSize, canonicalCacheSize);
+ ICommonValuesCache rrc = new CommonValuesCacheImpl(codeSystemCacheSize, tokenValueCacheSize, canonicalCacheSize);
ILogicalResourceIdentCache lric = new LogicalResourceIdentCacheImpl(logicalResourceIdentCacheSize);
return new FHIRPersistenceJDBCCacheImpl(new NameIdCache(), new IdNameCache(), new NameIdCache(), rrc, lric);
}
@@ -47,6 +47,6 @@ public static void prefill(ResourceDAO resourceDAO, ParameterDAO parameterDAO, F
cache.getParameterNameCache().prefill(parameterNames);
Map codeSystems = parameterDAO.readAllCodeSystems();
- cache.getResourceReferenceCache().prefillCodeSystems(codeSystems);
+ cache.getCommonValuesCache().prefillCodeSystems(codeSystems);
}
}
diff --git a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/citus/CitusResourceDAO.java b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/citus/CitusResourceDAO.java
index 9371352806b..0c0e64e1f40 100644
--- a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/citus/CitusResourceDAO.java
+++ b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/citus/CitusResourceDAO.java
@@ -31,7 +31,6 @@
import com.ibm.fhir.persistence.jdbc.FHIRPersistenceJDBCCache;
import com.ibm.fhir.persistence.jdbc.connection.FHIRDbFlavor;
import com.ibm.fhir.persistence.jdbc.dao.api.FHIRDAOConstants;
-import com.ibm.fhir.persistence.jdbc.dao.api.IResourceReferenceDAO;
import com.ibm.fhir.persistence.jdbc.dao.api.ParameterDAO;
import com.ibm.fhir.persistence.jdbc.dto.ExtractedParameterValue;
import com.ibm.fhir.persistence.jdbc.dto.Resource;
@@ -84,8 +83,8 @@ public class CitusResourceDAO extends PostgresResourceDAO {
* @param cache
* @param rrd
*/
- public CitusResourceDAO(Connection connection, String schemaName, FHIRDbFlavor flavor, FHIRPersistenceJDBCCache cache, IResourceReferenceDAO rrd, Short shardKey) {
- super(connection, schemaName, flavor, cache, rrd, shardKey);
+ public CitusResourceDAO(Connection connection, String schemaName, FHIRDbFlavor flavor, FHIRPersistenceJDBCCache cache, Short shardKey) {
+ super(connection, schemaName, flavor, cache, shardKey);
}
/**
@@ -99,9 +98,9 @@ public CitusResourceDAO(Connection connection, String schemaName, FHIRDbFlavor f
* @param rrd
* @param ptdi
*/
- public CitusResourceDAO(Connection connection, String schemaName, FHIRDbFlavor flavor, TransactionSynchronizationRegistry trxSynchRegistry, FHIRPersistenceJDBCCache cache, IResourceReferenceDAO rrd,
+ public CitusResourceDAO(Connection connection, String schemaName, FHIRDbFlavor flavor, TransactionSynchronizationRegistry trxSynchRegistry, FHIRPersistenceJDBCCache cache,
ParameterTransactionDataImpl ptdi, Short shardKey) {
- super(connection, schemaName, flavor, trxSynchRegistry, cache, rrd, ptdi, shardKey);
+ super(connection, schemaName, flavor, trxSynchRegistry, cache, ptdi, shardKey);
}
/**
diff --git a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/citus/CitusResourceReferenceDAO.java b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/citus/CitusResourceReferenceDAO.java
deleted file mode 100644
index 841d40ec608..00000000000
--- a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/citus/CitusResourceReferenceDAO.java
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * (C) Copyright IBM Corp. 2022
- *
- * SPDX-License-Identifier: Apache-2.0
- */
-
-package com.ibm.fhir.persistence.jdbc.citus;
-
-import java.sql.Connection;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-import java.util.logging.Level;
-import java.util.logging.Logger;
-
-import com.ibm.fhir.database.utils.api.IDatabaseTranslator;
-import com.ibm.fhir.persistence.jdbc.dao.api.ICommonTokenValuesCache;
-import com.ibm.fhir.persistence.jdbc.dao.api.ILogicalResourceIdentCache;
-import com.ibm.fhir.persistence.jdbc.dao.api.INameIdCache;
-import com.ibm.fhir.persistence.jdbc.dao.impl.ResourceReferenceDAO;
-import com.ibm.fhir.persistence.jdbc.dto.CommonTokenValue;
-import com.ibm.fhir.persistence.jdbc.postgres.PostgresResourceReferenceDAO;
-import com.ibm.fhir.persistence.params.api.ParamSchemaConstants;
-
-/**
- * Citus-specific extension of the {@link ResourceReferenceDAO} to work around
- * some Citus distribution limitations
- */
-public class CitusResourceReferenceDAO extends PostgresResourceReferenceDAO {
- private static final Logger logger = Logger.getLogger(CitusResourceReferenceDAO.class.getName());
-
- /**
- * Public constructor
- *
- * @param t
- * @param c
- * @param schemaName
- * @param cache
- * @param parameterNameCache
- * @param logicalResourceIdentCache
- */
- public CitusResourceReferenceDAO(IDatabaseTranslator t, Connection c, String schemaName, ICommonTokenValuesCache cache, INameIdCache parameterNameCache,
- ILogicalResourceIdentCache logicalResourceIdentCache) {
- super(t, c, schemaName, cache, parameterNameCache, logicalResourceIdentCache);
- }
-
- @Override
- public void doCodeSystemsUpsert(String paramList, Collection sortedSystemNames) {
- // If we try using the PostgreSQL insert-as-select variant, Citus
- // rejects the statement, so instead we simplify things by grabbing
- // the id values from the sequence first, then simply submit as a
- // batch.
- List sequenceValues = new ArrayList<>(sortedSystemNames.size());
- final String nextVal = getTranslator().nextValue(getSchemaName(), "fhir_ref_sequence");
- final String SELECT = ""
- + "SELECT " + nextVal
- + " FROM generate_series(1, ?)";
- try (PreparedStatement ps = getConnection().prepareStatement(SELECT)) {
- ps.setInt(1, sortedSystemNames.size());
- ResultSet rs = ps.executeQuery();
- while (rs.next()) {
- sequenceValues.add(rs.getInt(1));
- }
- } catch (SQLException x) {
- logger.log(Level.SEVERE, SELECT, x);
- throw getTranslator().translate(x);
- }
-
- final String INSERT = ""
- + " INSERT INTO code_systems (code_system_id, code_system_name) "
- + " VALUES (?, ?) "
- + " ON CONFLICT DO NOTHING ";
-
- try (PreparedStatement ps = getConnection().prepareStatement(INSERT)) {
- int index=0;
- for (String csn: sortedSystemNames) {
- ps.setInt(1, sequenceValues.get(index++));
- ps.setString(2, csn);
- ps.addBatch();
- }
- ps.executeBatch();
- } catch (SQLException x) {
- logger.log(Level.SEVERE, INSERT, x);
- throw getTranslator().translate(x);
- }
- }
-
- @Override
- public void doCanonicalValuesUpsert(String paramList, Collection sortedURLS) {
- // Because of how PostgreSQL MVCC implementation, the insert from negative outer
- // join pattern doesn't work...you still hit conflicts. The PostgreSQL pattern
- // for upsert is ON CONFLICT DO NOTHING, which is what we use here:
- List sequenceValues = new ArrayList<>(sortedURLS.size());
- final String nextVal = getTranslator().nextValue(getSchemaName(), ParamSchemaConstants.CANONICAL_ID_SEQ);
- final String SELECT = ""
- + "SELECT " + nextVal
- + " FROM generate_series(1, ?)";
- try (PreparedStatement ps = getConnection().prepareStatement(SELECT)) {
- ps.setInt(1, sortedURLS.size());
- ResultSet rs = ps.executeQuery();
- while (rs.next()) {
- sequenceValues.add(rs.getInt(1));
- }
- } catch (SQLException x) {
- logger.log(Level.SEVERE, SELECT, x);
- throw getTranslator().translate(x);
- }
-
- final String INSERT = ""
- + " INSERT INTO common_canonical_values (canonical_id, url) "
- + " VALUES (?, ?) "
- + " ON CONFLICT DO NOTHING ";
-
- try (PreparedStatement ps = getConnection().prepareStatement(INSERT)) {
- int index=0;
- for (String csn: sortedURLS) {
- ps.setInt(1, sequenceValues.get(index++));
- ps.setString(2, csn);
- ps.addBatch();
- }
- ps.executeBatch();
- } catch (SQLException x) {
- logger.log(Level.SEVERE, INSERT, x);
- throw getTranslator().translate(x);
- }
- }
-
- @Override
- protected void doCommonTokenValuesUpsert(String paramList, Collection sortedTokenValues) {
- // In Citus, we can no longer use a generated id column, so we have to use
- // values from fhir_sequence and insert the values directly. For Citus,
- // COMMON_TOKEN_VALUES is distributed as a REFERENCE table, meaning
- // records are copied to each node in a distributed transaction.
- logger.fine("Inserting " + sortedTokenValues.size() + " values into COMMON_TOKEN_VALUES");
- final String nextVal = getTranslator().nextValue(getSchemaName(), "fhir_sequence");
-
- final String INSERT = ""
- + " INSERT INTO common_token_values (common_token_value_id, token_value, code_system_id) "
- + " VALUES (" + nextVal + ", ?, ?) "
- + " ON CONFLICT DO NOTHING ";
-
- try (PreparedStatement ps = getConnection().prepareStatement(INSERT)) {
- for (CommonTokenValue ctv: sortedTokenValues) {
- ps.setString(1, ctv.getTokenValue());
- ps.setInt(2, ctv.getCodeSystemId());
- ps.addBatch();
- }
- ps.executeBatch();
- } catch (SQLException x) {
- logger.log(Level.SEVERE, INSERT, x);
- throw getTranslator().translate(x);
- }
- }
-}
\ No newline at end of file
diff --git a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/dao/EraseResourceDAO.java b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/dao/EraseResourceDAO.java
index eb339d5a24c..0c860478b46 100644
--- a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/dao/EraseResourceDAO.java
+++ b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/dao/EraseResourceDAO.java
@@ -25,9 +25,11 @@
import com.ibm.fhir.persistence.jdbc.FHIRResourceDAOFactory;
import com.ibm.fhir.persistence.jdbc.connection.FHIRDbFlavor;
import com.ibm.fhir.persistence.jdbc.dao.api.FhirSequenceDAO;
-import com.ibm.fhir.persistence.jdbc.dao.api.IResourceReferenceDAO;
+import com.ibm.fhir.persistence.jdbc.dao.api.ParameterDAO;
import com.ibm.fhir.persistence.jdbc.dao.impl.ResourceDAOImpl;
import com.ibm.fhir.persistence.jdbc.dto.ErasedResourceRec;
+import com.ibm.fhir.persistence.jdbc.dto.ExtractedParameterValue;
+import com.ibm.fhir.persistence.jdbc.dto.Resource;
import com.ibm.fhir.persistence.jdbc.util.ParameterTableSupport;
/**
@@ -72,11 +74,9 @@ public class EraseResourceDAO extends ResourceDAOImpl {
* @param schemaName
* @param flavor
* @param cache
- * @param rrd
*/
- public EraseResourceDAO(Connection conn, String adminSchemaName, IDatabaseTranslator translator, String schemaName, FHIRDbFlavor flavor, FHIRPersistenceJDBCCache cache,
- IResourceReferenceDAO rrd) {
- super(conn, schemaName, flavor, cache, rrd);
+ public EraseResourceDAO(Connection conn, String adminSchemaName, IDatabaseTranslator translator, String schemaName, FHIRDbFlavor flavor, FHIRPersistenceJDBCCache cache) {
+ super(conn, schemaName, flavor, cache);
this.adminSchemaName = adminSchemaName;
this.translator = translator;
}
@@ -425,4 +425,11 @@ public void clearErasedResourcesInGroup(long erasedResourceGroupId) {
throw translator.translate(x);
}
}
+
+ @Override
+ public Resource insert(Resource resource, List parameters, String parameterHashB64, ParameterDAO parameterDao, Integer ifNoneMatch)
+ throws FHIRPersistenceException {
+ // NOP because ERASE doesn't need to insert the resource
+ return resource;
+ }
}
\ No newline at end of file
diff --git a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/dao/ReindexResourceDAO.java b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/dao/ReindexResourceDAO.java
index a140d71972a..df54b83e9af 100644
--- a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/dao/ReindexResourceDAO.java
+++ b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/dao/ReindexResourceDAO.java
@@ -23,13 +23,14 @@
import com.ibm.fhir.database.utils.api.IDatabaseTranslator;
import com.ibm.fhir.database.utils.common.CalendarHelper;
import com.ibm.fhir.database.utils.common.PreparedStatementHelper;
+import com.ibm.fhir.persistence.exception.FHIRPersistenceException;
import com.ibm.fhir.persistence.jdbc.FHIRPersistenceJDBCCache;
import com.ibm.fhir.persistence.jdbc.connection.FHIRDbFlavor;
-import com.ibm.fhir.persistence.jdbc.dao.api.IResourceReferenceDAO;
import com.ibm.fhir.persistence.jdbc.dao.api.ParameterDAO;
import com.ibm.fhir.persistence.jdbc.dao.api.ResourceIndexRecord;
import com.ibm.fhir.persistence.jdbc.dao.impl.ResourceDAOImpl;
import com.ibm.fhir.persistence.jdbc.dto.ExtractedParameterValue;
+import com.ibm.fhir.persistence.jdbc.dto.Resource;
import com.ibm.fhir.persistence.jdbc.impl.ParameterTransactionDataImpl;
import com.ibm.fhir.persistence.jdbc.util.ParameterTableSupport;
@@ -101,8 +102,8 @@ public class ReindexResourceDAO extends ResourceDAOImpl {
* @param cache
* @param rrd
*/
- public ReindexResourceDAO(Connection connection, IDatabaseTranslator translator, ParameterDAO parameterDao, String schemaName, FHIRDbFlavor flavor, FHIRPersistenceJDBCCache cache, IResourceReferenceDAO rrd) {
- super(connection, schemaName, flavor, cache, rrd);
+ public ReindexResourceDAO(Connection connection, IDatabaseTranslator translator, ParameterDAO parameterDao, String schemaName, FHIRDbFlavor flavor, FHIRPersistenceJDBCCache cache) {
+ super(connection, schemaName, flavor, cache);
this.translator = translator;
this.parameterDao = parameterDao;
}
@@ -118,8 +119,8 @@ public ReindexResourceDAO(Connection connection, IDatabaseTranslator translator,
* @param cache
* @param rrd
*/
- public ReindexResourceDAO(Connection connection, IDatabaseTranslator translator, ParameterDAO parameterDao, String schemaName, FHIRDbFlavor flavor, TransactionSynchronizationRegistry trxSynchRegistry, FHIRPersistenceJDBCCache cache, IResourceReferenceDAO rrd, ParameterTransactionDataImpl ptdi) {
- super(connection, schemaName, flavor, trxSynchRegistry, cache, rrd, ptdi);
+ public ReindexResourceDAO(Connection connection, IDatabaseTranslator translator, ParameterDAO parameterDao, String schemaName, FHIRDbFlavor flavor, TransactionSynchronizationRegistry trxSynchRegistry, FHIRPersistenceJDBCCache cache, ParameterTransactionDataImpl ptdi) {
+ super(connection, schemaName, flavor, trxSynchRegistry, cache, ptdi);
this.translator = translator;
this.parameterDao = parameterDao;
@@ -439,4 +440,11 @@ protected void updateParameterHash(Connection conn, long logicalResourceId, Stri
throw translator.translate(x);
}
}
+
+ @Override
+ public Resource insert(Resource resource, List parameters, String parameterHashB64, ParameterDAO parameterDao, Integer ifNoneMatch)
+ throws FHIRPersistenceException {
+ // NOP because for reindex we only insert the parameters - the current resource is not changed
+ return resource;
+ }
}
diff --git a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/dao/api/ICommonTokenValuesCache.java b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/dao/api/ICommonValuesCache.java
similarity index 98%
rename from fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/dao/api/ICommonTokenValuesCache.java
rename to fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/dao/api/ICommonValuesCache.java
index e530a91572d..152bfb5f688 100644
--- a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/dao/api/ICommonTokenValuesCache.java
+++ b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/dao/api/ICommonValuesCache.java
@@ -1,5 +1,5 @@
/*
- * (C) Copyright IBM Corp. 2020, 2021
+ * (C) Copyright IBM Corp. 2020, 2022
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -25,7 +25,7 @@
* likely to be too many unique token-values to cache, so these need
* to be retrieved on-demand and managed as LRU.
*/
-public interface ICommonTokenValuesCache {
+public interface ICommonValuesCache {
/**
* Take the records we've touched in the current thread and update the
diff --git a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/dao/api/ICommonValuesDAO.java b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/dao/api/ICommonValuesDAO.java
new file mode 100644
index 00000000000..55bb724ccb6
--- /dev/null
+++ b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/dao/api/ICommonValuesDAO.java
@@ -0,0 +1,52 @@
+/*
+ * (C) Copyright IBM Corp. 2020, 2022
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package com.ibm.fhir.persistence.jdbc.dao.api;
+
+import java.util.Collection;
+import java.util.List;
+import java.util.Set;
+
+import com.ibm.fhir.persistence.jdbc.dto.CommonTokenValue;
+import com.ibm.fhir.persistence.jdbc.dto.CommonTokenValueResult;
+
+/**
+ * Contract for DAO implementations providing read access to the
+ * normalized common values (token values, canonical values and
+ * code systems) introduced with the schema changes for issue 1366.
+ */
+public interface ICommonValuesDAO {
+
+ /**
+ * Find the database id for the given token value and system
+ * @param codeSystem
+ * @param tokenValue
+ * @return the matching record holding common_token_values.common_token_value_id and its code_system_id, or null if not found
+ */
+ CommonTokenValueResult readCommonTokenValueId(String codeSystem, String tokenValue);
+
+ /**
+ * Find database ids for a set of common token values
+ * @param tokenValues
+ * @return a non-null, possibly-empty set of ids from common_token_values.common_token_value_id;
+ * CommonTokenValues with no corresponding record will be omitted from the set
+ */
+ Set readCommonTokenValueIds(Collection tokenValues);
+
+ /**
+ * Fetch the list of matching common_token_value_id records for the given tokenValue.
+ * @param tokenValue
+ * @return
+ */
+ List readCommonTokenValueIdList(String tokenValue);
+
+ /**
+ * Read the database canonical_id for the given value
+ * @param canonicalValue
+ * @return
+ */
+ Long readCanonicalId(String canonicalValue);
+}
\ No newline at end of file
diff --git a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/dao/api/IResourceReferenceDAO.java b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/dao/api/IResourceReferenceDAO.java
deleted file mode 100644
index e8e9da31158..00000000000
--- a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/dao/api/IResourceReferenceDAO.java
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * (C) Copyright IBM Corp. 2020, 2021
- *
- * SPDX-License-Identifier: Apache-2.0
- */
-
-package com.ibm.fhir.persistence.jdbc.dao.api;
-
-import java.util.Collection;
-import java.util.List;
-import java.util.Set;
-
-import com.ibm.fhir.persistence.exception.FHIRPersistenceException;
-import com.ibm.fhir.persistence.jdbc.dao.impl.ResourceProfileRec;
-import com.ibm.fhir.persistence.jdbc.dao.impl.ResourceReferenceValueRec;
-import com.ibm.fhir.persistence.jdbc.dao.impl.ResourceTokenValueRec;
-import com.ibm.fhir.persistence.jdbc.dto.CommonTokenValue;
-import com.ibm.fhir.persistence.jdbc.dto.CommonTokenValueResult;
-
-/**
- * Contract for DAO implementations handling persistence of
- * resource references (and token parameters) with the
- * normalized schema introduced in issue 1366.
- */
-public interface IResourceReferenceDAO {
-
- /**
- * Get the cache used by the DAO
- * @return
- */
- ICommonTokenValuesCache getResourceReferenceCache();
-
- /**
- * Execute any statements with pending batch entries
- * @throws FHIRPersistenceException
- */
- void flush() throws FHIRPersistenceException;
-
- /**
- * Add TOKEN_VALUE_MAP records, creating any CODE_SYSTEMS and COMMON_TOKEN_VALUES
- * as necessary
- * @param resourceType
- * @param xrefs
- * @param refValues
- * @param profileRecs
- * @param tagRecs
- * @param securityRecs
- */
- void addNormalizedValues(String resourceType, Collection xrefs, Collection refValues, Collection profileRecs, Collection tagRecs, Collection securityRecs) throws FHIRPersistenceException;
-
- /**
- * Persist the records, which may span multiple resource types
- * @param records
- * @param referenceRecords
- * @param profileRecs
- * @param tagRecs
- * @param securityRecs
- */
- void persist(Collection records, Collection referenceRecords, Collection profileRecs, Collection tagRecs, Collection securityRecs) throws FHIRPersistenceException;
-
- /**
- * Find the database id for the given token value and system
- * @param codeSystem
- * @param tokenValue
- * @return the matching id from common_token_values.common_token_value_id or null if not found
- */
- CommonTokenValueResult readCommonTokenValueId(String codeSystem, String tokenValue);
-
- /**
- * Find database ids for a set of common token values
- * @param tokenValues
- * @return a non-null, possibly-empty set of ids from common_token_values.common_token_value_id;
- * CommonTokenValues with no corresponding record will be omitted from the set
- */
- Set readCommonTokenValueIds(Collection tokenValues);
-
- /**
- * Fetch the list of matching common_token_value_id records for the given tokenValue.
- * @param tokenValue
- * @return
- */
- List readCommonTokenValueIdList(String tokenValue);
-
- /**
- * Read the database canonical_id for the given value
- * @param canonicalValue
- * @return
- */
- Long readCanonicalId(String canonicalValue);
-}
\ No newline at end of file
diff --git a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/dao/impl/CommonValuesDAO.java b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/dao/impl/CommonValuesDAO.java
new file mode 100644
index 00000000000..fe3df5956cc
--- /dev/null
+++ b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/dao/impl/CommonValuesDAO.java
@@ -0,0 +1,195 @@
+/*
+ * (C) Copyright IBM Corp. 2020, 2022
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package com.ibm.fhir.persistence.jdbc.dao.impl;
+
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import com.ibm.fhir.database.utils.api.IDatabaseTranslator;
+import com.ibm.fhir.persistence.jdbc.dao.api.ICommonValuesDAO;
+import com.ibm.fhir.persistence.jdbc.dto.CommonTokenValue;
+import com.ibm.fhir.persistence.jdbc.dto.CommonTokenValueResult;
+
+/**
+ * DAO to fetch common value records normalized in common_token_values,
+ * common_canonical_values and code_systems
+ */
+public class CommonValuesDAO implements ICommonValuesDAO {
+ private static final Logger logger = Logger.getLogger(CommonValuesDAO.class.getName());
+
+ private final String schemaName;
+
+ // the database connection used to issue the lookup queries
+ private final Connection connection;
+
+ // The translator for the type of database we are connected to
+ private final IDatabaseTranslator translator;
+
+ /**
+ * Public constructor
+ *
+ * @param t
+ * @param c
+ * @param schemaName
+ */
+ public CommonValuesDAO(IDatabaseTranslator t, Connection c, String schemaName) {
+ this.translator = t;
+ this.schemaName = schemaName;
+ this.connection = c;
+ }
+
+ /**
+ * Getter for the {@link IDatabaseTranslator} held by this DAO
+ * @return
+ */
+ protected IDatabaseTranslator getTranslator() {
+ return this.translator;
+ }
+
+ /**
+ * Getter for the {@link Connection} held by this DAO
+ * @return
+ */
+ protected Connection getConnection() {
+ return this.connection;
+ }
+
+ /**
+ * Getter for subclass access to the schemaName
+ * @return
+ */
+ protected String getSchemaName() {
+ return this.schemaName;
+ }
+
+ @Override
+ public CommonTokenValueResult readCommonTokenValueId(String codeSystem, String tokenValue) {
+ CommonTokenValueResult result;
+
+ final String SQL = ""
+ + "SELECT c.code_system_id, c.common_token_value_id "
+ + " FROM common_token_values c,"
+ + " code_systems s "
+ + " WHERE c.token_value = ? "
+ + " AND s.code_system_name = ? "
+ + " AND c.code_system_id = s.code_system_id";
+ try (PreparedStatement ps = connection.prepareStatement(SQL)) {
+ ps.setString(1, tokenValue);
+ ps.setString(2, codeSystem);
+ ResultSet rs = ps.executeQuery();
+ if (rs.next()) {
+ result = new CommonTokenValueResult(null, rs.getInt(1), rs.getLong(2));
+ } else {
+ result = null;
+ }
+ } catch (SQLException x) {
+ logger.log(Level.SEVERE, SQL, x);
+ throw translator.translate(x);
+ }
+
+ return result;
+ }
+
+ @Override
+ public Set readCommonTokenValueIds(Collection tokenValues) {
+ if (tokenValues.isEmpty()) {
+ return Collections.emptySet();
+ }
+
+ Set result = new HashSet<>();
+
+ StringBuilder select = new StringBuilder()
+ .append("SELECT c.token_value, c.code_system_id, c.common_token_value_id ")
+ .append(" FROM common_token_values c")
+ .append(" JOIN (VALUES ");
+
+ String delim = "";
+ for (CommonTokenValue tokenValue : tokenValues) {
+ select.append(delim);
+ // CodeSystemId is an int that comes from the db so we can use the literal for that.
+ // TokenValue is a string that could possibly come from the end user, so that should be a bind variable.
+ select.append("(?," + tokenValue.getCodeSystemId() + ")");
+ delim = ", ";
+ }
+
+ select.append(") AS tmp (token_value,code_system_id) ON tmp.token_value = c.token_value AND tmp.code_system_id = c.code_system_id");
+
+ try (PreparedStatement ps = connection.prepareStatement(select.toString())) {
+ Iterator iterator = tokenValues.iterator();
+ for (int i = 1; i <= tokenValues.size(); i++) {
+ CommonTokenValue tokenValue = iterator.next();
+
+ ps.setString(i, tokenValue.getTokenValue());
+ }
+
+ ResultSet rs = ps.executeQuery();
+ while (rs.next()) {
+ result.add(new CommonTokenValueResult(rs.getString(1), rs.getInt(2), rs.getLong(3)));
+ }
+ } catch (SQLException x) {
+ logger.log(Level.SEVERE, select.toString(), x);
+ throw translator.translate(x);
+ }
+
+ return result;
+ }
+
+ @Override
+ public Long readCanonicalId(String canonicalValue) {
+ Long result;
+ final String SQL = ""
+ + "SELECT canonical_id "
+ + " FROM common_canonical_values "
+ + " WHERE url = ? ";
+ try (PreparedStatement ps = connection.prepareStatement(SQL)) {
+ ps.setString(1, canonicalValue);
+ ResultSet rs = ps.executeQuery();
+ if (rs.next()) {
+ result = rs.getLong(1);
+ } else {
+ result = null;
+ }
+ } catch (SQLException x) {
+ logger.log(Level.SEVERE, SQL, x);
+ throw translator.translate(x);
+ }
+
+ return result;
+ }
+
+ @Override
+ public List readCommonTokenValueIdList(final String tokenValue) {
+ final List result = new ArrayList<>();
+ final String SQL = ""
+ + "SELECT c.common_token_value_id "
+ + " FROM common_token_values c "
+ + " WHERE c.token_value = ?";
+ try (PreparedStatement ps = connection.prepareStatement(SQL)) {
+ ps.setString(1, tokenValue);
+ ResultSet rs = ps.executeQuery();
+ while (rs.next()) {
+ result.add(rs.getLong(1));
+ }
+ } catch (SQLException x) {
+ logger.log(Level.SEVERE, SQL, x);
+ throw translator.translate(x);
+ }
+
+ return result;
+ }
+}
\ No newline at end of file
diff --git a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/dao/impl/JDBCIdentityCacheImpl.java b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/dao/impl/JDBCIdentityCacheImpl.java
index 3df278a6311..6b14f56084b 100644
--- a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/dao/impl/JDBCIdentityCacheImpl.java
+++ b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/dao/impl/JDBCIdentityCacheImpl.java
@@ -1,5 +1,5 @@
/*
- * (C) Copyright IBM Corp. 2020, 2021
+ * (C) Copyright IBM Corp. 2020, 2022
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -19,8 +19,8 @@
import com.ibm.fhir.persistence.exception.FHIRPersistenceDataAccessException;
import com.ibm.fhir.persistence.exception.FHIRPersistenceException;
import com.ibm.fhir.persistence.jdbc.FHIRPersistenceJDBCCache;
+import com.ibm.fhir.persistence.jdbc.dao.api.ICommonValuesDAO;
import com.ibm.fhir.persistence.jdbc.dao.api.ILogicalResourceIdentCache;
-import com.ibm.fhir.persistence.jdbc.dao.api.IResourceReferenceDAO;
import com.ibm.fhir.persistence.jdbc.dao.api.JDBCIdentityCache;
import com.ibm.fhir.persistence.jdbc.dao.api.LogicalResourceIdentKey;
import com.ibm.fhir.persistence.jdbc.dao.api.ParameterDAO;
@@ -46,19 +46,20 @@ public class JDBCIdentityCacheImpl implements JDBCIdentityCache {
// The DAO providing access to resource types
private final ResourceDAO resourceDAO;
- private final IResourceReferenceDAO resourceReferenceDAO;
+ // The DAO providing access to the normalized values in common_token_values, common_canonical_values and code_systems
+ private final ICommonValuesDAO commonValuesDAO;
/**
* Public constructor
* @param cache
* @param parameterDAO
- * @param rrd
+ * @param commonValuesDAO
*/
- public JDBCIdentityCacheImpl(FHIRPersistenceJDBCCache cache, ResourceDAO resourceDAO, ParameterDAO parameterDAO, IResourceReferenceDAO rrd) {
+ public JDBCIdentityCacheImpl(FHIRPersistenceJDBCCache cache, ResourceDAO resourceDAO, ParameterDAO parameterDAO, ICommonValuesDAO commonValuesDAO) {
this.cache = cache;
this.resourceDAO = resourceDAO;
this.parameterDAO = parameterDAO;
- this.resourceReferenceDAO = rrd;
+ this.commonValuesDAO = commonValuesDAO;
}
@Override
@@ -106,12 +107,12 @@ public String getResourceTypeName(Integer resourceTypeId) throws FHIRPersistence
@Override
public Integer getCodeSystemId(String codeSystemName) throws FHIRPersistenceException {
- Integer result = cache.getResourceReferenceCache().getCodeSystemId(codeSystemName);
+ Integer result = cache.getCommonValuesCache().getCodeSystemId(codeSystemName);
if (result == null) {
// cache miss, so hit the database
result = parameterDAO.readOrAddCodeSystemId(codeSystemName);
if (result != null) {
- cache.getResourceReferenceCache().addCodeSystem(codeSystemName, result);
+ cache.getCommonValuesCache().addCodeSystem(codeSystemName, result);
}
}
return result;
@@ -130,11 +131,11 @@ public Integer getParameterNameId(String parameterName) throws FHIRPersistenceEx
@Override
public Long getCanonicalId(String canonicalValue) throws FHIRPersistenceException {
- Long result = cache.getResourceReferenceCache().getCanonicalId(canonicalValue);
+ Long result = cache.getCommonValuesCache().getCanonicalId(canonicalValue);
if (result == null) {
- result = resourceReferenceDAO.readCanonicalId(canonicalValue);
+ result = commonValuesDAO.readCanonicalId(canonicalValue);
if (result != null) {
- cache.getResourceReferenceCache().addCanonicalValue(canonicalValue, result);
+ cache.getCommonValuesCache().addCanonicalValue(canonicalValue, result);
} else {
result = -1L;
}
@@ -145,12 +146,12 @@ public Long getCanonicalId(String canonicalValue) throws FHIRPersistenceExceptio
@Override
public Long getCommonTokenValueId(String codeSystem, String tokenValue) {
- Long result = cache.getResourceReferenceCache().getCommonTokenValueId(codeSystem, tokenValue);
+ Long result = cache.getCommonValuesCache().getCommonTokenValueId(codeSystem, tokenValue);
if (result == null) {
if (logger.isLoggable(Level.FINE)) {
logger.fine("Cache miss. Fetching common_token_value_id from database: '" + codeSystem + "|" + tokenValue + "'");
}
- CommonTokenValueResult dto = resourceReferenceDAO.readCommonTokenValueId(codeSystem, tokenValue);
+ CommonTokenValueResult dto = commonValuesDAO.readCommonTokenValueId(codeSystem, tokenValue);
if (dto != null) {
// Value exists in the database, so we can add this to our cache. Note that we still
// choose to add it the thread-local cache - this avoids any locking. The values will
@@ -160,8 +161,8 @@ public Long getCommonTokenValueId(String codeSystem, String tokenValue) {
if (logger.isLoggable(Level.FINE)) {
logger.fine("Adding common_token_value_id to cache: '" + codeSystem + "|" + tokenValue + "' = " + result);
}
- cache.getResourceReferenceCache().addCodeSystem(codeSystem, dto.getCodeSystemId());
- cache.getResourceReferenceCache().addTokenValue(new CommonTokenValue(codeSystem, dto.getCodeSystemId(), tokenValue), result);
+ cache.getCommonValuesCache().addCodeSystem(codeSystem, dto.getCodeSystemId());
+ cache.getCommonValuesCache().addTokenValue(new CommonTokenValue(codeSystem, dto.getCodeSystemId(), tokenValue), result);
}
}
return result;
@@ -170,7 +171,7 @@ public Long getCommonTokenValueId(String codeSystem, String tokenValue) {
@Override
public Set getCommonTokenValueIds(Collection tokenValues) {
Set misses = new HashSet<>();
- Set result = cache.getResourceReferenceCache().resolveCommonTokenValueIds(tokenValues, misses);
+ Set result = cache.getCommonValuesCache().resolveCommonTokenValueIds(tokenValues, misses);
if (misses.isEmpty()) {
return result;
@@ -180,7 +181,7 @@ public Set getCommonTokenValueIds(Collection tokenValues
logger.fine("Cache miss. Fetching common_token_value_ids from database: " + misses);
}
- Set readCommonTokenValueIds = resourceReferenceDAO.readCommonTokenValueIds(misses);
+ Set readCommonTokenValueIds = commonValuesDAO.readCommonTokenValueIds(misses);
result.addAll(readCommonTokenValueIds.stream()
.map(r -> r.getCommonTokenValueId())
.collect(Collectors.toSet()));
@@ -195,7 +196,7 @@ public Set getCommonTokenValueIds(Collection tokenValues
}
// The codeSystem is not required at this stage
- cache.getResourceReferenceCache().addTokenValue(new CommonTokenValue(null, dto.getCodeSystemId(), dto.getTokenValue()), dto.getCommonTokenValueId());
+ cache.getCommonValuesCache().addTokenValue(new CommonTokenValue(null, dto.getCodeSystemId(), dto.getTokenValue()), dto.getCommonTokenValueId());
}
return result;
@@ -203,7 +204,7 @@ public Set getCommonTokenValueIds(Collection tokenValues
@Override
public List getCommonTokenValueIdList(String tokenValue) {
- return resourceReferenceDAO.readCommonTokenValueIdList(tokenValue);
+ return commonValuesDAO.readCommonTokenValueIdList(tokenValue);
}
@Override
diff --git a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/dao/impl/ParameterVisitorBatchDAO.java b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/dao/impl/ParameterVisitorBatchDAO.java
deleted file mode 100644
index f32b1be9981..00000000000
--- a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/dao/impl/ParameterVisitorBatchDAO.java
+++ /dev/null
@@ -1,732 +0,0 @@
-/*
- * (C) Copyright IBM Corp. 2019, 2022
- *
- * SPDX-License-Identifier: Apache-2.0
- */
-
-package com.ibm.fhir.persistence.jdbc.dao.impl;
-
-import static com.ibm.fhir.config.FHIRConfiguration.PROPERTY_SEARCH_ENABLE_LEGACY_WHOLE_SYSTEM_SEARCH_PARAMS;
-import static com.ibm.fhir.search.SearchConstants.PROFILE;
-import static com.ibm.fhir.search.SearchConstants.SECURITY;
-import static com.ibm.fhir.search.SearchConstants.TAG;
-
-import java.math.BigDecimal;
-import java.sql.Connection;
-import java.sql.PreparedStatement;
-import java.sql.SQLException;
-import java.sql.Timestamp;
-import java.sql.Types;
-import java.util.ArrayList;
-import java.util.Calendar;
-import java.util.List;
-import java.util.logging.Level;
-import java.util.logging.Logger;
-
-import com.ibm.fhir.config.FHIRConfigHelper;
-import com.ibm.fhir.database.utils.common.CalendarHelper;
-import com.ibm.fhir.persistence.exception.FHIRPersistenceDataAccessException;
-import com.ibm.fhir.persistence.exception.FHIRPersistenceException;
-import com.ibm.fhir.persistence.jdbc.dao.api.IResourceReferenceDAO;
-import com.ibm.fhir.persistence.jdbc.dao.api.JDBCIdentityCache;
-import com.ibm.fhir.persistence.jdbc.dto.CompositeParmVal;
-import com.ibm.fhir.persistence.jdbc.dto.DateParmVal;
-import com.ibm.fhir.persistence.jdbc.dto.ExtractedParameterValue;
-import com.ibm.fhir.persistence.jdbc.dto.ExtractedParameterValueVisitor;
-import com.ibm.fhir.persistence.jdbc.dto.LocationParmVal;
-import com.ibm.fhir.persistence.jdbc.dto.NumberParmVal;
-import com.ibm.fhir.persistence.jdbc.dto.QuantityParmVal;
-import com.ibm.fhir.persistence.jdbc.dto.ReferenceParmVal;
-import com.ibm.fhir.persistence.jdbc.dto.StringParmVal;
-import com.ibm.fhir.persistence.jdbc.dto.TokenParmVal;
-import com.ibm.fhir.persistence.jdbc.impl.ParameterTransactionDataImpl;
-import com.ibm.fhir.persistence.jdbc.util.CanonicalSupport;
-import com.ibm.fhir.schema.control.FhirSchemaConstants;
-import com.ibm.fhir.search.util.ReferenceValue;
-import com.ibm.fhir.search.util.ReferenceValue.ReferenceType;
-import com.ibm.fhir.search.util.SearchHelper;
-
-/**
- * Batch insert into the parameter values tables. Avoids having to create one stored procedure
- * per resource type, because the row type array approach apparently won't work with dynamic
- * SQL (EXECUTE ... USING ...). Unfortunately this means we have more database round-trips, we
- * don't have a choice.
- */
-public class ParameterVisitorBatchDAO implements ExtractedParameterValueVisitor, AutoCloseable {
- private static final Logger logger = Logger.getLogger(ParameterVisitorBatchDAO.class.getName());
-
- // the connection to use for the inserts
- private final Connection connection;
-
- // the max number of rows we accumulate for a given statement before we submit the batch
- private final int batchSize;
-
- // Enable the feature to store whole system parameters
- private final boolean storeWholeSystemParams = true;
-
- // FK to the logical resource for the parameters being added
- private final long logicalResourceId;
-
- // Maintainers: remember to close all statements in AutoCloseable#close()
- private final String insertString;
- private final PreparedStatement strings;
- private int stringCount;
-
- private final String insertNumber;
- private final PreparedStatement numbers;
- private int numberCount;
-
- private final String insertDate;
- private final PreparedStatement dates;
- private int dateCount;
-
- private final String insertQuantity;
- private final PreparedStatement quantities;
- private int quantityCount;
-
- // rarely used so no need for {@code java.sql.PreparedStatement} or batching on this one
- // even on Location resources its only there once by default
- private final String insertLocation;
-
- // Searchable string attributes stored at the system level
- private final PreparedStatement systemStrings;
- private int systemStringCount;
-
- // Searchable date attributes stored at the system level
- private final PreparedStatement systemDates;
- private int systemDateCount;
-
- // DAO for handling parameters stored as token values (including system-level token search params)
- private final IResourceReferenceDAO resourceReferenceDAO;
-
- // Collect a list of token values to process in one go
- private final List tokenValueRecs = new ArrayList<>();
- private final List referenceValueRecs = new ArrayList<>();
-
- // Tags are now stored in their own tables
- private final List tagTokenRecs = new ArrayList<>();
-
- // Security params are now stored in their own tables
- private final List securityTokenRecs = new ArrayList<>();
-
- // Profiles are now stored in their own tables
- private final List profileRecs = new ArrayList<>();
-
- // The table prefix (resourceType)
- private final String tablePrefix;
-
- // The common cache for all our identity lookup needs
- private final JDBCIdentityCache identityCache;
-
- // If not null, we stash certain parameter data here for insertion later
- private final ParameterTransactionDataImpl transactionData;
-
- // tracks the number of composites so we know what next composite_id to use
- int compositeIdCounter = 0;
-
- // when set, this value is added as the composite_id value for each parameter we store
- Integer currentCompositeId = null;
-
- // Supports slightly more useful error messages if we hit a nested composite
- String currentCompositeParameterName = null;
-
- // Enable use of legacy whole-system search parameters for the search request
- private final boolean legacyWholeSystemSearchParamsEnabled;
-
- /**
- * Public constructor
- * @param c
- * @param resourceId
- */
- public ParameterVisitorBatchDAO(Connection c, String adminSchemaName, String tablePrefix, boolean multitenant, long logicalResourceId, int batchSize,
- JDBCIdentityCache identityCache, IResourceReferenceDAO resourceReferenceDAO, ParameterTransactionDataImpl ptdi) throws SQLException {
- if (batchSize < 1) {
- throw new IllegalArgumentException("batchSize must be >= 1");
- }
-
- this.connection = c;
- this.logicalResourceId = logicalResourceId;
- this.batchSize = batchSize;
- this.identityCache = identityCache;
- this.resourceReferenceDAO = resourceReferenceDAO;
- this.tablePrefix = tablePrefix;
- this.transactionData = ptdi;
- this.legacyWholeSystemSearchParamsEnabled =
- FHIRConfigHelper.getBooleanProperty(PROPERTY_SEARCH_ENABLE_LEGACY_WHOLE_SYSTEM_SEARCH_PARAMS, false);
-
- insertString = multitenant ?
- "INSERT INTO " + tablePrefix + "_str_values (mt_id, parameter_name_id, str_value, str_value_lcase, logical_resource_id, composite_id) VALUES (" + adminSchemaName + ".sv_tenant_id,?,?,?,?,?)"
- :
- "INSERT INTO " + tablePrefix + "_str_values (parameter_name_id, str_value, str_value_lcase, logical_resource_id, composite_id) VALUES (?,?,?,?,?)";
- strings = c.prepareStatement(insertString);
-
- insertNumber = multitenant ?
- "INSERT INTO " + tablePrefix + "_number_values (mt_id, parameter_name_id, number_value, number_value_low, number_value_high, logical_resource_id, composite_id) VALUES (" + adminSchemaName + ".sv_tenant_id,?,?,?,?,?,?)"
- :
- "INSERT INTO " + tablePrefix + "_number_values (parameter_name_id, number_value, number_value_low, number_value_high, logical_resource_id, composite_id) VALUES (?,?,?,?,?,?)";
- numbers = c.prepareStatement(insertNumber);
-
- insertDate = multitenant ?
- "INSERT INTO " + tablePrefix + "_date_values (mt_id, parameter_name_id, date_start, date_end, logical_resource_id, composite_id) VALUES (" + adminSchemaName + ".sv_tenant_id,?,?,?,?,?)"
- :
- "INSERT INTO " + tablePrefix + "_date_values (parameter_name_id, date_start, date_end, logical_resource_id, composite_id) VALUES (?,?,?,?,?)";
- dates = c.prepareStatement(insertDate);
-
- insertQuantity = multitenant ?
- "INSERT INTO " + tablePrefix + "_quantity_values (mt_id, parameter_name_id, code_system_id, code, quantity_value, quantity_value_low, quantity_value_high, logical_resource_id, composite_id) VALUES (" + adminSchemaName + ".sv_tenant_id,?,?,?,?,?,?,?,?)"
- :
- "INSERT INTO " + tablePrefix + "_quantity_values (parameter_name_id, code_system_id, code, quantity_value, quantity_value_low, quantity_value_high, logical_resource_id, composite_id) VALUES (?,?,?,?,?,?,?,?)";
- quantities = c.prepareStatement(insertQuantity);
-
- insertLocation = multitenant ? "INSERT INTO " + tablePrefix + "_latlng_values (mt_id, parameter_name_id, latitude_value, longitude_value, logical_resource_id, composite_id) VALUES (" + adminSchemaName + ".sv_tenant_id,?,?,?,?,?)"
- : "INSERT INTO " + tablePrefix + "_latlng_values (parameter_name_id, latitude_value, longitude_value, logical_resource_id, composite_id) VALUES (?,?,?,?,?)";
-
- if (storeWholeSystemParams) {
- // System level string attributes
- String insertSystemString = multitenant ?
- "INSERT INTO str_values (mt_id, parameter_name_id, str_value, str_value_lcase, logical_resource_id) VALUES (" + adminSchemaName + ".sv_tenant_id,?,?,?,?)"
- :
- "INSERT INTO str_values (parameter_name_id, str_value, str_value_lcase, logical_resource_id) VALUES (?,?,?,?)";
- systemStrings = c.prepareStatement(insertSystemString);
-
- // System level date attributes
- String insertSystemDate = multitenant ?
- "INSERT INTO date_values (mt_id, parameter_name_id, date_start, date_end, logical_resource_id) VALUES (" + adminSchemaName + ".sv_tenant_id,?,?,?,?)"
- :
- "INSERT INTO date_values (parameter_name_id, date_start, date_end, logical_resource_id) VALUES (?,?,?,?)";
- systemDates = c.prepareStatement(insertSystemDate);
- } else {
- systemStrings = null;
- systemDates = null;
- }
- }
-
- /**
- * Look up the normalized id for the parameter, adding it to the parameter_names table if it doesn't yet exist
- * @param parameterName
- * @return
- */
- protected int getParameterNameId(String parameterName) throws FHIRPersistenceException {
- return identityCache.getParameterNameId(parameterName);
- }
-
- /**
- * Looks up the code system. If it doesn't exist, adds it to the database
- * @param codeSystem
- * @return
- */
- protected int getCodeSystemId(String codeSystem) throws FHIRPersistenceException {
- return identityCache.getCodeSystemId(codeSystem);
- }
-
- @Override
- public void visit(StringParmVal param) throws FHIRPersistenceException {
- String parameterName = param.getName();
- String value = param.getValueString();
-
- if (PROFILE.equals(parameterName)) {
- // profile canonicals are now stored in their own tables.
- processProfile(param);
- if (!legacyWholeSystemSearchParamsEnabled) {
- // Don't store in legacy search param tables
- return;
- }
- }
-
- while (value != null && value.getBytes().length > FhirSchemaConstants.MAX_SEARCH_STRING_BYTES) {
- // keep chopping the string in half until its byte representation fits inside
- // the VARCHAR
- value = value.substring(0, value.length() / 2);
- }
-
- try {
- int parameterNameId = getParameterNameId(parameterName);
- if (storeWholeSystem(param)) {
- if (logger.isLoggable(Level.FINE)) {
- logger.fine("systemStringValue: " + parameterName + "[" + parameterNameId + "], " + value);
- }
-
- systemStrings.setInt(1, parameterNameId);
- if (value != null) {
- systemStrings.setString(2, value);
- systemStrings.setString(3, SearchHelper.normalizeForSearch(value));
- }
- else {
- systemStrings.setNull(2, Types.VARCHAR);
- systemStrings.setNull(3, Types.VARCHAR);
- }
- systemStrings.setLong(4, logicalResourceId);
- systemStrings.addBatch();
-
- if (++systemStringCount == this.batchSize) {
- systemStrings.executeBatch();
- systemStringCount = 0;
- }
- }
-
- // always store at the resource-specific level
- if (logger.isLoggable(Level.FINE)) {
- logger.fine("stringValue: " + parameterName + "[" + parameterNameId + "], " + value);
- }
-
- setStringParms(strings, parameterNameId, value);
- strings.addBatch();
-
- if (++stringCount == this.batchSize) {
- strings.executeBatch();
- stringCount = 0;
- }
- } catch (SQLException x) {
- throw new FHIRPersistenceDataAccessException(parameterName + "=" + value, x);
- }
- }
-
- private void setStringParms(PreparedStatement insert, int parameterNameId, String value) throws SQLException {
- insert.setInt(1, parameterNameId);
- if (value != null) {
- insert.setString(2, value);
- insert.setString(3, SearchHelper.normalizeForSearch(value));
- } else {
- insert.setNull(2, Types.VARCHAR);
- insert.setNull(3, Types.VARCHAR);
- }
- insert.setLong(4, logicalResourceId);
- setCompositeId(insert, 5);
- }
-
- /**
- * Special case to store profile strings as canonical uri values in their own table
- * @param param
- * @throws FHIRPersistenceException
- */
- private void processProfile(StringParmVal param) throws FHIRPersistenceException {
- final String parameterName = param.getName();
- final int resourceTypeId = identityCache.getResourceTypeId(param.getResourceType());
-
- // Parse the parameter value to extract the URI|VERSION#FRAGMENT pieces
- ResourceProfileRec rec = CanonicalSupport.makeResourceProfileRec(parameterName, param.getResourceType(), resourceTypeId, this.logicalResourceId, param.getValueString(), param.isWholeSystem());
- if (transactionData != null) {
- transactionData.addValue(rec);
- } else {
- profileRecs.add(rec);
- }
- }
-
- /**
- * Set the composite_id column value or null if required
- * @param ps the statement
- * @param idx the column index
- * @throws SQLException
- */
- private void setCompositeId(PreparedStatement ps, int idx) throws SQLException {
- if (this.currentCompositeId != null) {
- ps.setInt(idx, this.currentCompositeId);
- } else {
- ps.setNull(idx, Types.INTEGER);
- }
- }
-
- @Override
- public void visit(NumberParmVal param) throws FHIRPersistenceException {
- String parameterName = param.getName();
- BigDecimal value = param.getValueNumber();
- BigDecimal valueLow = param.getValueNumberLow();
- BigDecimal valueHigh = param.getValueNumberHigh();
-
- // System-level number search parameters are not supported
- if (storeWholeSystem(param)) {
- String msg = "System-level number search parameters are not supported: " + parameterName;
- logger.warning(msg);
- throw new IllegalArgumentException(msg);
- }
-
- try {
- int parameterNameId = getParameterNameId(parameterName);
-
- if (logger.isLoggable(Level.FINE)) {
- logger.fine("numberValue: " + parameterName + "[" + parameterNameId + "], "
- + value + " [" + valueLow + ", " + valueHigh + "]");
- }
-
- setNumberParms(numbers, parameterNameId, value, valueLow, valueHigh);
- numbers.addBatch();
-
- if (++numberCount == this.batchSize) {
- numbers.executeBatch();
- numberCount = 0;
- }
- }
- catch (SQLException x) {
- throw new FHIRPersistenceDataAccessException(parameterName + "={" + value + " ["+ valueLow + "," + valueHigh + "}", x);
- }
- }
-
- private void setNumberParms(PreparedStatement insert, int parameterNameId, BigDecimal value, BigDecimal valueLow, BigDecimal valueHigh) throws SQLException {
- insert.setInt(1, parameterNameId);
- insert.setBigDecimal(2, value);
- insert.setBigDecimal(3, valueLow);
- insert.setBigDecimal(4, valueHigh);
- insert.setLong(5, logicalResourceId);
- setCompositeId(insert, 6);
- }
-
- @Override
- public void visit(DateParmVal param) throws FHIRPersistenceException {
- String parameterName = param.getName();
- Timestamp dateStart = param.getValueDateStart();
- Timestamp dateEnd = param.getValueDateEnd();
- try {
- int parameterNameId = getParameterNameId(parameterName);
-
- if (storeWholeSystem(param)) {
- // store as a system level search param
- if (logger.isLoggable(Level.FINE)) {
- logger.fine("systemDateValue: " + parameterName + "[" + parameterNameId + "], "
- + "[" + dateStart + ", " + dateEnd + "]");
- }
-
- // Insert record into the base level date attribute table
- setDateParms(systemDates, parameterNameId, dateStart, dateEnd);
- systemDates.addBatch();
-
- if (++systemDateCount == this.batchSize) {
- systemDates.executeBatch();
- systemDateCount = 0;
- }
- }
-
- // always store the param at the resource-specific level
- if (logger.isLoggable(Level.FINE)) {
- logger.fine("dateValue: " + parameterName + "[" + parameterNameId + "], "
- + "period: [" + dateStart + ", " + dateEnd + "]");
- }
-
- setDateParms(dates, parameterNameId, dateStart, dateEnd);
- dates.addBatch();
-
- if (++dateCount == this.batchSize) {
- dates.executeBatch();
- dateCount = 0;
- }
- } catch (SQLException x) {
- throw new FHIRPersistenceDataAccessException(parameterName + "={" + dateStart + ", " + dateEnd + "}", x);
- }
-
- }
-
- private void setDateParms(PreparedStatement insert, int parameterNameId, Timestamp dateStart, Timestamp dateEnd) throws SQLException {
- final Calendar UTC = CalendarHelper.getCalendarForUTC();
- insert.setInt(1, parameterNameId);
- insert.setTimestamp(2, dateStart, UTC);
- insert.setTimestamp(3, dateEnd, UTC);
- insert.setLong(4, logicalResourceId);
- setCompositeId(insert, 5);
- }
-
- @Override
- public void visit(TokenParmVal param) throws FHIRPersistenceException {
- String parameterName = param.getName();
- String codeSystem = param.getValueSystem();
- String tokenValue = param.getValueCode();
- try {
- boolean isSystemParam = storeWholeSystem(param);
- if (logger.isLoggable(Level.FINE)) {
- logger.fine("tokenValue: " + parameterName + ", " + codeSystem + ", " + tokenValue);
- }
-
- // Add the new token value to the collection we're building...what's the resourceTypeId?
- final int resourceTypeId = identityCache.getResourceTypeId(param.getResourceType());
- if (tokenValue == null) {
- logger.fine(() -> "tokenValue is NULL for: " + parameterName + ", " + codeSystem);
- }
-
- // Issue 1683, for composites we now also record the current composite id (can be null)
- ResourceTokenValueRec rec = new ResourceTokenValueRec(parameterName, param.getResourceType(), resourceTypeId, logicalResourceId, codeSystem, tokenValue, this.currentCompositeId, isSystemParam);
- if (TAG.equals(parameterName)) {
- // tag search params are often low-selectivity (many resources sharing the same value) so
- // we put them into their own tables to allow better cardinality estimation by the query
- // optimizer
- if (this.transactionData != null) {
- this.transactionData.addTagValue(rec);
- } else {
- this.tagTokenRecs.add(rec);
- }
- if (legacyWholeSystemSearchParamsEnabled) {
- // Store as legacy search params as well
- if (this.transactionData != null) {
- this.transactionData.addValue(rec);
- } else {
- this.tokenValueRecs.add(rec);
- }
- }
- } else if (SECURITY.equals(parameterName)) {
- // search search params are often low-selectivity (many resources sharing the same value) so
- // we put them into their own tables to allow better cardinality estimation by the query
- // optimizer
- if (this.transactionData != null) {
- this.transactionData.addSecurityValue(rec);
- } else {
- this.securityTokenRecs.add(rec);
- }
- if (legacyWholeSystemSearchParamsEnabled) {
- // Store as legacy search params as well
- if (this.transactionData != null) {
- this.transactionData.addValue(rec);
- } else {
- this.tokenValueRecs.add(rec);
- }
- }
- } else {
- if (this.transactionData != null) {
- this.transactionData.addValue(rec);
- } else {
- this.tokenValueRecs.add(rec);
- }
- }
- } catch (FHIRPersistenceDataAccessException x) {
- throw new FHIRPersistenceDataAccessException(parameterName + "=" + codeSystem + ":" + tokenValue, x);
- }
- }
-
- @Override
- public void visit(QuantityParmVal param) throws FHIRPersistenceException {
- String parameterName = param.getName();
- String code = param.getValueCode();
- String codeSystem = param.getValueSystem();
- BigDecimal quantityValue = param.getValueNumber();
- BigDecimal quantityLow = param.getValueNumberLow();
- BigDecimal quantityHigh = param.getValueNumberHigh();
-
- // System-level quantity search parameters are not supported
- if (storeWholeSystem(param)) {
- String msg = "System-level quantity search parameters are not supported: " + parameterName;
- logger.warning(msg);
- throw new IllegalArgumentException(msg);
- }
-
- // Skip anything with a null code, since CODE column is non-nullable,
- // but allow empty code for when no code or unit is specified
- if (code == null) {
- if (logger.isLoggable(Level.FINE)) {
- logger.fine("CODELESS QUANTITY (skipped): " + parameterName + "=" + code + ":" + codeSystem + "{" + quantityValue + ", " + quantityLow + ", " + quantityHigh + "}");
- }
- } else {
- try {
- int parameterNameId = getParameterNameId(parameterName);
-
- if (logger.isLoggable(Level.FINE)) {
- logger.fine("quantityValue: " + parameterName + "[" + parameterNameId + "], "
- + quantityValue + " [" + quantityLow + ", " + quantityHigh + "]");
- }
-
- setQuantityParms(quantities, parameterNameId, codeSystem, code, quantityValue, quantityLow, quantityHigh);
- quantities.addBatch();
-
- if (++quantityCount == batchSize) {
- quantities.executeBatch();
- quantityCount = 0;
- }
- } catch (FHIRPersistenceDataAccessException x) {
- // wrap the exception so we have more context about the parameter causing the problem
- throw new FHIRPersistenceDataAccessException(parameterName + "=" + code + ":" + codeSystem + "{" + quantityValue + ", " + quantityLow + ", " + quantityHigh + "}", x);
- } catch (SQLException x) {
- throw new FHIRPersistenceDataAccessException(parameterName + "=" + code + ":" + codeSystem + "{" + quantityValue + ", " + quantityLow + ", " + quantityHigh + "}", x);
- }
- }
-
- }
-
- private void setQuantityParms(PreparedStatement insert, int parameterNameId, String codeSystem, String code, BigDecimal quantityValue, BigDecimal quantityLow, BigDecimal quantityHigh)
- throws SQLException, FHIRPersistenceException {
- insert.setInt(1, parameterNameId);
- insert.setInt(2, getCodeSystemId(codeSystem));
- insert.setString(3, code);
- insert.setBigDecimal(4, quantityValue);
- insert.setBigDecimal(5, quantityLow);
- insert.setBigDecimal(6, quantityHigh);
- insert.setLong(7, logicalResourceId);
- setCompositeId(insert, 8);
- }
-
- @Override
- public void visit(LocationParmVal param) throws FHIRPersistenceException {
- String parameterName = param.getName();
- double lat = param.getValueLatitude();
- double lng = param.getValueLongitude();
-
- // System-level location search parameters are not supported
- if (storeWholeSystem(param)) {
- String msg = "System-level location search parameters are not supported: " + parameterName;
- logger.warning(msg);
- throw new IllegalArgumentException(msg);
- }
-
- try {
- PreparedStatement insert = connection.prepareStatement(insertLocation);
- setLocationParms(insert, getParameterNameId(parameterName), lat, lng);
- insert.executeUpdate();
- } catch (SQLException x) {
- throw new FHIRPersistenceDataAccessException(parameterName + "={" + lat + ", " + lng + "}", x);
- }
- }
-
- private void setLocationParms(PreparedStatement insert, int parameterNameId, double lat, double lng) throws SQLException, FHIRPersistenceException {
- insert.setInt(1, parameterNameId);
- insert.setDouble(2, lat);
- insert.setDouble(3, lng);
- insert.setLong(4, logicalResourceId);
- setCompositeId(insert, 5);
-
- }
-
- @Override
- public void visit(CompositeParmVal compositeParameter) throws FHIRPersistenceException {
- if (this.currentCompositeId != null) {
- // no soup for you
- logger.warning("A compositeParameter '" + currentCompositeParameterName + "' cannot itself contain a composite '" + compositeParameter.getName());
- throw new FHIRPersistenceException("composite parameters cannot themselves contain composites");
- }
-
- // This composite is a collection of multiple parameters.
- List component = compositeParameter.getComponent();
- this.currentCompositeId = this.compositeIdCounter++;
- this.currentCompositeParameterName = compositeParameter.getName();
- for (ExtractedParameterValue val : component) {
- val.accept(this);
- }
-
- // Clear the currentCompositeId value so we no longer associate it with other parameters
- this.currentCompositeId = null;
- this.currentCompositeParameterName = null;
- }
-
- @Override
- public void close() throws Exception {
- // flush any stragglers, remembering to reset each count because
- // close() should be idempotent.
- try {
- if (stringCount > 0) {
- strings.executeBatch();
- stringCount = 0;
- }
-
- if (numberCount > 0) {
- numbers.executeBatch();
- numberCount = 0;
- }
-
- if (dateCount > 0) {
- dates.executeBatch();
- dateCount = 0;
- }
-
- if (quantityCount > 0) {
- quantities.executeBatch();
- quantityCount = 0;
- }
-
- if (systemStringCount > 0) {
- systemStrings.executeBatch();
- systemStringCount = 0;
- }
-
- if (systemDateCount > 0) {
- systemDates.executeBatch();
- systemDateCount = 0;
- }
- } catch (SQLException x) {
- SQLException batchException = x.getNextException();
- if (batchException != null) {
- // We're really interested in the underlying cause here
- throw batchException;
- }
- else {
- throw x;
- }
- }
-
- if (this.transactionData == null) {
- // Not using transaction data, so we need to process collected values right here
- this.resourceReferenceDAO.addNormalizedValues(this.tablePrefix, tokenValueRecs, referenceValueRecs, profileRecs, tagTokenRecs, securityTokenRecs);
- }
-
- closeStatement(strings);
- closeStatement(numbers);
- closeStatement(dates);
- closeStatement(quantities);
- closeStatement(systemStrings);
- closeStatement(systemDates);
- }
-
- /**
- * Quietly close the given statement
- * @param ps
- */
- private void closeStatement(PreparedStatement ps) {
- try {
- ps.close();
- } catch (SQLException x) {
- logger.warning("failed to close statement");
- }
- }
-
- /**
- * Should we store this parameter also at the whole-system search level?
- * @param param
- * @return
- */
- private boolean storeWholeSystem(ExtractedParameterValue param) {
- return storeWholeSystemParams && param.isWholeSystem();
- }
-
- @Override
- public void visit(ReferenceParmVal rpv) throws FHIRPersistenceException {
- if (rpv.getRefValue() == null) {
- return;
- }
-
- final String resourceType = this.tablePrefix;
- final String parameterName = rpv.getName();
- Integer resourceTypeId = identityCache.getResourceTypeId(resourceType);
- if (resourceTypeId == null) {
- // resourceType is not sensitive, so it's OK to include in the exception message
- throw new FHIRPersistenceException("Resource type not found in cache: '" + resourceType + "'");
- }
-
-
- // The ReferenceValue has already been processed to convert the reference to
- // the required standard form, ready for insertion as a token value.
- ReferenceValue refValue = rpv.getRefValue();
-
- // Ignore references containing only a "display" element (apparently supported by the spec,
- // but contains nothing useful to store because there's no searchable value).
- String refResourceType = refValue.getTargetResourceType();
- String refLogicalId = refValue.getValue();
- Integer refVersion = refValue.getVersion();
-
- if (refValue.getType() == ReferenceType.DISPLAY_ONLY || refValue.getType() == ReferenceType.INVALID) {
- // protect against code regression. Invalid/improper references should be
- // filtered out already.
- logger.warning("Invalid reference parameter type: '" + resourceType + "." + rpv.getName() + "' type=" + refValue.getType().name());
- throw new IllegalArgumentException("Invalid reference parameter value. See server log for details.");
- }
-
- // V0027. Absolute references won't have a resource type, but in order to store them
- // in the LOGICAL_RESOURCE_IDENT table we need to have a valid LOGICAL_RESOURCE_ID. For
- // that we use "Resource"
- if (refResourceType == null) {
- refResourceType = "Resource";
- }
- // Store a reference value configured as a reference to another resource (reference params
- // are never system-level).
- int refResourceTypeId = identityCache.getResourceTypeId(refResourceType);
- ResourceReferenceValueRec rec = new ResourceReferenceValueRec(parameterName, resourceType, resourceTypeId, logicalResourceId,
- refResourceType, refResourceTypeId,
- refLogicalId, refVersion, this.currentCompositeId);
- if (this.transactionData != null) {
- this.transactionData.addReferenceValue(rec);
- } else {
- this.referenceValueRecs.add(rec);
- }
- }
-}
\ No newline at end of file
diff --git a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/dao/impl/ResourceDAOImpl.java b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/dao/impl/ResourceDAOImpl.java
index 03dab085d0e..54363c0c1be 100644
--- a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/dao/impl/ResourceDAOImpl.java
+++ b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/dao/impl/ResourceDAOImpl.java
@@ -15,7 +15,6 @@
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
-import java.sql.SQLIntegrityConstraintViolationException;
import java.sql.Timestamp;
import java.sql.Types;
import java.util.ArrayList;
@@ -24,7 +23,6 @@
import java.util.HashMap;
import java.util.List;
import java.util.Map;
-import java.util.Objects;
import java.util.logging.Level;
import java.util.logging.Logger;
@@ -33,40 +31,26 @@
import com.ibm.fhir.database.utils.common.CalendarHelper;
import com.ibm.fhir.database.utils.query.QueryUtil;
import com.ibm.fhir.database.utils.query.Select;
-import com.ibm.fhir.persistence.InteractionStatus;
import com.ibm.fhir.persistence.context.FHIRPersistenceContext;
import com.ibm.fhir.persistence.exception.FHIRPersistenceDataAccessException;
import com.ibm.fhir.persistence.exception.FHIRPersistenceException;
-import com.ibm.fhir.persistence.exception.FHIRPersistenceVersionIdMismatchException;
-import com.ibm.fhir.persistence.index.FHIRRemoteIndexService;
import com.ibm.fhir.persistence.jdbc.FHIRPersistenceJDBCCache;
import com.ibm.fhir.persistence.jdbc.connection.FHIRDbFlavor;
-import com.ibm.fhir.persistence.jdbc.dao.api.FHIRDAOConstants;
-import com.ibm.fhir.persistence.jdbc.dao.api.IResourceReferenceDAO;
-import com.ibm.fhir.persistence.jdbc.dao.api.JDBCIdentityCache;
-import com.ibm.fhir.persistence.jdbc.dao.api.ParameterDAO;
import com.ibm.fhir.persistence.jdbc.dao.api.ResourceDAO;
-import com.ibm.fhir.persistence.jdbc.dto.ExtractedParameterValue;
import com.ibm.fhir.persistence.jdbc.dto.Resource;
import com.ibm.fhir.persistence.jdbc.exception.FHIRPersistenceDBConnectException;
-import com.ibm.fhir.persistence.jdbc.exception.FHIRPersistenceFKVException;
import com.ibm.fhir.persistence.jdbc.impl.ParameterTransactionDataImpl;
import com.ibm.fhir.persistence.util.InputOutputByteStream;
-import com.ibm.fhir.schema.control.FhirSchemaConstants;
/**
* This Data Access Object implements the ResourceDAO interface for creating, updating,
* and retrieving rows in the IBM FHIR Server resource tables.
*/
-public class ResourceDAOImpl extends FHIRDbDAOImpl implements ResourceDAO {
+public abstract class ResourceDAOImpl extends FHIRDbDAOImpl implements ResourceDAO {
private static final Logger log = Logger.getLogger(ResourceDAOImpl.class.getName());
private static final String CLASSNAME = ResourceDAOImpl.class.getName();
- // Per issue with private memory in db2, we have set this to 1M.
- // Anything larger than 1M is then inserted into the db with an update.
- private static final String LARGE_BLOB = "UPDATE %s_RESOURCES SET DATA = ? WHERE RESOURCE_ID = ?";
-
public static final String DEFAULT_VALUE_REINDEX_TSTAMP = "1970-01-01 00:00:00";
// column indices for all our resource reading queries
@@ -156,7 +140,6 @@ public class ResourceDAOImpl extends FHIRDbDAOImpl implements ResourceDAO {
private Map newResourceTypeIds = new HashMap<>();
private boolean runningInTrx = false;
private TransactionSynchronizationRegistry trxSynchRegistry;
- private final IResourceReferenceDAO resourceReferenceDAO;
private final FHIRPersistenceJDBCCache cache;
@@ -171,12 +154,11 @@ public class ResourceDAOImpl extends FHIRDbDAOImpl implements ResourceDAO {
* @param trxSyncRegistry
*/
public ResourceDAOImpl(Connection c, String schemaName, FHIRDbFlavor flavor, TransactionSynchronizationRegistry trxSynchRegistry,
- FHIRPersistenceJDBCCache cache, IResourceReferenceDAO rrd, ParameterTransactionDataImpl ptdi) {
+ FHIRPersistenceJDBCCache cache, ParameterTransactionDataImpl ptdi) {
super(c, schemaName, flavor);
this.runningInTrx = true;
this.trxSynchRegistry = trxSynchRegistry;
this.cache = cache;
- this.resourceReferenceDAO = rrd;
this.transactionData = ptdi;
}
@@ -187,24 +169,14 @@ public ResourceDAOImpl(Connection c, String schemaName, FHIRDbFlavor flavor, Tra
* @param schemaName
* @param flavor
*/
- public ResourceDAOImpl(Connection c, String schemaName, FHIRDbFlavor flavor, FHIRPersistenceJDBCCache cache, IResourceReferenceDAO rrd) {
+ public ResourceDAOImpl(Connection c, String schemaName, FHIRDbFlavor flavor, FHIRPersistenceJDBCCache cache) {
super(c, schemaName, flavor);
this.runningInTrx = false;
this.trxSynchRegistry = null;
this.cache = cache;
- this.resourceReferenceDAO = rrd;
this.transactionData = null; // not supported outside JEE
}
- /**
- * Getter for the IResourceReferenceDAO used by this ResourceDAO implementation
- *
- * @return
- */
- protected IResourceReferenceDAO getResourceReferenceDAO() {
- return this.resourceReferenceDAO;
- }
-
/**
* Get the ParameterTransactionDataImpl held by this.
*
@@ -475,141 +447,6 @@ protected boolean checkIfNoneMatch(Integer ifNoneMatch, int currentVersionId) {
return ifNoneMatch != null && ifNoneMatch == 0;
}
- @Override
- public Resource insert(Resource resource, List parameters, String parameterHashB64, ParameterDAO parameterDao,
- Integer ifNoneMatch)
- throws FHIRPersistenceException {
- final String METHODNAME = "insert(Resource, List";
- log.entering(CLASSNAME, METHODNAME);
-
- final Connection connection = getConnection(); // do not close
- CallableStatement stmt = null;
- String stmtString = null;
- Timestamp lastUpdated;
- long dbCallStartTime = System.nanoTime();
-
- try {
- // Do a lookup on the resource type, just so we know it's valid in the database
- // before we call the procedure
- Objects.requireNonNull(getResourceTypeId(resource.getResourceType()));
-
- stmtString = String.format(SQL_INSERT_WITH_PARAMETERS, getSchemaName());
- stmt = connection.prepareCall(stmtString);
- stmt.setString(1, resource.getResourceType());
- stmt.setString(2, resource.getLogicalId());
-
- boolean large = false;
- if (resource.getDataStream() != null) {
- // Check for large objects, and branch around it.
- large = FhirSchemaConstants.STORED_PROCEDURE_SIZE_LIMIT < resource.getDataStream().size();
- if (large) {
- // Outside of the normal flow we have a BIG JSON or XML
- stmt.setNull(3, Types.BLOB);
- } else {
- // Normal Flow, we set the data
- stmt.setBinaryStream(3, resource.getDataStream().inputStream());
- }
- } else {
- // payload offloaded to another data store
- stmt.setNull(3, Types.BLOB);
- }
-
- lastUpdated = resource.getLastUpdated();
- stmt.setTimestamp(4, lastUpdated, CalendarHelper.getCalendarForUTC());
- stmt.setString(5, resource.isDeleted() ? "Y": "N");
- stmt.setInt(6, resource.getVersionId());
- stmt.setString(7, parameterHashB64);
- setInt(stmt, 8, ifNoneMatch);
- setString(stmt, 9, resource.getResourcePayloadKey());
- stmt.registerOutParameter(10, Types.BIGINT); // logical_resource_id
- stmt.registerOutParameter(11, Types.BIGINT); // resource_id
- stmt.registerOutParameter(12, Types.VARCHAR); // current_hash
- stmt.registerOutParameter(13, Types.INTEGER); // o_interaction_status
- stmt.registerOutParameter(14, Types.INTEGER); // o_if_none_match_version
-
- stmt.execute();
- long latestTime = System.nanoTime();
- double dbCallDuration = (latestTime-dbCallStartTime)/1e6;
-
- resource.setLogicalResourceId(stmt.getLong(10));
- final long versionedResourceRowId = stmt.getLong(11);
- final String currentHash = stmt.getString(12);
- final int interactionStatus = stmt.getInt(13);
- if (interactionStatus == 1) {
- // No update, so no need to make any more changes
- resource.setInteractionStatus(InteractionStatus.IF_NONE_MATCH_EXISTED);
- resource.setIfNoneMatchVersion(stmt.getInt(14));
- } else {
- resource.setInteractionStatus(InteractionStatus.MODIFIED);
- resource.setCurrentParameterHash(currentHash);
-
- if (large) {
- String largeStmtString = String.format(LARGE_BLOB, resource.getResourceType());
- try (PreparedStatement ps = connection.prepareStatement(largeStmtString)) {
- // Use the long id to update the record in the database with the large object.
- ps.setBinaryStream(1, resource.getDataStream().inputStream());
- ps.setLong(2, versionedResourceRowId);
- long dbCallStartTime2 = System.nanoTime();
- int numberOfRows = -1;
- ps.execute();
- double dbCallDuration2 = (System.nanoTime() - dbCallStartTime2) / 1e6;
- if (log.isLoggable(Level.FINE)) {
- log.fine("DB update large blob complete. ROWS=[" + numberOfRows + "] SQL=[" + largeStmtString + "] executionTime=" + dbCallDuration2
- + "ms");
- }
- }
- }
-
- // Parameter time
- // TODO FHIR_ADMIN schema name needs to come from the configuration/context
- // We can skip the parameter insert if we've been given parameterHashB64 and
- // it matches the current value just returned by the stored procedure call
- FHIRRemoteIndexService remoteIndexService = FHIRRemoteIndexService.getServiceInstance();
- long paramInsertStartTime = latestTime;
- if (remoteIndexService == null
- && parameters != null && (parameterHashB64 == null || parameterHashB64.isEmpty()
- || !parameterHashB64.equals(currentHash))) {
- JDBCIdentityCache identityCache = new JDBCIdentityCacheImpl(cache, this, parameterDao, getResourceReferenceDAO());
- try (ParameterVisitorBatchDAO pvd = new ParameterVisitorBatchDAO(connection, "FHIR_ADMIN", resource.getResourceType(), true,
- resource.getLogicalResourceId(), 100, identityCache, resourceReferenceDAO, this.transactionData)) {
- for (ExtractedParameterValue p: parameters) {
- p.accept(pvd);
- }
- }
- }
-
- if (log.isLoggable(Level.FINE)) {
- latestTime = System.nanoTime();
- double totalDuration = (latestTime - dbCallStartTime) / 1e6;
- double paramInsertDuration = (latestTime-paramInsertStartTime)/1e6;
- log.fine("Successfully inserted Resource. logicalResourceId=" + resource.getLogicalResourceId() + " total=" + totalDuration + "ms, proc=" + dbCallDuration + "ms, param=" + paramInsertDuration + "ms");
- }
- }
- } catch (FHIRPersistenceDBConnectException |
- FHIRPersistenceDataAccessException e) {
- throw e;
- } catch (SQLIntegrityConstraintViolationException e) {
- FHIRPersistenceFKVException fx = new FHIRPersistenceFKVException("Encountered FK violation while inserting Resource.");
- throw severe(log, fx, e);
- } catch (SQLException e) {
- if (FHIRDAOConstants.SQLSTATE_WRONG_VERSION.equals(e.getSQLState())) {
- // this is just a concurrency update, so there's no need to log the SQLException here
- throw new FHIRPersistenceVersionIdMismatchException("Encountered version id mismatch while inserting Resource");
- } else {
- FHIRPersistenceDataAccessException fx = new FHIRPersistenceDataAccessException("SQLException encountered while inserting Resource.");
- throw severe(log, fx, e);
- }
- } catch (Throwable e) {
- FHIRPersistenceDataAccessException fx = new FHIRPersistenceDataAccessException("Failure inserting Resource.");
- throw severe(log, fx, e);
- } finally {
- this.cleanup(stmt);
- log.exiting(CLASSNAME, METHODNAME);
- }
-
- return resource;
- }
-
@Override
public List search(String sqlSelect) throws FHIRPersistenceDataAccessException, FHIRPersistenceDBConnectException {
final String METHODNAME = "search";
diff --git a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/dao/impl/ResourceReferenceDAO.java b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/dao/impl/ResourceReferenceDAO.java
deleted file mode 100644
index 3a61e224684..00000000000
--- a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/dao/impl/ResourceReferenceDAO.java
+++ /dev/null
@@ -1,1267 +0,0 @@
-/*
- * (C) Copyright IBM Corp. 2020, 2022
- *
- * SPDX-License-Identifier: Apache-2.0
- */
-
-package com.ibm.fhir.persistence.jdbc.dao.impl;
-
-import java.sql.Connection;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Types;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Optional;
-import java.util.Set;
-import java.util.logging.Level;
-import java.util.logging.Logger;
-import java.util.stream.Collectors;
-
-import com.ibm.fhir.config.FHIRRequestContext;
-import com.ibm.fhir.config.MetricHandle;
-import com.ibm.fhir.database.utils.api.IDatabaseTranslator;
-import com.ibm.fhir.database.utils.common.DataDefinitionUtil;
-import com.ibm.fhir.database.utils.common.PreparedStatementHelper;
-import com.ibm.fhir.persistence.exception.FHIRPersistenceDataAccessException;
-import com.ibm.fhir.persistence.exception.FHIRPersistenceException;
-import com.ibm.fhir.persistence.jdbc.dao.api.ICommonTokenValuesCache;
-import com.ibm.fhir.persistence.jdbc.dao.api.ILogicalResourceIdentCache;
-import com.ibm.fhir.persistence.jdbc.dao.api.INameIdCache;
-import com.ibm.fhir.persistence.jdbc.dao.api.IResourceReferenceDAO;
-import com.ibm.fhir.persistence.jdbc.dao.api.LogicalResourceIdentKey;
-import com.ibm.fhir.persistence.jdbc.dao.api.LogicalResourceIdentValue;
-import com.ibm.fhir.persistence.jdbc.dto.CommonTokenValue;
-import com.ibm.fhir.persistence.jdbc.dto.CommonTokenValueResult;
-import com.ibm.fhir.persistence.jdbc.exception.FHIRPersistenceDBConnectException;
-import com.ibm.fhir.persistence.jdbc.util.FHIRPersistenceJDBCMetric;
-import com.ibm.fhir.schema.control.FhirSchemaConstants;
-
-/**
- * DAO to handle maintenance of the local and external reference tables
- * which contain the relationships described by "reference" elements in
- * each resource (e.g. Observation.subject).
- *
- * The DAO uses a cache for looking up the ids for various entities. The
- * DAO can create new entries, but these can only be used locally until
- * the transaction commits, at which point they can be consolidated into
- * the shared cache. This has the benefit that we reduce the number of times
- * we need to lock the global cache, because we only update it once per
- * transaction.
- *
- * For improved performance, we also make use of batch statements which
- * are managed as member variables. This is why it's important to close
- * this DAO before the transaction commits, ensuring that any outstanding
- * DML batched but not yet executed is processed. Calling close does not
- * close the provided Connection. That is up to the caller to manage.
- * Close does close any statements which are opened inside the class.
- */
-public abstract class ResourceReferenceDAO implements IResourceReferenceDAO, AutoCloseable {
- private static final Logger logger = Logger.getLogger(ResourceReferenceDAO.class.getName());
-
- private final String schemaName;
-
- // hold on to the connection because we use batches to improve efficiency
- private final Connection connection;
-
- // The cache used to track the ids of the normalized entities we're managing
- private final ICommonTokenValuesCache cache;
-
- // Cache of parameter names to id
- private final INameIdCache parameterNameCache;
-
- // Cache of the logical resource id values from logical_resource_ident
- private final ILogicalResourceIdentCache logicalResourceIdentCache;
-
- // The translator for the type of database we are connected to
- private final IDatabaseTranslator translator;
-
- // The number of operations we allow before submitting a batch
- protected static final int BATCH_SIZE = 1000;
-
- /**
- * Public constructor
- *
- * @param t
- * @param c
- * @param schemaName
- * @param cache
- * @param parameterNameCache
- * @param logicalResourceIdentCache
- */
- public ResourceReferenceDAO(IDatabaseTranslator t, Connection c, String schemaName, ICommonTokenValuesCache cache, INameIdCache parameterNameCache, ILogicalResourceIdentCache logicalResourceIdentCache) {
- this.translator = t;
- this.schemaName = schemaName;
- this.connection = c;
- this.cache = cache;
- this.parameterNameCache = parameterNameCache;
- this.logicalResourceIdentCache = logicalResourceIdentCache;
- }
-
- /**
- * Getter for the {@link IDatabaseTranslator} held by this DAO
- * @return
- */
- protected IDatabaseTranslator getTranslator() {
- return this.translator;
- }
-
- /**
- * Getter for the {@link ICommonTokenValuesCache} held by this DAO
- * @return
- */
- protected ICommonTokenValuesCache getCache() {
- return this.cache;
- }
-
- /**
- * Getter for the {@link Connection} held by this DAO
- * @return
- */
- protected Connection getConnection() {
- return this.connection;
- }
-
- /**
- * Getter for subclass access to the schemaName
- * @return
- */
- protected String getSchemaName() {
- return this.schemaName;
- }
-
- @Override
- public void flush() throws FHIRPersistenceException {
- // NOP at this time
- }
-
- @Override
- public void close() throws FHIRPersistenceException {
- flush();
- }
-
- @Override
- public ICommonTokenValuesCache getResourceReferenceCache() {
- return this.cache;
- }
-
- @Override
- public CommonTokenValueResult readCommonTokenValueId(String codeSystem, String tokenValue) {
- CommonTokenValueResult result;
-
- final String SQL = ""
- + "SELECT c.code_system_id, c.common_token_value_id "
- + " FROM common_token_values c,"
- + " code_systems s "
- + " WHERE c.token_value = ? "
- + " AND s.code_system_name = ? "
- + " AND c.code_system_id = s.code_system_id";
- try (PreparedStatement ps = connection.prepareStatement(SQL)) {
- ps.setString(1, tokenValue);
- ps.setString(2, codeSystem);
- ResultSet rs = ps.executeQuery();
- if (rs.next()) {
- result = new CommonTokenValueResult(null, rs.getInt(1), rs.getLong(2));
- } else {
- result = null;
- }
- } catch (SQLException x) {
- logger.log(Level.SEVERE, SQL, x);
- throw translator.translate(x);
- }
-
- return result;
- }
-
- @Override
- public Set readCommonTokenValueIds(Collection tokenValues) {
- if (tokenValues.isEmpty()) {
- return Collections.emptySet();
- }
-
- Set result = new HashSet<>();
-
- StringBuilder select = new StringBuilder()
- .append("SELECT c.token_value, c.code_system_id, c.common_token_value_id ")
- .append(" FROM common_token_values c")
- .append(" JOIN (VALUES ");
-
- String delim = "";
- for (CommonTokenValue tokenValue : tokenValues) {
- select.append(delim);
- // CodeSystemId is an int that comes from the db so we can use the literal for that.
- // TokenValue is a string that could possibly come from the end user, so that should be a bind variable.
- select.append("(?," + tokenValue.getCodeSystemId() + ")");
- delim = ", ";
- }
-
- select.append(") AS tmp (token_value,code_system_id) ON tmp.token_value = c.token_value AND tmp.code_system_id = c.code_system_id");
-
- try (PreparedStatement ps = connection.prepareStatement(select.toString())) {
- Iterator iterator = tokenValues.iterator();
- for (int i = 1; i <= tokenValues.size(); i++) {
- CommonTokenValue tokenValue = iterator.next();
-
- ps.setString(i, tokenValue.getTokenValue());
- }
-
- ResultSet rs = ps.executeQuery();
- while (rs.next()) {
- result.add(new CommonTokenValueResult(rs.getString(1), rs.getInt(2), rs.getLong(3)));
- }
- } catch (SQLException x) {
- logger.log(Level.SEVERE, select.toString(), x);
- throw translator.translate(x);
- }
-
- return result;
- }
-
- @Override
- public Long readCanonicalId(String canonicalValue) {
- Long result;
- final String SQL = ""
- + "SELECT canonical_id "
- + " FROM common_canonical_values "
- + " WHERE url = ? ";
- try (PreparedStatement ps = connection.prepareStatement(SQL)) {
- ps.setString(1, canonicalValue);
- ResultSet rs = ps.executeQuery();
- if (rs.next()) {
- result = rs.getLong(1);
- } else {
- result = null;
- }
- } catch (SQLException x) {
- logger.log(Level.SEVERE, SQL, x);
- throw translator.translate(x);
- }
-
- return result;
- }
-
- @Override
- public void addNormalizedValues(String resourceType, Collection xrefs, Collection resourceRefs, Collection profileRecs, Collection tagRecs, Collection securityRecs) throws FHIRPersistenceException {
- // This method is only called when we're not using transaction data
- logger.fine("Persist parameters for this resource - no transaction data available");
- persist(xrefs, resourceRefs, profileRecs, tagRecs, securityRecs);
- }
-
- /**
- * Insert the values in the resource-type-specific _resource_token_refs table. This
- * is a simple batch insert because all the FKs have already been resolved and updated
- * in the ResourceTokenValueRec records
- * @param resourceType
- * @param xrefs
- */
- protected void insertResourceTokenRefs(String resourceType, Collection xrefs) {
- // Now all the values should have ids assigned so we can go ahead and insert them
- // as a batch
- final String tableName = resourceType + "_RESOURCE_TOKEN_REFS";
- DataDefinitionUtil.assertValidName(tableName);
- if (logger.isLoggable(Level.FINE)) {
- logger.fine("Inserting " + xrefs.size() + " values into " + tableName);
- }
- final String insert = "INSERT INTO " + tableName + "("
- + "parameter_name_id, logical_resource_id, common_token_value_id, composite_id) "
- + "VALUES (?, ?, ?, ?)";
- try (PreparedStatement ps = connection.prepareStatement(insert)) {
- int count = 0;
- for (ResourceTokenValueRec xr: xrefs) {
- ps.setInt(1, xr.getParameterNameId());
- ps.setLong(2, xr.getLogicalResourceId());
-
- // common token value can be null
- if (xr.getCommonTokenValueId() != null) {
- ps.setLong(3, xr.getCommonTokenValueId());
- } else {
- ps.setNull(3, Types.BIGINT);
- }
-
- // compositeId can be null
- if (xr.getCompositeId() != null) {
- ps.setInt(4, xr.getCompositeId());
- } else {
- ps.setNull(4, Types.INTEGER);
- }
- ps.addBatch();
- if (++count == BATCH_SIZE) {
- ps.executeBatch();
- count = 0;
- }
- }
-
- if (count > 0) {
- ps.executeBatch();
- }
- } catch (SQLException x) {
- logger.log(Level.SEVERE, insert, x);
- throw translator.translate(x);
- }
- }
-
- /**
- * Insert the values in the resource-type-specific _ref_values table. This
- * is a simple batch insert because all the FKs have already been resolved and updated
- * in the ResourceTokenValueRec records
- * @param resourceType
- * @param xrefs
- */
- protected void insertRefValues(String resourceType, Collection xrefs) {
- // Now all the values should have ids assigned so we can go ahead and insert them
- // as a batch
- final String tableName = resourceType + "_REF_VALUES";
- DataDefinitionUtil.assertValidName(tableName);
- if (logger.isLoggable(Level.FINE)) {
- logger.fine("Inserting " + xrefs.size() + " values into " + tableName);
- }
- final String insert = "INSERT INTO " + tableName + "("
- + "parameter_name_id, logical_resource_id, ref_logical_resource_id, ref_version_id, composite_id) "
- + "VALUES (?, ?, ?, ?, ?)";
- try (PreparedStatement ps = connection.prepareStatement(insert)) {
- int count = 0;
- PreparedStatementHelper psh = new PreparedStatementHelper(ps);
- for (ResourceReferenceValueRec xr: xrefs) {
- psh.setInt(xr.getParameterNameId())
- .setLong(xr.getLogicalResourceId())
- .setLong(xr.getRefLogicalResourceId())
- .setInt(xr.getRefVersionId())
- .setInt(xr.getCompositeId())
- .addBatch();
-
- if (++count == BATCH_SIZE) {
- ps.executeBatch();
- count = 0;
- }
- }
-
- if (count > 0) {
- ps.executeBatch();
- }
- } catch (SQLException x) {
- logger.log(Level.SEVERE, insert, x);
- throw translator.translate(x);
- }
- }
-
- /**
- * Add all the systems we currently don't have in the database. If all target
- * databases handled MERGE properly this would be easy, but they don't so
- * we go old-school with a negative outer join instead (which is pretty much
- * what MERGE does behind the scenes anyway).
- * @param systems
- */
- public void upsertCodeSystems(List systems) {
- if (systems.isEmpty()) {
- return;
- }
-
- // Unique list so we don't try and create the same name more than once
- final Set systemNames = systems.stream().map(xr -> xr.getCodeSystemValue()).collect(Collectors.toSet());
- final List sortedSystemNames = new ArrayList<>(systemNames);
- sortedSystemNames.sort(String::compareTo);
- final StringBuilder paramList = new StringBuilder();
- final StringBuilder inList = new StringBuilder();
- for (int i=0; i 0) {
- paramList.append(", ");
- inList.append(",");
- }
- paramList.append("(CAST(? AS VARCHAR(" + FhirSchemaConstants.MAX_SEARCH_STRING_BYTES + ")))");
- inList.append("?");
- }
-
- final String paramListStr = paramList.toString();
- doCodeSystemsUpsert(paramListStr, sortedSystemNames);
-
- // Now grab the ids for the rows we just created. If we had a RETURNING implementation
- // which worked reliably across all our database platforms, we wouldn't need this
- // second query.
- final Map idMap = new HashMap<>();
- doCodeSystemsFetch(idMap, inList.toString(), sortedSystemNames);
-
- // Now update the ids for all the matching systems in our list
- for (ResourceTokenValueRec xr: systems) {
- Integer id = idMap.get(xr.getCodeSystemValue());
- if (id != null) {
- xr.setCodeSystemValueId(id);
-
- // Add this value to the (thread-local) cache
- cache.addCodeSystem(xr.getCodeSystemValue(), id);
- } else {
- // Unlikely...but need to handle just in case
- logger.severe("Record for code_system_name '" + xr.getCodeSystemValue() + "' inserted but not found");
- throw new IllegalStateException("id deleted from database!");
- }
- }
- }
-
- /**
- * Fetch the code_system_id values for each of the code_system_name values in the sortedSystemNames list.
- * @param idMap the code_system_name -> code_system_id map to populate
- * @param inList a list of bind markers for the values in the sortedSystemNames list
- * @param sortedSystemNames the list of code_system_name values to fetch
- */
- protected void doCodeSystemsFetch(Map idMap, String inList, List sortedSystemNames) {
- StringBuilder select = new StringBuilder();
- select.append("SELECT code_system_name, code_system_id FROM code_systems WHERE code_system_name IN (");
- select.append(inList);
- select.append(")");
-
- try (PreparedStatement ps = connection.prepareStatement(select.toString())) {
- // load a map with all the ids we need which we can then use to update the
- // ExternalResourceReferenceRec objects
- int a = 1;
- for (String name: sortedSystemNames) {
- ps.setString(a++, name);
- }
-
- ResultSet rs = ps.executeQuery();
- while (rs.next()) {
- idMap.put(rs.getString(1), rs.getInt(2));
- }
- } catch (SQLException x) {
- logger.log(Level.SEVERE, select.toString(), x);
- throw translator.translate(x);
- }
- }
-
- /**
- * Add the missing values to the database (and get ids allocated)
- * @param profileValues
- */
- public void upsertCanonicalValues(List profileValues) {
- if (profileValues.isEmpty()) {
- return;
- }
-
- // Unique list so we don't try and create the same name more than once
- Set valueSet = profileValues.stream().map(xr -> xr.getCanonicalValue()).collect(Collectors.toSet());
- List sortedValues = new ArrayList(valueSet);
- sortedValues.sort(String::compareTo);
-
- StringBuilder paramList = new StringBuilder();
- StringBuilder inList = new StringBuilder();
- for (int i=0; i 0) {
- paramList.append(", ");
- inList.append(",");
- }
- paramList.append("(CAST(? AS VARCHAR(" + FhirSchemaConstants.CANONICAL_URL_BYTES + ")))");
- inList.append("?");
- }
-
- final String paramListStr = paramList.toString();
- doCanonicalValuesUpsert(paramListStr, sortedValues);
-
-
- // Now grab the ids for the rows we just created. If we had a RETURNING implementation
- // which worked reliably across all our database platforms, we wouldn't need this
- // second query.
- StringBuilder select = new StringBuilder();
- select.append("SELECT url, canonical_id FROM common_canonical_values WHERE url IN (");
- select.append(inList);
- select.append(")");
-
- Map idMap = new HashMap<>();
- try (PreparedStatement ps = connection.prepareStatement(select.toString())) {
- // load a map with all the ids we need which we can then use to update the
- // ExternalResourceReferenceRec objects
- int a = 1;
- for (String name: sortedValues) {
- ps.setString(a++, name);
- }
-
- ResultSet rs = ps.executeQuery();
- while (rs.next()) {
- idMap.put(rs.getString(1), rs.getInt(2));
- }
- } catch (SQLException x) {
- logger.log(Level.SEVERE, select.toString(), x);
- throw translator.translate(x);
- }
-
- // Now update the ids for all the matching systems in our list
- for (ResourceProfileRec xr: profileValues) {
- Integer id = idMap.get(xr.getCanonicalValue());
- if (id != null) {
- xr.setCanonicalValueId(id);
-
- // Add this value to the (thread-local) cache
- cache.addCanonicalValue(xr.getCanonicalValue(), id);
- } else {
- // Unlikely...but need to handle just in case
- logger.severe("Record for common_canonical_value '" + xr.getCanonicalValue() + "' inserted but not found");
- throw new IllegalStateException("id deleted from database!");
- }
- }
- }
-
- /**
- * Insert any whole-system parameters to the token_refs table
- * @param resourceType
- * @param xrefs
- */
- protected void insertSystemResourceTokenRefs(String resourceType, Collection xrefs) {
- // Now all the values should have ids assigned so we can go ahead and insert them
- // as a batch
- final String tableName = "RESOURCE_TOKEN_REFS";
- DataDefinitionUtil.assertValidName(tableName);
- final String insert = "INSERT INTO " + tableName + "("
- + "parameter_name_id, logical_resource_id, common_token_value_id) "
- + "VALUES (?, ?, ?)";
- try (PreparedStatement ps = connection.prepareStatement(insert)) {
- int count = 0;
- for (ResourceTokenValueRec xr: xrefs) {
- if (xr.isSystemLevel()) {
- ps.setInt(1, xr.getParameterNameId());
- ps.setLong(2, xr.getLogicalResourceId());
-
- // common token value can be null
- if (xr.getCommonTokenValueId() != null) {
- ps.setLong(3, xr.getCommonTokenValueId());
- } else {
- ps.setNull(3, Types.BIGINT);
- }
-
- ps.addBatch();
- if (++count == BATCH_SIZE) {
- ps.executeBatch();
- count = 0;
- }
- }
- }
-
- if (count > 0) {
- ps.executeBatch();
- }
- } catch (SQLException x) {
- logger.log(Level.SEVERE, insert, x);
- throw translator.translate(x);
- }
- }
-
- protected void insertResourceProfiles(String resourceType, Collection profiles) {
- // Now all the values should have ids assigned so we can go ahead and insert them
- // as a batch
- final String tableName = resourceType + "_PROFILES";
- DataDefinitionUtil.assertValidName(tableName);
- final String insert = "INSERT INTO " + tableName + "("
- + "logical_resource_id, canonical_id, version, fragment) "
- + "VALUES (?, ?, ?, ?)";
- try (PreparedStatement ps = connection.prepareStatement(insert)) {
- int count = 0;
- for (ResourceProfileRec xr: profiles) {
- ps.setLong(1, xr.getLogicalResourceId());
- ps.setLong(2, xr.getCanonicalValueId());
-
- // canonical version can be null
- if (xr.getVersion() != null) {
- ps.setString(3, xr.getVersion());
- } else {
- ps.setNull(3, Types.VARCHAR);
- }
-
- // canonical fragment can be null
- if (xr.getFragment() != null) {
- ps.setString(4, xr.getFragment());
- } else {
- ps.setNull(4, Types.VARCHAR);
- }
- ps.addBatch();
- if (++count == BATCH_SIZE) {
- ps.executeBatch();
- count = 0;
- }
- }
-
- if (count > 0) {
- ps.executeBatch();
- }
- } catch (SQLException x) {
- logger.log(Level.SEVERE, insert, x);
- throw translator.translate(x);
- }
- }
-
- /**
- * Insert PROFILE parameters
- * @param resourceType
- * @param profiles
- */
- protected void insertSystemResourceProfiles(String resourceType, Collection profiles) {
- final String tableName = "LOGICAL_RESOURCE_PROFILES";
- DataDefinitionUtil.assertValidName(tableName);
- final String insert = "INSERT INTO " + tableName + "("
- + "logical_resource_id, canonical_id, version, fragment) "
- + "VALUES (?, ?, ?, ?)";
- try (PreparedStatement ps = connection.prepareStatement(insert)) {
- int count = 0;
- for (ResourceProfileRec xr: profiles) {
- ps.setLong(1, xr.getLogicalResourceId());
- ps.setLong(2, xr.getCanonicalValueId());
-
- // canonical version can be null
- if (xr.getVersion() != null) {
- ps.setString(3, xr.getVersion());
- } else {
- ps.setNull(3, Types.VARCHAR);
- }
-
- // canonical fragment can be null
- if (xr.getFragment() != null) {
- ps.setString(4, xr.getFragment());
- } else {
- ps.setNull(4, Types.VARCHAR);
- }
- ps.addBatch();
- if (++count == BATCH_SIZE) {
- ps.executeBatch();
- count = 0;
- }
- }
-
- if (count > 0) {
- ps.executeBatch();
- }
- } catch (SQLException x) {
- logger.log(Level.SEVERE, insert, x);
- throw translator.translate(x);
- }
- }
-
- /**
- * Insert the tags referenced by the given collection of token value records
- * @param resourceType
- * @param xrefs
- */
- protected void insertResourceTags(String resourceType, Collection xrefs) {
- // Now all the values should have ids assigned so we can go ahead and insert them
- // as a batch
- final String tableName = resourceType + "_TAGS";
- DataDefinitionUtil.assertValidName(tableName);
- final String insert = "INSERT INTO " + tableName + "("
- + "logical_resource_id, common_token_value_id) "
- + "VALUES (?, ?)";
- try (PreparedStatement ps = connection.prepareStatement(insert)) {
- int count = 0;
- for (ResourceTokenValueRec xr: xrefs) {
- ps.setLong(1, xr.getLogicalResourceId());
- ps.setLong(2, xr.getCommonTokenValueId());
- ps.addBatch();
- if (++count == BATCH_SIZE) {
- ps.executeBatch();
- count = 0;
- }
- }
-
- if (count > 0) {
- ps.executeBatch();
- }
- } catch (SQLException x) {
- logger.log(Level.SEVERE, insert, x);
- throw translator.translate(x);
- }
- }
-
- /**
- * Insert _tag parameters to the whole-system LOGICAL_RESOURCE_TAGS table
- * @param resourceType
- * @param xrefs
- */
- protected void insertSystemResourceTags(String resourceType, Collection xrefs) {
- // Now all the values should have ids assigned so we can go ahead and insert them
- // as a batch
- final String tableName = "LOGICAL_RESOURCE_TAGS";
- DataDefinitionUtil.assertValidName(tableName);
- final String insert = "INSERT INTO " + tableName + "("
- + "logical_resource_id, common_token_value_id) "
- + "VALUES (?, ?)";
- try (PreparedStatement ps = connection.prepareStatement(insert)) {
- int count = 0;
- for (ResourceTokenValueRec xr: xrefs) {
- ps.setLong(1, xr.getLogicalResourceId());
- ps.setLong(2, xr.getCommonTokenValueId());
- ps.addBatch();
- if (++count == BATCH_SIZE) {
- ps.executeBatch();
- count = 0;
- }
- }
-
- if (count > 0) {
- ps.executeBatch();
- }
- } catch (SQLException x) {
- logger.log(Level.SEVERE, insert, x);
- throw translator.translate(x);
- }
- }
-
- /**
- * Insert _security parameters to the resource-specific xx_SECURITY table
- * @param resourceType
- * @param xrefs
- */
- protected void insertResourceSecurity(String resourceType, Collection xrefs) {
- // Now all the values should have ids assigned so we can go ahead and insert them
- // as a batch
- final String tableName = resourceType + "_SECURITY";
- DataDefinitionUtil.assertValidName(tableName);
- final String insert = "INSERT INTO " + tableName + "("
- + "logical_resource_id, common_token_value_id) "
- + "VALUES (?, ?)";
- try (PreparedStatement ps = connection.prepareStatement(insert)) {
- int count = 0;
- for (ResourceTokenValueRec xr: xrefs) {
- ps.setLong(1, xr.getLogicalResourceId());
- ps.setLong(2, xr.getCommonTokenValueId());
- ps.addBatch();
- if (++count == BATCH_SIZE) {
- ps.executeBatch();
- count = 0;
- }
- }
-
- if (count > 0) {
- ps.executeBatch();
- }
- } catch (SQLException x) {
- logger.log(Level.SEVERE, insert, x);
- throw translator.translate(x);
- }
- }
-
- /**
- * Insert _security parametes to the whole-system LOGICAL_REOURCE_SECURITY table
- * @param resourceType
- * @param xrefs
- */
- protected void insertSystemResourceSecurity(String resourceType, Collection xrefs) {
- // Now all the values should have ids assigned so we can go ahead and insert them
- // as a batch
- final String tableName = "LOGICAL_RESOURCE_SECURITY";
- DataDefinitionUtil.assertValidName(tableName);
- final String insert = "INSERT INTO " + tableName + "("
- + "logical_resource_id, common_token_value_id) "
- + "VALUES (?, ?)";
- try (PreparedStatement ps = connection.prepareStatement(insert)) {
- int count = 0;
- for (ResourceTokenValueRec xr: xrefs) {
- ps.setLong(1, xr.getLogicalResourceId());
- ps.setLong(2, xr.getCommonTokenValueId());
- ps.addBatch();
- if (++count == BATCH_SIZE) {
- ps.executeBatch();
- count = 0;
- }
- }
-
- if (count > 0) {
- ps.executeBatch();
- }
- } catch (SQLException x) {
- logger.log(Level.SEVERE, insert, x);
- throw translator.translate(x);
- }
- }
-
- /**
- * Insert any missing values into the code_systems table
- * @param paramList
- * @param systemNames a sorted collection of system names
- */
- public abstract void doCodeSystemsUpsert(String paramList, Collection sortedSystemNames);
-
- /**
- * Insert any missing values into the common_canonical_values table
- * @param paramList
- * @param urls
- */
- public abstract void doCanonicalValuesUpsert(String paramList, Collection sortedURLS);
-
- /**
- * Add reference value records for each unique reference name in the given list
- * @param values
- */
- public void upsertCommonTokenValues(List values) {
-
- // Unique list so we don't try and create the same name more than once.
- // Ignore any null token-values, because we don't want to (can't) store
- // them in our common token values table.
- Set tokenValueSet = values.stream().filter(x -> x.getTokenValue() != null).map(xr -> new CommonTokenValue(xr.getCodeSystemValue(), xr.getCodeSystemValueId(), xr.getTokenValue())).collect(Collectors.toSet());
-
- if (tokenValueSet.isEmpty()) {
- // nothing to do
- return;
- }
-
- // Sort the values so we always process in the same order (deadlock protection)
- List sortedTokenValues = new ArrayList<>(tokenValueSet);
- sortedTokenValues.sort(CommonTokenValue::compareTo);
-
- // Process the data in a window.
- int idx = 0;
- int max = sortedTokenValues.size();
-
- // The maximum number of query parameters that are available for a particular persistence layer.
- // There are two '?' parameters declared for each CommonTokenValue.
- Optional maxQuery = translator.maximumQueryParameters();
- int maxSub;
- if (maxQuery.isPresent() && (max * 2) > maxQuery.get()) {
- maxSub = maxQuery.get() / 2;
- } else {
- maxSub = max;
- }
-
- // The sliding window
- int window = 1;
- while (idx < max) {
- List sortedTokenValuesSub = new ArrayList<>();
-
- // Build a string of parameter values we use in the query to drive the insert statement.
- // The database needs to know the type when it parses the query, hence the slightly verbose CAST functions:
- // VALUES ((CAST(? AS VARCHAR(1234)), CAST(? AS INT)), (...)) AS V(common_token_value, parameter_name_id, code_system_id)
- StringBuilder inList = new StringBuilder(); // for the select query later
- StringBuilder paramList = new StringBuilder();
- for (; idx < (maxSub * window) && idx < max; idx++) {
- if (paramList.length() > 0) {
- paramList.append(", ");
- }
- paramList.append("(CAST(? AS VARCHAR(" + FhirSchemaConstants.MAX_TOKEN_VALUE_BYTES + "))");
- paramList.append(",CAST(? AS INT))");
-
- // also build the inList for the select statement later
- if (inList.length() > 0) {
- inList.append(",");
- }
- inList.append("(?,?)");
-
- sortedTokenValuesSub.add(sortedTokenValues.get(idx));
- }
-
- // Condition where there are more than one
- if (logger.isLoggable(Level.FINE)) {
- logger.fine("sortedTokenValuesSub=[" + sortedTokenValuesSub.size() + "] sortedTokenValues=[" + sortedTokenValues.size() + "]");
- }
-
- final String paramListStr = paramList.toString();
- try (MetricHandle m = FHIRRequestContext.get().getMetricHandle(FHIRPersistenceJDBCMetric.M_JDBC_UPSERT_COMMON_TOKEN_VALUES.name())) {
- doCommonTokenValuesUpsert(paramListStr, sortedTokenValuesSub);
- }
-
- // Now grab the ids for the rows we just created. If we had a RETURNING implementation
- // which worked reliably across all our database platforms, we wouldn't need this
- // second query.
- // Derby doesn't support IN LISTS with multiple members, so we have to join against
- // a VALUES again. No big deal...probably similar amount of work for the database
- final Map idMap = new HashMap<>();
- try (MetricHandle m = FHIRRequestContext.get().getMetricHandle(FHIRPersistenceJDBCMetric.M_JDBC_FETCH_NEW_COMMON_TOKEN_VALUES.name())) {
- StringBuilder select = new StringBuilder();
- select.append(" SELECT ctv.code_system_id, ctv.token_value, ctv.common_token_value_id FROM ");
- select.append(" (VALUES ").append(paramListStr).append(" ) AS v(token_value, code_system_id) ");
- select.append(" JOIN common_token_values ctv ");
- select.append(" ON ctv.token_value = v.token_value ");
- select.append(" AND ctv.code_system_id = v.code_system_id ");
-
- // Grab the ids
- try (PreparedStatement ps = connection.prepareStatement(select.toString())) {
- int a = 1;
- for (CommonTokenValue tv: sortedTokenValuesSub) {
- ps.setString(a++, tv.getTokenValue());
- ps.setInt(a++, tv.getCodeSystemId());
- }
-
- ResultSet rs = ps.executeQuery();
- while (rs.next()) {
- // SELECT code_system_id, token_value...note codeSystem not required
- CommonTokenValue key = new CommonTokenValue(null, rs.getInt(1), rs.getString(2));
- idMap.put(key, rs.getLong(3));
- }
- } catch (SQLException x) {
- throw translator.translate(x);
- }
- }
-
- // Now update the ids for all the matching systems in our list
- for (ResourceTokenValueRec xr: values) {
- // ignore entries with null tokenValue elements - we don't store them in common_token_values
- if (xr.getTokenValue() != null) {
- CommonTokenValue key = new CommonTokenValue(null, xr.getCodeSystemValueId(), xr.getTokenValue());
- Long id = idMap.get(key);
- if (id != null) {
- xr.setCommonTokenValueId(id);
-
- // update the thread-local cache with this id. The values aren't committed to the shared cache
- // until the transaction commits
- cache.addTokenValue(key, id);
- }
- }
- }
- window++;
- }
- }
-
- /**
- * Execute the insert (upsert) into the common_token_values table for the
- * given collection of values. Note, this insert from negative outer join
- * requires the database concurrency implementation to be correct. This does
- * not work for Postgres, hence Postgres gets its own implementation of this
- * method
- * @param paramList
- * @param tokenValues
- */
- protected abstract void doCommonTokenValuesUpsert(String paramList, Collection sortedTokenValues);
-
- @Override
- public void persist(Collection records, Collection referenceRecords, Collection profileRecs, Collection tagRecs, Collection securityRecs) throws FHIRPersistenceException {
-
- boolean gotSomething = collectAndResolveParameterNames(records, referenceRecords, profileRecs, tagRecs, securityRecs);
- if (!gotSomething) {
- // nothing to do
- return;
- }
-
- FHIRRequestContext requestContext = FHIRRequestContext.get();
-
- // Grab the ids for all the code-systems, and upsert any misses
- List systemMisses = new ArrayList<>();
- try (MetricHandle m = requestContext.getMetricHandle(FHIRPersistenceJDBCMetric.M_JDBC_RESOLVE_CODE_SYSTEMS.name())) {
- cache.resolveCodeSystems(records, systemMisses);
- cache.resolveCodeSystems(tagRecs, systemMisses);
- cache.resolveCodeSystems(securityRecs, systemMisses);
- upsertCodeSystems(systemMisses);
- }
-
- // Now that all the code-systems ids are known, we can search the cache
- // for all the token values, upserting anything new
- List valueMisses = new ArrayList<>();
- try (MetricHandle m = requestContext.getMetricHandle(FHIRPersistenceJDBCMetric.M_JDBC_RESOLVE_COMMON_TOKEN_VALUES.name())) {
- cache.resolveTokenValues(records, valueMisses);
- cache.resolveTokenValues(tagRecs, valueMisses);
- cache.resolveTokenValues(securityRecs, valueMisses);
- upsertCommonTokenValues(valueMisses);
- }
-
- // Resolve all the LOGICAL_RESOURCE_IDENT records we need as reference targets
- List referenceMisses = new ArrayList<>();
- try (MetricHandle m = requestContext.getMetricHandle(FHIRPersistenceJDBCMetric.M_JDBC_RESOLVE_LOGICAL_RESOURCE_IDENT.name())) {
- logicalResourceIdentCache.resolveReferenceValues(referenceRecords, referenceMisses);
- upsertLogicalResourceIdents(referenceMisses);
- }
-
- // Process all the common canonical values
- List canonicalMisses = new ArrayList<>();
- try (MetricHandle m = requestContext.getMetricHandle(FHIRPersistenceJDBCMetric.M_JDBC_RESOLVE_CANONICAL_VALUES.name())) {
- cache.resolveCanonicalValues(profileRecs, canonicalMisses);
- upsertCanonicalValues(canonicalMisses);
- }
-
- // Now split the token-value records into groups based on resource type.
- Map> recordMap = new HashMap<>();
- for (ResourceTokenValueRec rtv: records) {
- List list = recordMap.computeIfAbsent(rtv.getResourceType(), k -> { return new ArrayList<>(); });
- list.add(rtv);
- }
-
- try (MetricHandle m = requestContext.getMetricHandle(FHIRPersistenceJDBCMetric.M_JDBC_INSERT_TOKEN_REFS.name())) {
- for (Map.Entry> entry: recordMap.entrySet()) {
- insertResourceTokenRefs(entry.getKey(), entry.getValue());
- insertSystemResourceTokenRefs(entry.getKey(), entry.getValue());
- }
- }
-
- // Split reference records by resource type
- Map> referenceRecordMap = new HashMap<>();
- for (ResourceReferenceValueRec rtv: referenceRecords) {
- List list = referenceRecordMap.computeIfAbsent(rtv.getResourceType(), k -> { return new ArrayList<>(); });
- list.add(rtv);
- }
-
- // process each list of reference values by resource type
- try (MetricHandle m = requestContext.getMetricHandle(FHIRPersistenceJDBCMetric.M_JDBC_INSERT_REF_VALUES.name())) {
- for (Map.Entry> entry: referenceRecordMap.entrySet()) {
- insertRefValues(entry.getKey(), entry.getValue());
- }
- }
-
- // Split profile values by resource type
- Map> profileMap = new HashMap<>();
- for (ResourceProfileRec rtv: profileRecs) {
- List list = profileMap.computeIfAbsent(rtv.getResourceType(), k -> { return new ArrayList<>(); });
- list.add(rtv);
- }
-
- try (MetricHandle m = requestContext.getMetricHandle(FHIRPersistenceJDBCMetric.M_JDBC_INSERT_PROFILES.name())) {
- for (Map.Entry> entry: profileMap.entrySet()) {
- insertResourceProfiles(entry.getKey(), entry.getValue());
- insertSystemResourceProfiles(entry.getKey(), entry.getValue());
- }
- }
-
- // Split tag records by resource type
- Map> tagMap = new HashMap<>();
- for (ResourceTokenValueRec rtv: tagRecs) {
- List list = tagMap.computeIfAbsent(rtv.getResourceType(), k -> { return new ArrayList<>(); });
- list.add(rtv);
- }
-
- try (MetricHandle m = requestContext.getMetricHandle(FHIRPersistenceJDBCMetric.M_JDBC_INSERT_TAGS.name())) {
- for (Map.Entry> entry: tagMap.entrySet()) {
- insertResourceTags(entry.getKey(), entry.getValue());
- insertSystemResourceTags(entry.getKey(), entry.getValue());
- }
- }
-
- // Split security records by resource type
- Map> securityMap = new HashMap<>();
- for (ResourceTokenValueRec rtv: securityRecs) {
- List list = securityMap.computeIfAbsent(rtv.getResourceType(), k -> { return new ArrayList<>(); });
- list.add(rtv);
- }
-
- try (MetricHandle m = requestContext.getMetricHandle(FHIRPersistenceJDBCMetric.M_JDBC_INSERT_SECURITY.name())) {
- for (Map.Entry> entry: securityMap.entrySet()) {
- insertResourceSecurity(entry.getKey(), entry.getValue());
- insertSystemResourceSecurity(entry.getKey(), entry.getValue());
- }
- }
- }
-
- /**
- * Build a unique list of parameter names then sort before resolving the ids. This reduces
- * the chance we'll get a deadlock under high concurrency conditions
- * @param records
- * @param referenceRecords
- * @param profileRecs
- * @param tagRecs
- * @param securityRecs
- * @return true if we processed one or more records
- */
- private boolean collectAndResolveParameterNames(Collection records, Collection referenceRecords, Collection profileRecs,
- Collection tagRecs, Collection securityRecs) throws FHIRPersistenceException {
-
- List recList = new ArrayList<>();
- recList.addAll(records);
- recList.addAll(referenceRecords);
- recList.addAll(profileRecs);
- recList.addAll(tagRecs);
- recList.addAll(securityRecs);
-
- // Build a unique list of parameter names and then sort
- Set parameterNameSet = new HashSet<>(recList.stream().map(rec -> rec.getParameterName()).collect(Collectors.toList()));
- List parameterNameList = new ArrayList<>(parameterNameSet);
- parameterNameList.sort(String::compareTo);
-
- // Do lookups in order (deadlock protection)
- for (String parameterName: parameterNameList) {
- // The cache holds a local map of name to id, so no need to duplicate that here
- getParameterNameId(parameterName);
- }
-
- // Fetch the values that we just cached in the previous loop
- for (ResourceRefRec rec: recList) {
- rec.setParameterNameId(getParameterNameId(rec.getParameterName()));
- }
- return recList.size() > 0;
- }
-
- @Override
- public List readCommonTokenValueIdList(final String tokenValue) {
- final List result = new ArrayList<>();
- final String SQL = ""
- + "SELECT c.common_token_value_id "
- + " FROM common_token_values c "
- + " WHERE c.token_value = ?";
- try (PreparedStatement ps = connection.prepareStatement(SQL)) {
- ps.setString(1, tokenValue);
- ResultSet rs = ps.executeQuery();
- while (rs.next()) {
- result.add(rs.getLong(1));
- }
- } catch (SQLException x) {
- logger.log(Level.SEVERE, SQL, x);
- throw translator.translate(x);
- }
-
- return result;
- }
-
- /**
- * Get the id from the local (tenant-specific) identity cache, or read/create using
- * the database if needed.
- * @param parameterName
- * @return
- * @throws FHIRPersistenceDBConnectException
- * @throws FHIRPersistenceDataAccessException
- */
- protected int getParameterNameId(String parameterName) throws FHIRPersistenceDBConnectException, FHIRPersistenceDataAccessException {
- Integer result = parameterNameCache.getId(parameterName);
- if (result == null) {
- result = readOrAddParameterNameId(parameterName);
- parameterNameCache.addEntry(parameterName, result);
- }
- return result;
- }
-
- /**
- * Fetch the id for the given parameter name from the database, creating a new entry if required.
- * @param parameterName
- * @return
- * @throws FHIRPersistenceDBConnectException
- * @throws FHIRPersistenceDataAccessException
- */
- protected abstract int readOrAddParameterNameId(String parameterName) throws FHIRPersistenceDBConnectException, FHIRPersistenceDataAccessException;
-
- protected void upsertLogicalResourceIdents(List unresolved) throws FHIRPersistenceException {
- if (unresolved.isEmpty()) {
- return;
- }
-
- // Build a unique set of logical_resource_ident keys
- Set keys = unresolved.stream().map(v -> new LogicalResourceIdentValue(v.getRefResourceTypeId(), v.getRefLogicalId())).collect(Collectors.toSet());
- List missing = new ArrayList<>(keys);
- // Sort the list in logicalId,resourceTypeId order
- missing.sort((a,b) -> {
- int result = a.getLogicalId().compareTo(b.getLogicalId());
- if (result == 0) {
- result = Integer.compare(a.getResourceTypeId(), b.getResourceTypeId());
- }
- return result;
- });
- addMissingLogicalResourceIdents(missing);
-
- // Now fetch all the identity records we just created so that we can
- // process the unresolved list of ResourceReferenceValueRec records
- Map lrIdentMap = new HashMap<>();
- fetchLogicalResourceIdentIds(lrIdentMap, missing);
-
- // Now we can use the map to find the logical_resource_id for each of the unresolved
- // ResourceReferenceValueRec records
- for (ResourceReferenceValueRec rec: unresolved) {
- LogicalResourceIdentKey key = new LogicalResourceIdentKey(rec.getRefResourceTypeId(), rec.getRefLogicalId());
- LogicalResourceIdentValue val = lrIdentMap.get(key);
- if (val != null) {
- rec.setRefLogicalResourceId(val.getLogicalResourceId());
- } else {
- // Shouldn't happen, but be defensive in case someone breaks something
- throw new FHIRPersistenceException("logical_resource_idents still missing after upsert");
- }
- }
- }
-
-
- /**
- * Build and prepare a statement to fetch the code_system_id and code_system_name
- * from the code_systems table for all the given (unresolved) code system values
- * @param values
- * @return
- * @throws SQLException
- */
- protected PreparedStatement buildLogicalResourceIdentSelectStatement(List values) throws SQLException {
- StringBuilder query = new StringBuilder();
- query.append("SELECT lri.resource_type_id, lri.logical_id, lri.logical_resource_id ");
- query.append(" FROM logical_resource_ident AS lri ");
- query.append(" JOIN (VALUES ");
- for (int i=0; i 0) {
- query.append(",");
- }
- query.append("(?,?)");
- }
- query.append(") AS v(resource_type_id, logical_id) ");
- query.append(" ON (lri.resource_type_id = v.resource_type_id AND lri.logical_id = v.logical_id)");
- PreparedStatement ps = connection.prepareStatement(query.toString());
- // bind the parameter values
- int param = 1;
- for (LogicalResourceIdentValue val: values) {
- ps.setInt(param++, val.getResourceTypeId());
- ps.setString(param++, val.getLogicalId());
- }
-
- if (logger.isLoggable(Level.FINE)) {
- String params = String.join(",", values.stream().map(v -> "(" + v.getResourceTypeId() + "," + v.getLogicalId() + ")").collect(Collectors.toList()));
- logger.fine("ident fetch: " + query.toString() + "; params: " + params);
- }
-
- return ps;
- }
-
- /**
- * These logical_resource_ident values weren't found in the database, so we need to try and add them.
- * We have to deal with concurrency here - there's a chance another thread could also
- * be trying to add them. To avoid deadlocks, it's important to do any inserts in a
- * consistent order. At the end, we should be able to read back values for each entry
- * @param missing
- */
- protected void addMissingLogicalResourceIdents(List missing) throws FHIRPersistenceException {
-
- // simplified implementation which handles inserts individually
- final String nextVal = translator.nextValue(schemaName, "fhir_sequence");
- StringBuilder insert = new StringBuilder();
- insert.append("INSERT INTO logical_resource_ident (resource_type_id, logical_id, logical_resource_id) VALUES (?,?,");
- insert.append(nextVal); // next sequence value
- insert.append(")");
-
- logger.fine(() -> "ident insert: " + insert.toString());
- try (PreparedStatement ps = connection.prepareStatement(insert.toString())) {
- for (LogicalResourceIdentKey value: missing) {
- ps.setInt(1, value.getResourceTypeId());
- ps.setString(2, value.getLogicalId());
- try {
- ps.executeUpdate();
- } catch (SQLException x) {
- if (getTranslator().isDuplicate(x)) {
- // do nothing
- } else {
- throw x;
- }
- }
- }
- } catch (SQLException x) {
- logger.log(Level.SEVERE, "logical_resource_ident insert failed: " + insert.toString(), x);
- throw new FHIRPersistenceException("logical_resource_ident insert failed");
- }
- }
-
- protected void fetchLogicalResourceIdentIds(Map lrIdentMap, List unresolved) throws FHIRPersistenceException {
-
- int resultCount = 0;
- final int maxValuesPerStatement = 512;
- int offset = 0;
- while (offset < unresolved.size()) {
- int remaining = unresolved.size() - offset;
- int subSize = Math.min(remaining, maxValuesPerStatement);
- List sub = unresolved.subList(offset, offset+subSize); // remember toIndex is exclusive
- offset += subSize; // set up for the next iteration
- try (PreparedStatement ps = buildLogicalResourceIdentSelectStatement(sub)) {
- ResultSet rs = ps.executeQuery();
- // We can't rely on the order of result rows matching the order of the in-list,
- // so we have to go back to our map to look up each LogicalResourceIdentValue
- while (rs.next()) {
- resultCount++;
- final int resourceTypeId = rs.getInt(1);
- final String logicalId = rs.getString(2);
- LogicalResourceIdentKey key = new LogicalResourceIdentKey(resourceTypeId, logicalId);
- LogicalResourceIdentValue identValue = new LogicalResourceIdentValue(resourceTypeId, logicalId);
- identValue.setLogicalResourceId(rs.getLong(3));
- lrIdentMap.put(key, identValue);
- }
- } catch (SQLException x) {
- logger.log(Level.SEVERE, "logical resource ident fetch failed", x);
- throw new FHIRPersistenceException("logical resource ident fetch failed");
- }
- }
- // quick check to make sure we got everything we expected
- if (resultCount < unresolved.size()) {
- throw new FHIRPersistenceException("logical_resource_ident fetch did not fetch everything expected");
- }
- }
-}
\ No newline at end of file
diff --git a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/derby/DerbyCommonValuesDAO.java b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/derby/DerbyCommonValuesDAO.java
new file mode 100644
index 00000000000..6b2fcd8a191
--- /dev/null
+++ b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/derby/DerbyCommonValuesDAO.java
@@ -0,0 +1,81 @@
+/*
+ * (C) Copyright IBM Corp. 2020, 2022
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package com.ibm.fhir.persistence.jdbc.derby;
+
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Set;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import com.ibm.fhir.database.utils.api.IDatabaseTranslator;
+import com.ibm.fhir.persistence.jdbc.dao.impl.CommonValuesDAO;
+import com.ibm.fhir.persistence.jdbc.dto.CommonTokenValue;
+import com.ibm.fhir.persistence.jdbc.dto.CommonTokenValueResult;
+
+
+/**
+ * Derby-specific extension of the {@link CommonValuesDAO} to work around
+ * some SQL syntax and Derby concurrency issues
+ */
+public class DerbyCommonValuesDAO extends CommonValuesDAO {
+ private static final Logger logger = Logger.getLogger(DerbyCommonValuesDAO.class.getName());
+
+ /**
+ * Public constructor
+ * @param t
+ * @param c
+ * @param schemaName
+ */
+ public DerbyCommonValuesDAO(IDatabaseTranslator t, Connection c, String schemaName) {
+ super(t, c, schemaName);
+ }
+
+ @Override
+ public Set readCommonTokenValueIds(Collection tokenValues) {
+ if (tokenValues.isEmpty()) {
+ return Collections.emptySet();
+ }
+
+ Set result = new HashSet<>();
+
+ StringBuilder select = new StringBuilder()
+ .append("SELECT c.token_value, c.code_system_id, c.common_token_value_id ")
+ .append(" FROM common_token_values c")
+ .append(" WHERE ");
+
+ String delim = "";
+ for (CommonTokenValue ctv : tokenValues) {
+ select.append(delim);
+ select.append("(c.token_value = ? AND c.code_system_id = " + ctv.getCodeSystemId() + ")");
+ delim = " OR ";
+ }
+
+ try (PreparedStatement ps = getConnection().prepareStatement(select.toString())) {
+ Iterator iterator = tokenValues.iterator();
+ for (int i = 1; i <= tokenValues.size(); i++) {
+ ps.setString(i, iterator.next().getTokenValue());
+ }
+
+ ResultSet rs = ps.executeQuery();
+ while (rs.next()) {
+ result.add(new CommonTokenValueResult(rs.getString(1), rs.getInt(2), rs.getLong(3)));
+ }
+ } catch (SQLException x) {
+ logger.log(Level.SEVERE, select.toString(), x);
+ throw getTranslator().translate(x);
+ }
+
+ return result;
+ }
+}
\ No newline at end of file
diff --git a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/derby/DerbyResourceDAO.java b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/derby/DerbyResourceDAO.java
index 3aaf2d20444..1a3a5252798 100644
--- a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/derby/DerbyResourceDAO.java
+++ b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/derby/DerbyResourceDAO.java
@@ -34,7 +34,6 @@
import com.ibm.fhir.persistence.jdbc.FHIRPersistenceJDBCCache;
import com.ibm.fhir.persistence.jdbc.connection.FHIRDbFlavor;
import com.ibm.fhir.persistence.jdbc.dao.api.FHIRDAOConstants;
-import com.ibm.fhir.persistence.jdbc.dao.api.IResourceReferenceDAO;
import com.ibm.fhir.persistence.jdbc.dao.api.ParameterDAO;
import com.ibm.fhir.persistence.jdbc.dao.impl.ResourceDAOImpl;
import com.ibm.fhir.persistence.jdbc.dto.ExtractedParameterValue;
@@ -66,8 +65,15 @@ public class DerbyResourceDAO extends ResourceDAOImpl {
private static final DerbyTranslator translator = new DerbyTranslator();
- public DerbyResourceDAO(Connection connection, String schemaName, FHIRDbFlavor flavor, FHIRPersistenceJDBCCache cache, IResourceReferenceDAO rrd) {
- super(connection, schemaName, flavor, cache, rrd);
+ /**
+ * Public constructor
+ * @param connection
+ * @param schemaName
+ * @param flavor
+ * @param cache
+ */
+ public DerbyResourceDAO(Connection connection, String schemaName, FHIRDbFlavor flavor, FHIRPersistenceJDBCCache cache) {
+ super(connection, schemaName, flavor, cache);
}
/**
@@ -75,9 +81,11 @@ public DerbyResourceDAO(Connection connection, String schemaName, FHIRDbFlavor f
* for a stand-alone full FHIR server.
* @param strat the connection strategy
* @param trxSynchRegistry
+ * @param cache
+ * @param ptdi
*/
- public DerbyResourceDAO(Connection connection, String schemaName, FHIRDbFlavor flavor, TransactionSynchronizationRegistry trxSynchRegistry, FHIRPersistenceJDBCCache cache, IResourceReferenceDAO rrd, ParameterTransactionDataImpl ptdi) {
- super(connection, schemaName, flavor, trxSynchRegistry, cache, rrd, ptdi);
+ public DerbyResourceDAO(Connection connection, String schemaName, FHIRDbFlavor flavor, TransactionSynchronizationRegistry trxSynchRegistry, FHIRPersistenceJDBCCache cache, ParameterTransactionDataImpl ptdi) {
+ super(connection, schemaName, flavor, trxSynchRegistry, cache, ptdi);
}
@Override
@@ -180,8 +188,8 @@ public Resource insert(Resource resource, List paramete
* This works because we never delete a logical_resource record, and so don't have to deal
* with concurrency issues caused when deletes are mingled with inserts/updates
*
- * Note the execution flow aligns very closely with the DB2 stored procedure
- * implementation (fhir-persistence-schema/src/main/resources/db2/add_any_resource.sql)
+ * Note the execution flow aligns very closely with the PostgreSQL stored procedure
+ * implementation (fhir-persistence-schema/src/main/resources/postgres/add_any_resource.sql)
*
* @param tablePrefix
* @param parameters
diff --git a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/derby/DerbyResourceReferenceDAO.java b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/derby/DerbyResourceReferenceDAO.java
deleted file mode 100644
index 7728081e180..00000000000
--- a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/derby/DerbyResourceReferenceDAO.java
+++ /dev/null
@@ -1,368 +0,0 @@
-/*
- * (C) Copyright IBM Corp. 2020, 2021
- *
- * SPDX-License-Identifier: Apache-2.0
- */
-
-package com.ibm.fhir.persistence.jdbc.derby;
-
-import java.sql.Connection;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.logging.Level;
-import java.util.logging.Logger;
-import java.util.stream.Collectors;
-
-import com.ibm.fhir.database.utils.api.IDatabaseTranslator;
-import com.ibm.fhir.persistence.exception.FHIRPersistenceDataAccessException;
-import com.ibm.fhir.persistence.exception.FHIRPersistenceException;
-import com.ibm.fhir.persistence.jdbc.dao.api.ICommonTokenValuesCache;
-import com.ibm.fhir.persistence.jdbc.dao.api.ILogicalResourceIdentCache;
-import com.ibm.fhir.persistence.jdbc.dao.api.INameIdCache;
-import com.ibm.fhir.persistence.jdbc.dao.api.LogicalResourceIdentKey;
-import com.ibm.fhir.persistence.jdbc.dao.api.LogicalResourceIdentValue;
-import com.ibm.fhir.persistence.jdbc.dao.impl.ResourceReferenceDAO;
-import com.ibm.fhir.persistence.jdbc.dao.impl.ResourceTokenValueRec;
-import com.ibm.fhir.persistence.jdbc.dto.CommonTokenValue;
-import com.ibm.fhir.persistence.jdbc.dto.CommonTokenValueResult;
-import com.ibm.fhir.persistence.jdbc.exception.FHIRPersistenceDBConnectException;
-import com.ibm.fhir.persistence.params.api.ParamSchemaConstants;
-import com.ibm.fhir.persistence.params.api.ParameterNameDAO;
-import com.ibm.fhir.persistence.params.database.DerbyParameterNamesDAO;
-
-
-/**
- * Derby-specific extension of the {@link ResourceReferenceDAO} to work around
- * some SQL syntax and Derby concurrency issues
- */
-public class DerbyResourceReferenceDAO extends ResourceReferenceDAO {
- private static final Logger logger = Logger.getLogger(DerbyResourceReferenceDAO.class.getName());
-
- private static final int BATCH_SIZE = 100;
-
- /**
- * Public constructor
- * @param t
- * @param c
- * @param schemaName
- * @param cache
- * @param parameterNameCache
- * @param logicalResourceIdentCache
- */
- public DerbyResourceReferenceDAO(IDatabaseTranslator t, Connection c, String schemaName, ICommonTokenValuesCache cache, INameIdCache parameterNameCache,
- ILogicalResourceIdentCache logicalResourceIdentCache) {
- super(t, c, schemaName, cache, parameterNameCache, logicalResourceIdentCache);
- }
-
- @Override
- public Set readCommonTokenValueIds(Collection tokenValues) {
- if (tokenValues.isEmpty()) {
- return Collections.emptySet();
- }
-
- Set result = new HashSet<>();
-
- StringBuilder select = new StringBuilder()
- .append("SELECT c.token_value, c.code_system_id, c.common_token_value_id ")
- .append(" FROM common_token_values c")
- .append(" WHERE ");
-
- String delim = "";
- for (CommonTokenValue ctv : tokenValues) {
- select.append(delim);
- select.append("(c.token_value = ? AND c.code_system_id = " + ctv.getCodeSystemId() + ")");
- delim = " OR ";
- }
-
- try (PreparedStatement ps = getConnection().prepareStatement(select.toString())) {
- Iterator iterator = tokenValues.iterator();
- for (int i = 1; i <= tokenValues.size(); i++) {
- ps.setString(i, iterator.next().getTokenValue());
- }
-
- ResultSet rs = ps.executeQuery();
- while (rs.next()) {
- result.add(new CommonTokenValueResult(rs.getString(1), rs.getInt(2), rs.getLong(3)));
- }
- } catch (SQLException x) {
- logger.log(Level.SEVERE, select.toString(), x);
- throw getTranslator().translate(x);
- }
-
- return result;
- }
-
- @Override
- public void doCodeSystemsUpsert(String paramList, Collection sortedSystemNames) {
-
- // Ideally we'd use an INSERT-FROM-NEGATIVE-OUTER-JOIN here to make sure
- // we only try to insert rows that don't already exist, but this doesn't
- // work with Derby (PostgreSQL has a similar issue, hence the ON CONFLICT
- // DO NOTHING strategy there). For Derby, we are left to handle this
- // ourselves, and just do things row-by-row:
- final String nextVal = getTranslator().nextValue(getSchemaName(), "fhir_ref_sequence");
- final String INS = ""
- + "INSERT INTO code_systems (code_system_id, code_system_name) "
- + " VALUES (" + nextVal + ", ?)";
- try (PreparedStatement ps = getConnection().prepareStatement(INS)) {
- for (String codeSystemName: sortedSystemNames) {
- ps.setString(1, codeSystemName);
-
- try {
- ps.executeUpdate();
- } catch (SQLException x) {
- if (getTranslator().isDuplicate(x)) {
- // ignore because this row has already been inserted by another thread
- } else {
- throw x;
- }
- }
- }
- } catch (SQLException x) {
- logger.log(Level.SEVERE, INS, x);
- throw getTranslator().translate(x);
- }
- }
-
- @Override
- protected void doCodeSystemsFetch(Map idMap, String inList, List sortedSystemNames) {
- // For Derby, we get deadlocks when selecting using the in-list method (see parent implementation
- // of this method). Instead, we execute individual statements in the order of the sortedSystemNames
- // list so that the (S) locks will be acquired in the same order as the (X) locks obtained when
- // inserting.
- final String SQL = "SELECT code_system_id FROM code_systems WHERE code_system_name = ?";
-
- try (PreparedStatement ps = getConnection().prepareStatement(SQL)) {
- for (String codeSystemName: sortedSystemNames) {
- ps.setString(1, codeSystemName);
- ResultSet rs = ps.executeQuery();
- if (rs.next()) {
- idMap.put(codeSystemName, rs.getInt(1));
- }
- }
- } catch (SQLException x) {
- logger.log(Level.SEVERE, SQL, x);
- throw getTranslator().translate(x);
- }
- }
-
- @Override
- public void doCanonicalValuesUpsert(String paramList, Collection sortedURLS) {
-
- // Derby doesn't like really huge VALUES lists, so we instead need
- // to go with a declared temporary table. As with code_systems_tmp, we generate
- // the id here to allow for better deadlock protection later
- final String nextVal = getTranslator().nextValue(getSchemaName(), ParamSchemaConstants.CANONICAL_ID_SEQ);
- final String insert = "INSERT INTO SESSION.canonical_values_tmp (url, canonical_id) VALUES (?," + nextVal + ")";
- int batchCount = 0;
- try (PreparedStatement ps = getConnection().prepareStatement(insert)) {
- for (String url: sortedURLS) {
- ps.setString(1, url);
- ps.addBatch();
-
- if (++batchCount == BATCH_SIZE) {
- ps.executeBatch();
- batchCount = 0;
- }
- }
-
- if (batchCount > 0) {
- ps.executeBatch();
- }
- } catch (SQLException x) {
- logger.log(Level.SEVERE, insert.toString(), x);
- throw getTranslator().translate(x);
- }
-
- // Upsert values. See the similar code_systems insert for details
- // about deadlock protection
- StringBuilder upsert = new StringBuilder();
- upsert.append("INSERT INTO common_canonical_values (canonical_id, url) ");
- upsert.append(" SELECT src.canonical_id, src.url ");
- upsert.append(" FROM SESSION.canonical_values_tmp src ");
- upsert.append(" LEFT OUTER JOIN common_canonical_values cs ");
- upsert.append(" ON cs.url = src.url ");
- upsert.append(" WHERE cs.url IS NULL ");
- upsert.append(" ORDER BY src.url");
-
- try (Statement s = getConnection().createStatement()) {
- s.executeUpdate(upsert.toString());
- } catch (SQLException x) {
- logger.log(Level.SEVERE, upsert.toString(), x);
- throw getTranslator().translate(x);
- }
- }
-
- @Override
- protected void doCommonTokenValuesUpsert(String paramList, Collection sortedTokenValues) {
-
- // Doing a sorted INSERT-FROM-NEGATIVE-OUTER-JOIN apparently isn't good enough
- // to avoid deadlock issues in Derby. To address this, we need to go row by
- // row in the sorted order (similar to how CODE_SYSTEMS is handled). In most
- // cases the sortedTokenValues list should only contain new rows. However in
- // high concurrency situations we can still end up with duplicates, which is
- // why we need to handle that here
- final String INS = "INSERT INTO common_token_values (token_value, code_system_id) VALUES (?, ?)";
- try (PreparedStatement ps = getConnection().prepareStatement(INS)) {
- for (CommonTokenValue ctv: sortedTokenValues) {
- try {
- ps.setString(1, ctv.getTokenValue());
- ps.setInt(2, ctv.getCodeSystemId());
- ps.executeUpdate();
- } catch (SQLException x) {
- if (getTranslator().isDuplicate(x)) {
- // do nothing
- } else {
- throw x;
- }
- }
- }
- } catch (SQLException x) {
- logger.log(Level.SEVERE, INS, x);
- throw getTranslator().translate(x);
- }
- }
-
- @Override
- public void upsertCommonTokenValues(List values) {
- // Special case for Derby so we don't try and create monster SQL statements
- // resulting in a stack overflow when Derby attempts to parse it.
-
- // Unique list so we don't try and create the same name more than once.
- // Ignore any null token-values, because we don't want to (can't) store
- // them in our common token values table.
- Set tokenValueSet = values.stream().filter(x -> x.getTokenValue() != null).map(xr -> new CommonTokenValue(xr.getCodeSystemValue(), xr.getCodeSystemValueId(), xr.getTokenValue())).collect(Collectors.toSet());
-
- if (tokenValueSet.isEmpty()) {
- // nothing to do
- return;
- }
-
- // Sort the values so we always process in the same order (deadlock protection)
- List sortedTokenValues = new ArrayList<>(tokenValueSet);
- sortedTokenValues.sort(CommonTokenValue::compareTo);
-
- final String paramListStr = null;
- doCommonTokenValuesUpsert(paramListStr, sortedTokenValues);
-
- // Fetch the ids for all the records we need. Because we can have
- // read (S) locks conflicting with write (X) locks, it's important
- // to do this fetching in exactly the same order we try to insert.
- // Unfortunately, for Derby this means going row-by-row (just like we
- // do for CODE_SYSTEMS).
- final String FETCH = ""
- + " SELECT common_token_value_id "
- + " FROM common_token_values "
- + " WHERE token_value = ?"
- + " AND code_system_id = ?";
-
- Map idMap = new HashMap<>();
- try (PreparedStatement ps = getConnection().prepareStatement(FETCH)) {
- for (CommonTokenValue ctv: sortedTokenValues) {
- ps.setString(1, ctv.getTokenValue());
- ps.setInt(2, ctv.getCodeSystemId());
- ResultSet rs = ps.executeQuery();
- if (rs.next()) {
- idMap.put(ctv, rs.getLong(1));
- }
- }
- } catch (SQLException x) {
- throw getTranslator().translate(x);
- }
-
- // Now update the ids for all the matching systems in our list
- for (ResourceTokenValueRec xr: values) {
- // ignore entries with null tokenValue elements - we don't store them in common_token_values
- if (xr.getTokenValue() != null) {
- CommonTokenValue key = new CommonTokenValue(xr.getCodeSystemValue(), xr.getCodeSystemValueId(), xr.getTokenValue());
- Long id = idMap.get(key);
- if (id != null) {
- xr.setCommonTokenValueId(id);
-
- // update the thread-local cache with this id. The values aren't committed to the shared cache
- // until the transaction commits
- getCache().addTokenValue(key, id);
- }
- }
- }
- }
-
- @Override
- protected int readOrAddParameterNameId(String parameterName) throws FHIRPersistenceDBConnectException, FHIRPersistenceDataAccessException {
- final ParameterNameDAO pnd = new DerbyParameterNamesDAO(getConnection(), getSchemaName());
- return pnd.readOrAddParameterNameId(parameterName);
- }
-
- @Override
- protected PreparedStatement buildLogicalResourceIdentSelectStatement(List values) throws SQLException {
- // Derby doesn't support a VALUES table list, so instead we simply build a big
- // OR predicate
- StringBuilder query = new StringBuilder();
- query.append("SELECT lri.resource_type_id, lri.logical_id, lri.logical_resource_id ");
- query.append(" FROM logical_resource_ident AS lri ");
- query.append(" WHERE ");
- for (int i=0; i 0) {
- query.append(" OR ");
- }
- query.append("(resource_type_id = ? AND logical_id = ?)");
- }
- PreparedStatement ps = getConnection().prepareStatement(query.toString());
- // bind the parameter values
- int param = 1;
- for (LogicalResourceIdentValue val: values) {
- ps.setInt(param++, val.getResourceTypeId());
- ps.setString(param++, val.getLogicalId());
- }
-
- if (logger.isLoggable(Level.FINE)) {
- String params = String.join(",", values.stream().map(v -> "(" + v.getResourceTypeId() + "," + v.getLogicalId() + ")").collect(Collectors.toList()));
- logger.fine("ident fetch: " + query.toString() + "; params: " + params);
- }
-
- return ps;
- }
-
- @Override
- protected void fetchLogicalResourceIdentIds(Map lrIdentMap, List unresolved) throws FHIRPersistenceException {
- // For Derby, we opt to do this row by row so that we can keep the selects in order which
- // helps us to avoid deadlocks due to lock compatibility issues with Derby
- StringBuilder query = new StringBuilder();
- query.append("SELECT lri.logical_resource_id ");
- query.append(" FROM logical_resource_ident AS lri ");
- query.append(" WHERE lri.resource_type_id = ? AND lri.logical_id = ?");
- final String sql = query.toString();
- try (PreparedStatement ps = getConnection().prepareStatement(sql)) {
- for (LogicalResourceIdentValue value: unresolved) {
- ps.setInt(1, value.getResourceTypeId());
- ps.setString(2, value.getLogicalId());
- ResultSet rs = ps.executeQuery();
- if (rs.next()) {
- final long logicalResourceId = rs.getLong(1);
- LogicalResourceIdentKey key = new LogicalResourceIdentKey(value.getResourceTypeId(), value.getLogicalId());
- value.setLogicalResourceId(logicalResourceId);
- lrIdentMap.put(key, value);
- } else {
- // something wrong with our data handling code because we should already have values
- // for every logical resource at this point
- throw new FHIRPersistenceException("logical_resource_ident record missing: resourceTypeId["
- + value.getResourceTypeId() + "] logicalId[" + value.getLogicalId() + "]");
- }
- }
- } catch (SQLException x) {
- logger.log(Level.SEVERE, "logical resource ident fetch failed", x);
- throw new FHIRPersistenceException("logical resource ident fetch failed");
- }
- }
-}
\ No newline at end of file
diff --git a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/impl/FHIRPersistenceJDBCImpl.java b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/impl/FHIRPersistenceJDBCImpl.java
index 45a5df5d142..cd9446043e0 100644
--- a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/impl/FHIRPersistenceJDBCImpl.java
+++ b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/impl/FHIRPersistenceJDBCImpl.java
@@ -138,18 +138,17 @@
import com.ibm.fhir.persistence.jdbc.connection.SetMultiShardModifyModeAction;
import com.ibm.fhir.persistence.jdbc.dao.EraseResourceDAO;
import com.ibm.fhir.persistence.jdbc.dao.ReindexResourceDAO;
-import com.ibm.fhir.persistence.jdbc.dao.api.IResourceReferenceDAO;
import com.ibm.fhir.persistence.jdbc.dao.api.JDBCIdentityCache;
import com.ibm.fhir.persistence.jdbc.dao.api.ParameterDAO;
import com.ibm.fhir.persistence.jdbc.dao.api.ResourceDAO;
import com.ibm.fhir.persistence.jdbc.dao.api.ResourceIndexRecord;
+import com.ibm.fhir.persistence.jdbc.dao.impl.CommonValuesDAO;
import com.ibm.fhir.persistence.jdbc.dao.impl.FetchResourceChangesDAO;
import com.ibm.fhir.persistence.jdbc.dao.impl.FetchResourcePayloadsDAO;
import com.ibm.fhir.persistence.jdbc.dao.impl.JDBCIdentityCacheImpl;
import com.ibm.fhir.persistence.jdbc.dao.impl.ParameterDAOImpl;
import com.ibm.fhir.persistence.jdbc.dao.impl.ParameterTransportVisitor;
import com.ibm.fhir.persistence.jdbc.dao.impl.ResourceProfileRec;
-import com.ibm.fhir.persistence.jdbc.dao.impl.ResourceReferenceDAO;
import com.ibm.fhir.persistence.jdbc.dao.impl.ResourceReferenceValueRec;
import com.ibm.fhir.persistence.jdbc.dao.impl.ResourceTokenValueRec;
import com.ibm.fhir.persistence.jdbc.dao.impl.RetrieveIndexDAO;
@@ -748,11 +747,10 @@ private ResourceDAO makeResourceDAO(FHIRPersistenceContext persistenceContext, C
* @throws FHIRPersistenceException
* @throws IllegalArgumentException
*/
- private ResourceReferenceDAO makeResourceReferenceDAO(Connection connection)
+ private CommonValuesDAO makeCommonValuesDAO(Connection connection)
throws FHIRPersistenceDataAccessException, FHIRPersistenceException, IllegalArgumentException {
- return FHIRResourceDAOFactory.getResourceReferenceDAO(connection, FhirSchemaConstants.FHIR_ADMIN,
- schemaNameSupplier.getSchemaForRequestContext(connection), connectionStrategy.getFlavor(),
- this.cache);
+ return FHIRResourceDAOFactory.getCommonValuesDAO(connection, FhirSchemaConstants.FHIR_ADMIN,
+ schemaNameSupplier.getSchemaForRequestContext(connection), connectionStrategy.getFlavor());
}
/**
@@ -881,7 +879,7 @@ public MultiResourceResult search(FHIRPersistenceContext context, Class extend
connectionStrategy.applySearchOptimizerOptions(connection, SearchHelper.isCompartmentSearch(searchContext));
ResourceDAO resourceDao = makeResourceDAO(context, connection);
ParameterDAO parameterDao = makeParameterDAO(connection);
- ResourceReferenceDAO rrd = makeResourceReferenceDAO(connection);
+ CommonValuesDAO rrd = makeCommonValuesDAO(connection);
JDBCIdentityCache identityCache = new JDBCIdentityCacheImpl(cache, resourceDao, parameterDao, rrd);
List> resourceResults = null;
@@ -3217,10 +3215,9 @@ public ResourceEraseRecord erase(FHIRPersistenceContext context, EraseDTO eraseD
try (Connection connection = openConnection()) {
doCachePrefill(context, connection);
IDatabaseTranslator translator = FHIRResourceDAOFactory.getTranslatorForFlavor(connectionStrategy.getFlavor());
- IResourceReferenceDAO rrd = makeResourceReferenceDAO(connection);
EraseResourceDAO eraseDao = new EraseResourceDAO(connection, FhirSchemaConstants.FHIR_ADMIN, translator,
schemaNameSupplier.getSchemaForRequestContext(connection),
- connectionStrategy.getFlavor(), this.cache, rrd);
+ connectionStrategy.getFlavor(), this.cache);
long eraseResourceGroupId = eraseDao.erase(eraseRecord, eraseDto);
// If offloading is enabled, we need to remove the corresponding offloaded resource payloads
diff --git a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/postgres/PostgresReindexResourceDAO.java b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/postgres/PostgresReindexResourceDAO.java
index 45af38e4680..fb5c8585557 100644
--- a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/postgres/PostgresReindexResourceDAO.java
+++ b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/postgres/PostgresReindexResourceDAO.java
@@ -24,7 +24,6 @@
import com.ibm.fhir.persistence.jdbc.FHIRPersistenceJDBCCache;
import com.ibm.fhir.persistence.jdbc.connection.FHIRDbFlavor;
import com.ibm.fhir.persistence.jdbc.dao.ReindexResourceDAO;
-import com.ibm.fhir.persistence.jdbc.dao.api.IResourceReferenceDAO;
import com.ibm.fhir.persistence.jdbc.dao.api.ParameterDAO;
import com.ibm.fhir.persistence.jdbc.dao.api.ResourceIndexRecord;
import com.ibm.fhir.persistence.jdbc.impl.ParameterTransactionDataImpl;
@@ -91,10 +90,9 @@ public class PostgresReindexResourceDAO extends ReindexResourceDAO {
* @param schemaName
* @param flavor
* @param cache
- * @param rrd
*/
- public PostgresReindexResourceDAO(Connection connection, IDatabaseTranslator translator, ParameterDAO parameterDao, String schemaName, FHIRDbFlavor flavor, FHIRPersistenceJDBCCache cache, IResourceReferenceDAO rrd) {
- super(connection, translator, parameterDao, schemaName, flavor, cache, rrd);
+ public PostgresReindexResourceDAO(Connection connection, IDatabaseTranslator translator, ParameterDAO parameterDao, String schemaName, FHIRDbFlavor flavor, FHIRPersistenceJDBCCache cache) {
+ super(connection, translator, parameterDao, schemaName, flavor, cache);
}
/**
@@ -106,11 +104,10 @@ public PostgresReindexResourceDAO(Connection connection, IDatabaseTranslator tra
* @param flavor
* @param trxSynchRegistry
* @param cache
- * @param rrd
*/
- public PostgresReindexResourceDAO(Connection connection, IDatabaseTranslator translator, ParameterDAO parameterDao, String schemaName, FHIRDbFlavor flavor, TransactionSynchronizationRegistry trxSynchRegistry, FHIRPersistenceJDBCCache cache, IResourceReferenceDAO rrd,
+ public PostgresReindexResourceDAO(Connection connection, IDatabaseTranslator translator, ParameterDAO parameterDao, String schemaName, FHIRDbFlavor flavor, TransactionSynchronizationRegistry trxSynchRegistry, FHIRPersistenceJDBCCache cache,
ParameterTransactionDataImpl ptdi) {
- super(connection, translator, parameterDao, schemaName, flavor, trxSynchRegistry, cache, rrd, ptdi);
+ super(connection, translator, parameterDao, schemaName, flavor, trxSynchRegistry, cache, ptdi);
}
@Override
diff --git a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/postgres/PostgresResourceDAO.java b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/postgres/PostgresResourceDAO.java
index b2a29c03a26..38dfae312cd 100644
--- a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/postgres/PostgresResourceDAO.java
+++ b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/postgres/PostgresResourceDAO.java
@@ -33,7 +33,6 @@
import com.ibm.fhir.persistence.jdbc.FHIRPersistenceJDBCCache;
import com.ibm.fhir.persistence.jdbc.connection.FHIRDbFlavor;
import com.ibm.fhir.persistence.jdbc.dao.api.FHIRDAOConstants;
-import com.ibm.fhir.persistence.jdbc.dao.api.IResourceReferenceDAO;
import com.ibm.fhir.persistence.jdbc.dao.api.ParameterDAO;
import com.ibm.fhir.persistence.jdbc.dao.impl.ResourceDAOImpl;
import com.ibm.fhir.persistence.jdbc.dto.ExtractedParameterValue;
@@ -87,14 +86,32 @@ public class PostgresResourceDAO extends ResourceDAOImpl {
// The (optional) shard key used with sharded databases
private final Short shardKey;
- public PostgresResourceDAO(Connection connection, String schemaName, FHIRDbFlavor flavor, FHIRPersistenceJDBCCache cache, IResourceReferenceDAO rrd, Short shardKey) {
- super(connection, schemaName, flavor, cache, rrd);
+ /**
+ * Public constructor used in runtimes without UserTransaction support
+ * @param connection
+ * @param schemaName
+ * @param flavor
+ * @param cache
+ * @param shardKey
+ */
+ public PostgresResourceDAO(Connection connection, String schemaName, FHIRDbFlavor flavor, FHIRPersistenceJDBCCache cache, Short shardKey) {
+ super(connection, schemaName, flavor, cache);
this.shardKey = shardKey;
}
- public PostgresResourceDAO(Connection connection, String schemaName, FHIRDbFlavor flavor, TransactionSynchronizationRegistry trxSynchRegistry, FHIRPersistenceJDBCCache cache, IResourceReferenceDAO rrd,
+ /**
+ * Public constructor used when UserTransaction is available
+ * @param connection
+ * @param schemaName
+ * @param flavor
+ * @param trxSynchRegistry
+ * @param cache
+ * @param ptdi
+ * @param shardKey
+ */
+ public PostgresResourceDAO(Connection connection, String schemaName, FHIRDbFlavor flavor, TransactionSynchronizationRegistry trxSynchRegistry, FHIRPersistenceJDBCCache cache,
ParameterTransactionDataImpl ptdi, Short shardKey) {
- super(connection, schemaName, flavor, trxSynchRegistry, cache, rrd, ptdi);
+ super(connection, schemaName, flavor, trxSynchRegistry, cache, ptdi);
this.shardKey = shardKey;
}
diff --git a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/postgres/PostgresResourceNoProcDAO.java b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/postgres/PostgresResourceNoProcDAO.java
index b59287aa6f6..000e25396d7 100644
--- a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/postgres/PostgresResourceNoProcDAO.java
+++ b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/postgres/PostgresResourceNoProcDAO.java
@@ -33,11 +33,7 @@
import com.ibm.fhir.persistence.jdbc.FHIRPersistenceJDBCCache;
import com.ibm.fhir.persistence.jdbc.connection.FHIRDbFlavor;
import com.ibm.fhir.persistence.jdbc.dao.api.FHIRDAOConstants;
-import com.ibm.fhir.persistence.jdbc.dao.api.IResourceReferenceDAO;
-import com.ibm.fhir.persistence.jdbc.dao.api.JDBCIdentityCache;
import com.ibm.fhir.persistence.jdbc.dao.api.ParameterDAO;
-import com.ibm.fhir.persistence.jdbc.dao.impl.JDBCIdentityCacheImpl;
-import com.ibm.fhir.persistence.jdbc.dao.impl.ParameterVisitorBatchDAO;
import com.ibm.fhir.persistence.jdbc.dao.impl.ResourceDAOImpl;
import com.ibm.fhir.persistence.jdbc.dto.ExtractedParameterValue;
import com.ibm.fhir.persistence.jdbc.dto.Resource;
@@ -66,13 +62,13 @@ public class PostgresResourceNoProcDAO extends ResourceDAOImpl {
// DAO used to obtain sequence values from FHIR_REF_SEQUENCE
private FhirRefSequenceDAO fhirRefSequenceDAO;
- public PostgresResourceNoProcDAO(Connection connection, String schemaName, FHIRDbFlavor flavor, FHIRPersistenceJDBCCache cache, IResourceReferenceDAO rrd) {
- super(connection, schemaName, flavor, cache, rrd);
+ public PostgresResourceNoProcDAO(Connection connection, String schemaName, FHIRDbFlavor flavor, FHIRPersistenceJDBCCache cache) {
+ super(connection, schemaName, flavor, cache);
}
- public PostgresResourceNoProcDAO(Connection connection, String schemaName, FHIRDbFlavor flavor, TransactionSynchronizationRegistry trxSynchRegistry, FHIRPersistenceJDBCCache cache, IResourceReferenceDAO rrd,
+ public PostgresResourceNoProcDAO(Connection connection, String schemaName, FHIRDbFlavor flavor, TransactionSynchronizationRegistry trxSynchRegistry, FHIRPersistenceJDBCCache cache,
ParameterTransactionDataImpl ptdi) {
- super(connection, schemaName, flavor, trxSynchRegistry, cache, rrd, ptdi);
+ super(connection, schemaName, flavor, trxSynchRegistry, cache, ptdi);
}
@Override
@@ -431,18 +427,6 @@ public long storeResource(String tablePrefix, List para
}
}
- // Note we don't get any parameters for the resource soft-delete operation
- if (parameters != null && requireParameterUpdate) {
- // PostgreSQL doesn't support partitioned multi-tenancy, so we disable it on the DAO:
- JDBCIdentityCache identityCache = new JDBCIdentityCacheImpl(getCache(), this, parameterDao, getResourceReferenceDAO());
- try (ParameterVisitorBatchDAO pvd = new ParameterVisitorBatchDAO(conn, null, tablePrefix, false, v_logical_resource_id, 100,
- identityCache, getResourceReferenceDAO(), getTransactionData())) {
- for (ExtractedParameterValue p: parameters) {
- p.accept(pvd);
- }
- }
- }
-
// Finally, write a record to RESOURCE_CHANGE_LOG which records each event
// related to resources changes (issue-1955)
String changeType = p_is_deleted ? "D" : (v_new_resource || v_currently_deleted) ? "C" : "U";
diff --git a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/postgres/PostgresResourceReferenceDAO.java b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/postgres/PostgresResourceReferenceDAO.java
deleted file mode 100644
index e8daf92b73e..00000000000
--- a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/postgres/PostgresResourceReferenceDAO.java
+++ /dev/null
@@ -1,191 +0,0 @@
-/*
- * (C) Copyright IBM Corp. 2020, 2022
- *
- * SPDX-License-Identifier: Apache-2.0
- */
-
-package com.ibm.fhir.persistence.jdbc.postgres;
-
-import java.sql.Connection;
-import java.sql.PreparedStatement;
-import java.sql.SQLException;
-import java.util.Collection;
-import java.util.List;
-import java.util.logging.Level;
-import java.util.logging.Logger;
-
-import com.ibm.fhir.database.utils.api.IDatabaseTranslator;
-import com.ibm.fhir.persistence.exception.FHIRPersistenceDataAccessException;
-import com.ibm.fhir.persistence.exception.FHIRPersistenceException;
-import com.ibm.fhir.persistence.jdbc.dao.api.ICommonTokenValuesCache;
-import com.ibm.fhir.persistence.jdbc.dao.api.ILogicalResourceIdentCache;
-import com.ibm.fhir.persistence.jdbc.dao.api.INameIdCache;
-import com.ibm.fhir.persistence.jdbc.dao.api.LogicalResourceIdentKey;
-import com.ibm.fhir.persistence.jdbc.dao.api.LogicalResourceIdentValue;
-import com.ibm.fhir.persistence.jdbc.dao.impl.ResourceReferenceDAO;
-import com.ibm.fhir.persistence.jdbc.dto.CommonTokenValue;
-import com.ibm.fhir.persistence.jdbc.exception.FHIRPersistenceDBConnectException;
-import com.ibm.fhir.persistence.params.api.ParamSchemaConstants;
-import com.ibm.fhir.persistence.params.api.ParameterNameDAO;
-
-/**
- * Postgres-specific extension of the {@link ResourceReferenceDAO} to work around
- * some SQL syntax and Postgres concurrency issues
- */
-public class PostgresResourceReferenceDAO extends ResourceReferenceDAO {
- private static final Logger logger = Logger.getLogger(PostgresResourceReferenceDAO.class.getName());
-
- /**
- * Public constructor
- *
- * @param t
- * @param c
- * @param schemaName
- * @param cache
- * @param parameterNameCache
- * @param logicalResourceIdentCache
- */
- public PostgresResourceReferenceDAO(IDatabaseTranslator t, Connection c, String schemaName, ICommonTokenValuesCache cache, INameIdCache parameterNameCache,
- ILogicalResourceIdentCache logicalResourceIdentCache) {
- super(t, c, schemaName, cache, parameterNameCache, logicalResourceIdentCache);
- }
-
- @Override
- public void doCodeSystemsUpsert(String paramList, Collection sortedSystemNames) {
- // query is a negative outer join so we only pick the rows where
- // the row "s" from the actual table doesn't exist. Note the order by,
- // which is crucial to avoid deadlocks (even though adding code-systems
- // isn't that common).
- final String nextVal = getTranslator().nextValue(getSchemaName(), "fhir_ref_sequence");
- StringBuilder insert = new StringBuilder();
- insert.append("INSERT INTO code_systems (code_system_id, code_system_name) ");
- insert.append(" SELECT ").append(nextVal).append(", v.name ");
- insert.append(" FROM ");
- insert.append(" (VALUES ").append(paramList).append(" ) AS v(name) ");
- insert.append(" ON CONFLICT DO NOTHING ");
-
- // Note, we use PreparedStatement here on purpose. Partly because it's
- // secure coding best practice, but also because many resources will have the
- // same number of parameters, and hopefully we'll therefore share a small subset
- // of statements for better performance. Although once the cache warms up, this
- // shouldn't be called at all.
- try (PreparedStatement ps = getConnection().prepareStatement(insert.toString())) {
- // bind all the code_system_name values as parameters
- int a = 1;
- for (String name: sortedSystemNames) {
- ps.setString(a++, name);
- }
-
- ps.executeUpdate();
- } catch (SQLException x) {
- logger.log(Level.SEVERE, insert.toString(), x);
- throw getTranslator().translate(x);
- }
- }
-
- @Override
- public void doCanonicalValuesUpsert(String paramList, Collection sortedURLS) {
- // Because of how PostgreSQL MVCC implementation, the insert from negative outer
- // join pattern doesn't work...you still hit conflicts. The PostgreSQL pattern
- // for upsert is ON CONFLICT DO NOTHING, which is what we use here:
- final String nextVal = getTranslator().nextValue(getSchemaName(), ParamSchemaConstants.CANONICAL_ID_SEQ);
- StringBuilder insert = new StringBuilder();
- insert.append("INSERT INTO common_canonical_values (canonical_id, url) ");
- insert.append(" SELECT ").append(nextVal).append(", v.name ");
- insert.append(" FROM ");
- insert.append(" (VALUES ").append(paramList).append(" ) AS v(name) ");
- insert.append(" ON CONFLICT DO NOTHING ");
-
- // Note, we use PreparedStatement here on purpose. Partly because it's
- // secure coding best practice, but also because many resources will have the
- // same number of parameters, and hopefully we'll therefore share a small subset
- // of statements for better performance. Although once the cache warms up, this
- // shouldn't be called at all.
- try (PreparedStatement ps = getConnection().prepareStatement(insert.toString())) {
- // bind all the code_system_name values as parameters
- int a = 1;
- for (String name: sortedURLS) {
- ps.setString(a++, name);
- }
-
- ps.executeUpdate();
- } catch (SQLException x) {
- logger.log(Level.SEVERE, insert.toString(), x);
- throw getTranslator().translate(x);
- }
- }
-
- @Override
- protected void doCommonTokenValuesUpsert(String paramList, Collection sortedTokenValues) {
- // It would appear that Postgres MVCC doesn't properly handle the upsert pattern
- // based on not exists or a negative outer join (see the base class implementation
- // of this method for an example). It exposes a race condition, resulting in a
- // unique key duplicate value error. So instead, we have to use the Postgres custom
- // syntax to tell it to ignore any conflicts.
- StringBuilder insert = new StringBuilder();
- insert.append(" INSERT INTO common_token_values (token_value, code_system_id) ");
- insert.append(" SELECT v.token_value, v.code_system_id ");
- insert.append(" FROM (VALUES ").append(paramList).append(" ) AS v(token_value, code_system_id) ");
- insert.append(" ORDER BY v.code_system_id, v.token_value "); // minimize probability of deadlock
- insert.append(" ON CONFLICT DO NOTHING ");
-
- // Note, we use PreparedStatement here on purpose. Partly because it's
- // secure coding best practice, but also because many resources will have the
- // same number of parameters, and hopefully we'll therefore share a small subset
- // of statements for better performance. Although once the cache warms up, this
- // shouldn't be called at all.
- try (PreparedStatement ps = getConnection().prepareStatement(insert.toString())) {
- // bind all the name values as parameters
- int a = 1;
- for (CommonTokenValue tv: sortedTokenValues) {
- ps.setString(a++, tv.getTokenValue());
- ps.setInt(a++, tv.getCodeSystemId());
- }
-
- ps.executeUpdate();
- } catch (SQLException x) {
- logger.log(Level.SEVERE, insert.toString(), x);
- throw getTranslator().translate(x);
- }
- }
-
- @Override
- protected void addMissingLogicalResourceIdents(List missing) throws FHIRPersistenceException {
- // For PostgreSQL we can handle concurrency issues using ON CONFLICT DO NOTHING
- // to skip inserts for records that already exist
- final int batchSize = 256;
- final String nextVal = getTranslator().nextValue(getSchemaName(), "fhir_sequence");
- StringBuilder insert = new StringBuilder();
- insert.append("INSERT INTO logical_resource_ident (resource_type_id, logical_id, logical_resource_id) VALUES (?,?,");
- insert.append(nextVal); // next sequence value
- insert.append(") ON CONFLICT DO NOTHING");
-
- logger.fine(() -> "ident insert: " + insert.toString());
- try (PreparedStatement ps = getConnection().prepareStatement(insert.toString())) {
- int count = 0;
- for (LogicalResourceIdentKey value: missing) {
- ps.setInt(1, value.getResourceTypeId());
- ps.setString(2, value.getLogicalId());
- ps.addBatch();
- if (++count == batchSize) {
- // not too many statements in a single batch
- ps.executeBatch();
- count = 0;
- }
- }
- if (count > 0) {
- // final batch
- ps.executeBatch();
- }
- } catch (SQLException x) {
- logger.log(Level.SEVERE, "logical_resource_ident insert failed: " + insert.toString(), x);
- throw new FHIRPersistenceException("logical_resource_ident insert failed");
- }
- }
-
- @Override
- protected int readOrAddParameterNameId(String parameterName) throws FHIRPersistenceDBConnectException, FHIRPersistenceDataAccessException {
- final ParameterNameDAO pnd = new PostgresParameterNamesDAO(getConnection(), getSchemaName());
- return pnd.readOrAddParameterNameId(parameterName);
- }
-}
\ No newline at end of file
diff --git a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/util/JDBCParameterBuildingVisitor.java b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/util/JDBCParameterBuildingVisitor.java
index 885e0e3d598..1882d4e63dd 100644
--- a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/util/JDBCParameterBuildingVisitor.java
+++ b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/util/JDBCParameterBuildingVisitor.java
@@ -86,7 +86,6 @@ public class JDBCParameterBuildingVisitor extends DefaultVisitor {
private static final boolean FORCE_CASE_SENSITIVE = true;
// Datetime Limits from
- // DB2: https://www.ibm.com/support/knowledgecenter/en/SSEPGG_11.5.0/com.ibm.db2.luw.sql.ref.doc/doc/r0001029.html
// Derby: https://db.apache.org/derby/docs/10.0/manuals/reference/sqlj271.html
private static final Timestamp SMALLEST_TIMESTAMP = Timestamp.from(
ZonedDateTime.parse("0001-01-01T00:00:00.000000Z").toInstant());
diff --git a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/util/JDBCParameterCacheAdapter.java b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/util/JDBCParameterCacheAdapter.java
index 8cb800e077c..7b7acf85c8b 100644
--- a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/util/JDBCParameterCacheAdapter.java
+++ b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/util/JDBCParameterCacheAdapter.java
@@ -34,12 +34,12 @@ public Integer getParameterNameId(String parameterName) {
@Override
public Integer getCodeSystemId(String codeSystem) {
- return cache.getResourceReferenceCache().getCodeSystemId(codeSystem);
+ return cache.getCommonValuesCache().getCodeSystemId(codeSystem);
}
@Override
public Long getCommonTokenValueId(short shardKey, String codeSystem, String tokenValue) {
- return cache.getResourceReferenceCache().getCommonTokenValueId(codeSystem, tokenValue);
+ return cache.getCommonValuesCache().getCommonTokenValueId(codeSystem, tokenValue);
}
@Override
@@ -49,23 +49,23 @@ public void addParameterName(String parameterName, int parameterNameId) {
@Override
public Long getCommonCanonicalValueId(short shardKey, String url) {
- return cache.getResourceReferenceCache().getCanonicalId(url);
+ return cache.getCommonValuesCache().getCanonicalId(url);
}
@Override
public void addCommonCanonicalValue(short shardKey, String url, long commonCanonicalValueId) {
- cache.getResourceReferenceCache().addCanonicalValue(url, commonCanonicalValueId);
+ cache.getCommonValuesCache().addCanonicalValue(url, commonCanonicalValueId);
}
@Override
public void addCommonTokenValue(short shardKey, String codeSystem, int codeSystemId, String tokenValue, long commonTokenValueId) {
CommonTokenValue key = new CommonTokenValue(codeSystem, codeSystemId, tokenValue);
- cache.getResourceReferenceCache().addTokenValue(key, commonTokenValueId);
+ cache.getCommonValuesCache().addTokenValue(key, commonTokenValueId);
}
@Override
public void addCodeSystem(String codeSystem, int codeSystemId) {
- cache.getResourceReferenceCache().addCodeSystem(codeSystem, codeSystemId);
+ cache.getCommonValuesCache().addCodeSystem(codeSystem, codeSystemId);
}
@Override
diff --git a/fhir-persistence-jdbc/src/test/java/com/ibm/fhir/persistence/jdbc/cache/test/ResourceReferenceCacheImplTest.java b/fhir-persistence-jdbc/src/test/java/com/ibm/fhir/persistence/jdbc/cache/test/CommonValuesCacheImplTest.java
similarity index 92%
rename from fhir-persistence-jdbc/src/test/java/com/ibm/fhir/persistence/jdbc/cache/test/ResourceReferenceCacheImplTest.java
rename to fhir-persistence-jdbc/src/test/java/com/ibm/fhir/persistence/jdbc/cache/test/CommonValuesCacheImplTest.java
index 571cfbfbe9d..6a3d1c06a5a 100644
--- a/fhir-persistence-jdbc/src/test/java/com/ibm/fhir/persistence/jdbc/cache/test/ResourceReferenceCacheImplTest.java
+++ b/fhir-persistence-jdbc/src/test/java/com/ibm/fhir/persistence/jdbc/cache/test/CommonValuesCacheImplTest.java
@@ -1,5 +1,5 @@
/*
- * (C) Copyright IBM Corp. 2020, 2021
+ * (C) Copyright IBM Corp. 2020, 2022
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -13,20 +13,20 @@
import org.testng.annotations.Test;
-import com.ibm.fhir.persistence.jdbc.cache.CommonTokenValuesCacheImpl;
+import com.ibm.fhir.persistence.jdbc.cache.CommonValuesCacheImpl;
import com.ibm.fhir.persistence.jdbc.dao.impl.ResourceTokenValueRec;
/**
- * unit test for {@link CommonTokenValuesCacheImpl}
+ * unit test for {@link CommonValuesCacheImpl}
*/
-public class ResourceReferenceCacheImplTest {
+public class CommonValuesCacheImplTest {
@Test
public void testExternalSystemNames() {
// A cache with a limited size of 3 code systems and 2 token values
// For this test to work, we have to make sure we can always resolve
// all the code systems, so don't make the cache size smaller than 3
- CommonTokenValuesCacheImpl impl = new CommonTokenValuesCacheImpl(3, 2, 1);
+ CommonValuesCacheImpl impl = new CommonValuesCacheImpl(3, 2, 1);
impl.addCodeSystem("sys1", 1);
impl.addCodeSystem("sys2", 2);
impl.addCodeSystem("sys3", 3);
diff --git a/fhir-persistence-jdbc/src/test/java/com/ibm/fhir/persistence/jdbc/test/erase/EraseTestMain.java b/fhir-persistence-jdbc/src/test/java/com/ibm/fhir/persistence/jdbc/test/erase/EraseTestMain.java
index 4098c1b1669..4b2f3c41921 100644
--- a/fhir-persistence-jdbc/src/test/java/com/ibm/fhir/persistence/jdbc/test/erase/EraseTestMain.java
+++ b/fhir-persistence-jdbc/src/test/java/com/ibm/fhir/persistence/jdbc/test/erase/EraseTestMain.java
@@ -1,5 +1,5 @@
/*
- * (C) Copyright IBM Corp. 2021
+ * (C) Copyright IBM Corp. 2021, 2022
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -30,7 +30,7 @@
import com.ibm.fhir.persistence.jdbc.connection.FHIRDbFlavor;
import com.ibm.fhir.persistence.jdbc.connection.FHIRDbFlavorImpl;
import com.ibm.fhir.persistence.jdbc.dao.EraseResourceDAO;
-import com.ibm.fhir.persistence.jdbc.dao.api.ICommonTokenValuesCache;
+import com.ibm.fhir.persistence.jdbc.dao.api.ICommonValuesCache;
import com.ibm.fhir.persistence.jdbc.dao.api.IIdNameCache;
import com.ibm.fhir.persistence.jdbc.dao.api.ILogicalResourceIdentCache;
import com.ibm.fhir.persistence.jdbc.dao.api.INameIdCache;
@@ -90,7 +90,7 @@ protected void erase() throws Exception {
System.out.println("Got a Connection");
try {
FHIRDbFlavor flavor = new FHIRDbFlavorImpl(dbType, SchemaType.PLAIN);
- EraseResourceDAO dao = new EraseResourceDAO(c, FhirSchemaConstants.FHIR_ADMIN, translator, schemaName, flavor, new MockLocalCache(), null);
+ EraseResourceDAO dao = new EraseResourceDAO(c, FhirSchemaConstants.FHIR_ADMIN, translator, schemaName, flavor, new MockLocalCache());
ResourceEraseRecord record = new ResourceEraseRecord();
EraseDTO eraseDto = new EraseDTO();
@@ -174,7 +174,7 @@ public void clearNeedToPrefill() {
}
@Override
- public ICommonTokenValuesCache getResourceReferenceCache() {
+ public ICommonValuesCache getCommonValuesCache() {
return null;
}
diff --git a/fhir-persistence-jdbc/src/test/java/com/ibm/fhir/persistence/jdbc/test/spec/Main.java b/fhir-persistence-jdbc/src/test/java/com/ibm/fhir/persistence/jdbc/test/spec/Main.java
index 3c66a490498..b7d48fc4613 100644
--- a/fhir-persistence-jdbc/src/test/java/com/ibm/fhir/persistence/jdbc/test/spec/Main.java
+++ b/fhir-persistence-jdbc/src/test/java/com/ibm/fhir/persistence/jdbc/test/spec/Main.java
@@ -40,12 +40,12 @@
import com.ibm.fhir.persistence.context.FHIRPersistenceContext;
import com.ibm.fhir.persistence.context.FHIRPersistenceContextFactory;
import com.ibm.fhir.persistence.jdbc.FHIRPersistenceJDBCCache;
-import com.ibm.fhir.persistence.jdbc.cache.CommonTokenValuesCacheImpl;
+import com.ibm.fhir.persistence.jdbc.cache.CommonValuesCacheImpl;
import com.ibm.fhir.persistence.jdbc.cache.FHIRPersistenceJDBCCacheImpl;
import com.ibm.fhir.persistence.jdbc.cache.IdNameCache;
import com.ibm.fhir.persistence.jdbc.cache.LogicalResourceIdentCacheImpl;
import com.ibm.fhir.persistence.jdbc.cache.NameIdCache;
-import com.ibm.fhir.persistence.jdbc.dao.api.ICommonTokenValuesCache;
+import com.ibm.fhir.persistence.jdbc.dao.api.ICommonValuesCache;
import com.ibm.fhir.persistence.jdbc.dao.api.ILogicalResourceIdentCache;
import com.ibm.fhir.persistence.jdbc.impl.FHIRPersistenceJDBCImpl;
import com.ibm.fhir.schema.derby.DerbyFhirDatabase;
@@ -323,7 +323,7 @@ protected void processDerby() throws Exception {
// IConnectionProvider implementation used by the persistence
// layer to obtain connections.
try (DerbyFhirDatabase database = new DerbyFhirDatabase()) {
- ICommonTokenValuesCache rrc = new CommonTokenValuesCacheImpl(100, 100, 100);
+ ICommonValuesCache rrc = new CommonValuesCacheImpl(100, 100, 100);
ILogicalResourceIdentCache lric = new LogicalResourceIdentCacheImpl(100);
FHIRPersistenceJDBCCache cache = new FHIRPersistenceJDBCCacheImpl(new NameIdCache(),
new IdNameCache(), new NameIdCache(), rrc, lric);
@@ -377,7 +377,7 @@ protected void processDerbyNetwork() throws Exception {
PoolConnectionProvider connectionPool = new PoolConnectionProvider(cp, this.threads);
ITransactionProvider transactionProvider = new SimpleTransactionProvider(connectionPool);
FHIRConfigProvider configProvider = new DefaultFHIRConfigProvider();
- ICommonTokenValuesCache rrc = new CommonTokenValuesCacheImpl(100, 100, 100);
+ ICommonValuesCache rrc = new CommonValuesCacheImpl(100, 100, 100);
ILogicalResourceIdentCache lric = new LogicalResourceIdentCacheImpl(100);
FHIRPersistenceJDBCCache cache = new FHIRPersistenceJDBCCacheImpl(new NameIdCache(), new IdNameCache(), new NameIdCache(), rrc, lric);
@@ -433,7 +433,7 @@ protected void processPostgreSql() throws Exception {
PoolConnectionProvider connectionPool = new PoolConnectionProvider(cp, this.threads);
ITransactionProvider transactionProvider = new SimpleTransactionProvider(connectionPool);
FHIRConfigProvider configProvider = new DefaultFHIRConfigProvider();
- ICommonTokenValuesCache rrc = new CommonTokenValuesCacheImpl(100, 100, 100);
+ ICommonValuesCache rrc = new CommonValuesCacheImpl(100, 100, 100);
ILogicalResourceIdentCache lric = new LogicalResourceIdentCacheImpl(100);
FHIRPersistenceJDBCCache cache = new FHIRPersistenceJDBCCacheImpl(new NameIdCache(), new IdNameCache(), new NameIdCache(), rrc, lric);
diff --git a/fhir-persistence-jdbc/src/test/java/com/ibm/fhir/persistence/jdbc/test/spec/R4JDBCExamplesTest.java b/fhir-persistence-jdbc/src/test/java/com/ibm/fhir/persistence/jdbc/test/spec/R4JDBCExamplesTest.java
index a64ffd46164..59a402a1a80 100644
--- a/fhir-persistence-jdbc/src/test/java/com/ibm/fhir/persistence/jdbc/test/spec/R4JDBCExamplesTest.java
+++ b/fhir-persistence-jdbc/src/test/java/com/ibm/fhir/persistence/jdbc/test/spec/R4JDBCExamplesTest.java
@@ -26,12 +26,12 @@
import com.ibm.fhir.persistence.context.FHIRPersistenceContext;
import com.ibm.fhir.persistence.context.FHIRPersistenceContextFactory;
import com.ibm.fhir.persistence.jdbc.FHIRPersistenceJDBCCache;
-import com.ibm.fhir.persistence.jdbc.cache.CommonTokenValuesCacheImpl;
+import com.ibm.fhir.persistence.jdbc.cache.CommonValuesCacheImpl;
import com.ibm.fhir.persistence.jdbc.cache.FHIRPersistenceJDBCCacheImpl;
import com.ibm.fhir.persistence.jdbc.cache.IdNameCache;
import com.ibm.fhir.persistence.jdbc.cache.LogicalResourceIdentCacheImpl;
import com.ibm.fhir.persistence.jdbc.cache.NameIdCache;
-import com.ibm.fhir.persistence.jdbc.dao.api.ICommonTokenValuesCache;
+import com.ibm.fhir.persistence.jdbc.dao.api.ICommonValuesCache;
import com.ibm.fhir.persistence.jdbc.dao.api.ILogicalResourceIdentCache;
import com.ibm.fhir.persistence.jdbc.test.util.DerbyInitializer;
import com.ibm.fhir.persistence.test.common.AbstractPersistenceTest;
@@ -62,7 +62,7 @@ public void perform() throws Exception {
PoolConnectionProvider connectionPool = new PoolConnectionProvider(derbyConnectionProvider, 1);
ITransactionProvider transactionProvider = new SimpleTransactionProvider(connectionPool);
FHIRConfigProvider configProvider = new DefaultFHIRConfigProvider();
- ICommonTokenValuesCache rrc = new CommonTokenValuesCacheImpl(100, 100, 100);
+ ICommonValuesCache rrc = new CommonValuesCacheImpl(100, 100, 100);
ILogicalResourceIdentCache lric = new LogicalResourceIdentCacheImpl(100);
FHIRPersistenceJDBCCache cache = new FHIRPersistenceJDBCCacheImpl(new NameIdCache(), new IdNameCache(), new NameIdCache(), rrc, lric);
diff --git a/fhir-persistence-jdbc/src/test/java/com/ibm/fhir/persistence/jdbc/test/util/PersistenceTestSupport.java b/fhir-persistence-jdbc/src/test/java/com/ibm/fhir/persistence/jdbc/test/util/PersistenceTestSupport.java
index 9ee0795ed63..34fe0c57e37 100644
--- a/fhir-persistence-jdbc/src/test/java/com/ibm/fhir/persistence/jdbc/test/util/PersistenceTestSupport.java
+++ b/fhir-persistence-jdbc/src/test/java/com/ibm/fhir/persistence/jdbc/test/util/PersistenceTestSupport.java
@@ -18,12 +18,12 @@
import com.ibm.fhir.model.test.TestUtil;
import com.ibm.fhir.persistence.FHIRPersistence;
import com.ibm.fhir.persistence.jdbc.FHIRPersistenceJDBCCache;
-import com.ibm.fhir.persistence.jdbc.cache.CommonTokenValuesCacheImpl;
+import com.ibm.fhir.persistence.jdbc.cache.CommonValuesCacheImpl;
import com.ibm.fhir.persistence.jdbc.cache.FHIRPersistenceJDBCCacheImpl;
import com.ibm.fhir.persistence.jdbc.cache.IdNameCache;
import com.ibm.fhir.persistence.jdbc.cache.LogicalResourceIdentCacheImpl;
import com.ibm.fhir.persistence.jdbc.cache.NameIdCache;
-import com.ibm.fhir.persistence.jdbc.dao.api.ICommonTokenValuesCache;
+import com.ibm.fhir.persistence.jdbc.dao.api.ICommonValuesCache;
import com.ibm.fhir.persistence.jdbc.dao.api.ILogicalResourceIdentCache;
import com.ibm.fhir.persistence.jdbc.impl.FHIRPersistenceJDBCImpl;
import com.ibm.fhir.search.util.SearchHelper;
@@ -53,7 +53,7 @@ public PersistenceTestSupport() throws Exception {
derbyInit = new DerbyInitializer(this.testProps);
IConnectionProvider cp = derbyInit.getConnectionProvider(false);
this.connectionPool = new PoolConnectionProvider(cp, 1);
- ICommonTokenValuesCache rrc = new CommonTokenValuesCacheImpl(100, 100, 100);
+ ICommonValuesCache rrc = new CommonValuesCacheImpl(100, 100, 100);
ILogicalResourceIdentCache lric = new LogicalResourceIdentCacheImpl(100);
cache = new FHIRPersistenceJDBCCacheImpl(new NameIdCache(), new IdNameCache(), new NameIdCache(), rrc, lric);
}
diff --git a/fhir-persistence-schema/pom.xml b/fhir-persistence-schema/pom.xml
index 015ff2515a8..e6cec1d70d5 100644
--- a/fhir-persistence-schema/pom.xml
+++ b/fhir-persistence-schema/pom.xml
@@ -39,11 +39,6 @@
derbytools
true
-
- com.ibm.db2
- jcc
- true
-
org.postgresql
postgresql
diff --git a/fhir-persistence-schema/src/main/java/com/ibm/fhir/schema/app/SchemaPrinter.java b/fhir-persistence-schema/src/main/java/com/ibm/fhir/schema/app/SchemaPrinter.java
index 05de90ba4bc..59275cb1d94 100644
--- a/fhir-persistence-schema/src/main/java/com/ibm/fhir/schema/app/SchemaPrinter.java
+++ b/fhir-persistence-schema/src/main/java/com/ibm/fhir/schema/app/SchemaPrinter.java
@@ -82,11 +82,6 @@
* Without to-file, the output is the current System.out else it's schema.sql, grants.sql and stored-procedures.sql of
* the current directory.
*
- * For db2 import to
- * - schema.sql {@code db2 -tvf schema.sql}
- * - grants.sql {@code db2 -tvf grants.sql}
- * - stored-procedures.sql {@code db2 -td@ -vf stored-procedures.sql}
- *
*/
public class SchemaPrinter {
private static final String DELIMITER = ";";
diff --git a/fhir-persistence-schema/src/main/java/com/ibm/fhir/schema/control/FhirResourceTableGroup.java b/fhir-persistence-schema/src/main/java/com/ibm/fhir/schema/control/FhirResourceTableGroup.java
index f85656e1331..1a57eb5ef0c 100644
--- a/fhir-persistence-schema/src/main/java/com/ibm/fhir/schema/control/FhirResourceTableGroup.java
+++ b/fhir-persistence-schema/src/main/java/com/ibm/fhir/schema/control/FhirResourceTableGroup.java
@@ -1,5 +1,5 @@
/*
- * (C) Copyright IBM Corp. 2019, 2021
+ * (C) Copyright IBM Corp. 2019, 2022
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -94,7 +94,6 @@
import com.ibm.fhir.database.utils.model.ObjectGroup;
import com.ibm.fhir.database.utils.model.OrderedColumnDef;
import com.ibm.fhir.database.utils.model.PhysicalDataModel;
-import com.ibm.fhir.database.utils.model.SessionVariableDef;
import com.ibm.fhir.database.utils.model.Table;
import com.ibm.fhir.database.utils.model.Tablespace;
import com.ibm.fhir.database.utils.model.View;
@@ -112,9 +111,6 @@ public class FhirResourceTableGroup {
// The schema we place all of our tables into
private final String schemaName;
- // The session variable we depend on for access control
- private final SessionVariableDef sessionVariable;
-
// Build the multitenant variant of the schema
private final boolean multitenant;
@@ -137,13 +133,12 @@ public class FhirResourceTableGroup {
/**
* Public constructor
*/
- public FhirResourceTableGroup(PhysicalDataModel model, String schemaName, boolean multitenant, SessionVariableDef sessionVariable,
+ public FhirResourceTableGroup(PhysicalDataModel model, String schemaName, boolean multitenant,
Set procedureDependencies, Tablespace fhirTablespace, Collection privileges,
List withs) {
this.model = model;
this.schemaName = schemaName;
this.multitenant = multitenant;
- this.sessionVariable = sessionVariable;
this.procedureDependencies = procedureDependencies;
this.fhirTablespace = fhirTablespace;
this.resourceTablePrivileges = privileges;
@@ -221,7 +216,6 @@ public void addLogicalResources(List group, String prefix) {
.addForeignKeyConstraint("FK_" + tableName + "_LRID", schemaName, LOGICAL_RESOURCES, LOGICAL_RESOURCE_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
// Add indexes to avoid dead lock issue of derby, and improve Db2 performance
// Derby requires all columns used in where clause to be indexed, otherwise whole table lock will be
// used instead of row lock, which can cause dead lock issue frequently during concurrent accesses.
@@ -355,7 +349,6 @@ public void addResources(List group, String prefix) {
.addPrimaryKey(tableName + "_PK", RESOURCE_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.addMigration(priorVersion -> {
List statements = new ArrayList<>();
if (priorVersion < FhirSchemaVersion.V0024.vid()) {
@@ -441,7 +434,6 @@ public void addStrValues(List group, String prefix) {
.addForeignKeyConstraint(FK + tableName + "_RID", schemaName, logicalResourcesTable, LOGICAL_RESOURCE_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.addWiths(withs)
.addMigration(priorVersion -> {
List statements = new ArrayList<>();
@@ -523,7 +515,6 @@ public Table addResourceTokenRefs(List group, String prefix) {
.addForeignKeyConstraint(FK + tableName + "_LR", schemaName, LOGICAL_RESOURCES, LOGICAL_RESOURCE_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.addWiths(withs)
.addMigration(priorVersion -> {
List statements = new ArrayList<>();
@@ -609,7 +600,6 @@ public Table addRefValues(List group, String prefix) {
.addForeignKeyConstraint(FK + tableName + "_LR", schemaName, LOGICAL_RESOURCES, LOGICAL_RESOURCE_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.addWiths(withs)
.addMigration(priorVersion -> {
List statements = new ArrayList<>();
@@ -655,7 +645,6 @@ public Table addProfiles(List group, String prefix) {
.addForeignKeyConstraint(FK + tableName + "_LR", schemaName, LOGICAL_RESOURCES, LOGICAL_RESOURCE_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.addWiths(withs)
.addMigration(priorVersion -> {
List statements = new ArrayList<>();
@@ -702,7 +691,6 @@ public Table addTags(List group, String prefix) {
.addForeignKeyConstraint(FK + tableName + "_LR", schemaName, LOGICAL_RESOURCES, LOGICAL_RESOURCE_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.addWiths(withs)
.addMigration(priorVersion -> {
List statements = new ArrayList<>();
@@ -746,7 +734,6 @@ public Table addSecurity(List group, String prefix) {
.addForeignKeyConstraint(FK + tableName + "_LR", schemaName, LOGICAL_RESOURCES, LOGICAL_RESOURCE_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.addWiths(withs)
.addMigration(priorVersion -> {
List statements = new ArrayList<>();
@@ -905,7 +892,6 @@ public void addDateValues(List group, String prefix) {
.addForeignKeyConstraint(FK + tableName + "_R", schemaName, logicalResourcesTable, LOGICAL_RESOURCE_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.addWiths(withs)
.addMigration(priorVersion -> {
List statements = new ArrayList<>();
@@ -972,7 +958,6 @@ public void addNumberValues(List group, String prefix) {
.addForeignKeyConstraint(FK + tableName + "_RID", schemaName, logicalResourcesTable, LOGICAL_RESOURCE_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.addWiths(withs)
.addMigration(priorVersion -> {
List statements = new ArrayList<>();
@@ -1047,7 +1032,6 @@ public void addLatLngValues(List group, String prefix) {
.addForeignKeyConstraint(FK + tableName + "_RID", schemaName, logicalResourcesTable, LOGICAL_RESOURCE_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.addWiths(withs)
.addMigration(priorVersion -> {
List statements = new ArrayList<>();
@@ -1125,7 +1109,6 @@ public void addQuantityValues(List group, String prefix) {
.addForeignKeyConstraint(FK + tableName + "_R", schemaName, logicalResourcesTable, LOGICAL_RESOURCE_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.addWiths(withs)
.addMigration(priorVersion -> {
List statements = new ArrayList<>();
@@ -1173,7 +1156,6 @@ public void addListLogicalResourceItems(List group, String pref
.addForeignKeyConstraint(FK + LIST_LOGICAL_RESOURCE_ITEMS + "_RTID", schemaName, RESOURCE_TYPES, RESOURCE_TYPE_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.addWiths(withs)
.addMigration(priorVersion -> {
List statements = new ArrayList<>();
@@ -1219,7 +1201,6 @@ public void addPatientCurrentRefs(List group, String prefix) {
.addForeignKeyConstraint(FK + PATIENT_CURRENT_REFS + "_LRID", schemaName, PATIENT_LOGICAL_RESOURCES, LOGICAL_RESOURCE_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.addWiths(withs)
.addMigration(priorVersion -> {
List statements = new ArrayList<>();
diff --git a/fhir-persistence-schema/src/main/java/com/ibm/fhir/schema/control/FhirSchemaConstants.java b/fhir-persistence-schema/src/main/java/com/ibm/fhir/schema/control/FhirSchemaConstants.java
index 16fe9c22ee4..afc460d8d6b 100644
--- a/fhir-persistence-schema/src/main/java/com/ibm/fhir/schema/control/FhirSchemaConstants.java
+++ b/fhir-persistence-schema/src/main/java/com/ibm/fhir/schema/control/FhirSchemaConstants.java
@@ -11,9 +11,6 @@
*/
public class FhirSchemaConstants {
- // This limit is used to limit db2 size in a stored procedure.
- public static final int STORED_PROCEDURE_SIZE_LIMIT = 1048576;
-
// Make sure the connection pool is sized larger than the thread pool
public static final int CONNECTION_POOL_HEADROOM = 9;
diff --git a/fhir-persistence-schema/src/main/java/com/ibm/fhir/schema/control/FhirSchemaGenerator.java b/fhir-persistence-schema/src/main/java/com/ibm/fhir/schema/control/FhirSchemaGenerator.java
index 9a841a19c5d..39f103630be 100644
--- a/fhir-persistence-schema/src/main/java/com/ibm/fhir/schema/control/FhirSchemaGenerator.java
+++ b/fhir-persistence-schema/src/main/java/com/ibm/fhir/schema/control/FhirSchemaGenerator.java
@@ -111,7 +111,6 @@
import com.ibm.fhir.database.utils.model.PhysicalDataModel;
import com.ibm.fhir.database.utils.model.Privilege;
import com.ibm.fhir.database.utils.model.Sequence;
-import com.ibm.fhir.database.utils.model.SessionVariableDef;
import com.ibm.fhir.database.utils.model.Table;
import com.ibm.fhir.database.utils.model.Tablespace;
import com.ibm.fhir.database.utils.model.With;
@@ -157,9 +156,6 @@ public class FhirSchemaGenerator {
// Sequence used by the admin tenant tables
private Sequence tenantSequence;
- // The session variable used for row access control. All tables depend on this
- private SessionVariableDef sessionVariable;
-
private Table tenantsTable;
private Table tenantKeysTable;
@@ -273,7 +269,6 @@ public void buildAdminSchema(PhysicalDataModel model) {
addTenantSequence(model);
addTenantTable(model);
addTenantKeysTable(model);
- addVariable(model);
// Add a NopObject which acts as a single dependency marker for the procedure objects to depend on
this.allAdminTablesComplete = new NopObject(adminSchemaName, "allAdminTablesComplete");
@@ -282,21 +277,6 @@ public void buildAdminSchema(PhysicalDataModel model) {
model.addObject(allAdminTablesComplete);
}
- /**
- * Add the session variable we need. This variable is used to support multi-tenancy
- * via the row-based access control permission predicate.
- * @param model
- */
- public void addVariable(PhysicalDataModel model) {
- this.sessionVariable = new SessionVariableDef(adminSchemaName, "SV_TENANT_ID", FhirSchemaVersion.V0001.vid());
- this.sessionVariable.addTag(SCHEMA_GROUP_TAG, ADMIN_GROUP);
- variablePrivileges.forEach(p -> p.addToObject(this.sessionVariable));
-
- // Make sure any admin procedures are built after the session variable
- adminProcedureDependencies.add(this.sessionVariable);
- model.addObject(this.sessionVariable);
- }
-
/**
* Create a table to manage the list of tenants. The tenant id is used
* as a partition value for all the other tables
@@ -595,7 +575,6 @@ public void addLogicalResources(PhysicalDataModel pdm) {
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
.addForeignKeyConstraint(FK + tableName + "_RTID", schemaName, RESOURCE_TYPES, RESOURCE_TYPE_ID)
- .enableAccessControl(this.sessionVariable)
.addWiths(addWiths()) // add table tuning
.addMigration(priorVersion -> {
List statements = new ArrayList<>();
@@ -712,7 +691,6 @@ private void addLogicalResourceIdent(PhysicalDataModel pdm) {
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
.addForeignKeyConstraint(FK + tableName + "_RTID", schemaName, RESOURCE_TYPES, RESOURCE_TYPE_ID)
- .enableAccessControl(this.sessionVariable)
.addWiths(addWiths()) // add table tuning
.addMigration(priorVersion -> {
List statements = new ArrayList<>();
@@ -752,7 +730,6 @@ public void addCommonCanonicalValues(PhysicalDataModel pdm) {
.addUniqueIndex(unqCanonicalUrl, URL)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.addMigration(priorVersion -> {
List statements = new ArrayList<>();
// Intentionally NOP
@@ -795,7 +772,6 @@ public Table addLogicalResourceProfiles(PhysicalDataModel pdm) {
.addForeignKeyConstraint(FK + tableName + "_LR", schemaName, LOGICAL_RESOURCES, LOGICAL_RESOURCE_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.addWiths(addWiths()) // New Column for V0017
.addMigration(priorVersion -> {
List statements = new ArrayList<>();
@@ -842,7 +818,6 @@ public Table addLogicalResourceTags(PhysicalDataModel pdm) {
.addForeignKeyConstraint(FK + tableName + "_LR", schemaName, LOGICAL_RESOURCES, LOGICAL_RESOURCE_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.addWiths(addWiths()) // New Column for V0017
.addMigration(priorVersion -> {
List statements = new ArrayList<>();
@@ -886,7 +861,6 @@ public Table addLogicalResourceSecurity(PhysicalDataModel pdm) {
.addForeignKeyConstraint(FK + tableName + "_LR", schemaName, LOGICAL_RESOURCES, LOGICAL_RESOURCE_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.addWiths(addWiths()) // New Column for V0017
.addMigration(priorVersion -> {
List statements = new ArrayList<>();
@@ -940,7 +914,6 @@ public void addResourceChangeLog(PhysicalDataModel pdm) {
.addUniqueIndex("UNQ_" + RESOURCE_CHANGE_LOG + "_CTRTRI", CHANGE_TSTAMP, RESOURCE_TYPE_ID, RESOURCE_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.addWiths(customWiths) // Does not require fillfactor tuning
.addMigration(priorVersion -> {
List statements = new ArrayList<>();
@@ -989,7 +962,6 @@ public Table addLogicalResourceCompartments(PhysicalDataModel pdm) {
.addForeignKeyConstraint(FK + tableName + "_COMP", schemaName, LOGICAL_RESOURCES, COMPARTMENT_LOGICAL_RESOURCE_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.addWiths(addWiths()) // New Column for V0017
.addMigration(priorVersion -> {
List statements = new ArrayList<>();
@@ -1042,7 +1014,6 @@ public Table addResourceStrValues(PhysicalDataModel pdm) {
.addForeignKeyConstraint(FK + STR_VALUES + "_RID", schemaName, LOGICAL_RESOURCES, LOGICAL_RESOURCE_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.addWiths(addWiths()) // New Column for V0017
.setDistributionType(DistributionType.DISTRIBUTED) // V0027 support for sharding
.addMigration(priorVersion -> {
@@ -1088,7 +1059,6 @@ public Table addResourceDateValues(PhysicalDataModel model) {
.addForeignKeyConstraint(FK + tableName + "_R", schemaName, logicalResourcesTable, LOGICAL_RESOURCE_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.addWiths(addWiths()) // New Column for V0017
.setDistributionType(DistributionType.DISTRIBUTED) // V0027 support for sharding
.addMigration(priorVersion -> {
@@ -1141,7 +1111,6 @@ protected void addResourceTypes(PhysicalDataModel model) {
.addPrimaryKey(RESOURCE_TYPES + "_PK", RESOURCE_TYPE_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.setDistributionType(DistributionType.REFERENCE) // V0027 supporting for sharding
.addMigration(priorVersion -> {
List statements = new ArrayList<>();
@@ -1168,13 +1137,8 @@ protected void addResourceTypes(PhysicalDataModel model) {
* @param model
*/
protected void addResourceTables(PhysicalDataModel model, IDatabaseObject... dependency) {
- if (this.sessionVariable == null) {
- throw new IllegalStateException("Session variable must be defined before adding resource tables");
- }
- // The sessionVariable is used to enable access control on every table, so we
- // provide it as a dependency
- FhirResourceTableGroup frg = new FhirResourceTableGroup(model, this.schemaName, false, sessionVariable,
+ FhirResourceTableGroup frg = new FhirResourceTableGroup(model, this.schemaName, false,
this.procedureDependencies, this.fhirTablespace, this.resourceTablePrivileges, addWiths());
for (String resourceType: this.resourceTypes) {
@@ -1225,7 +1189,6 @@ protected void addParameterNames(PhysicalDataModel model) {
.addPrimaryKey(PARAMETER_NAMES + "_PK", PARAMETER_NAME_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.setDistributionType(DistributionType.REFERENCE) // V0027 supporting for sharding
.addMigration(priorVersion -> {
List statements = new ArrayList<>();
@@ -1263,7 +1226,6 @@ protected void addCodeSystems(PhysicalDataModel model) {
.addPrimaryKey(CODE_SYSTEMS + "_PK", CODE_SYSTEM_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.setDistributionType(DistributionType.REFERENCE) // V0027 supporting for sharding
.addMigration(priorVersion -> {
List statements = new ArrayList<>();
@@ -1331,7 +1293,6 @@ public void addCommonTokenValuesStandard(PhysicalDataModel pdm) {
.addForeignKeyConstraint(FK + tableName + "_CSID", schemaName, CODE_SYSTEMS, CODE_SYSTEM_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.setDistributionType(DistributionType.REFERENCE) // V0027 shard using token_value
.addMigration(priorVersion -> {
List statements = new ArrayList<>();
@@ -1367,7 +1328,6 @@ public void addCommonTokenValuesDistributed(PhysicalDataModel pdm) {
.addForeignKeyConstraint(FK + tableName + "_CSID", schemaName, CODE_SYSTEMS, CODE_SYSTEM_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.addMigration(priorVersion -> {
List statements = new ArrayList<>();
// Intentionally a NOP
@@ -1411,7 +1371,6 @@ public Table addResourceTokenRefs(PhysicalDataModel pdm) {
.addForeignKeyConstraint(FK + tableName + "_PNID", schemaName, PARAMETER_NAMES, PARAMETER_NAME_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.addWiths(addWiths()) // table tuning
.addMigration(priorVersion -> {
// Replace the indexes initially defined in the V0006 version with better ones
@@ -1492,7 +1451,6 @@ public void addErasedResources(PhysicalDataModel pdm) {
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
.addForeignKeyConstraint(FK + tableName + "_RTID", schemaName, RESOURCE_TYPES, RESOURCE_TYPE_ID)
- .enableAccessControl(this.sessionVariable)
.addWiths(addWiths()) // add table tuning
.addMigration(priorVersion -> {
List statements = new ArrayList<>();
diff --git a/fhir-persistence-schema/src/main/java/com/ibm/fhir/schema/size/Db2SizeCollector.java b/fhir-persistence-schema/src/main/java/com/ibm/fhir/schema/size/Db2SizeCollector.java
deleted file mode 100644
index f3538056bbd..00000000000
--- a/fhir-persistence-schema/src/main/java/com/ibm/fhir/schema/size/Db2SizeCollector.java
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * (C) Copyright IBM Corp. 2022
- *
- * SPDX-License-Identifier: Apache-2.0
- */
-
-package com.ibm.fhir.schema.size;
-
-import java.sql.Connection;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.util.logging.Level;
-import java.util.logging.Logger;
-
-import com.ibm.fhir.database.utils.api.DataAccessException;
-import com.ibm.fhir.database.utils.api.IDatabaseTranslator;
-import com.ibm.fhir.schema.app.util.SchemaSupport;
-
-/**
- * Collect size metrics for a Db2 database and add them
- * to the model
- */
-public class Db2SizeCollector implements ISizeCollector {
- private static final Logger logger = Logger.getLogger(Db2SizeCollector.class.getName());
-
- // The model to which we add the info pulled from the DB
- private final FHIRDbSizeModel model;
-
- // The tenant name identifying the tenant from FHIR_ADMIN.TENANTS
- private final String tenantName;
-
- /**
- * Public constructor
- * @param model
- */
- public Db2SizeCollector(FHIRDbSizeModel model, String tenantName) {
- this.model = model;
- this.tenantName = tenantName;
- }
-
- @Override
- public void run(String schemaName, Connection connection, IDatabaseTranslator translator) {
- final String usn = schemaName.toUpperCase();
- int dataPartitionId = getDataPartitionId(schemaName, connection, translator);
- collectTableInfo(usn, connection, translator, dataPartitionId);
- collectIndexInfo(usn, connection, translator, dataPartitionId);
- }
-
- private void collectTableInfo(String schemaName, Connection connection, IDatabaseTranslator translator, int dataPartitionId) {
- // Note our schema does not use long or XML data types, so we don't need to include
- // their size. But it is important to target only the data partition associated with
- // the tenant.
- final String SQL = ""
- + "SELECT ati.tabname, tabs.card, "
- + " (data_object_p_size + lob_object_p_size) * 1024 AS table_bytes "
- + " FROM sysibmadm.admintabinfo AS ati, "
- + " syscat.tables tabs "
- + " WHERE ati.tabschema = ? "
- + " AND tabs.tabschema = ati.tabschema "
- + " AND tabs.tabname = ati.tabname "
- + " AND ati.data_partition_id = ? "
- + " AND ati.tabname NOT LIKE 'DRP_%'" // for Db2 don't include the partition drop tables
- ;
-
- logger.info("Collecting Db2 table size info for schema: '" + schemaName + "'");
- SchemaSupport util = new SchemaSupport();
- try (PreparedStatement ps = connection.prepareStatement(SQL)) {
- ps.setString(1, schemaName);
- ps.setInt(2, dataPartitionId);
- ResultSet rs = ps.executeQuery();
- while (rs.next()) {
- final String tableName = rs.getString(1);
- final long rowEstimate = rs.getLong(2);
- final long totalBytes = rs.getLong(3);
-
- // Note resourceType will be null for tables we don't care about
- final String resourceType = util.getResourceTypeFromTableName(tableName);
- if (resourceType != null) {
- final boolean isParamTable = util.isParamTable(tableName);
-
- if (logger.isLoggable(Level.FINE)) {
- logger.fine(String.format("%56s %34s %8d %10d", tableName, resourceType, rowEstimate, totalBytes));
- }
- model.accumulateTableSize(resourceType, tableName, isParamTable, totalBytes, rowEstimate);
- }
- }
- } catch (SQLException x) {
- throw translator.translate(x);
- }
- }
-
- /**
- * Get index info
- * @param schemaName
- * @param connection
- * @param translator
- */
- private void collectIndexInfo(String schemaName, Connection connection, IDatabaseTranslator translator, int dataPartitionId) {
- final String SQL = ""
- + "SELECT tabname AS table_name,"
- + " indname AS index_name,"
- + " index_object_p_size * 1024 as index_size "
- + " FROM TABLE(sysproc.admin_get_index_info('I', ?,'')) "
- + " WHERE tabname NOT LIKE 'DRP_%' " // for Db2 don't include the partition drop tables
- + " AND datapartitionid = ? " // make sure we only get data for the tenant's partition
- ;
-
- logger.info("Collecting Db2 index size info for schema: '" + schemaName + "'");
- SchemaSupport util = new SchemaSupport();
- try (PreparedStatement ps = connection.prepareStatement(SQL)) {
- ps.setString(1, schemaName);
- ps.setInt(2, dataPartitionId);
- ResultSet rs = ps.executeQuery();
- while (rs.next()) {
- final String tableName = rs.getString(1);
- final String indexName = rs.getString(2);
- final long indexBytes = rs.getLong(3);
-
- // Note resourceType will be null for tables we don't care about
- final String resourceType = util.getResourceTypeFromTableName(tableName);
- if (resourceType != null) {
- final boolean isParamTable = util.isParamTable(tableName);
-
- if (logger.isLoggable(Level.FINE)) {
- logger.fine(String.format("%56s %56s %34s %10d", tableName, indexName, resourceType, indexBytes));
- }
- model.accumulateIndexSize(resourceType, tableName, isParamTable, indexName, indexBytes);
- }
- }
- } catch (SQLException x) {
- throw translator.translate(x);
- }
- }
-
- /**
- * Get the DB partition id for the tenant.
- *
- * @param schemaName
- * @param connection
- * @param translator
- * @return
- */
- private int getDataPartitionId(String schemaName, Connection connection, IDatabaseTranslator translator) {
- int dataPartitionId;
- // query expected to return a single row
- final String SQL = ""
- + "SELECT dp.datapartitionid "
- + " FROM syscat.datapartitions dp,"
- + " fhir_admin.tenants t "
- + " WHERE t.tenant_name = ? "
- + " AND dp.tabschema = ? "
- + " AND dp.tabname = 'PARAMETER_NAMES' " // any of our partitioned tables
- + " AND dp.datapartitionname = 'TENANT' || t.mt_id"; // connect our tenant to db2 data partition
-
- try (PreparedStatement ps = connection.prepareStatement(SQL)) {
- ps.setString(1, this.tenantName);
- ps.setString(2, schemaName);
- ResultSet rs = ps.executeQuery();
- if (rs.next()) {
- dataPartitionId = rs.getInt(1);
- } else {
- throw new DataAccessException("Invalid tenant name/schema combination: tenantName='"
- + this.tenantName + "', schemaName='" + schemaName + "'");
- }
-
- if (rs.next()) {
- // Just a safety check to make sure we haven't butchered the join
- throw new DataAccessException("Query returned multiple matches: " + SQL);
- }
- } catch (SQLException x) {
- throw translator.translate(x);
- }
- return dataPartitionId;
- }
-}
diff --git a/fhir-persistence-schema/src/test/java/com/ibm/fhir/schema/derby/OldFhirResourceTableGroup.java b/fhir-persistence-schema/src/test/java/com/ibm/fhir/schema/derby/OldFhirResourceTableGroup.java
index 2b428e4c9f7..db2a33e6e72 100644
--- a/fhir-persistence-schema/src/test/java/com/ibm/fhir/schema/derby/OldFhirResourceTableGroup.java
+++ b/fhir-persistence-schema/src/test/java/com/ibm/fhir/schema/derby/OldFhirResourceTableGroup.java
@@ -1,5 +1,5 @@
/*
- * (C) Copyright IBM Corp. 2019, 2020
+ * (C) Copyright IBM Corp. 2019, 2020, 2022
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -61,7 +61,6 @@
import com.ibm.fhir.database.utils.model.IDatabaseObject;
import com.ibm.fhir.database.utils.model.ObjectGroup;
import com.ibm.fhir.database.utils.model.PhysicalDataModel;
-import com.ibm.fhir.database.utils.model.SessionVariableDef;
import com.ibm.fhir.database.utils.model.Table;
import com.ibm.fhir.database.utils.model.Tablespace;
import com.ibm.fhir.schema.control.FhirSchemaTags;
@@ -81,9 +80,6 @@ public class OldFhirResourceTableGroup {
// The schema we place all of our tables into
private final String schemaName;
- // The session variable we depend on for access control
- private final SessionVariableDef sessionVariable;
-
// All the tables created by this component
@SuppressWarnings("unused")
private final Set procedureDependencies;
@@ -115,11 +111,10 @@ public class OldFhirResourceTableGroup {
/**
* Public constructor
*/
- public OldFhirResourceTableGroup(PhysicalDataModel model, String schemaName, SessionVariableDef sessionVariable,
+ public OldFhirResourceTableGroup(PhysicalDataModel model, String schemaName,
Set procedureDependencies, Tablespace fhirTablespace, Collection privileges) {
this.model = model;
this.schemaName = schemaName;
- this.sessionVariable = sessionVariable;
this.procedureDependencies = procedureDependencies;
this.fhirTablespace = fhirTablespace;
this.resourceTablePrivileges = privileges;
@@ -185,7 +180,6 @@ public void addLogicalResources(List group, String prefix) {
.addForeignKeyConstraint("FK_" + tableName + "_LRID", schemaName, LOGICAL_RESOURCES, LOGICAL_RESOURCE_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
// Add indexes to avoid dead lock issue of derby, and improve Db2 performance
// Derby requires all columns used in where clause to be indexed, otherwise whole table lock will be
// used instead of row lock, which can cause dead lock issue frequently during concurrent accesses.
@@ -246,7 +240,6 @@ public void addResources(List group, String prefix) {
.addPrimaryKey(tableName + "_PK", RESOURCE_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.build(model);
group.add(tbl);
@@ -298,7 +291,6 @@ public void addStrValues(List group, String prefix) {
.addForeignKeyConstraint(FK + tableName + "_RID", schemaName, logicalResourcesTable, LOGICAL_RESOURCE_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.build(model)
;
@@ -346,7 +338,6 @@ public void addTokenValues(List group, String prefix) {
.addForeignKeyConstraint(FK + tableName + "_R", schemaName, logicalResourcesTable, LOGICAL_RESOURCE_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.build(model)
;
@@ -401,7 +392,6 @@ public void addDateValues(List group, String prefix) {
.addForeignKeyConstraint(FK + tableName + "_R", schemaName, logicalResourcesTable, LOGICAL_RESOURCE_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.build(model)
;
@@ -448,7 +438,6 @@ public void addNumberValues(List group, String prefix) {
.addForeignKeyConstraint(FK + tableName + "_RID", schemaName, logicalResourcesTable, LOGICAL_RESOURCE_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.build(model)
;
@@ -497,7 +486,6 @@ public void addLatLngValues(List group, String prefix) {
.addForeignKeyConstraint(FK + tableName + "_RID", schemaName, logicalResourcesTable, LOGICAL_RESOURCE_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.build(model)
;
@@ -560,7 +548,6 @@ public void addQuantityValues(List group, String prefix) {
.addForeignKeyConstraint(FK + tableName + "_R", schemaName, logicalResourcesTable, LOGICAL_RESOURCE_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.build(model)
;
@@ -635,8 +622,7 @@ public void addComposites(List group, String prefix) {
.addForeignKeyConstraint(FK + tableName + "_R", schemaName, logicalResourcesTable, LOGICAL_RESOURCE_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable);
-
+ ;
Table composites = tbl.build(model);
group.add(composites);
model.addTable(composites);
@@ -664,7 +650,6 @@ public void addListLogicalResourceItems(List group, String pref
.addForeignKeyConstraint(FK + LIST_LOGICAL_RESOURCE_ITEMS + "_RTID", schemaName, RESOURCE_TYPES, RESOURCE_TYPE_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.build(model)
;
@@ -697,7 +682,6 @@ public void addPatientCurrentRefs(List group, String prefix) {
.addForeignKeyConstraint(FK + PATIENT_CURRENT_REFS + "_LRID", schemaName, PATIENT_LOGICAL_RESOURCES, LOGICAL_RESOURCE_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.build(model)
;
diff --git a/fhir-persistence-schema/src/test/java/com/ibm/fhir/schema/derby/OldFhirSchemaGenerator.java b/fhir-persistence-schema/src/test/java/com/ibm/fhir/schema/derby/OldFhirSchemaGenerator.java
index 072ea6155d0..f4df278cb49 100644
--- a/fhir-persistence-schema/src/test/java/com/ibm/fhir/schema/derby/OldFhirSchemaGenerator.java
+++ b/fhir-persistence-schema/src/test/java/com/ibm/fhir/schema/derby/OldFhirSchemaGenerator.java
@@ -64,7 +64,6 @@
import com.ibm.fhir.database.utils.model.RowArrayType;
import com.ibm.fhir.database.utils.model.RowTypeBuilder;
import com.ibm.fhir.database.utils.model.Sequence;
-import com.ibm.fhir.database.utils.model.SessionVariableDef;
import com.ibm.fhir.database.utils.model.Table;
import com.ibm.fhir.database.utils.model.Tablespace;
import com.ibm.fhir.schema.control.FhirSchemaConstants;
@@ -89,10 +88,6 @@ public class OldFhirSchemaGenerator {
private final String adminSchemaName;
private static final String ADD_RESOURCE_TEMPLATE = "add_resource_template.sql";
- private static final String ADD_CODE_SYSTEM = "ADD_CODE_SYSTEM";
- private static final String ADD_PARAMETER_NAME = "ADD_PARAMETER_NAME";
- private static final String ADD_RESOURCE_TYPE = "ADD_RESOURCE_TYPE";
- private static final String ADD_ANY_RESOURCE = "ADD_ANY_RESOURCE";
// Tags used to control how we manage privilege grants
public static final String TAG_GRANT = "GRANT";
@@ -114,14 +109,9 @@ public class OldFhirSchemaGenerator {
// Sequence used by the admin tenant tables
private Sequence tenantSequence;
- // The session variable used for row access control. All tables depend on this
- private SessionVariableDef sessionVariable;
-
private Table tenantsTable;
private Table tenantKeysTable;
- private static final String SET_TENANT = "SET_TENANT";
-
// The set of dependencies common to all of our admin stored procedures
private Set adminProcedureDependencies = new HashSet<>();
@@ -230,7 +220,6 @@ public void buildAdminSchema(PhysicalDataModel model) {
addTenantSequence(model);
addTenantTable(model);
addTenantKeysTable(model);
- addVariable(model);
// Add a NopObject which acts as a single dependency marker for the procedure objects to depend on
this.allAdminTablesComplete = new NopObject(adminSchemaName, "allAdminTablesComplete");
@@ -239,21 +228,6 @@ public void buildAdminSchema(PhysicalDataModel model) {
model.addObject(allAdminTablesComplete);
}
- /**
- * Add the session variable we need. This variable is used to support multi-tenancy
- * via the row-based access control permission predicate.
- * @param model
- */
- public void addVariable(PhysicalDataModel model) {
- this.sessionVariable = new SessionVariableDef(adminSchemaName, "SV_TENANT_ID", FhirSchemaVersion.V0001.vid());
- this.sessionVariable.addTag(SCHEMA_GROUP_TAG, ADMIN_GROUP);
- variablePrivileges.forEach(p -> p.addToObject(this.sessionVariable));
-
- // Make sure any admin procedures are built after the session variable
- adminProcedureDependencies.add(this.sessionVariable);
- model.addObject(this.sessionVariable);
- }
-
/**
* Create a table to manage the list of tenants. The tenant id is used
* as a partition value for all the other tables
@@ -378,7 +352,6 @@ public void addLogicalResources(PhysicalDataModel pdm) {
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
.addForeignKeyConstraint(FK + tableName + "_RTID", schemaName, RESOURCE_TYPES, RESOURCE_TYPE_ID)
- .enableAccessControl(this.sessionVariable)
.build(pdm);
// TODO should not need to add as a table and an object. Get the table to add itself?
@@ -413,7 +386,6 @@ public Table addResourceTokenValues(PhysicalDataModel pdm) {
.addForeignKeyConstraint(FK + tableName + "_PN", schemaName, PARAMETER_NAMES, PARAMETER_NAME_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.build(pdm);
// TODO should not need to add as a table and an object. Get the table to add itself?
@@ -448,7 +420,6 @@ public Table addResourceStrValues(PhysicalDataModel pdm) {
.addForeignKeyConstraint(FK + STR_VALUES + "_RID", schemaName, LOGICAL_RESOURCES, LOGICAL_RESOURCE_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.build(pdm);
tbl.addTag(SCHEMA_GROUP_TAG, FHIRDATA_GROUP);
@@ -484,7 +455,6 @@ public Table addResourceDateValues(PhysicalDataModel model) {
.addForeignKeyConstraint(FK + tableName + "_R", schemaName, logicalResourcesTable, LOGICAL_RESOURCE_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.build(model);
tbl.addTag(SCHEMA_GROUP_TAG, FHIRDATA_GROUP);
@@ -520,7 +490,6 @@ protected void addResourceTypes(PhysicalDataModel model) {
.addPrimaryKey(RESOURCE_TYPES + "_PK", RESOURCE_TYPE_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.build(model);
// TODO Table should be immutable, so add support to the Builder for this
@@ -536,13 +505,7 @@ protected void addResourceTypes(PhysicalDataModel model) {
* @param model
*/
protected void addResourceTables(PhysicalDataModel model, IDatabaseObject... dependency) {
- if (this.sessionVariable == null) {
- throw new IllegalStateException("Session variable must be defined before adding resource tables");
- }
-
- // The sessionVariable is used to enable access control on every table, so we
- // provide it as a dependency
- OldFhirResourceTableGroup frg = new OldFhirResourceTableGroup(model, this.schemaName, sessionVariable, this.procedureDependencies, this.fhirTablespace, this.resourceTablePrivileges);
+ OldFhirResourceTableGroup frg = new OldFhirResourceTableGroup(model, this.schemaName, this.procedureDependencies, this.fhirTablespace, this.resourceTablePrivileges);
for (String resourceType: this.resourceTypes) {
ObjectGroup group = frg.addResourceType(resourceType);
group.addTag(SCHEMA_GROUP_TAG, FHIRDATA_GROUP);
@@ -600,7 +563,6 @@ protected void addParameterNames(PhysicalDataModel model) {
.addPrimaryKey(PARAMETER_NAMES + "_PK", PARAMETER_NAME_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.build(model);
this.parameterNamesTable.addTag(SCHEMA_GROUP_TAG, FHIRDATA_GROUP);
@@ -632,7 +594,6 @@ protected void addCodeSystems(PhysicalDataModel model) {
.addPrimaryKey(CODE_SYSTEMS + "_PK", CODE_SYSTEM_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.build(model);
this.codeSystemsTable.addTag(SCHEMA_GROUP_TAG, FHIRDATA_GROUP);
diff --git a/fhir-persistence-schema/src/test/java/com/ibm/fhir/schema/prior/FhirResourceTableGroup455.java b/fhir-persistence-schema/src/test/java/com/ibm/fhir/schema/prior/FhirResourceTableGroup455.java
index 3cbe340a0c0..a9ba73a4778 100644
--- a/fhir-persistence-schema/src/test/java/com/ibm/fhir/schema/prior/FhirResourceTableGroup455.java
+++ b/fhir-persistence-schema/src/test/java/com/ibm/fhir/schema/prior/FhirResourceTableGroup455.java
@@ -1,5 +1,5 @@
/*
- * (C) Copyright IBM Corp. 2019, 2021
+ * (C) Copyright IBM Corp. 2019, 2022
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -79,7 +79,6 @@
import com.ibm.fhir.database.utils.model.IDatabaseObject;
import com.ibm.fhir.database.utils.model.ObjectGroup;
import com.ibm.fhir.database.utils.model.PhysicalDataModel;
-import com.ibm.fhir.database.utils.model.SessionVariableDef;
import com.ibm.fhir.database.utils.model.Table;
import com.ibm.fhir.database.utils.model.Tablespace;
import com.ibm.fhir.database.utils.model.View;
@@ -97,9 +96,6 @@ public class FhirResourceTableGroup455 {
// The schema we place all of our tables into
private final String schemaName;
- // The session variable we depend on for access control
- private final SessionVariableDef sessionVariable;
-
// Build the multitenant variant of the schema
private final boolean multitenant;
@@ -118,9 +114,6 @@ public class FhirResourceTableGroup455 {
private static final String COMP = "COMP";
private static final String ROW_ID = "ROW_ID";
- // suffix for the token view
- private static final String _TOKEN_VALUES_V = "_TOKEN_VALUES_V";
-
/**
* The maximum number of components we can store in the X_COMPOSITES tables.
* Per the current design, each component will add 6 columns to the table, so don't go too high.
@@ -137,12 +130,11 @@ public class FhirResourceTableGroup455 {
/**
* Public constructor
*/
- public FhirResourceTableGroup455(PhysicalDataModel model, String schemaName, boolean multitenant, SessionVariableDef sessionVariable,
+ public FhirResourceTableGroup455(PhysicalDataModel model, String schemaName, boolean multitenant,
Set procedureDependencies, Tablespace fhirTablespace, Collection privileges) {
this.model = model;
this.schemaName = schemaName;
this.multitenant = multitenant;
- this.sessionVariable = sessionVariable;
this.procedureDependencies = procedureDependencies;
this.fhirTablespace = fhirTablespace;
this.resourceTablePrivileges = privileges;
@@ -210,7 +202,6 @@ public void addLogicalResources(List group, String prefix) {
.addForeignKeyConstraint("FK_" + tableName + "_LRID", schemaName, LOGICAL_RESOURCES, LOGICAL_RESOURCE_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
// Add indexes to avoid dead lock issue of derby, and improve Db2 performance
// Derby requires all columns used in where clause to be indexed, otherwise whole table lock will be
// used instead of row lock, which can cause dead lock issue frequently during concurrent accesses.
@@ -271,7 +262,6 @@ public void addResources(List group, String prefix) {
.addPrimaryKey(tableName + "_PK", RESOURCE_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.build(model);
group.add(tbl);
@@ -339,7 +329,6 @@ public void addStrValues(List group, String prefix) {
.addForeignKeyConstraint(FK + tableName + "_RID", schemaName, logicalResourcesTable, LOGICAL_RESOURCE_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.build(model)
;
@@ -392,7 +381,6 @@ public Table addTokenValues(List group, String prefix) {
.addForeignKeyConstraint(FK + tableName + "_R", schemaName, logicalResourcesTable, LOGICAL_RESOURCE_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.build(model)
;
@@ -434,7 +422,6 @@ public Table addResourceTokenRefs(List group, String prefix) {
.setIdentityColumn(ROW_ID, Generated.BY_DEFAULT)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.build(model);
group.add(tbl);
@@ -534,7 +521,6 @@ public void addDateValues(List group, String prefix) {
.addForeignKeyConstraint(FK + tableName + "_R", schemaName, logicalResourcesTable, LOGICAL_RESOURCE_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.addMigration(priorVersion -> {
List statements = new ArrayList<>();
if (priorVersion == 1) {
@@ -598,7 +584,6 @@ public void addNumberValues(List group, String prefix) {
.addForeignKeyConstraint(FK + tableName + "_RID", schemaName, logicalResourcesTable, LOGICAL_RESOURCE_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.addMigration(priorVersion -> {
List statements = new ArrayList<>();
if (priorVersion == 1) {
@@ -665,7 +650,6 @@ public void addLatLngValues(List group, String prefix) {
.addForeignKeyConstraint(FK + tableName + "_RID", schemaName, logicalResourcesTable, LOGICAL_RESOURCE_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.build(model)
;
@@ -733,7 +717,6 @@ public void addQuantityValues(List group, String prefix) {
.addForeignKeyConstraint(FK + tableName + "_R", schemaName, logicalResourcesTable, LOGICAL_RESOURCE_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.build(model)
;
@@ -814,7 +797,7 @@ public void addComposites(List group, String prefix) {
.addForeignKeyConstraint(FK + tableName + "_R", schemaName, logicalResourcesTable, LOGICAL_RESOURCE_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable);
+ ;
tbl.addMigration(priorVersion -> {
List statements = new ArrayList<>();
@@ -887,7 +870,6 @@ public void addListLogicalResourceItems(List group, String pref
.addForeignKeyConstraint(FK + LIST_LOGICAL_RESOURCE_ITEMS + "_RTID", schemaName, RESOURCE_TYPES, RESOURCE_TYPE_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.build(model)
;
@@ -920,7 +902,6 @@ public void addPatientCurrentRefs(List group, String prefix) {
.addForeignKeyConstraint(FK + PATIENT_CURRENT_REFS + "_LRID", schemaName, PATIENT_LOGICAL_RESOURCES, LOGICAL_RESOURCE_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.build(model)
;
diff --git a/fhir-persistence-schema/src/test/java/com/ibm/fhir/schema/prior/FhirSchemaGenerator455.java b/fhir-persistence-schema/src/test/java/com/ibm/fhir/schema/prior/FhirSchemaGenerator455.java
index e775a1d3094..2540f9ef006 100644
--- a/fhir-persistence-schema/src/test/java/com/ibm/fhir/schema/prior/FhirSchemaGenerator455.java
+++ b/fhir-persistence-schema/src/test/java/com/ibm/fhir/schema/prior/FhirSchemaGenerator455.java
@@ -80,9 +80,7 @@
import com.ibm.fhir.database.utils.model.OrderedColumnDef;
import com.ibm.fhir.database.utils.model.PhysicalDataModel;
import com.ibm.fhir.database.utils.model.Privilege;
-import com.ibm.fhir.database.utils.model.ProcedureDef;
import com.ibm.fhir.database.utils.model.Sequence;
-import com.ibm.fhir.database.utils.model.SessionVariableDef;
import com.ibm.fhir.database.utils.model.Table;
import com.ibm.fhir.database.utils.model.Tablespace;
import com.ibm.fhir.schema.control.FhirSchemaConstants;
@@ -117,14 +115,9 @@ public class FhirSchemaGenerator455 {
// Sequence used by the admin tenant tables
private Sequence tenantSequence;
- // The session variable used for row access control. All tables depend on this
- private SessionVariableDef sessionVariable;
-
private Table tenantsTable;
private Table tenantKeysTable;
- private static final String SET_TENANT = "SET_TENANT";
-
// The set of dependencies common to all of our admin stored procedures
private Set adminProcedureDependencies = new HashSet<>();
@@ -237,7 +230,6 @@ public void buildAdminSchema(PhysicalDataModel model) {
addTenantSequence(model);
addTenantTable(model);
addTenantKeysTable(model);
- addVariable(model);
// Add a NopObject which acts as a single dependency marker for the procedure objects to depend on
this.allAdminTablesComplete = new NopObject(adminSchemaName, "allAdminTablesComplete");
@@ -245,37 +237,13 @@ public void buildAdminSchema(PhysicalDataModel model) {
this.allAdminTablesComplete.addTag(SCHEMA_GROUP_TAG, ADMIN_GROUP);
model.addObject(allAdminTablesComplete);
- // The set_tenant procedure can be created after all the admin tables are done
- final String ROOT_DIR = "db2/";
- ProcedureDef setTenant = model.addProcedure(this.adminSchemaName, SET_TENANT, 2,
- () -> SchemaGeneratorUtil.readTemplate(adminSchemaName, adminSchemaName,
- ROOT_DIR + SET_TENANT.toLowerCase() + ".sql", null),
- Arrays.asList(allAdminTablesComplete),
- procedurePrivileges);
- setTenant.addTag(SCHEMA_GROUP_TAG, ADMIN_GROUP);
-
// A final marker which is used to block any FHIR data schema activity until the admin schema is completed
this.adminSchemaComplete = new NopObject(adminSchemaName, "adminSchemaComplete");
- this.adminSchemaComplete.addDependencies(Arrays.asList(setTenant));
+ this.adminSchemaComplete.addDependencies(Arrays.asList(allAdminTablesComplete));
this.adminSchemaComplete.addTag(SCHEMA_GROUP_TAG, ADMIN_GROUP);
model.addObject(adminSchemaComplete);
}
- /**
- * Add the session variable we need. This variable is used to support multi-tenancy
- * via the row-based access control permission predicate.
- * @param model
- */
- public void addVariable(PhysicalDataModel model) {
- this.sessionVariable = new SessionVariableDef(adminSchemaName, "SV_TENANT_ID", FhirSchemaVersion.V0001.vid());
- this.sessionVariable.addTag(SCHEMA_GROUP_TAG, ADMIN_GROUP);
- variablePrivileges.forEach(p -> p.addToObject(this.sessionVariable));
-
- // Make sure any admin procedures are built after the session variable
- adminProcedureDependencies.add(this.sessionVariable);
- model.addObject(this.sessionVariable);
- }
-
/**
* Create a table to manage the list of tenants. The tenant id is used
* as a partition value for all the other tables
@@ -382,44 +350,6 @@ public void buildSchema(PhysicalDataModel model) {
model.addObject(allTablesComplete);
}
- public void buildDatabaseSpecificArtifactsDb2(PhysicalDataModel model) {
- // These procedures just depend on the table they are manipulating and the fhir sequence. But
- // to avoid deadlocks, we only apply them after all the tables are done, so we make all
- // procedures depend on the allTablesComplete marker.
- final String ROOT_DIR = "db2/";
- ProcedureDef pd = model.addProcedure(this.schemaName,
- ADD_CODE_SYSTEM,
- FhirSchemaVersion.V0001.vid(),
- () -> SchemaGeneratorUtil.readTemplate(adminSchemaName, schemaName, ROOT_DIR + ADD_CODE_SYSTEM.toLowerCase() + ".sql", null),
- Arrays.asList(fhirSequence, codeSystemsTable, allTablesComplete),
- procedurePrivileges);
- pd.addTag(SCHEMA_GROUP_TAG, FHIRDATA_GROUP);
-
- pd = model.addProcedure(this.schemaName,
- ADD_PARAMETER_NAME,
- FhirSchemaVersion.V0001.vid(),
- () -> SchemaGeneratorUtil.readTemplate(adminSchemaName, schemaName, ROOT_DIR + ADD_PARAMETER_NAME.toLowerCase() + ".sql", null),
- Arrays.asList(fhirSequence, parameterNamesTable, allTablesComplete),
- procedurePrivileges);
- pd.addTag(SCHEMA_GROUP_TAG, FHIRDATA_GROUP);
-
- pd = model.addProcedure(this.schemaName,
- ADD_RESOURCE_TYPE,
- FhirSchemaVersion.V0001.vid(),
- () -> SchemaGeneratorUtil.readTemplate(adminSchemaName, schemaName, ROOT_DIR + ADD_RESOURCE_TYPE.toLowerCase() + ".sql", null),
- Arrays.asList(fhirSequence, resourceTypesTable, allTablesComplete),
- procedurePrivileges);
- pd.addTag(SCHEMA_GROUP_TAG, FHIRDATA_GROUP);
-
- pd = model.addProcedure(this.schemaName,
- ADD_ANY_RESOURCE,
- FhirSchemaVersion.V0001.vid(),
- () -> SchemaGeneratorUtil.readTemplate(adminSchemaName, schemaName, ROOT_DIR + ADD_ANY_RESOURCE.toLowerCase() + ".sql", null),
- Arrays.asList(fhirSequence, resourceTypesTable, allTablesComplete),
- procedurePrivileges);
- pd.addTag(SCHEMA_GROUP_TAG, FHIRDATA_GROUP);
- }
-
public void buildDatabaseSpecificArtifactsPostgres(PhysicalDataModel model) {
// Add stored procedures/functions for postgresql.
// Have to use different object names from DB2, because the group processing doesn't support 2 objects with the same name.
@@ -482,7 +412,6 @@ public void addLogicalResources(PhysicalDataModel pdm) {
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
.addForeignKeyConstraint(FK + tableName + "_RTID", schemaName, RESOURCE_TYPES, RESOURCE_TYPE_ID)
- .enableAccessControl(this.sessionVariable)
.setVersion(FhirSchemaVersion.V0006.vid())
.addMigration(priorVersion -> {
List statements = new ArrayList<>();
@@ -538,7 +467,6 @@ public Table addResourceTokenValues(PhysicalDataModel pdm) {
.addForeignKeyConstraint(FK + tableName + "_PN", schemaName, PARAMETER_NAMES, PARAMETER_NAME_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.build(pdm);
// TODO should not need to add as a table and an object. Get the table to add itself?
@@ -580,7 +508,6 @@ public Table addLogicalResourceCompartments(PhysicalDataModel pdm) {
.addForeignKeyConstraint(FK + tableName + "_COMP", schemaName, LOGICAL_RESOURCES, COMPARTMENT_LOGICAL_RESOURCE_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.build(pdm);
// TODO should not need to add as a table and an object. Get the table to add itself?
@@ -616,7 +543,6 @@ public Table addResourceStrValues(PhysicalDataModel pdm) {
.addForeignKeyConstraint(FK + STR_VALUES + "_RID", schemaName, LOGICAL_RESOURCES, LOGICAL_RESOURCE_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.build(pdm);
tbl.addTag(SCHEMA_GROUP_TAG, FHIRDATA_GROUP);
@@ -650,7 +576,6 @@ public Table addResourceDateValues(PhysicalDataModel model) {
.addForeignKeyConstraint(FK + tableName + "_R", schemaName, logicalResourcesTable, LOGICAL_RESOURCE_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.addMigration(priorVersion -> {
List statements = new ArrayList<>();
if (priorVersion == 1) {
@@ -695,7 +620,6 @@ protected void addResourceTypes(PhysicalDataModel model) {
.addPrimaryKey(RESOURCE_TYPES + "_PK", RESOURCE_TYPE_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.build(model);
// TODO Table should be immutable, so add support to the Builder for this
@@ -711,13 +635,7 @@ protected void addResourceTypes(PhysicalDataModel model) {
* @param model
*/
protected void addResourceTables(PhysicalDataModel model, IDatabaseObject... dependency) {
- if (this.sessionVariable == null) {
- throw new IllegalStateException("Session variable must be defined before adding resource tables");
- }
-
- // The sessionVariable is used to enable access control on every table, so we
- // provide it as a dependency
- FhirResourceTableGroup455 frg = new FhirResourceTableGroup455(model, this.schemaName, this.multitenant, sessionVariable, this.procedureDependencies, this.fhirTablespace, this.resourceTablePrivileges);
+ FhirResourceTableGroup455 frg = new FhirResourceTableGroup455(model, this.schemaName, this.multitenant, this.procedureDependencies, this.fhirTablespace, this.resourceTablePrivileges);
for (String resourceType: this.resourceTypes) {
ObjectGroup group = frg.addResourceType(resourceType);
group.addTag(SCHEMA_GROUP_TAG, FHIRDATA_GROUP);
@@ -761,7 +679,6 @@ protected void addParameterNames(PhysicalDataModel model) {
.addPrimaryKey(PARAMETER_NAMES + "_PK", PARAMETER_NAME_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.build(model);
this.parameterNamesTable.addTag(SCHEMA_GROUP_TAG, FHIRDATA_GROUP);
@@ -793,7 +710,6 @@ protected void addCodeSystems(PhysicalDataModel model) {
.addPrimaryKey(CODE_SYSTEMS + "_PK", CODE_SYSTEM_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.build(model);
this.codeSystemsTable.addTag(SCHEMA_GROUP_TAG, FHIRDATA_GROUP);
@@ -842,7 +758,6 @@ public void addCommonTokenValues(PhysicalDataModel pdm) {
.addForeignKeyConstraint(FK + tableName + "_CSID", schemaName, CODE_SYSTEMS, CODE_SYSTEM_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.build(pdm);
// TODO should not need to add as a table and an object. Get the table to add itself?
@@ -879,7 +794,6 @@ public Table addResourceTokenRefs(PhysicalDataModel pdm) {
.addForeignKeyConstraint(FK + tableName + "_PNID", schemaName, PARAMETER_NAMES, PARAMETER_NAME_ID)
.setTablespace(fhirTablespace)
.addPrivileges(resourceTablePrivileges)
- .enableAccessControl(this.sessionVariable)
.build(pdm);
// TODO should not need to add as a table and an object. Get the table to add itself?
diff --git a/fhir-remote-index/pom.xml b/fhir-remote-index/pom.xml
index 29db27a4181..7d5a4a50ea4 100644
--- a/fhir-remote-index/pom.xml
+++ b/fhir-remote-index/pom.xml
@@ -67,11 +67,6 @@
derbytools
true
-
- com.ibm.db2
- jcc
- true
-
org.postgresql
postgresql
diff --git a/fhir-server-test/src/test/java/com/ibm/fhir/server/test/db2/LargeResourceTest.java b/fhir-server-test/src/test/java/com/ibm/fhir/server/test/large/LargeResourceTest.java
similarity index 96%
rename from fhir-server-test/src/test/java/com/ibm/fhir/server/test/db2/LargeResourceTest.java
rename to fhir-server-test/src/test/java/com/ibm/fhir/server/test/large/LargeResourceTest.java
index fb7bd657d51..8a6b24bac1f 100644
--- a/fhir-server-test/src/test/java/com/ibm/fhir/server/test/db2/LargeResourceTest.java
+++ b/fhir-server-test/src/test/java/com/ibm/fhir/server/test/large/LargeResourceTest.java
@@ -1,10 +1,10 @@
/*
- * (C) Copyright IBM Corp. 2017, 2021
+ * (C) Copyright IBM Corp. 2017, 2022
*
* SPDX-License-Identifier: Apache-2.0
*/
-package com.ibm.fhir.server.test.db2;
+package com.ibm.fhir.server.test.large;
import static org.testng.AssertJUnit.assertNotNull;
@@ -25,7 +25,7 @@
import com.ibm.fhir.server.test.FHIRServerTestBase;
/**
- * This test checks LARGE files are branching correctly in the Stored Procedure
+ * This test checks LARGE resources are stored correctly
* this test is opaque to any other persistence layer and the history is properly returned.
*/
public class LargeResourceTest extends FHIRServerTestBase {