diff --git a/modules/core/src/main/scala/doobie/free/Aliases.scala b/modules/core/src/main/scala/doobie/free/Aliases.scala index 26d2861d8..66bbc0817 100644 --- a/modules/core/src/main/scala/doobie/free/Aliases.scala +++ b/modules/core/src/main/scala/doobie/free/Aliases.scala @@ -25,34 +25,34 @@ trait Types { } trait Modules { - /** @group Module Aliases - Free API */ lazy val FB = blob - /** @group Module Aliases - Free API */ lazy val FCS = callablestatement - /** @group Module Aliases - Free API */ lazy val FCL = clob - /** @group Module Aliases - Free API */ lazy val FC = connection - /** @group Module Aliases - Free API */ lazy val FDMD = databasemetadata - /** @group Module Aliases - Free API */ lazy val FD = driver - /** @group Module Aliases - Free API */ lazy val FNCL = nclob - /** @group Module Aliases - Free API */ lazy val FPS = preparedstatement - /** @group Module Aliases - Free API */ lazy val FREF = ref - /** @group Module Aliases - Free API */ lazy val FRS = resultset - /** @group Module Aliases - Free API */ lazy val FSD = sqldata - /** @group Module Aliases - Free API */ lazy val FSI = sqlinput - /** @group Module Aliases - Free API */ lazy val FSO = sqloutput - /** @group Module Aliases - Free API */ lazy val FS = statement + /** @group Module Aliases - Free API */ val FB = blob + /** @group Module Aliases - Free API */ val FCS = callablestatement + /** @group Module Aliases - Free API */ val FCL = clob + /** @group Module Aliases - Free API */ val FC = connection + /** @group Module Aliases - Free API */ val FDMD = databasemetadata + /** @group Module Aliases - Free API */ val FD = driver + /** @group Module Aliases - Free API */ val FNCL = nclob + /** @group Module Aliases - Free API */ val FPS = preparedstatement + /** @group Module Aliases - Free API */ val FREF = ref + /** @group Module Aliases - Free API */ val FRS = resultset + /** @group Module Aliases - Free API */ val FSD = sqldata + /** @group Module Aliases - Free API */ val FSI = sqlinput + /** @group Module Aliases - Free API */ val FSO = sqloutput + /** @group Module Aliases - Free API */ val FS = statement } trait Instances { - /** @group Typeclass Instances */ implicit lazy val WeakAsyncBlobIO: WeakAsync[BlobIO] = + /** @group Typeclass Instances */ implicit val WeakAsyncBlobIO: WeakAsync[BlobIO] = blob.WeakAsyncBlobIO - /** @group Typeclass Instances */ implicit lazy val WeakAsyncCallableStatementIO: WeakAsync[CallableStatementIO] = + /** @group Typeclass Instances */ implicit val WeakAsyncCallableStatementIO: WeakAsync[CallableStatementIO] = callablestatement.WeakAsyncCallableStatementIO - /** @group Typeclass Instances */ implicit lazy val WeakAsyncClobIO: WeakAsync[ClobIO] = + /** @group Typeclass Instances */ implicit val WeakAsyncClobIO: WeakAsync[ClobIO] = clob.WeakAsyncClobIO - /** @group Typeclass Instances */ implicit lazy val WeakAsyncConnectionIO: WeakAsync[ConnectionIO] = + /** @group Typeclass Instances */ implicit val WeakAsyncConnectionIO: WeakAsync[ConnectionIO] = connection.WeakAsyncConnectionIO /** @group Typeclass Instances */ implicit def MonoidConnectionIO[A: Monoid]: Monoid[ConnectionIO[A]] = @@ -61,34 +61,34 @@ trait Instances { /** @group Typeclass Instances */ implicit def SemigroupConnectionIO[A: Semigroup]: Semigroup[ConnectionIO[A]] = connection.SemigroupConnectionIO[A] - /** @group Typeclass Instances */ implicit lazy val WeakAsyncDatabaseMetaDataIO: WeakAsync[DatabaseMetaDataIO] = + /** @group Typeclass Instances */ implicit val WeakAsyncDatabaseMetaDataIO: 
WeakAsync[DatabaseMetaDataIO] = databasemetadata.WeakAsyncDatabaseMetaDataIO - /** @group Typeclass Instances */ implicit lazy val WeakAsyncDriverIO: WeakAsync[DriverIO] = + /** @group Typeclass Instances */ implicit val WeakAsyncDriverIO: WeakAsync[DriverIO] = driver.WeakAsyncDriverIO - /** @group Typeclass Instances */ implicit lazy val WeakAsyncNClobIO: WeakAsync[NClobIO] = + /** @group Typeclass Instances */ implicit val WeakAsyncNClobIO: WeakAsync[NClobIO] = nclob.WeakAsyncNClobIO - /** @group Typeclass Instances */ implicit lazy val WeakAsyncPreparedStatementIO: WeakAsync[PreparedStatementIO] = + /** @group Typeclass Instances */ implicit val WeakAsyncPreparedStatementIO: WeakAsync[PreparedStatementIO] = preparedstatement.WeakAsyncPreparedStatementIO - /** @group Typeclass Instances */ implicit lazy val WeakAsyncRefIO: WeakAsync[RefIO] = + /** @group Typeclass Instances */ implicit val WeakAsyncRefIO: WeakAsync[RefIO] = ref.WeakAsyncRefIO - /** @group Typeclass Instances */ implicit lazy val WeakAsyncResultSetIO: WeakAsync[ResultSetIO] = + /** @group Typeclass Instances */ implicit val WeakAsyncResultSetIO: WeakAsync[ResultSetIO] = resultset.WeakAsyncResultSetIO - /** @group Typeclass Instances */ implicit lazy val WeakAsyncSQLDataIO: WeakAsync[SQLDataIO] = + /** @group Typeclass Instances */ implicit val WeakAsyncSQLDataIO: WeakAsync[SQLDataIO] = sqldata.WeakAsyncSQLDataIO - /** @group Typeclass Instances */ implicit lazy val WeakAsyncSQLInputIO: WeakAsync[SQLInputIO] = + /** @group Typeclass Instances */ implicit val WeakAsyncSQLInputIO: WeakAsync[SQLInputIO] = sqlinput.WeakAsyncSQLInputIO - /** @group Typeclass Instances */ implicit lazy val WeakAsyncSQLOutputIO: WeakAsync[SQLOutputIO] = + /** @group Typeclass Instances */ implicit val WeakAsyncSQLOutputIO: WeakAsync[SQLOutputIO] = sqloutput.WeakAsyncSQLOutputIO - /** @group Typeclass Instances */ implicit lazy val WeakAsyncStatementIO: WeakAsync[StatementIO] = + /** @group Typeclass Instances */ implicit val WeakAsyncStatementIO: WeakAsync[StatementIO] = statement.WeakAsyncStatementIO } diff --git a/modules/core/src/main/scala/doobie/hi/Aliases.scala b/modules/core/src/main/scala/doobie/hi/Aliases.scala index a1949913d..02d2f1cc8 100644 --- a/modules/core/src/main/scala/doobie/hi/Aliases.scala +++ b/modules/core/src/main/scala/doobie/hi/Aliases.scala @@ -5,8 +5,8 @@ package doobie.hi trait Modules { - /** @group Module Aliases - High level (safer) API */ lazy val HC = doobie.hi.connection - /** @group Module Aliases - High level (safer) API */ lazy val HS = doobie.hi.statement - /** @group Module Aliases - High level (safer) API */ lazy val HPS = doobie.hi.preparedstatement - /** @group Module Aliases - High level (safer) API */ lazy val HRS = doobie.hi.resultset + /** @group Module Aliases - High level (safer) API */ val HC = doobie.hi.connection + /** @group Module Aliases - High level (safer) API */ val HS = doobie.hi.statement + /** @group Module Aliases - High level (safer) API */ val HPS = doobie.hi.preparedstatement + /** @group Module Aliases - High level (safer) API */ val HRS = doobie.hi.resultset } diff --git a/modules/core/src/main/scala/doobie/hi/connection.scala b/modules/core/src/main/scala/doobie/hi/connection.scala index d4e2d5f5f..03dd8a926 100644 --- a/modules/core/src/main/scala/doobie/hi/connection.scala +++ b/modules/core/src/main/scala/doobie/hi/connection.scala @@ -22,6 +22,15 @@ import doobie.util.stream.repeatEvalChunks import doobie.util.{ Get, Put, Read, Write } import fs2.Stream import fs2.Stream.{ 
eval, bracket } +import doobie.hi.{preparedstatement => IHPS} +import doobie.free.{ + preparedstatement => IFPS, + connection => IFC, + resultset => IFRS, + databasemetadata => IFDMD, + statement => IFS, + callablestatement => IFCS +} import java.sql.{ Savepoint, PreparedStatement, ResultSet } import scala.collection.immutable.Map @@ -36,7 +45,7 @@ object connection { /** @group Lifting */ def delay[A](a: => A): ConnectionIO[A] = - FC.delay(a) + IFC.delay(a) private def liftStream[A: Read]( chunkSize: Int, @@ -46,18 +55,18 @@ object connection { def prepared(ps: PreparedStatement): Stream[ConnectionIO, PreparedStatement] = eval[ConnectionIO, PreparedStatement] { - val fs = FPS.setFetchSize(chunkSize) - FC.embed(ps, fs *> prep).map(_ => ps) + val fs = IFPS.setFetchSize(chunkSize) + IFC.embed(ps, fs *> prep).map(_ => ps) } def unrolled(rs: ResultSet): Stream[ConnectionIO, A] = - repeatEvalChunks(FC.embed(rs, resultset.getNextChunk[A](chunkSize))) + repeatEvalChunks(IFC.embed(rs, resultset.getNextChunk[A](chunkSize))) val preparedStatement: Stream[ConnectionIO, PreparedStatement] = - bracket(create)(FC.embed(_, FPS.close)).flatMap(prepared) + bracket(create)(IFC.embed(_, IFPS.close)).flatMap(prepared) def results(ps: PreparedStatement): Stream[ConnectionIO, A] = - bracket(FC.embed(ps, exec))(FC.embed(_, FRS.close)).flatMap(unrolled) + bracket(IFC.embed(ps, exec))(IFC.embed(_, IFRS.close)).flatMap(unrolled) preparedStatement.flatMap(results) @@ -69,7 +78,7 @@ object connection { * @group Prepared Statements */ def stream[A: Read](sql: String, prep: PreparedStatementIO[Unit], chunkSize: Int): Stream[ConnectionIO, A] = - liftStream(chunkSize, FC.prepareStatement(sql), prep, FPS.executeQuery) + liftStream(chunkSize, IFC.prepareStatement(sql), prep, IFPS.executeQuery) /** * Construct a prepared update statement with the given return columns (and readable destination @@ -79,31 +88,31 @@ object connection { * @group Prepared Statements */ def updateWithGeneratedKeys[A: Read](cols: List[String])(sql: String, prep: PreparedStatementIO[Unit], chunkSize: Int): Stream[ConnectionIO, A] = - liftStream(chunkSize, FC.prepareStatement(sql, cols.toArray), prep, FPS.executeUpdate *> FPS.getGeneratedKeys) + liftStream(chunkSize, IFC.prepareStatement(sql, cols.toArray), prep, IFPS.executeUpdate *> IFPS.getGeneratedKeys) /** @group Prepared Statements */ def updateManyWithGeneratedKeys[F[_]: Foldable, A: Write, B: Read](cols: List[String])(sql: String, prep: PreparedStatementIO[Unit], fa: F[A], chunkSize: Int): Stream[ConnectionIO, B] = - liftStream[B](chunkSize, FC.prepareStatement(sql, cols.toArray), prep, HPS.addBatchesAndExecute(fa) *> FPS.getGeneratedKeys) + liftStream[B](chunkSize, IFC.prepareStatement(sql, cols.toArray), prep, IHPS.addBatchesAndExecute(fa) *> IFPS.getGeneratedKeys) /** @group Transaction Control */ val commit: ConnectionIO[Unit] = - FC.commit + IFC.commit /** * Construct an analysis for the provided `sql` query, given writable parameter type `A` and * readable resultset row type `B`. 
*/ def prepareQueryAnalysis[A: Write, B: Read](sql: String): ConnectionIO[Analysis] = - prepareAnalysis(sql, HPS.getParameterMappings[A], HPS.getColumnMappings[B]) + prepareAnalysis(sql, IHPS.getParameterMappings[A], IHPS.getColumnMappings[B]) def prepareQueryAnalysis0[B: Read](sql: String): ConnectionIO[Analysis] = - prepareAnalysis(sql, FPS.pure(Nil), HPS.getColumnMappings[B]) + prepareAnalysis(sql, IFPS.pure(Nil), IHPS.getColumnMappings[B]) def prepareUpdateAnalysis[A: Write](sql: String): ConnectionIO[Analysis] = - prepareAnalysis(sql, HPS.getParameterMappings[A], FPS.pure(Nil)) + prepareAnalysis(sql, IHPS.getParameterMappings[A], IFPS.pure(Nil)) def prepareUpdateAnalysis0(sql: String): ConnectionIO[Analysis] = - prepareAnalysis(sql, FPS.pure(Nil), FPS.pure(Nil)) + prepareAnalysis(sql, IFPS.pure(Nil), IFPS.pure(Nil)) private def prepareAnalysis( sql: String, @@ -113,7 +122,7 @@ object connection { val mappings = prepareStatement(sql) { (params, columns).tupled } - (HC.getMetaData(FDMD.getDriverName), mappings).mapN { case (driver, (p, c)) => + (getMetaData(IFDMD.getDriverName), mappings).mapN { case (driver, (p, c)) => Analysis(driver, sql, p, c) } } @@ -121,103 +130,103 @@ object connection { /** @group Statements */ def createStatement[A](k: StatementIO[A]): ConnectionIO[A] = - FC.createStatement.bracket(s => FC.embed(s, k))(s => FC.embed(s, FS.close)) + IFC.createStatement.bracket(s => IFC.embed(s, k))(s => IFC.embed(s, IFS.close)) /** @group Statements */ def createStatement[A](rst: ResultSetType, rsc: ResultSetConcurrency)(k: StatementIO[A]): ConnectionIO[A] = - FC.createStatement(rst.toInt, rsc.toInt).bracket(s => FC.embed(s, k))(s => FC.embed(s, FS.close)) + IFC.createStatement(rst.toInt, rsc.toInt).bracket(s => IFC.embed(s, k))(s => IFC.embed(s, IFS.close)) /** @group Statements */ def createStatement[A](rst: ResultSetType, rsc: ResultSetConcurrency, rsh: Holdability)(k: StatementIO[A]): ConnectionIO[A] = - FC.createStatement(rst.toInt, rsc.toInt, rsh.toInt).bracket(s => FC.embed(s, k))(s => FC.embed(s, FS.close)) + IFC.createStatement(rst.toInt, rsc.toInt, rsh.toInt).bracket(s => IFC.embed(s, k))(s => IFC.embed(s, IFS.close)) /** @group Connection Properties */ val getCatalog: ConnectionIO[String] = - FC.getCatalog + IFC.getCatalog /** @group Connection Properties */ def getClientInfo(key: String): ConnectionIO[Option[String]] = - FC.getClientInfo(key).map(Option(_)) + IFC.getClientInfo(key).map(Option(_)) /** @group Connection Properties */ val getClientInfo: ConnectionIO[Map[String, String]] = - FC.getClientInfo.map(propertiesToScala) + IFC.getClientInfo.map(propertiesToScala) /** @group Connection Properties */ val getHoldability: ConnectionIO[Holdability] = - FC.getHoldability.flatMap(Holdability.fromIntF[ConnectionIO]) + IFC.getHoldability.flatMap(Holdability.fromIntF[ConnectionIO]) /** @group Connection Properties */ def getMetaData[A](k: DatabaseMetaDataIO[A]): ConnectionIO[A] = - FC.getMetaData.flatMap(s => FC.embed(s, k)) + IFC.getMetaData.flatMap(s => IFC.embed(s, k)) /** @group Transaction Control */ val getTransactionIsolation: ConnectionIO[TransactionIsolation] = - FC.getTransactionIsolation.flatMap(TransactionIsolation.fromIntF[ConnectionIO]) + IFC.getTransactionIsolation.flatMap(TransactionIsolation.fromIntF[ConnectionIO]) /** @group Connection Properties */ val isReadOnly: ConnectionIO[Boolean] = - FC.isReadOnly + IFC.isReadOnly /** @group Callable Statements */ def prepareCall[A](sql: String, rst: ResultSetType, rsc: ResultSetConcurrency)(k: 
CallableStatementIO[A]): ConnectionIO[A] = - FC.prepareCall(sql, rst.toInt, rsc.toInt).bracket(s => FC.embed(s, k))(s => FC.embed(s, FCS.close)) + IFC.prepareCall(sql, rst.toInt, rsc.toInt).bracket(s => IFC.embed(s, k))(s => IFC.embed(s, IFCS.close)) /** @group Callable Statements */ def prepareCall[A](sql: String)(k: CallableStatementIO[A]): ConnectionIO[A] = - FC.prepareCall(sql).bracket(s => FC.embed(s, k))(s => FC.embed(s, FCS.close)) + IFC.prepareCall(sql).bracket(s => IFC.embed(s, k))(s => IFC.embed(s, IFCS.close)) /** @group Callable Statements */ def prepareCall[A](sql: String, rst: ResultSetType, rsc: ResultSetConcurrency, rsh: Holdability)(k: CallableStatementIO[A]): ConnectionIO[A] = - FC.prepareCall(sql, rst.toInt, rsc.toInt, rsh.toInt).bracket(s => FC.embed(s, k))(s => FC.embed(s, FCS.close)) + IFC.prepareCall(sql, rst.toInt, rsc.toInt, rsh.toInt).bracket(s => IFC.embed(s, k))(s => IFC.embed(s, IFCS.close)) /** @group Prepared Statements */ def prepareStatement[A](sql: String, rst: ResultSetType, rsc: ResultSetConcurrency)(k: PreparedStatementIO[A]): ConnectionIO[A] = - FC.prepareStatement(sql, rst.toInt, rsc.toInt).bracket(s => FC.embed(s, k))(s => FC.embed(s, FPS.close)) + IFC.prepareStatement(sql, rst.toInt, rsc.toInt).bracket(s => IFC.embed(s, k))(s => IFC.embed(s, IFPS.close)) /** @group Prepared Statements */ def prepareStatement[A](sql: String)(k: PreparedStatementIO[A]): ConnectionIO[A] = - FC.prepareStatement(sql).bracket(s => FC.embed(s, k))(s => FC.embed(s, FPS.close)) + IFC.prepareStatement(sql).bracket(s => IFC.embed(s, k))(s => IFC.embed(s, IFPS.close)) /** @group Prepared Statements */ def prepareStatement[A](sql: String, rst: ResultSetType, rsc: ResultSetConcurrency, rsh: Holdability)(k: PreparedStatementIO[A]): ConnectionIO[A] = - FC.prepareStatement(sql, rst.toInt, rsc.toInt, rsh.toInt).bracket(s => FC.embed(s, k))(s => FC.embed(s, FPS.close)) + IFC.prepareStatement(sql, rst.toInt, rsc.toInt, rsh.toInt).bracket(s => IFC.embed(s, k))(s => IFC.embed(s, IFPS.close)) /** @group Prepared Statements */ def prepareStatement[A](sql: String, agk: AutoGeneratedKeys)(k: PreparedStatementIO[A]): ConnectionIO[A] = - FC.prepareStatement(sql, agk.toInt).bracket(s => FC.embed(s, k))(s => FC.embed(s, FPS.close)) + IFC.prepareStatement(sql, agk.toInt).bracket(s => IFC.embed(s, k))(s => IFC.embed(s, IFPS.close)) /** @group Prepared Statements */ def prepareStatementI[A](sql: String, columnIndexes: List[Int])(k: PreparedStatementIO[A]): ConnectionIO[A] = - FC.prepareStatement(sql, columnIndexes.toArray).bracket(s => FC.embed(s, k))(s => FC.embed(s, FPS.close)) + IFC.prepareStatement(sql, columnIndexes.toArray).bracket(s => IFC.embed(s, k))(s => IFC.embed(s, IFPS.close)) /** @group Prepared Statements */ def prepareStatementS[A](sql: String, columnNames: List[String])(k: PreparedStatementIO[A]): ConnectionIO[A] = - FC.prepareStatement(sql, columnNames.toArray).bracket(s => FC.embed(s, k))(s => FC.embed(s, FPS.close)) + IFC.prepareStatement(sql, columnNames.toArray).bracket(s => IFC.embed(s, k))(s => IFC.embed(s, IFPS.close)) /** @group Transaction Control */ def releaseSavepoint(sp: Savepoint): ConnectionIO[Unit] = - FC.releaseSavepoint(sp) + IFC.releaseSavepoint(sp) /** @group Transaction Control */ def rollback(sp: Savepoint): ConnectionIO[Unit] = - FC.rollback(sp) + IFC.rollback(sp) /** @group Transaction Control */ val rollback: ConnectionIO[Unit] = - FC.rollback + IFC.rollback /** @group Connection Properties */ def setCatalog(catalog: String): ConnectionIO[Unit] = - 
FC.setCatalog(catalog) + IFC.setCatalog(catalog) /** @group Connection Properties */ def setClientInfo(key: String, value: String): ConnectionIO[Unit] = - FC.setClientInfo(key, value) + IFC.setClientInfo(key, value) /** @group Connection Properties */ def setClientInfo(info: Map[String, String]): ConnectionIO[Unit] = - FC.setClientInfo { + IFC.setClientInfo { // Java 11 overloads the `putAll` method with Map[*,*] along with the existing Map[Obj,Obj] val ps = new java.util.Properties info.foreach { case (k, v) => @@ -228,29 +237,29 @@ object connection { /** @group Connection Properties */ def setHoldability(h: Holdability): ConnectionIO[Unit] = - FC.setHoldability(h.toInt) + IFC.setHoldability(h.toInt) /** @group Connection Properties */ def setReadOnly(readOnly: Boolean): ConnectionIO[Unit] = - FC.setReadOnly(readOnly) + IFC.setReadOnly(readOnly) /** @group Transaction Control */ val setSavepoint: ConnectionIO[Savepoint] = - FC.setSavepoint + IFC.setSavepoint /** @group Transaction Control */ def setSavepoint(name: String): ConnectionIO[Savepoint] = - FC.setSavepoint(name) + IFC.setSavepoint(name) /** @group Transaction Control */ def setTransactionIsolation(ti: TransactionIsolation): ConnectionIO[Unit] = - FC.setTransactionIsolation(ti.toInt) + IFC.setTransactionIsolation(ti.toInt) // /** // * Compute a map from native type to closest-matching JDBC type. // * @group MetaData // */ // val nativeTypeMap: ConnectionIO[Map[String, JdbcType]] = { - // getMetaData(FDMD.getTypeInfo.flatMap(FDMD.embed(_, HRS.list[(String, JdbcType)].map(_.toMap)))) + // getMetaData(IFDMD.getTypeInfo.flatMap(IFDMD.embed(_, HRS.list[(String, JdbcType)].map(_.toMap)))) // } } diff --git a/modules/core/src/main/scala/doobie/hi/preparedstatement.scala b/modules/core/src/main/scala/doobie/hi/preparedstatement.scala index 1e50d1dcf..db499cade 100644 --- a/modules/core/src/main/scala/doobie/hi/preparedstatement.scala +++ b/modules/core/src/main/scala/doobie/hi/preparedstatement.scala @@ -18,6 +18,10 @@ import doobie.enumerated.ResultSetType import doobie.util.{ Read, Write } import doobie.util.analysis._ import doobie.util.stream.repeatEvalChunks +import doobie.free.{ + preparedstatement => IFPS, + resultset => IFRS +} import doobie.syntax.align._ @@ -43,26 +47,26 @@ object preparedstatement { // fs2 handler, not public private def unrolled[A: Read](rs: java.sql.ResultSet, chunkSize: Int): Stream[PreparedStatementIO, A] = - repeatEvalChunks(FPS.embed(rs, resultset.getNextChunk[A](chunkSize))) + repeatEvalChunks(IFPS.embed(rs, resultset.getNextChunk[A](chunkSize))) /** @group Execution */ def stream[A: Read](chunkSize: Int): Stream[PreparedStatementIO, A] = - bracket(FPS.executeQuery)(FPS.embed(_, FRS.close)).flatMap(unrolled[A](_, chunkSize)) + bracket(IFPS.executeQuery)(IFPS.embed(_, IFRS.close)).flatMap(unrolled[A](_, chunkSize)) /** * Non-strict unit for capturing effects. * @group Constructors (Lifting) */ def delay[A](a: => A): PreparedStatementIO[A] = - FPS.delay(a) + IFPS.delay(a) /** @group Batching */ val executeBatch: PreparedStatementIO[List[Int]] = - FPS.executeBatch.map(_.toIndexedSeq.toList) // intArrayOps does not have `toList` in 2.13 + IFPS.executeBatch.map(_.toIndexedSeq.toList) // intArrayOps does not have `toList` in 2.13 /** @group Batching */ val addBatch: PreparedStatementIO[Unit] = - FPS.addBatch + IFPS.addBatch /** * Add many sets of parameters and execute as a batch update, returning total rows updated. 
Note @@ -84,11 +88,11 @@ object preparedstatement { /** @group Execution */ def executeQuery[A](k: ResultSetIO[A]): PreparedStatementIO[A] = - FPS.executeQuery.bracket(s => FPS.embed(s, k))(s => FPS.embed(s, FRS.close)) + IFPS.executeQuery.bracket(s => IFPS.embed(s, k))(s => IFPS.embed(s, IFRS.close)) /** @group Execution */ val executeUpdate: PreparedStatementIO[Int] = - FPS.executeUpdate + IFPS.executeUpdate /** @group Execution */ def executeUpdateWithUniqueGeneratedKeys[A: Read]: PreparedStatementIO[A] = @@ -96,15 +100,15 @@ object preparedstatement { /** @group Execution */ def executeUpdateWithGeneratedKeys[A: Read](chunkSize: Int): Stream[PreparedStatementIO, A] = - bracket(FPS.executeUpdate *> FPS.getGeneratedKeys)(FPS.embed(_, FRS.close)).flatMap(unrolled[A](_, chunkSize)) + bracket(IFPS.executeUpdate *> IFPS.getGeneratedKeys)(IFPS.embed(_, IFRS.close)).flatMap(unrolled[A](_, chunkSize)) /** * Compute the column `JdbcMeta` list for this `PreparedStatement`. * @group Metadata */ def getColumnJdbcMeta: PreparedStatementIO[List[ColumnMeta]] = - FPS.getMetaData.flatMap { - case null => FPS.pure(Nil) // https://github.com/tpolecat/doobie/issues/262 + IFPS.getMetaData.flatMap { + case null => IFPS.pure(Nil) // https://github.com/tpolecat/doobie/issues/262 case md => (1 to md.getColumnCount).toList.traverse { i => for { @@ -128,15 +132,15 @@ object preparedstatement { /** @group Properties */ val getFetchDirection: PreparedStatementIO[FetchDirection] = - FPS.getFetchDirection.flatMap(FetchDirection.fromIntF[PreparedStatementIO]) + IFPS.getFetchDirection.flatMap(FetchDirection.fromIntF[PreparedStatementIO]) /** @group Properties */ val getFetchSize: PreparedStatementIO[Int] = - FPS.getFetchSize + IFPS.getFetchSize /** @group Results */ def getGeneratedKeys[A](k: ResultSetIO[A]): PreparedStatementIO[A] = - FPS.getGeneratedKeys.bracket(s => FPS.embed(s, k))(s => FPS.embed(s, FRS.close)) + IFPS.getGeneratedKeys.bracket(s => IFPS.embed(s, k))(s => IFPS.embed(s, IFRS.close)) /** @group Results */ def getUniqueGeneratedKeys[A: Read]: PreparedStatementIO[A] = @@ -147,7 +151,7 @@ object preparedstatement { * @group Metadata */ def getParameterJdbcMeta: PreparedStatementIO[List[ParameterMeta]] = - FPS.getParameterMetaData.flatMap { md => + IFPS.getParameterMetaData.flatMap { md => (1 to md.getParameterCount).toList.traverse { i => for { n <- ParameterNullable.fromIntF[PreparedStatementIO](md.isNullable(i)) @@ -170,39 +174,39 @@ object preparedstatement { /** @group Properties */ val getMaxFieldSize: PreparedStatementIO[Int] = - FPS.getMaxFieldSize + IFPS.getMaxFieldSize /** @group Properties */ val getMaxRows: PreparedStatementIO[Int] = - FPS.getMaxRows + IFPS.getMaxRows /** @group MetaData */ val getMetaData: PreparedStatementIO[ResultSetMetaData] = - FPS.getMetaData + IFPS.getMetaData /** @group MetaData */ val getParameterMetaData: PreparedStatementIO[ParameterMetaData] = - FPS.getParameterMetaData + IFPS.getParameterMetaData /** @group Properties */ val getQueryTimeout: PreparedStatementIO[Int] = - FPS.getQueryTimeout + IFPS.getQueryTimeout /** @group Properties */ val getResultSetConcurrency: PreparedStatementIO[ResultSetConcurrency] = - FPS.getResultSetConcurrency.flatMap(ResultSetConcurrency.fromIntF[PreparedStatementIO]) + IFPS.getResultSetConcurrency.flatMap(ResultSetConcurrency.fromIntF[PreparedStatementIO]) /** @group Properties */ val getResultSetHoldability: PreparedStatementIO[Holdability] = - FPS.getResultSetHoldability.flatMap(Holdability.fromIntF[PreparedStatementIO]) + 
IFPS.getResultSetHoldability.flatMap(Holdability.fromIntF[PreparedStatementIO]) /** @group Properties */ val getResultSetType: PreparedStatementIO[ResultSetType] = - FPS.getResultSetType.flatMap(ResultSetType.fromIntF[PreparedStatementIO]) + IFPS.getResultSetType.flatMap(ResultSetType.fromIntF[PreparedStatementIO]) /** @group Results */ val getWarnings: PreparedStatementIO[SQLWarning] = - FPS.getWarnings + IFPS.getWarnings /** * Set the given writable value, starting at column `n`. @@ -220,30 +224,30 @@ object preparedstatement { /** @group Properties */ def setCursorName(name: String): PreparedStatementIO[Unit] = - FPS.setCursorName(name) + IFPS.setCursorName(name) /** @group Properties */ def setEscapeProcessing(a: Boolean): PreparedStatementIO[Unit] = - FPS.setEscapeProcessing(a) + IFPS.setEscapeProcessing(a) /** @group Properties */ def setFetchDirection(fd: FetchDirection): PreparedStatementIO[Unit] = - FPS.setFetchDirection(fd.toInt) + IFPS.setFetchDirection(fd.toInt) /** @group Properties */ def setFetchSize(n: Int): PreparedStatementIO[Unit] = - FPS.setFetchSize(n) + IFPS.setFetchSize(n) /** @group Properties */ def setMaxFieldSize(n: Int): PreparedStatementIO[Unit] = - FPS.setMaxFieldSize(n) + IFPS.setMaxFieldSize(n) /** @group Properties */ def setMaxRows(n: Int): PreparedStatementIO[Unit] = - FPS.setMaxRows(n) + IFPS.setMaxRows(n) /** @group Properties */ def setQueryTimeout(a: Int): PreparedStatementIO[Unit] = - FPS.setQueryTimeout(a) + IFPS.setQueryTimeout(a) } diff --git a/modules/core/src/main/scala/doobie/hi/resultset.scala b/modules/core/src/main/scala/doobie/hi/resultset.scala index 124c82c16..06681ff41 100644 --- a/modules/core/src/main/scala/doobie/hi/resultset.scala +++ b/modules/core/src/main/scala/doobie/hi/resultset.scala @@ -14,6 +14,9 @@ import doobie.util.{ Read, Write } import doobie.util.compat.FactoryCompat import doobie.util.invariant._ import doobie.util.stream.repeatEvalChunks +import doobie.free.{ + resultset => IFRS +} import fs2.Stream @@ -32,35 +35,35 @@ object resultset { * @group Constructors (Lifting) */ def delay[A](a: => A): ResultSetIO[A] = - FRS.delay(a) + IFRS.delay(a) /** @group Cursor Control */ def absolute(row: Int): ResultSetIO[Boolean] = - FRS.absolute(row) + IFRS.absolute(row) /** @group Cursor Control */ val afterLast: ResultSetIO[Unit] = - FRS.afterLast + IFRS.afterLast /** @group Cursor Control */ val beforeFirst: ResultSetIO[Unit] = - FRS.beforeFirst + IFRS.beforeFirst /** @group Updating */ val cancelRowUpdates: ResultSetIO[Unit] = - FRS.cancelRowUpdates + IFRS.cancelRowUpdates /** @group Warnings */ val clearWarnings: ResultSetIO[Unit] = - FRS.clearWarnings + IFRS.clearWarnings /** @group Updating */ val deleteRow: ResultSetIO[Unit] = - FRS.deleteRow + IFRS.deleteRow /** @group Cursor Control */ val first: ResultSetIO[Boolean] = - FRS.first + IFRS.first /** * Read a value of type `A` starting at column `n`. 
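The hi-level modules nest by embedding, which is what the bracket/embed pairs above spell out: a connection program brackets a PreparedStatement and embeds a PreparedStatementIO, which in turn brackets a ResultSet and embeds a ResultSetIO. A minimal sketch of that layering from the caller's side, assuming a hypothetical `person` table (the public HC/HPS/HRS aliases still resolve to these modules, so call sites are unaffected by the internal renames):

  import doobie._, doobie.implicits._

  // connection -> preparedstatement -> resultset; each inner program is
  // embedded in the outer one, and each resource is closed by its bracket.
  // `person` is a hypothetical table used only for illustration.
  val count: ConnectionIO[Int] =
    HC.prepareStatement("SELECT count(*) FROM person")(
      HPS.executeQuery(HRS.getUnique[Int])
    )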
@@ -84,7 +87,7 @@ object resultset { */ @SuppressWarnings(Array("org.wartremover.warts.While", "org.wartremover.warts.NonUnitStatements")) def build[F[_], A](implicit F: FactoryCompat[A, F[A]], A: Read[A]): ResultSetIO[F[A]] = - FRS.raw { rs => + IFRS.raw { rs => val b = F.newBuilder while (rs.next) b += A.unsafeGet(rs, 1) @@ -97,7 +100,7 @@ object resultset { * @group Results */ def buildPair[F[_, _], A, B](implicit F: FactoryCompat[(A, B), F[A, B]], A: Read[(A, B)]): ResultSetIO[F[A, B]] = - FRS.raw { rs => + IFRS.raw { rs => val b = F.newBuilder while (rs.next) b += A.unsafeGet(rs, 1) @@ -113,7 +116,7 @@ object resultset { */ @SuppressWarnings(Array("org.wartremover.warts.While", "org.wartremover.warts.NonUnitStatements")) def buildMap[F[_], A, B](f: A => B)(implicit F: FactoryCompat[B, F[B]], A: Read[A]): ResultSetIO[F[B]] = - FRS.raw { rs => + IFRS.raw { rs => val b = F.newBuilder while (rs.next) b += f(A.unsafeGet(rs, 1)) @@ -185,7 +188,7 @@ object resultset { */ @SuppressWarnings(Array("org.wartremover.warts.Var", "org.wartremover.warts.While", "org.wartremover.warts.NonUnitStatements")) def getNextChunkV[A](chunkSize: Int)(implicit A: Read[A]): ResultSetIO[Vector[A]] = - FRS.raw { rs => + IFRS.raw { rs => var n = chunkSize val b = Vector.newBuilder[A] while (n > 0 && rs.next) { @@ -202,9 +205,9 @@ object resultset { */ def getUnique[A: Read]: ResultSetIO[A] = (getNext[A], next).tupled.flatMap { - case (Some(a), false) => FRS.delay(a) - case (Some(_), true) => FRS.raiseError(UnexpectedContinuation) - case (None, _) => FRS.raiseError(UnexpectedEnd) + case (Some(a), false) => IFRS.delay(a) + case (Some(_), true) => IFRS.raiseError(UnexpectedContinuation) + case (None, _) => IFRS.raiseError(UnexpectedEnd) } /** @@ -214,9 +217,9 @@ object resultset { */ def getOption[A: Read]: ResultSetIO[Option[A]] = (getNext[A], next).tupled.flatMap { - case (a @ Some(_), false) => FRS.delay(a) - case (Some(_), true) => FRS.raiseError(UnexpectedContinuation) - case (None, _) => FRS.delay(None) + case (a @ Some(_), false) => IFRS.delay(a) + case (Some(_), true) => IFRS.raiseError(UnexpectedContinuation) + case (None, _) => IFRS.delay(None) } /** @@ -226,8 +229,8 @@ object resultset { */ def nel[A: Read]: ResultSetIO[NonEmptyList[A]] = (getNext[A], list).tupled.flatMap { - case (Some(a), as) => FRS.delay(NonEmptyList(a, as)) - case (None, _) => FRS.raiseError(UnexpectedEnd) + case (Some(a), as) => IFRS.delay(NonEmptyList(a, as)) + case (None, _) => IFRS.raiseError(UnexpectedEnd) } /** @@ -240,94 +243,94 @@ object resultset { /** @group Properties */ val getFetchDirection: ResultSetIO[FetchDirection] = - FRS.getFetchDirection.flatMap(FetchDirection.fromIntF[ResultSetIO]) + IFRS.getFetchDirection.flatMap(FetchDirection.fromIntF[ResultSetIO]) /** @group Properties */ val getFetchSize: ResultSetIO[Int] = - FRS.getFetchSize + IFRS.getFetchSize /** @group Properties */ val getHoldability: ResultSetIO[Holdability] = - FRS.getHoldability.flatMap(Holdability.fromIntF[ResultSetIO]) + IFRS.getHoldability.flatMap(Holdability.fromIntF[ResultSetIO]) /** @group Properties */ val getMetaData: ResultSetIO[ResultSetMetaData] = - FRS.getMetaData + IFRS.getMetaData /** @group Cursor Control */ val getRow: ResultSetIO[Int] = - FRS.getRow + IFRS.getRow /** @group Warnings */ val getWarnings: ResultSetIO[Option[SQLWarning]] = - FRS.getWarnings.map(Option(_)) + IFRS.getWarnings.map(Option(_)) /** @group Updating */ val insertRow: ResultSetIO[Unit] = - FRS.insertRow + IFRS.insertRow /** @group Cursor Control */ val isAfterLast: 
ResultSetIO[Boolean] = - FRS.isAfterLast + IFRS.isAfterLast /** @group Cursor Control */ val isBeforeFirst: ResultSetIO[Boolean] = - FRS.isBeforeFirst + IFRS.isBeforeFirst /** @group Cursor Control */ val isFirst: ResultSetIO[Boolean] = - FRS.isFirst + IFRS.isFirst /** @group Cursor Control */ val isLast: ResultSetIO[Boolean] = - FRS.isLast + IFRS.isLast /** @group Cursor Control */ val last: ResultSetIO[Boolean] = - FRS.last + IFRS.last /** @group Cursor Control */ val moveToCurrentRow: ResultSetIO[Unit] = - FRS.moveToCurrentRow + IFRS.moveToCurrentRow /** @group Cursor Control */ val moveToInsertRow: ResultSetIO[Unit] = - FRS.moveToInsertRow + IFRS.moveToInsertRow /** @group Cursor Control */ val next: ResultSetIO[Boolean] = - FRS.next + IFRS.next /** @group Cursor Control */ val previous: ResultSetIO[Boolean] = - FRS.previous + IFRS.previous /** @group Cursor Control */ val refreshRow: ResultSetIO[Unit] = - FRS.refreshRow + IFRS.refreshRow /** @group Cursor Control */ def relative(n: Int): ResultSetIO[Boolean] = - FRS.relative(n) + IFRS.relative(n) /** @group Cursor Control */ val rowDeleted: ResultSetIO[Boolean] = - FRS.rowDeleted + IFRS.rowDeleted /** @group Cursor Control */ val rowInserted: ResultSetIO[Boolean] = - FRS.rowInserted + IFRS.rowInserted /** @group Cursor Control */ val rowUpdated: ResultSetIO[Boolean] = - FRS.rowUpdated + IFRS.rowUpdated /** @group Properties */ def setFetchDirection(fd: FetchDirection): ResultSetIO[Unit] = - FRS.setFetchDirection(fd.toInt) + IFRS.setFetchDirection(fd.toInt) /** @group Properties */ def setFetchSize(n: Int): ResultSetIO[Unit] = - FRS.setFetchSize(n) + IFRS.setFetchSize(n) } diff --git a/modules/core/src/main/scala/doobie/hi/statement.scala b/modules/core/src/main/scala/doobie/hi/statement.scala index 5497cb11d..7bb252963 100644 --- a/modules/core/src/main/scala/doobie/hi/statement.scala +++ b/modules/core/src/main/scala/doobie/hi/statement.scala @@ -10,6 +10,10 @@ import doobie.enumerated.FetchDirection import doobie.enumerated.ResultSetConcurrency import doobie.enumerated.ResultSetType import doobie.implicits._ +import doobie.free.{ + statement => IFS, + resultset => IFRS +} import java.sql.SQLWarning @@ -23,43 +27,43 @@ object statement { /** @group Batching */ def addBatch(sql: String): StatementIO[Unit] = - FS.addBatch(sql) + IFS.addBatch(sql) /** @group Batching */ val clearBatch: StatementIO[Unit] = - FS.clearBatch + IFS.clearBatch /** @group Execution */ val executeBatch: StatementIO[List[Int]] = - FS.executeBatch.map(_.toIndexedSeq.toList) // intArrayOps does not have `toList` in 2.13 + IFS.executeBatch.map(_.toIndexedSeq.toList) // intArrayOps does not have `toList` in 2.13 /** @group Execution */ def executeQuery[A](sql: String)(k: ResultSetIO[A]): StatementIO[A] = - FS.executeQuery(sql).bracket(s => FS.embed(s, k))(s => FS.embed(s, FRS.close)) + IFS.executeQuery(sql).bracket(s => IFS.embed(s, k))(s => IFS.embed(s, IFRS.close)) /** @group Execution */ def executeUpdate(sql: String): StatementIO[Int] = - FS.executeUpdate(sql) + IFS.executeUpdate(sql) /** @group Properties */ val getFetchDirection: StatementIO[FetchDirection] = - FS.getFetchDirection.flatMap(FetchDirection.fromIntF[StatementIO]) + IFS.getFetchDirection.flatMap(FetchDirection.fromIntF[StatementIO]) /** @group Properties */ val getFetchSize: StatementIO[Int] = - FS.getFetchSize + IFS.getFetchSize /** @group Results */ def getGeneratedKeys[A](k: ResultSetIO[A]): StatementIO[A] = - FS.getGeneratedKeys.bracket(s => FS.embed(s, k))(s => FS.embed(s, FRS.close)) + 
IFS.getGeneratedKeys.bracket(s => IFS.embed(s, k))(s => IFS.embed(s, IFRS.close)) /** @group Properties */ val getMaxFieldSize: StatementIO[Int] = - FS.getMaxFieldSize + IFS.getMaxFieldSize /** @group Properties */ val getMaxRows: StatementIO[Int] = - FS.getMaxRows + IFS.getMaxRows // /** @group Batching */ // def getMoreResults(a: Int): StatementIO[Boolean] = @@ -67,62 +71,62 @@ object statement { /** @group Batching */ val getMoreResults: StatementIO[Boolean] = - FS.getMoreResults + IFS.getMoreResults /** @group Properties */ val getQueryTimeout: StatementIO[Int] = - FS.getQueryTimeout + IFS.getQueryTimeout /** @group Batching */ def getResultSet[A](k: ResultSetIO[A]): StatementIO[A] = - FS.getResultSet.flatMap(s => FS.embed(s, k)) + IFS.getResultSet.flatMap(s => IFS.embed(s, k)) /** @group Properties */ val getResultSetConcurrency: StatementIO[ResultSetConcurrency] = - FS.getResultSetConcurrency.flatMap(ResultSetConcurrency.fromIntF[StatementIO]) + IFS.getResultSetConcurrency.flatMap(ResultSetConcurrency.fromIntF[StatementIO]) /** @group Properties */ val getResultSetHoldability: StatementIO[Holdability] = - FS.getResultSetHoldability.flatMap(Holdability.fromIntF[StatementIO]) + IFS.getResultSetHoldability.flatMap(Holdability.fromIntF[StatementIO]) /** @group Properties */ val getResultSetType: StatementIO[ResultSetType] = - FS.getResultSetType.flatMap(ResultSetType.fromIntF[StatementIO]) + IFS.getResultSetType.flatMap(ResultSetType.fromIntF[StatementIO]) /** @group Results */ val getUpdateCount: StatementIO[Int] = - FS.getUpdateCount + IFS.getUpdateCount /** @group Results */ val getWarnings: StatementIO[SQLWarning] = - FS.getWarnings + IFS.getWarnings /** @group Properties */ def setCursorName(name: String): StatementIO[Unit] = - FS.setCursorName(name) + IFS.setCursorName(name) /** @group Properties */ def setEscapeProcessing(a: Boolean): StatementIO[Unit] = - FS.setEscapeProcessing(a) + IFS.setEscapeProcessing(a) /** @group Properties */ def setFetchDirection(fd: FetchDirection): StatementIO[Unit] = - FS.setFetchDirection(fd.toInt) + IFS.setFetchDirection(fd.toInt) /** @group Properties */ def setFetchSize(n: Int): StatementIO[Unit] = - FS.setFetchSize(n) + IFS.setFetchSize(n) /** @group Properties */ def setMaxFieldSize(n: Int): StatementIO[Unit] = - FS.setMaxFieldSize(n) + IFS.setMaxFieldSize(n) /** @group Properties */ def setMaxRows(n: Int): StatementIO[Unit] = - FS.setMaxRows(n) + IFS.setMaxRows(n) /** @group Properties */ def setQueryTimeout(a: Int): StatementIO[Unit] = - FS.setQueryTimeout(a) + IFS.setQueryTimeout(a) } diff --git a/modules/core/src/main/scala/doobie/syntax/connectionio.scala b/modules/core/src/main/scala/doobie/syntax/connectionio.scala index d1dc383e4..af7232f45 100644 --- a/modules/core/src/main/scala/doobie/syntax/connectionio.scala +++ b/modules/core/src/main/scala/doobie/syntax/connectionio.scala @@ -7,9 +7,10 @@ package doobie.syntax import cats.data.{EitherT, Kleisli, OptionT} import cats.effect.kernel.MonadCancelThrow import cats.syntax.functor._ -import doobie.{ ConnectionIO, HC } +import doobie.ConnectionIO import doobie.implicits._ import doobie.util.transactor.Transactor +import doobie.hi.{connection => IHC} class ConnectionIOOps[A](ma: ConnectionIO[A]) { def transact[M[_]: MonadCancelThrow](xa: Transactor[M]): M[A] = xa.trans.apply(ma) @@ -18,14 +19,14 @@ class ConnectionIOOps[A](ma: ConnectionIO[A]) { class OptionTConnectionIOOps[A](ma: OptionT[ConnectionIO, A]) { def transact[M[_]: MonadCancelThrow](xa: Transactor[M]): OptionT[M, A] = OptionT( - 
xa.trans.apply(ma.orElseF(HC.rollback.as(None)).value) + xa.trans.apply(ma.orElseF(IHC.rollback.as(None)).value) ) } class EitherTConnectionIOOps[E, A](ma: EitherT[ConnectionIO, E, A]) { def transact[M[_]: MonadCancelThrow](xa: Transactor[M]): EitherT[M, E, A] = EitherT( - xa.trans.apply(ma.leftSemiflatMap(HC.rollback.as(_)).value) + xa.trans.apply(ma.leftSemiflatMap(IHC.rollback.as(_)).value) ) } diff --git a/modules/core/src/main/scala/doobie/util/fragment.scala b/modules/core/src/main/scala/doobie/util/fragment.scala index 72321942a..a7cf25240 100644 --- a/modules/core/src/main/scala/doobie/util/fragment.scala +++ b/modules/core/src/main/scala/doobie/util/fragment.scala @@ -11,6 +11,7 @@ import cats.syntax.all._ import doobie._, doobie.implicits._ import doobie.enumerated.Nullability._ import doobie.util.pos.Pos +import doobie.hi.{connection => IHC} import java.sql.{ PreparedStatement, ResultSet } import scala.Predef.{ augmentString, implicitly } @@ -79,7 +80,7 @@ object fragment { * further handling delegated to the provided program. */ def execWith[B](fa: PreparedStatementIO[B]): ConnectionIO[B] = - HC.prepareStatement(sql)(write.set(1, elems) *> fa) + IHC.prepareStatement(sql)(write.set(1, elems) *> fa) /** Concatenate this fragment with another, yielding a larger fragment. */ def ++(fb: Fragment): Fragment = diff --git a/modules/core/src/main/scala/doobie/util/query.scala b/modules/core/src/main/scala/doobie/util/query.scala index 66161e6aa..e1d65a82e 100644 --- a/modules/core/src/main/scala/doobie/util/query.scala +++ b/modules/core/src/main/scala/doobie/util/query.scala @@ -15,6 +15,16 @@ import doobie.util.analysis.Analysis import doobie.util.compat.FactoryCompat import doobie.util.log.{ LogEvent, ExecFailure, ProcessingFailure, Success } import doobie.util.pos.Pos +import doobie.free.{ + preparedstatement => IFPS, + resultset => IFRS, +} +import doobie.hi.{ + connection => IHC, + preparedstatement => IHPS, + resultset => IHRS +} + import fs2.Stream import scala.Predef.longWrapper import scala.concurrent.duration.{ FiniteDuration, NANOSECONDS } @@ -37,24 +47,24 @@ object query { protected implicit val read: Read[B] private val now: PreparedStatementIO[Long] = - FPS.delay(System.nanoTime) + IFPS.delay(System.nanoTime) - // Equivalent to HPS.executeQuery(k) but with logging + // Equivalent to IHPS.executeQuery(k) but with logging private def executeQuery[T](a: A, k: ResultSetIO[T]): PreparedStatementIO[T] = { val args = write.toList(a) def diff(a: Long, b: Long) = FiniteDuration((a - b).abs, NANOSECONDS) def log(e: LogEvent): PreparedStatementIO[Unit] = for { - _ <- FPS.performLogging(e) + _ <- IFPS.performLogging(e) } yield () for { t0 <- now - eet <- FPS.executeQuery.flatMap(rs => (for { + eet <- IFPS.executeQuery.flatMap(rs => (for { t1 <- now - et <- FPS.embed(rs, k).attempt + et <- IFPS.embed(rs, k).attempt t2 <- now - } yield (t1, et, t2)).guarantee(FPS.embed(rs, FRS.close))).attempt + } yield (t1, et, t2)).guarantee(IFPS.embed(rs, IFRS.close))).attempt tuple <- eet.liftTo[PreparedStatementIO].onError { case e => for { t1 <- now @@ -93,14 +103,14 @@ object query { * @group Diagnostics */ def analysis: ConnectionIO[Analysis] = - HC.prepareQueryAnalysis[A, B](sql) + IHC.prepareQueryAnalysis[A, B](sql) /** * Program to construct an analysis of this query's SQL statement and result set column types. 
* @group Diagnostics */ def outputAnalysis: ConnectionIO[Analysis] = - HC.prepareQueryAnalysis0[B](sql) + IHC.prepareQueryAnalysis0[B](sql) /** * Program to construct an inspection of the query. Given arguments `a`, calls `f` with the SQL @@ -110,7 +120,7 @@ object query { * @group Diagnostics */ def inspect[R](a: A)(f: (String, PreparedStatementIO[Unit]) => ConnectionIO[R]): ConnectionIO[R] = - f(sql, HPS.set(a)) + f(sql, IHPS.set(a)) /** * Apply the argument `a` to construct a `Stream` with the given chunking factor, with @@ -119,7 +129,7 @@ object query { * @group Results */ def streamWithChunkSize(a: A, chunkSize: Int): Stream[ConnectionIO, B] = - HC.stream[B](sql, HPS.set(a), chunkSize) + IHC.stream[B](sql, IHPS.set(a), chunkSize) /** * Apply the argument `a` to construct a `Stream` with `DefaultChunkSize`, with @@ -137,7 +147,7 @@ object query { * @group Results */ def to[F[_]](a: A)(implicit f: FactoryCompat[B, F[B]]): ConnectionIO[F[B]] = - HC.prepareStatement(sql)(HPS.set(a) *> executeQuery(a, HRS.build[F,B])) + IHC.prepareStatement(sql)(IHPS.set(a) *> executeQuery(a, IHRS.build[F,B])) /** * Apply the argument `a` to construct a program in @@ -147,7 +157,7 @@ object query { * @group Results */ def toMap[K, V](a: A)(implicit ev: B =:= (K, V), f: FactoryCompat[(K, V), Map[K, V]]): ConnectionIO[Map[K, V]] = - HC.prepareStatement(sql)(HPS.set(a) *> executeQuery(a, HRS.buildPair[Map, K, V](f, read.map(ev)))) + IHC.prepareStatement(sql)(IHPS.set(a) *> executeQuery(a, IHRS.buildPair[Map, K, V](f, read.map(ev)))) /** * Apply the argument `a` to construct a program in @@ -156,7 +166,7 @@ object query { * @group Results */ def accumulate[F[_]: Alternative](a: A): ConnectionIO[F[B]] = - HC.prepareStatement(sql)(HPS.set(a) *> executeQuery(a, HRS.accumulate[F, B])) + IHC.prepareStatement(sql)(IHPS.set(a) *> executeQuery(a, IHRS.accumulate[F, B])) /** * Apply the argument `a` to construct a program in @@ -165,7 +175,7 @@ object query { * @group Results */ def unique(a: A): ConnectionIO[B] = - HC.prepareStatement(sql)(HPS.set(a) *> executeQuery(a, HRS.getUnique[B])) + IHC.prepareStatement(sql)(IHPS.set(a) *> executeQuery(a, IHRS.getUnique[B])) /** * Apply the argument `a` to construct a program in @@ -174,7 +184,7 @@ object query { * @group Results */ def option(a: A): ConnectionIO[Option[B]] = - HC.prepareStatement(sql)(HPS.set(a) *> executeQuery(a, HRS.getOption[B])) + IHC.prepareStatement(sql)(IHPS.set(a) *> executeQuery(a, IHRS.getOption[B])) /** * Apply the argument `a` to construct a program in @@ -183,7 +193,7 @@ object query { * @group Results */ def nel(a: A): ConnectionIO[NonEmptyList[B]] = - HC.prepareStatement(sql)(HPS.set(a) *> executeQuery(a, HRS.nel[B])) + IHC.prepareStatement(sql)(IHPS.set(a) *> executeQuery(a, IHRS.nel[B])) /** @group Transformations */ def map[C](f: B => C): Query[A, C] = diff --git a/modules/core/src/main/scala/doobie/util/read.scala b/modules/core/src/main/scala/doobie/util/read.scala index 0f99b6d7c..a94c88e67 100644 --- a/modules/core/src/main/scala/doobie/util/read.scala +++ b/modules/core/src/main/scala/doobie/util/read.scala @@ -5,10 +5,13 @@ package doobie.util import cats._ -import doobie.free.{ FRS, ResultSetIO } +import doobie.free.{ ResultSetIO } import doobie.enumerated.Nullability._ import java.sql.ResultSet import scala.annotation.implicitNotFound +import doobie.free.{ + resultset => IFRS +} @implicitNotFound(""" Cannot find or construct a Read instance for type: @@ -54,7 +57,7 @@ sealed abstract class Read[A]( new Read(ff.gets ++ gets, (rs, n) 
=> ff.unsafeGet(rs, n)(unsafeGet(rs, n + ff.length))) {} def get(n: Int): ResultSetIO[A] = - FRS.raw(unsafeGet(_, n)) + IFRS.raw(unsafeGet(_, n)) } diff --git a/modules/core/src/main/scala/doobie/util/transactor.scala b/modules/core/src/main/scala/doobie/util/transactor.scala index 45bee7ec9..8b3631982 100644 --- a/modules/core/src/main/scala/doobie/util/transactor.scala +++ b/modules/core/src/main/scala/doobie/util/transactor.scala @@ -11,6 +11,7 @@ import doobie.implicits._ import doobie.util.lens._ import doobie.util.log.LogHandler import doobie.util.yolo.Yolo +import doobie.free.{connection => IFC} import cats.{Monad, ~>} import cats.data.Kleisli import cats.effect.kernel.{Async, MonadCancelThrow, Resource} @@ -42,7 +43,7 @@ object transactor { always: ConnectionIO[Unit] ) { val resource: Resource[ConnectionIO, Unit] = for { - _ <- Resource.make(doobie.FC.unit)(_ => always) + _ <- Resource.make(IFC.unit)(_ => always) _ <- Resource.makeCase(before) { case (_, exitCase) => exitCase match { case ExitCase.Succeeded => after diff --git a/modules/core/src/main/scala/doobie/util/update.scala b/modules/core/src/main/scala/doobie/util/update.scala index c0f42652c..9b9e7a957 100644 --- a/modules/core/src/main/scala/doobie/util/update.scala +++ b/modules/core/src/main/scala/doobie/util/update.scala @@ -11,6 +11,11 @@ import doobie.implicits._ import doobie.util.analysis.Analysis import doobie.util.log.{ Success, ExecFailure, LogEvent } import doobie.util.pos.Pos +import doobie.free.{preparedstatement => IFPS} +import doobie.hi.{ + connection => IHC, + preparedstatement => IHPS, +} import fs2.Stream import scala.Predef.longWrapper import scala.concurrent.duration.{ FiniteDuration, NANOSECONDS } @@ -40,7 +45,7 @@ object update { protected implicit val write: Write[A] private val now: PreparedStatementIO[Long] = - FPS.delay(System.nanoTime) + IFPS.delay(System.nanoTime) // Equivalent to HPS.executeUpdate(k) but with logging if logHandler is defined private def executeUpdate[T](a: A): PreparedStatementIO[Int] = { @@ -48,12 +53,12 @@ object update { def diff(a: Long, b: Long) = FiniteDuration((a - b).abs, NANOSECONDS) def log(e: LogEvent): PreparedStatementIO[Unit] = for { - _ <- FPS.performLogging(e) + _ <- IFPS.performLogging(e) } yield () for { t0 <- now - en <- FPS.executeUpdate.attempt + en <- IFPS.executeUpdate.attempt t1 <- now n <- en.liftTo[PreparedStatementIO].onError { case e => log(ExecFailure(sql, args, label, diff(t1, t0), e)) } _ <- log(Success(sql, args, label, diff(t1, t0), FiniteDuration(0L, NANOSECONDS))) @@ -85,14 +90,14 @@ object update { * @group Diagnostics */ def analysis: ConnectionIO[Analysis] = - HC.prepareUpdateAnalysis[A](sql) + IHC.prepareUpdateAnalysis[A](sql) /** * Program to construct an analysis of this query's SQL statement and result set column types. * @group Diagnostics */ def outputAnalysis: ConnectionIO[Analysis] = - HC.prepareUpdateAnalysis0(sql) + IHC.prepareUpdateAnalysis0(sql) /** * Program to construct an inspection of the query. 
Given arguments `a`, calls `f` with the SQL @@ -102,7 +107,7 @@ object update { * @group Diagnostics */ def inspect[R](a: A)(f: (String, PreparedStatementIO[Unit]) => ConnectionIO[R]): ConnectionIO[R] = - f(sql, HPS.set(a)) + f(sql, IHPS.set(a)) /** * Construct a program to execute the update and yield a count of affected rows, given the @@ -110,7 +115,7 @@ object update { * @group Execution */ def run(a: A): ConnectionIO[Int] = - HC.prepareStatement(sql)(HPS.set(a) *> executeUpdate(a)) + IHC.prepareStatement(sql)(IHPS.set(a) *> executeUpdate(a)) /** * Program to execute a batch update and yield a count of affected rows. Note that failed @@ -119,7 +124,7 @@ object update { * @group Execution */ def updateMany[F[_]: Foldable](fa: F[A]): ConnectionIO[Int] = - HC.prepareStatement(sql)(HPS.addBatchesAndExecute(fa)) + IHC.prepareStatement(sql)(IHPS.addBatchesAndExecute(fa)) /** * Construct a stream that performs a batch update as with `updateMany`, yielding generated @@ -130,7 +135,7 @@ object update { def updateManyWithGeneratedKeys[K](columns: String*): UpdateManyWithGeneratedKeysPartiallyApplied[A, K] = new UpdateManyWithGeneratedKeysPartiallyApplied[A, K] { def withChunkSize[F[_]](as: F[A], chunkSize: Int)(implicit F: Foldable[F], K: Read[K]): Stream[ConnectionIO, K] = - HC.updateManyWithGeneratedKeys[List,A,K](columns.toList)(sql, FPS.unit, as.toList, chunkSize) + IHC.updateManyWithGeneratedKeys[List,A,K](columns.toList)(sql, IFPS.unit, as.toList, chunkSize) } /** @@ -149,7 +154,7 @@ object update { * @group Execution */ def withGeneratedKeysWithChunkSize[K: Read](columns: String*)(a: A, chunkSize: Int): Stream[ConnectionIO, K] = - HC.updateWithGeneratedKeys[K](columns.toList)(sql, HPS.set(a), chunkSize) + IHC.updateWithGeneratedKeys[K](columns.toList)(sql, IHPS.set(a), chunkSize) /** * Construct a program that performs the update, yielding a single set of generated keys of @@ -158,7 +163,7 @@ object update { * @group Execution */ def withUniqueGeneratedKeys[K: Read](columns: String*)(a: A): ConnectionIO[K] = - HC.prepareStatementS(sql, columns.toList)(HPS.set(a) *> HPS.executeUpdateWithUniqueGeneratedKeys) + IHC.prepareStatementS(sql, columns.toList)(IHPS.set(a) *> IHPS.executeUpdateWithUniqueGeneratedKeys) /** * Update is a contravariant functor. 
diff --git a/modules/core/src/main/scala/doobie/util/write.scala b/modules/core/src/main/scala/doobie/util/write.scala index 27aac86f8..464cb4d09 100644 --- a/modules/core/src/main/scala/doobie/util/write.scala +++ b/modules/core/src/main/scala/doobie/util/write.scala @@ -6,11 +6,15 @@ package doobie.util import cats.ContravariantSemigroupal import doobie.enumerated.Nullability._ -import doobie.free.{FPS, FRS, PreparedStatementIO, ResultSetIO} +import doobie.free.{PreparedStatementIO, ResultSetIO} import java.sql.{PreparedStatement, ResultSet} import doobie.util.fragment.Fragment import doobie.util.fragment.Elem +import doobie.free.{ + preparedstatement => IFPS, + resultset => IFRS +} import scala.annotation.implicitNotFound @@ -52,10 +56,10 @@ sealed abstract class Write[A]( lazy val length = puts.length def set(n: Int, a: A): PreparedStatementIO[Unit] = - FPS.raw(unsafeSet(_, n, a)) + IFPS.raw(unsafeSet(_, n, a)) def update(n: Int, a: A): ResultSetIO[Unit] = - FRS.raw(unsafeUpdate(_, n, a)) + IFRS.raw(unsafeUpdate(_, n, a)) def contramap[B](f: B => A): Write[B] = new Write[B]( diff --git a/modules/core/src/test/scala/doobie/util/TestTypes.scala b/modules/core/src/test/scala/doobie/util/TestTypes.scala index 66a167b9a..8a83105a8 100644 --- a/modules/core/src/test/scala/doobie/util/TestTypes.scala +++ b/modules/core/src/test/scala/doobie/util/TestTypes.scala @@ -4,7 +4,7 @@ package doobie.util -import doobie.Meta +import doobie.util.meta.Meta object TestTypes { case class LenStr1(n: Int, s: String) diff --git a/modules/postgres/src/main/scala/doobie/postgres/free/aliases.scala b/modules/postgres/src/main/scala/doobie/postgres/free/aliases.scala index 591c65855..28fe08ec6 100644 --- a/modules/postgres/src/main/scala/doobie/postgres/free/aliases.scala +++ b/modules/postgres/src/main/scala/doobie/postgres/free/aliases.scala @@ -16,32 +16,32 @@ trait Types { } trait Modules { - lazy val PFCI = copyin - lazy val PFCM = copymanager - lazy val PFCO = copyout - lazy val PFLO = largeobject - lazy val PFLOM = largeobjectmanager - lazy val PFPC = pgconnection + val PFCI = copyin + val PFCM = copymanager + val PFCO = copyout + val PFLO = largeobject + val PFLOM = largeobjectmanager + val PFPC = pgconnection } trait Instances { - implicit lazy val WeakAsyncCopyInIO: WeakAsync[copyin.CopyInIO] = + implicit val WeakAsyncCopyInIO: WeakAsync[copyin.CopyInIO] = copyin.WeakAsyncCopyInIO - implicit lazy val WeakAsyncCopyManagerIO: WeakAsync[copymanager.CopyManagerIO] = + implicit val WeakAsyncCopyManagerIO: WeakAsync[copymanager.CopyManagerIO] = copymanager.WeakAsyncCopyManagerIO - implicit lazy val WeakAsyncCopyOutIO: WeakAsync[copyout.CopyOutIO] = + implicit val WeakAsyncCopyOutIO: WeakAsync[copyout.CopyOutIO] = copyout.WeakAsyncCopyOutIO - implicit lazy val WeakAsyncLargeObjectIO: WeakAsync[largeobject.LargeObjectIO] = + implicit val WeakAsyncLargeObjectIO: WeakAsync[largeobject.LargeObjectIO] = largeobject.WeakAsyncLargeObjectIO - implicit lazy val WeakAsyncLargeObjectManagerIO: WeakAsync[largeobjectmanager.LargeObjectManagerIO] = + implicit val WeakAsyncLargeObjectManagerIO: WeakAsync[largeobjectmanager.LargeObjectManagerIO] = largeobjectmanager.WeakAsyncLargeObjectManagerIO - implicit lazy val WeakAsyncPGConnectionIO: WeakAsync[pgconnection.PGConnectionIO] = + implicit val WeakAsyncPGConnectionIO: WeakAsync[pgconnection.PGConnectionIO] = pgconnection.WeakAsyncPGConnectionIO } diff --git a/modules/postgres/src/main/scala/doobie/postgres/hi/aliases.scala 
b/modules/postgres/src/main/scala/doobie/postgres/hi/aliases.scala index 869a185b3..896d8a439 100644 --- a/modules/postgres/src/main/scala/doobie/postgres/hi/aliases.scala +++ b/modules/postgres/src/main/scala/doobie/postgres/hi/aliases.scala @@ -5,9 +5,9 @@ package doobie.postgres.hi trait Modules { - lazy val PHPC = pgconnection - lazy val PHC = connection - lazy val PHLO = largeobject - lazy val PHLOM = largeobjectmanager - lazy val PHLOS = lostreaming + val PHPC = pgconnection + val PHC = connection + val PHLO = largeobject + val PHLOM = largeobjectmanager + val PHLOS = lostreaming } diff --git a/modules/postgres/src/main/scala/doobie/postgres/hi/connection.scala b/modules/postgres/src/main/scala/doobie/postgres/hi/connection.scala index b3d21899b..89df35ca0 100644 --- a/modules/postgres/src/main/scala/doobie/postgres/hi/connection.scala +++ b/modules/postgres/src/main/scala/doobie/postgres/hi/connection.scala @@ -10,37 +10,39 @@ import cats.free.Free import org.postgresql.{ PGConnection, PGNotification } import doobie._, doobie.implicits._ import doobie.postgres.free.{ Embeddable, KleisliInterpreter } +import doobie.postgres.free.{pgconnection => IPFPC} +import doobie.postgres.hi.{pgconnection => IPHPC} /** Module of safe `PGConnectionIO` operations lifted into `ConnectionIO`. */ object connection { // An intepreter for lifting PGConnectionIO into ConnectionIO - val defaultInterpreter: PFPC.PGConnectionOp ~> Kleisli[ConnectionIO, PGConnection, *] = + val defaultInterpreter: IPFPC.PGConnectionOp ~> Kleisli[ConnectionIO, PGConnection, *] = KleisliInterpreter[ConnectionIO](LogHandler.noop).PGConnectionInterpreter val pgGetBackendPID: ConnectionIO[Int] = - pgGetConnection(PFPC.getBackendPID) + pgGetConnection(IPFPC.getBackendPID) def pgGetConnection[A](k: PGConnectionIO[A]): ConnectionIO[A] = FC.unwrap(classOf[PGConnection]).flatMap(k.foldMap(defaultInterpreter).run) def embed[F[_], J, B](j: J, op: Free[F, B])(implicit ev: Embeddable[F, J]): ConnectionIO[B] = - pgGetConnection(PFPC.embed(j, op)) + pgGetConnection(IPFPC.embed(j, op)) def pgGetCopyAPI[A](k: CopyManagerIO[A]): ConnectionIO[A] = - pgGetConnection(PHPC.getCopyAPI(k)) + pgGetConnection(IPHPC.getCopyAPI(k)) def pgGetLargeObjectAPI[A](k: LargeObjectManagerIO[A]): ConnectionIO[A] = - pgGetConnection(PHPC.getLargeObjectAPI(k)) + pgGetConnection(IPHPC.getLargeObjectAPI(k)) val pgGetNotifications: ConnectionIO[List[PGNotification]] = - pgGetConnection(PHPC.getNotifications) + pgGetConnection(IPHPC.getNotifications) val pgGetPrepareThreshold: ConnectionIO[Int] = - pgGetConnection(PHPC.getPrepareThreshold) + pgGetConnection(IPHPC.getPrepareThreshold) def pgSetPrepareThreshold(threshold: Int): ConnectionIO[Unit] = - pgGetConnection(PHPC.setPrepareThreshold(threshold)) + pgGetConnection(IPHPC.setPrepareThreshold(threshold)) /** * Construct a program that notifies on the given channel. 
Note that the channel is NOT sanitized; diff --git a/modules/postgres/src/main/scala/doobie/postgres/hi/largeobject.scala b/modules/postgres/src/main/scala/doobie/postgres/hi/largeobject.scala index 71ca957f8..da89c87ca 100644 --- a/modules/postgres/src/main/scala/doobie/postgres/hi/largeobject.scala +++ b/modules/postgres/src/main/scala/doobie/postgres/hi/largeobject.scala @@ -7,6 +7,7 @@ package doobie.postgres.hi import cats.syntax.all._ import doobie.util.io.IOActions import java.io.{File, InputStream, OutputStream} +import doobie.postgres.free.{largeobject => IPFLO} object largeobject { @@ -15,18 +16,18 @@ object largeobject { lazy val io = new IOActions[LargeObjectIO] def copyFromFile(blockSize: Int, file: File): LargeObjectIO[Unit] = - PFLO.getOutputStream.flatMap { os => io.copyFileToStream(blockSize, file, os) *> io.flush(os) } + IPFLO.getOutputStream.flatMap { os => io.copyFileToStream(blockSize, file, os) *> io.flush(os) } def copyToFile(blockSize: Int, file: File): LargeObjectIO[Unit] = - PFLO.getInputStream.flatMap { is => io.copyStreamToFile(blockSize, file, is) } + IPFLO.getInputStream.flatMap { is => io.copyStreamToFile(blockSize, file, is) } def copyFromStream(blockSize: Int, is: InputStream): LargeObjectIO[Unit] = - PFLO.getOutputStream.flatMap { os => + IPFLO.getOutputStream.flatMap { os => io.copyStream(new Array[Byte](blockSize))(is, os) } def copyToStream(blockSize: Int, os: OutputStream): LargeObjectIO[Unit] = - PFLO.getInputStream.flatMap { is => + IPFLO.getInputStream.flatMap { is => io.copyStream(new Array[Byte](blockSize))(is, os) } } diff --git a/modules/postgres/src/main/scala/doobie/postgres/hi/largeobjectmanager.scala b/modules/postgres/src/main/scala/doobie/postgres/hi/largeobjectmanager.scala index 688d936d5..18e8b79d0 100644 --- a/modules/postgres/src/main/scala/doobie/postgres/hi/largeobjectmanager.scala +++ b/modules/postgres/src/main/scala/doobie/postgres/hi/largeobjectmanager.scala @@ -7,41 +7,43 @@ package doobie.postgres.hi import cats.syntax.all._ import doobie.postgres.implicits._ import java.io.{ File, OutputStream, InputStream } +import doobie.postgres.free.{largeobjectmanager => IPFLOM, largeobject => IPFLO} +import doobie.postgres.hi.{largeobjectmanager => IPHLOM, largeobject => IPHLO} object largeobjectmanager { val createLO: LargeObjectManagerIO[Long] = - PFLOM.createLO + IPFLOM.createLO @SuppressWarnings(Array("org.wartremover.warts.Overloading")) def createLO(a: Int): LargeObjectManagerIO[Long] = - PFLOM.createLO(a) + IPFLOM.createLO(a) def delete(a: Long): LargeObjectManagerIO[Unit] = - PFLOM.delete(a) + IPFLOM.delete(a) @SuppressWarnings(Array("org.wartremover.warts.Overloading")) def open[A](a: Long, b: Int)(k: LargeObjectIO[A]): LargeObjectManagerIO[A] = - PFLOM.open(a, b) >>= (PFLOM.embed(_, k <* PFLO.close)) + IPFLOM.open(a, b) >>= (IPFLOM.embed(_, k <* IPFLO.close)) @SuppressWarnings(Array("org.wartremover.warts.Overloading")) def open[A](a: Long)(k: LargeObjectIO[A]): LargeObjectManagerIO[A] = - PFLOM.open(a) >>= (PFLOM.embed(_, k <* PFLO.close)) + IPFLOM.open(a) >>= (IPFLOM.embed(_, k <* IPFLO.close)) def unlink(a: Long): LargeObjectManagerIO[Unit] = - PFLOM.unlink(a) + IPFLOM.unlink(a) def createLOFromFile(blockSize: Int, file: File): LargeObjectManagerIO[Long] = - createLO >>= { oid => open(oid)(PHLO.copyFromFile(blockSize, file)).as(oid) } + createLO >>= { oid => open(oid)(IPHLO.copyFromFile(blockSize, file)).as(oid) } def createFileFromLO(blockSize: Int, oid: Long, file: File): LargeObjectManagerIO[Unit] = - 
diff --git a/modules/postgres/src/main/scala/doobie/postgres/hi/largeobjectmanager.scala b/modules/postgres/src/main/scala/doobie/postgres/hi/largeobjectmanager.scala
index 688d936d5..18e8b79d0 100644
--- a/modules/postgres/src/main/scala/doobie/postgres/hi/largeobjectmanager.scala
+++ b/modules/postgres/src/main/scala/doobie/postgres/hi/largeobjectmanager.scala
@@ -7,41 +7,43 @@ package doobie.postgres.hi
 import cats.syntax.all._
 import doobie.postgres.implicits._
 import java.io.{ File, OutputStream, InputStream }
+import doobie.postgres.free.{largeobjectmanager => IPFLOM, largeobject => IPFLO}
+import doobie.postgres.hi.{largeobjectmanager => IPHLOM, largeobject => IPHLO}
 
 object largeobjectmanager {
 
   val createLO: LargeObjectManagerIO[Long] =
-    PFLOM.createLO
+    IPFLOM.createLO
 
   @SuppressWarnings(Array("org.wartremover.warts.Overloading"))
   def createLO(a: Int): LargeObjectManagerIO[Long] =
-    PFLOM.createLO(a)
+    IPFLOM.createLO(a)
 
   def delete(a: Long): LargeObjectManagerIO[Unit] =
-    PFLOM.delete(a)
+    IPFLOM.delete(a)
 
   @SuppressWarnings(Array("org.wartremover.warts.Overloading"))
   def open[A](a: Long, b: Int)(k: LargeObjectIO[A]): LargeObjectManagerIO[A] =
-    PFLOM.open(a, b) >>= (PFLOM.embed(_, k <* PFLO.close))
+    IPFLOM.open(a, b) >>= (IPFLOM.embed(_, k <* IPFLO.close))
 
   @SuppressWarnings(Array("org.wartremover.warts.Overloading"))
   def open[A](a: Long)(k: LargeObjectIO[A]): LargeObjectManagerIO[A] =
-    PFLOM.open(a) >>= (PFLOM.embed(_, k <* PFLO.close))
+    IPFLOM.open(a) >>= (IPFLOM.embed(_, k <* IPFLO.close))
 
   def unlink(a: Long): LargeObjectManagerIO[Unit] =
-    PFLOM.unlink(a)
+    IPFLOM.unlink(a)
 
   def createLOFromFile(blockSize: Int, file: File): LargeObjectManagerIO[Long] =
-    createLO >>= { oid => open(oid)(PHLO.copyFromFile(blockSize, file)).as(oid) }
+    createLO >>= { oid => open(oid)(IPHLO.copyFromFile(blockSize, file)).as(oid) }
 
   def createFileFromLO(blockSize: Int, oid: Long, file: File): LargeObjectManagerIO[Unit] =
-    open(oid)(PHLO.copyToFile(blockSize, file))
+    open(oid)(IPHLO.copyToFile(blockSize, file))
 
   def createLOFromStream(blockSize: Int, is: InputStream): LargeObjectManagerIO[Long] =
-    PHLOM.createLO >>= { oid =>
-      PHLOM.open(oid)(PHLO.copyFromStream(blockSize, is)).as(oid)
+    IPHLOM.createLO >>= { oid =>
+      IPHLOM.open(oid)(IPHLO.copyFromStream(blockSize, is)).as(oid)
     }
 
   def createStreamFromLO(blockSize: Int, oid: Long, os: OutputStream): LargeObjectManagerIO[Unit] =
-    open(oid)(PHLO.copyToStream(blockSize, os))
+    open(oid)(IPHLO.copyToStream(blockSize, os))
 }
diff --git a/modules/postgres/src/main/scala/doobie/postgres/hi/lostreaming.scala b/modules/postgres/src/main/scala/doobie/postgres/hi/lostreaming.scala
index 901276115..4ea14b89c 100644
--- a/modules/postgres/src/main/scala/doobie/postgres/hi/lostreaming.scala
+++ b/modules/postgres/src/main/scala/doobie/postgres/hi/lostreaming.scala
@@ -7,6 +7,8 @@ package doobie.postgres.hi
 import cats.syntax.functor._
 import doobie.ConnectionIO
 import doobie.implicits._
+import doobie.postgres.free.{largeobjectmanager => IIPFLOM, largeobject => IPFLO}
+import doobie.postgres.hi.{connection => IPHC}
 import fs2.Stream
 import java.io.{InputStream, OutputStream}
 import org.postgresql.largeobject.LargeObject
@@ -25,17 +27,17 @@ object lostreaming {
       .flatMap(lo => fs2.io.readInputStream(getInputStream(lo), chunkSize))
 
   private val createLO: ConnectionIO[Long] =
-    PHC.pgGetLargeObjectAPI(PFLOM.createLO)
+    IPHC.pgGetLargeObjectAPI(IIPFLOM.createLO)
 
   private def openLO(oid: Long): ConnectionIO[LargeObject] =
-    PHC.pgGetLargeObjectAPI(PFLOM.open(oid))
+    IPHC.pgGetLargeObjectAPI(IIPFLOM.open(oid))
 
   private def closeLO(lo: LargeObject): ConnectionIO[Unit] =
-    PHC.pgGetLargeObjectAPI(PFLOM.embed(lo, PFLO.close))
+    IPHC.pgGetLargeObjectAPI(IIPFLOM.embed(lo, IPFLO.close))
 
   private def getOutputStream(lo: LargeObject): ConnectionIO[OutputStream] =
-    PHC.pgGetLargeObjectAPI(PFLOM.embed(lo, PFLO.getOutputStream))
+    IPHC.pgGetLargeObjectAPI(IIPFLOM.embed(lo, IPFLO.getOutputStream))
 
   private def getInputStream(lo: LargeObject): ConnectionIO[InputStream] =
-    PHC.pgGetLargeObjectAPI(PFLOM.embed(lo, PFLO.getInputStream))
+    IPHC.pgGetLargeObjectAPI(IIPFLOM.embed(lo, IPFLO.getInputStream))
 }
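The `open` combinator above brackets a `LargeObjectIO` program with `close`, so the file helpers compose safely. A round-trip sketch using only operations from this patch (file names are hypothetical):

```scala
import java.io.File
import doobie._
import doobie.postgres.hi.{connection => IPHC, largeobjectmanager => IPHLOM}

// Imports a file into a new large object, exports it to another file,
// then unlinks the object; `open` closes the LargeObject either way.
def roundTrip(in: File, out: File): ConnectionIO[Unit] =
  IPHC.pgGetLargeObjectAPI(
    for {
      oid <- IPHLOM.createLOFromFile(16384, in)
      _   <- IPHLOM.createFileFromLO(16384, oid, out)
      _   <- IPHLOM.unlink(oid)
    } yield ()
  )
```

PostgreSQL large-object operations must run inside a transaction, so a program like this should be executed via `.transact` on a transactor rather than in auto-commit mode.
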
diff --git a/modules/postgres/src/main/scala/doobie/postgres/hi/pgconnection.scala b/modules/postgres/src/main/scala/doobie/postgres/hi/pgconnection.scala
index 3ed98fa77..e0390a638 100644
--- a/modules/postgres/src/main/scala/doobie/postgres/hi/pgconnection.scala
+++ b/modules/postgres/src/main/scala/doobie/postgres/hi/pgconnection.scala
@@ -5,28 +5,29 @@ package doobie.postgres.hi
 import org.postgresql.PGNotification
+import doobie.postgres.free.{pgconnection => IPFPC}
 
 object pgconnection {
 
   val getBackendPID: PGConnectionIO[Int] =
-    PFPC.getBackendPID
+    IPFPC.getBackendPID
 
   def getCopyAPI[A](k: CopyManagerIO[A]): PGConnectionIO[A] =
-    PFPC.getCopyAPI.flatMap(s => PFPC.embed(s, k)) // N.B. no need to close()
+    IPFPC.getCopyAPI.flatMap(s => IPFPC.embed(s, k)) // N.B. no need to close()
 
   def getLargeObjectAPI[A](k: LargeObjectManagerIO[A]): PGConnectionIO[A] =
-    PFPC.getLargeObjectAPI.flatMap(s => PFPC.embed(s, k)) // N.B. no need to close()
+    IPFPC.getLargeObjectAPI.flatMap(s => IPFPC.embed(s, k)) // N.B. no need to close()
 
   val getNotifications: PGConnectionIO[List[PGNotification]] =
-    PFPC.getNotifications map {
+    IPFPC.getNotifications map {
       case null => Nil
       case ns   => ns.toList
     }
 
   val getPrepareThreshold: PGConnectionIO[Int] =
-    PFPC.getPrepareThreshold
+    IPFPC.getPrepareThreshold
 
   def setPrepareThreshold(threshold: Int): PGConnectionIO[Unit] =
-    PFPC.setPrepareThreshold(threshold)
+    IPFPC.setPrepareThreshold(threshold)
 }
diff --git a/modules/postgres/src/main/scala/doobie/postgres/rangeinstances.scala b/modules/postgres/src/main/scala/doobie/postgres/rangeinstances.scala
index 6a203cd84..500b70f17 100644
--- a/modules/postgres/src/main/scala/doobie/postgres/rangeinstances.scala
+++ b/modules/postgres/src/main/scala/doobie/postgres/rangeinstances.scala
@@ -4,7 +4,7 @@
 
 package doobie.postgres
 
-import doobie.Meta
+import doobie.util.meta.Meta
 import doobie.postgres.types.Range
 import org.postgresql.util.PGobject
 
diff --git a/modules/postgres/src/main/scala/doobie/postgres/syntax/FragmentSyntax.scala b/modules/postgres/src/main/scala/doobie/postgres/syntax/FragmentSyntax.scala
index 1f8120fe1..23aa81685 100644
--- a/modules/postgres/src/main/scala/doobie/postgres/syntax/FragmentSyntax.scala
+++ b/modules/postgres/src/main/scala/doobie/postgres/syntax/FragmentSyntax.scala
@@ -11,6 +11,8 @@ import cats.syntax.all._
 import doobie._
 import doobie.implicits._
 import doobie.postgres._
+import doobie.postgres.hi.{connection => IPHC}
+import doobie.postgres.free.{copymanager => IPFCM, copyin => IPFCI}
 import fs2._
 
 import java.io.StringReader
@@ -29,7 +31,7 @@ class FragmentOps(f: Fragment) {
     // TODO: stream this rather than constructing the string in memory.
     if (fa.isEmpty) 0L.pure[ConnectionIO] else {
       val data = foldToString(fa)
-      PHC.pgGetCopyAPI(PFCM.copyIn(f.query[Unit].sql, new StringReader(data)))
+      IPHC.pgGetCopyAPI(IPFCM.copyIn(f.query[Unit].sql, new StringReader(data)))
     }
   }
 
@@ -50,14 +52,14 @@ class FragmentOps(f: Fragment) {
     // we need to run that in the finalizer of the `bracket`, and the result from that is ignored.
     Ref.of[ConnectionIO, Long](-1L).flatMap { numRowsRef =>
       val copyAll: ConnectionIO[Unit] =
-        Stream.bracketCase(PHC.pgGetCopyAPI(PFCM.copyIn(f.query[Unit].sql))){
+        Stream.bracketCase(IPHC.pgGetCopyAPI(IPFCM.copyIn(f.query[Unit].sql))){
           case (copyIn, Resource.ExitCase.Succeeded) =>
-            PHC.embed(copyIn, PFCI.endCopy).flatMap(numRowsRef.set)
+            IPHC.embed(copyIn, IPFCI.endCopy).flatMap(numRowsRef.set)
           case (copyIn, _) =>
-            PHC.embed(copyIn, PFCI.cancelCopy)
+            IPHC.embed(copyIn, IPFCI.cancelCopy)
         }.flatMap { copyIn =>
           byteStream.chunks.evalMap(bytes =>
-            PHC.embed(copyIn, PFCI.writeToCopy(bytes.toArray, 0, bytes.size))
+            IPHC.embed(copyIn, IPFCI.writeToCopy(bytes.toArray, 0, bytes.size))
           )
         }.compile.drain
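Assuming the first hunk above is the body of the `copyIn` syntax that `FragmentOps` adds to `Fragment` (the method name is not visible in these hunks, so treat it and the imports as an educated guess), usage looks roughly like:

```scala
import doobie._
import doobie.implicits._
import doobie.postgres._
import doobie.postgres.implicits._ // assumed to bring the Fragment syntax into scope

// Folds the rows to COPY text format in memory (see the TODO above)
// and hands the result to the driver's CopyManager, returning the row count.
val rows: List[(Int, String)] = List((1, "alice"), (2, "bob"))
val copied: ConnectionIO[Long] =
  sql"COPY person (id, name) FROM STDIN".copyIn(rows)
```
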