From c0a86bf6763fa8a80b09918cbe73a48773210e3e Mon Sep 17 00:00:00 2001
From: augustnagro
Date: Tue, 12 Nov 2024 23:46:23 -0800
Subject: [PATCH 1/3] new pg-module.md doc, add new json & xml codec docs

---
 PG-MODULE.md | 143 +++++++++++++++++++++++++++++++++++++++++++++++++++
 README.md    |  64 +----------------------
 2 files changed, 144 insertions(+), 63 deletions(-)
 create mode 100644 PG-MODULE.md

diff --git a/PG-MODULE.md b/PG-MODULE.md
new file mode 100644
index 0000000..e06e23b
--- /dev/null
+++ b/PG-MODULE.md
@@ -0,0 +1,143 @@
## Postgres Module

The Postgres Module adds support for [Geometric Types](https://www.postgresql.org/docs/current/datatype-geometric.html), [Arrays](https://www.postgresql.org/docs/current/arrays.html), [Json/JsonB](https://www.postgresql.org/docs/current/datatype-json.html), and [xml](https://www.postgresql.org/docs/current/datatype-xml.html). Postgres Arrays can be decoded into Scala List/Vector/IArray, etc.; multi-dimensionality is also supported.

```
"com.augustnagro" %% "magnumpg" % "1.3.0"
```

Example: Insert into a table with a `point[]` type column.

With table:

```sql
create table my_geo (
  id bigint primary key,
  pnts point[] not null
);
```

```scala
import org.postgresql.geometric.PGpoint
import com.augustnagro.magnum.{Table, PostgresDbType, Id, DbCodec, Transactor, Repo, transact}
import com.augustnagro.magnum.pg.PgCodec.given

@Table(PostgresDbType)
case class MyGeo(@Id id: Long, pnts: IArray[PGpoint]) derives DbCodec

val dataSource: javax.sql.DataSource = ???
val xa = Transactor(dataSource)

val myGeoRepo = Repo[MyGeo, MyGeo, Long]

transact(xa):
  myGeoRepo.insert(MyGeo(1L, IArray(PGpoint(1, 1), PGpoint(2, 2))))
```

The import of `PgCodec.given` is required to bring Geo/Array DbCodecs into scope.
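Multi-dimensional arrays follow the same pattern; a minimal sketch, assuming a hypothetical `grid` table with an `int[][]` column and that nested Scala collections map to nested Postgres arrays:

```scala
// Hypothetical table, assuming nested collections map to int[][]:
// create table grid (id bigint primary key, cells int[][] not null);
@Table(PostgresDbType)
case class Grid(@Id id: Long, cells: Vector[Vector[Int]]) derives DbCodec
```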
#### Arrays of Enums

The `pg` module supports arrays of simple (non-ADT) enums.

If you want to map an array of [Postgres enums](https://www.postgresql.org/docs/current/datatype-enum.html) to a sequence of Scala enums, use the following import when deriving the DbCodec:

```scala
import com.augustnagro.magnum.pg.PgCodec.given
import com.augustnagro.magnum.pg.enums.PgEnumToScalaEnumSqlArrayCodec

// in postgres: `create type Color as enum ('Red', 'Green', 'Blue');`
enum Color derives DbCodec:
  case Red, Green, Blue

@Table(PostgresDbType)
case class Car(@Id id: Long, colors: Vector[Color]) derives DbCodec
```

If instead your Postgres type is an array of varchar or text, use the following import:

```scala
import com.augustnagro.magnum.pg.enums.PgStringToScalaEnumSqlArrayCodec
```

#### Json, JsonB, XML

You can map `json`, `jsonb`, and `xml` columns to Scala classes by implementing `JsonDbCodec`, `JsonBDbCodec`, and `XmlDbCodec` respectively.

As an example, assume we have table `car`:

```sql
CREATE TABLE car (
  id bigint primary key,
  last_service json not null
);
```

And `last_service` looks like:

```json
{"mechanic": "Bob", "date": "2024-05-04"}
```

We can model the relation in Scala with:

```scala
@Table(PostgresDbType, SqlNameMapper.CamelToSnakeCase)
case class Car(
  @Id id: Long,
  lastService: LastService
) derives DbCodec

case class LastService(mechanic: String, date: LocalDate)
```

However, this won't compile because we're missing a given `DbCodec[LastService]`. To get there, first we have to pick a Scala JSON library. Nearly all of them support creating derived codecs; the example below shows how it's done in [Circe](https://circe.github.io/circe):

```scala
import io.circe.Codec
import java.time.LocalDate

case class LastService(mechanic: String, date: LocalDate) derives Codec.AsObject
```

Next, we should extend `JsonDbCodec` to implement our own `CirceDbCodec`:

```scala
import com.augustnagro.magnum.pg.json.JsonDbCodec
import io.circe.{Codec, Decoder, Encoder, JsonObject}
import io.circe.parser.{decode as circeDecode, *}
import io.circe.syntax.*

trait CirceDbCodec[A] extends JsonDbCodec[A]

object CirceDbCodec:
  def derived[A: Encoder: Decoder]: CirceDbCodec[A] = new:
    def encode(a: A): String = a.asJson.toString
    def decode(json: String): A = circeDecode[A](json) match
      case Right(a) => a
      case Left(err) => throw err
```

Note the `derived` method in the companion object; this allows us to use `derives CirceDbCodec` on our JSON class, like so:

```scala
case class LastService(mechanic: String, date: LocalDate)
  derives Codec.AsObject, CirceDbCodec
```

The `Car` example will now compile and work as expected.
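`jsonb` columns follow the same recipe with `JsonBDbCodec`; a minimal sketch, assuming `JsonBDbCodec` lives beside `JsonDbCodec` and shares its `encode`/`decode` shape:

```scala
// Assumed import path and trait shape, mirroring JsonDbCodec above.
import com.augustnagro.magnum.pg.json.JsonBDbCodec
import io.circe.{Decoder, Encoder}
import io.circe.parser.decode as circeDecode
import io.circe.syntax.*

trait CirceJsonBDbCodec[A] extends JsonBDbCodec[A]

object CirceJsonBDbCodec:
  def derived[A: Encoder: Decoder]: CirceJsonBDbCodec[A] = new:
    def encode(a: A): String = a.asJson.toString
    def decode(json: String): A = circeDecode[A](json) match
      case Right(a) => a
      case Left(err) => throw err
```

A class can then use `derives Codec.AsObject, CirceJsonBDbCodec` exactly as in the `json` example.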
For XML, there are a few options. If using a library that maps XML to case classes like [scalaxb](https://github.com/eed3si9n/scalaxb), we can follow the JSON pattern above, but using `XmlDbCodec`. If the case classes are generated sources, we can't put the DbCodec givens in their companion objects. Instead, put them in the entity companion object.

Another pattern is to use a library like [scala-xml](https://github.com/scala/scala-xml) directly and encapsulate the NodeSeq. Then, we can define our DbCodec on the wrapper:

```scala
import com.augustnagro.magnum.pg.xml.XmlDbCodec

import java.time.LocalDate
import scala.xml.{Elem, XML}

class LastService(val xml: Elem):
  def mechanic: String = (xml \ "mechanic").head.text.trim
  def date: LocalDate = LocalDate.parse((xml \ "date").head.text.trim)

object LastService:
  given XmlDbCodec[LastService] with
    def encode(a: LastService): String = a.xml.toString
    def decode(xml: String): LastService = LastService(XML.loadString(xml))
```
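With that given in scope, the earlier `Car` mapping works unchanged against an `xml` column; a quick sketch (same table as before, with `last_service` declared as `xml` instead of `json`):

```scala
@Table(PostgresDbType, SqlNameMapper.CamelToSnakeCase)
case class Car(
  @Id id: Long,
  lastService: LastService
) derives DbCodec
```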
diff --git a/README.md b/README.md
index 0438161..a98a97a 100644
--- a/README.md
+++ b/README.md
@@ -19,7 +19,7 @@ Yet another database client for Scala. No dependencies, high productivity.
  * [`DbCodec`: Typeclass for JDBC reading & writing](#dbcodec-typeclass-for-jdbc-reading--writing)
  * [Future-Proof Queries](#future-proof-queries)
  * [Splicing Literal Values into Frags](#splicing-literal-values-into-frags)
- * [Postgres Module](#postgres-module)
+ * [Postgres Module](/PG-MODULE.md)
  * [Logging](#logging-sql-queries)
  * [Motivation](#motivation)
  * [Feature List And Database Support](#feature-list)
@@ -467,68 +467,6 @@ sql"select * from $table"

This feature should be used sparingly and never with untrusted input.

-### Postgres Module
-
-The Postgres Module adds support for [Geometric Types](https://www.postgresql.org/docs/current/datatype-geometric.html) and [Arrays](https://www.postgresql.org/docs/current/arrays.html). Postgres Arrays can be decoded into Scala List/Vector/IArray, etc; multi-dimensionality is also supported.
-
-```
-"com.augustnagro" %% "magnumpg" % "1.3.0"
-```
-
-Example: Insert into a table with a `point[]` type column.
-
-With table:
-
-```sql
-create table my_geo (
-  id bigint primary key,
-  pnts point[] not null
-);
-```
-
-```scala
-import org.postgresql.geometric.*
-import com.augustnagro.magnum.*
-import com.augustnagro.magnum.pg.PgCodec.given
-
-@Table(PostgresDbType)
-case class MyGeo(@Id id: Long, pnts: IArray[PGpoint]) derives DbCodec
-
-val dataSource: javax.sql.DataSource = ???
-val xa = Transactor(dataSource)
-
-val myGeoRepo = Repo[MyGeo, MyGeo, Long]
-
-transact(xa):
-  myGeoRepo.insert(MyGeo(1L, IArray(PGpoint(1, 1), PGPoint(2, 2))))
-```
-
-The import of `PgCodec.given` is required to bring Geo/Array DbCodecs into scope.
-
-#### Arrays of Enums
-
-The `pg` module supports arrays of simple (non-ADT) enums.
-
-If you want to map an array of [Postgres enums](https://www.postgresql.org/docs/current/datatype-enum.html) to a sequence of Scala enums, use the following import when deriving the DbCodec:
-
-```scala
-import com.augustnagro.magnum.pg.PgCodec.given
-import com.augustnagro.magnum.pg.enums.PgEnumToScalaEnumSqlArrayCodec
-
-// in postgres: `create type Color as enum ('Red', 'Green', 'Blue');`
-enum Color derives DbCodec:
-  case Red, Green, Blue
-
-@Table(PostgresDbType)
-case class Car(@Id id: Long, colors: Vector[Color]) derives DbCodec
-```
-
-If instead your Postgres type is an array of varchar or text, use the following import:
-
-```scala
-import com.augustnagro.magnum.pg.enums.PgStringToScalaEnumSqlArrayCodec
-```
-
 ### Logging SQL queries

If you set the java.util Logging level to DEBUG, all SQL queries will be logged.

From 0a89c57d009c30a9081e13b2cd69491ff4a05c05 Mon Sep 17 00:00:00 2001
From: augustnagro
Date: Sun, 1 Dec 2024 16:17:13 -0800
Subject: [PATCH 2/3] Spec changes

---
 README.md | 93 +++++++++++++++++++++++++++++--------------------------
 1 file changed, 49 insertions(+), 44 deletions(-)

diff --git a/README.md b/README.md
index a98a97a..24b5aa0 100644
--- a/README.md
+++ b/README.md
@@ -29,7 +29,7 @@ Yet another database client for Scala. No dependencies, high productivity.
 ## Installing

```
-"com.augustnagro" %% "magnum" % "1.3.0"
+"com.augustnagro" %% "magnum" % "2.0.0"
```

Magnum requires Scala >= 3.3.0
@@ -218,7 +218,7 @@ The optional `@Id` annotation denotes the table's primary key. Not setting `@Id`

It is a best practice to extend ImmutableRepo to encapsulate your SQL in repositories. This way, it's easier to maintain since they're grouped together.

```scala
-class UserRepo extends ImmutableRepo[User, Long]:
+object UserRepo extends ImmutableRepo[User, Long]:
  def firstNamesForLast(lastName: String)(using DbCon): Vector[String] =
    sql"""
      SELECT DISTINCT first_name
@@ -229,18 +229,22 @@ class UserRepo extends ImmutableRepo[User, Long]:
      FROM user
      WHERE last_name = $lastName
    """.query[String].run()

  // other User-related queries here
```

If you don't want to expose all the ImmutableRepo methods, use a Scala 3 [export clause](https://docs.scala-lang.org/scala3/reference/other-new-features/export.html):

```scala
object UserRepo:
  private val repo = ImmutableRepo[User, Long]

  export repo.{count, existsById}

  // other User-related queries here
```

### Repositories

-The `Repo` class auto-generates the following methods at compile-time:
+The `Repo` class extends ImmutableRepo and also defines:

```scala
-  def count(using DbCon): Long
-  def existsById(id: ID)(using DbCon): Boolean
-  def findAll(using DbCon): Vector[E]
-  def findAll(spec: Spec[E])(using DbCon): Vector[E]
-  def findById(id: ID)(using DbCon): Option[E]
-  def findAllById(ids: Iterable[ID])(using DbCon): Vector[E]
-
  def delete(entity: E)(using DbCon): Unit
  def deleteById(id: ID)(using DbCon): Unit
  def truncate()(using DbCon): Unit
@@ -275,7 +279,7 @@ val countAfterUpdate = transact(xa):

It is a best practice to encapsulate your SQL in repositories.

```scala
-class UserRepo extends Repo[User, User, Long]
+object UserRepo extends Repo[User, User, Long]
```

Also note that Repo extends ImmutableRepo. Some databases cannot support every method and will throw UnsupportedOperationException.
@@ -315,18 +319,9 @@ val newUser: User = transact(xa):

Specifications help you write safe, dynamic queries. An example use-case would be a search results page that allows users to sort and filter the paginated data.

-1. If you need to perform joins to get the data needed, first create a database view.
-2. Next, create an entity class that derives DbCodec.
-3. Finally, use the Spec class to create a specification.
-
Here's an example:

```scala
-val partialName = "Ja"
-val lastNameOpt = Option("Brown")
-val searchDate = OffsetDateTime.now.minusDays(2)
-val idPosition = 42L
-
val spec = Spec[User]
  .where(sql"first_name ILIKE '$partialName%'")
  .where(lastNameOpt.map(ln => sql"last_name = $ln").getOrElse(sql""))
@@ -335,9 +330,14 @@
  .limit(10)

val users: Vector[User] = userRepo.findAll(spec)

def partialName = "Ja"
def lastNameOpt = Option("Brown")
def searchDate = OffsetDateTime.now.minusDays(2)
def idPosition = 42L
```

Note that both [seek pagination](https://blog.jooq.org/faster-sql-paging-with-jooq-using-the-seek-method/) and offset pagination are supported. If you need joins to select the columns, use the `Spec.prefix` method, as sketched below.
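A sketch of `prefix` for a join-backed query follows; treat the `Frag`-accepting signature and the column names as assumptions rather than confirmed API:

```scala
// Hypothetical sketch: replace the generated `SELECT * FROM user` prefix
// with a custom join, then use where/limit as usual.
val city = "Oakland"
val joined = Spec[User]
  .prefix(sql"SELECT u.* FROM user u JOIN address a ON a.id = u.address_id")
  .where(sql"a.city = $city")
  .limit(10)

val usersInCity: Vector[User] = userRepo.findAll(joined)
```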
### Scala 3 Enum & NewType Support

@@ -403,7 +403,7 @@ A common problem when writing SQL queries is that they're difficult to refactor.

There's also lots of repetition when writing SQL. Magnum's repositories help scrap the boilerplate, but writing `SELECT a, b, c, d, ...` for a large table quickly gets tiring.

-To help with this, Magnum offers a `TableInfo` class to enable 'future-proof' queries. An important caveat is that these queries are harder to copy/paste into SQL editors like PgAdmin or DbBeaver.
+To help with this, Magnum offers a `TableInfo` class to enable 'future-proof' queries. An important caveat is that these queries are harder to copy/paste into SQL editors like PgAdmin or DBeaver (of course, you can still find them in [DEBUG logs](#logging-sql-queries)).

Here are some examples:

```scala
import com.augustnagro.magnum.*

case class UserCreator(firstName: String, age: Int) derives DbCodec

@Table(PostgresDbType, SqlNameMapper.CamelToSnakeCase)
case class User(id: Long, firstName: String, lastName: String, age: Int) derives DbCodec

object UserSql:
  private val u = TableInfo[UserCreator, User, Long]

  def allUsers(using DbCon): Vector[User] =
    // equiv to
    // SELECT id, first_name, last_name, age FROM user
    sql"SELECT ${u.all} FROM $u".query[User].run()

  def firstNamesForLast(lastName: String)(using DbCon): Vector[String] =
    // equiv to
    // SELECT DISTINCT first_name FROM user WHERE last_name = ?
    sql"""
      SELECT DISTINCT ${u.firstName} FROM $u
      WHERE ${u.lastName} = $lastName
    """.query[String].run()

  def insertOrIgnore(creator: UserCreator)(using DbCon): Unit =
    // equiv to
    // INSERT OR IGNORE INTO user (first_name, age) VALUES (?, ?)
    sql"INSERT OR IGNORE INTO $u ${u.insertCols} VALUES ($creator)".update.run()
```

It's important that TableInfo vals like `private val u = TableInfo[UserCreator, User, Long]` are not explicitly typed, otherwise their structural typing will be destroyed.
@@ -562,6 +559,16 @@ case class Address(
  zipCode: String,
  country: String
) derives DbCodec

def companyInfo(companyName: String)(using DbCon): Vector[(Company, Address)] =
  val c = TableInfo[Company, Company, String].alias("c")
  val a = TableInfo[Address, Address, Long].alias("a")
  sql"""
    SELECT ${c.all}, ${a.all}
    FROM $c
    JOIN $a ON ${a.id} = ${c.addressId}
    WHERE ${c.name} = $companyName
  """.query[(Company, Address)].run()
```

#### UUID DbCodec doesn't work for my database
@@ -580,7 +587,5 @@ case class Person(@Id id: Long, name: String, tracking_id: Option[UUID]) derives
```

## Todo
-* JSON / XML support
 * Support MSSql
 * Cats Effect & ZIO modules
-* Explicit Nulls support

From 3ae47e1cc1dfe1c3dad66978472af29db640a77a Mon Sep 17 00:00:00 2001
From: augustnagro
Date: Tue, 14 Jan 2025 22:09:27 -0800
Subject: [PATCH 3/3] update readme after merging utility improvements

---
 README.md | 70 ++++++++++++++++++++++++------------------------------
 1 file changed, 31 insertions(+), 39 deletions(-)

diff --git a/README.md b/README.md
index 6af713b..a4ffd16 100644
--- a/README.md
+++ b/README.md
@@ -54,36 +54,33 @@ https://javadoc.io/doc/com.augustnagro/magnum_3

### `connect` creates a database connection.

```scala
import com.augustnagro.magnum.common.*

val xa = Transactor(dataSource: javax.sql.DataSource)

val users: Vector[User] = xa.connect:
  sql"SELECT * FROM user".query[User].run()
```

The `connect` method accepts a context function of type `DbCon ?=> A` (essentially `implicit DbCon => A` in Scala 2).
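Because of this, any method that declares `using DbCon` can be passed to `connect` directly; a small hypothetical sketch (the `countUsers` helper is illustrative, not from the library):

```scala
// A reusable query that only needs a connection in scope.
def countUsers(using DbCon): Long =
  sql"SELECT count(*) FROM user".query[Long].run().head

// xa.connect supplies the DbCon.
val n: Long = xa.connect(countUsers)
```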
### `transact` creates a database transaction.

Like `connect`, `transact` accepts a context function.
The context function provides a `DbTx` capability.
If the function throws, the transaction will be rolled back.

```scala
// update is rolled back
xa.transact:
  sql"UPDATE user SET first_name = $firstName WHERE id = $id".update.run()
  thisMethodThrows()
```

### Type-safe Transaction & Connection Management

Annotate transactional methods with `using DbTx`, and ones that require connections with `using DbCon`.
Since `DbTx <: DbCon`, a method requiring only a connection can be called inside a transaction, but a method requiring a transaction cannot be called with only a connection.
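For instance, a hypothetical service layer (table and method names are illustrative):

```scala
// Requires a transaction: both statements commit or roll back together.
def transferFunds(from: Long, to: Long, cents: Long)(using DbTx): Unit =
  sql"UPDATE account SET balance = balance - $cents WHERE id = $from".update.run()
  sql"UPDATE account SET balance = balance + $cents WHERE id = $to".update.run()

// Only needs a connection, so it can also be called inside a transaction.
def accountBalance(id: Long)(using DbCon): Option[Long] =
  sql"SELECT balance FROM account WHERE id = $id".query[Long].run().headOption

xa.transact:
  transferFunds(1L, 2L, 10_00)
  val newBalance = accountBalance(2L) // compiles because DbTx <: DbCon
```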
```scala
val xa = Transactor(
  con.setTransactionIsolation(Connection.TRANSACTION_REPEATABLE_READ)
)

xa.transact:
  sql"SELECT id from myUser".query[Long].run()
```

All are executed via `run()(using DbCon)`:

```scala
xa.transact:
  val tuples: Vector[(Long, String)] = query.run()
  val updatedRows: Int = update.run()
  val updatedIds: Vector[Long] = updateReturning.run()
```

Batch updates are supported via the `batchUpdate` method in package `com.augustnagro.magnum`.

```scala
xa.connect:
  val users: Iterable[User] = ???
  val updateResult: BatchUpdateResult =
    batchUpdate(users): user =>
```

```scala
val userRepo = ImmutableRepo[User, Long]

xa.transact:
  val cnt = userRepo.count
  val userOpt = userRepo.findById(2L)
```

Importantly, class User is annotated with `@Table`, which defines the table's database type and naming convention (CamelToSnakeCase in this example).

The optional `@Id` annotation denotes the table's primary key. Not setting `@Id` will default to using the first field. If there is no logical id, then remove the annotation and use Null in the ID type parameter of Repositories (see next).

You can choose to use composition or inheritance to encapsulate your SQL in repositories (Scala 3 [export clauses](https://docs.scala-lang.org/scala3/reference/other-new-features/export.html) are a hidden gem):

```scala
object UserRepo extends ImmutableRepo[User, Long]:
  def firstNamesForLast(lastName: String)(using DbCon): Vector[String] =
    sql"""
      SELECT DISTINCT first_name
      FROM user
      WHERE last_name = $lastName
    """.query[String].run()

// alternatively
object UserSql:
  private val userRepo = ImmutableRepo[User, Long]

  export userRepo.{findById, count}

  def firstNamesForLast(lastName: String)(using DbCon): Vector[String] =
    sql"""
      SELECT DISTINCT first_name
      FROM user
      WHERE last_name = $lastName
    """.query[String].run()
```

### Repositories

```scala
val userRepo = Repo[User, User, Long]

val countAfterUpdate = xa.transact:
  userRepo.deleteById(2L)
  userRepo.count
```

Also note that Repo extends ImmutableRepo. Some databases cannot support every method and will throw UnsupportedOperationException.

### Database generated columns

```scala
val userRepo = Repo[UserCreator, User, Long]

val newUser: User = xa.transact:
  userRepo.insertReturning(
    UserCreator(Some("Adam"), "Smith")
  )
```

```scala
object MyId:
  given DbCodec[MyId] = DbCodec[Long].biMap(MyId.apply, _.underlying)

xa.transact:
  val id = MyId(123L)
  sql"UPDATE my_table SET x = true WHERE id = $id".update.run()
```

Here are some examples:

```scala
import com.augustnagro.magnum.common.*

case class UserCreator(firstName: String, age: Int) derives DbCodec
```

The tests are written using TestContainers, which requires Docker to be installed.

## Talks and Blogs

* Scala Days 2023: [slides](/Magnum-Slides-to-Share.pdf), [talk](https://www.youtube.com/watch?v=iKNRS5b1zAY)
* Functional Scala 2024: https://www.youtube.com/watch?v=pkBfdHkeTtA

## Frequently Asked Questions

#### UUID DbCodec doesn't work for my database

Some databases directly support the UUID type; these include Postgres, Clickhouse, and H2.
Other databases like MySql, Oracle, and Sqlite, however, do not natively support UUID columns. Users have to choose an alternate datatype to store the UUID: most commonly `varchar(36)` or `binary(16)`. The JDBC drivers for these databases do not support direct serialization and deserialization of `java.util.UUID`, therefore the default `DbCodec[UUID]` will not be sufficient. Instead, import the appropriate codec from `com.augustnagro.magnum.UUIDCodec`. For example,

```scala
import com.augustnagro.magnum.common.*
import com.augustnagro.magnum.UUIDCodec.VarCharUUIDCodec
import java.util.UUID