| title | date | categories | tags |
| --- | --- | --- | --- |
| Advanced Topics | 2021-12-03 | | |
DBResolver adds multiple databases support to GORM; the following features are supported:

- Multiple sources, replicas
- Read/Write Splitting
- Automatic connection switching based on the working table/struct
- Manual connection switching
- Sources/Replicas load balancing
- Works for RAW SQL
import (
"gorm.io/gorm"
"gorm.io/plugin/dbresolver"
"gorm.io/driver/mysql"
)
db, err := gorm.Open(mysql.Open("db1_dsn"), &gorm.Config{})
db.Use(dbresolver.Register(dbresolver.Config{
// use `db2` as sources, `db3`, `db4` as replicas
Sources: []gorm.Dialector{mysql.Open("db2_dsn")},
Replicas: []gorm.Dialector{mysql.Open("db3_dsn"), mysql.Open("db4_dsn")},
// sources/replicas load balancing policy
Policy: dbresolver.RandomPolicy{},
}).Register(dbresolver.Config{
// use `db1` as sources (DB's default connection), `db5` as replicas for `User`, `Address`
Replicas: []gorm.Dialector{mysql.Open("db5_dsn")},
}, &User{}, &Address{}).Register(dbresolver.Config{
// use `db6`, `db7` as sources, `db8` as replicas for `orders`, `Product`
Sources: []gorm.Dialector{mysql.Open("db6_dsn"), mysql.Open("db7_dsn")},
Replicas: []gorm.Dialector{mysql.Open("db8_dsn")},
}, "orders", &Product{}, "secondary"))
When using a transaction, DBResolver will keep using that transaction and won't switch to sources/replicas.

DBResolver will automatically switch connections based on the working table/struct.

For RAW SQL, DBResolver extracts the table name from the SQL to match the resolver, and will use `sources` unless the SQL begins with `SELECT` (except `SELECT ... FOR UPDATE`), for example:
// `User` Resolver Examples
db.Table("users").Rows() // replicas `db5`
db.Model(&User{}).Find(&AdvancedUser{}) // replicas `db5`
db.Exec("update users set name = ?", "jinzhu") // sources `db1`
db.Raw("select name from users").Row().Scan(&name) // replicas `db5`
db.Create(&user) // sources `db1`
db.Delete(&User{}, "name = ?", "jinzhu") // sources `db1`
db.Table("users").Update("name", "jinzhu") // sources `db1`
// Global Resolver Examples
db.Find(&Pet{}) // replicas `db3`/`db4`
db.Save(&Pet{}) // sources `db2`
// Orders Resolver Examples
db.Find(&Order{}) // replicas `db8`
db.Table("orders").Find(&Report{}) // replicas `db8`
Read/Write splitting with DBResolver is based on the currently used GORM callbacks:

- For `Query` and `Row` callbacks, `replicas` will be used unless `Write` mode is specified
- For the `Raw` callback, statements are considered read-only and `replicas` will be used if the SQL starts with `SELECT`
// Use Write Mode: read user from sources `db1`
db.Clauses(dbresolver.Write).First(&user)
// Specify Resolver: read user from `secondary`'s replicas: db8
db.Clauses(dbresolver.Use("secondary")).First(&user)
// Specify Resolver and Write Mode: read user from `secondary`'s sources: db6 or db7
db.Clauses(dbresolver.Use("secondary"), dbresolver.Write).First(&user)
GORM supports load balancing sources/replicas based on policy; the policy should be a struct that implements the following interface:
type Policy interface {
Resolve([]gorm.ConnPool) gorm.ConnPool
}
Currently only `RandomPolicy` is implemented, and it is the default option if no other policy is specified.
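As an illustration, here is a minimal sketch of a custom policy; the round-robin behaviour and the `RoundRobinPolicy` name are assumptions, not part of DBResolver:

```go
import (
	"sync/atomic"

	"gorm.io/driver/mysql"
	"gorm.io/gorm"
	"gorm.io/plugin/dbresolver"
)

// RoundRobinPolicy cycles through the available connection pools.
type RoundRobinPolicy struct {
	counter uint64
}

func (p *RoundRobinPolicy) Resolve(connPools []gorm.ConnPool) gorm.ConnPool {
	n := atomic.AddUint64(&p.counter, 1)
	return connPools[n%uint64(len(connPools))]
}

// Use it when registering the resolver:
db.Use(dbresolver.Register(dbresolver.Config{
	Sources:  []gorm.Dialector{mysql.Open("db2_dsn")},
	Replicas: []gorm.Dialector{mysql.Open("db3_dsn"), mysql.Open("db4_dsn")},
	Policy:   &RoundRobinPolicy{},
}))
```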
The connection pool for sources/replicas can also be configured through the resolver, for example:
db.Use(
  dbresolver.Register(dbresolver.Config{ /* xxx */ }).
  SetConnMaxIdleTime(time.Hour).
  SetConnMaxLifetime(24 * time.Hour).
  SetMaxIdleConns(100).
  SetMaxOpenConns(200),
)
GORM provides a Prometheus plugin to collect DBStats or user-defined metrics: https://github.com/go-gorm/prometheus
import (
"gorm.io/gorm"
"gorm.io/driver/sqlite"
"gorm.io/plugin/prometheus"
)
db, err := gorm.Open(sqlite.Open("gorm.db"), &gorm.Config{})
db.Use(prometheus.New(prometheus.Config{
DBName: "db1", // use `DBName` as metrics label
RefreshInterval: 15, // Refresh metrics interval (default 15 seconds)
PushAddr: "prometheus pusher address", // push metrics if `PushAddr` configured
StartServer: true, // start http server to expose metrics
HTTPServerPort: 8080, // configure http server port, default port 8080 (if you have configured multiple instances, only the first `HTTPServerPort` will be used to start server)
MetricsCollector: []prometheus.MetricsCollector {
&prometheus.MySQL{
VariableNames: []string{"Threads_running"},
},
}, // user defined metrics
}))
You can define your own metrics and collect them with the GORM Prometheus plugin; they need to implement the `MetricsCollector` interface:
type MetricsCollector interface {
Metrics(*Prometheus) []prometheus.Collector
}
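A minimal sketch of a user-defined collector; the `QueueLength` type and the metric name are assumptions for illustration:

```go
import (
	"github.com/prometheus/client_golang/prometheus"
	gormprometheus "gorm.io/plugin/prometheus"
)

// QueueLength exposes a single gauge; update the gauge from your own code
// after the plugin has registered it.
type QueueLength struct {
	gauge prometheus.Gauge
}

func (c *QueueLength) Metrics(*gormprometheus.Prometheus) []prometheus.Collector {
	if c.gauge == nil {
		c.gauge = prometheus.NewGauge(prometheus.GaugeOpts{
			Name: "app_queue_length", // hypothetical metric name
		})
	}
	return []prometheus.Collector{c.gauge}
}
```

Pass `&QueueLength{}` in the `MetricsCollector` slice when calling `prometheus.New`.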
GORM provides an example of how to collect MySQL status as metrics, check out `prometheus.MySQL`:
&prometheus.MySQL{
Prefix: "gorm_status_",
// Metrics name prefix, default is `gorm_status_`
// For example, Threads_running's metric name is `gorm_status_Threads_running`
Interval: 100,
// Fetch interval, default use Prometheus's RefreshInterval
VariableNames: []string{"Threads_running"},
// Select variables from SHOW STATUS, if not set, uses all status variables
}
GORM provides optimizer/index/comment hints support: https://github.com/go-gorm/hints
import "gorm.io/hints"
db.Clauses(hints.New("hint")).Find(&User{})
// SELECT * /*+ hint */ FROM `users`
import "gorm.io/hints"
db.Clauses(hints.UseIndex("idx_user_name")).Find(&User{})
// SELECT * FROM `users` USE INDEX (`idx_user_name`)
db.Clauses(hints.ForceIndex("idx_user_name", "idx_user_id").ForJoin()).Find(&User{})
// SELECT * FROM `users` FORCE INDEX FOR JOIN (`idx_user_name`,`idx_user_id`)"
db.Clauses(
hints.ForceIndex("idx_user_name", "idx_user_id").ForOrderBy(),
hints.IgnoreIndex("idx_user_name").ForGroupBy(),
).Find(&User{})
// SELECT * FROM `users` FORCE INDEX FOR ORDER BY (`idx_user_name`,`idx_user_id`) IGNORE INDEX FOR GROUP BY (`idx_user_name`)"
import "gorm.io/hints"
db.Clauses(hints.Comment("select", "master")).Find(&User{})
// SELECT /*master*/ * FROM `users`;
db.Clauses(hints.CommentBefore("insert", "node2")).Create(&user)
// /*node2*/ INSERT INTO `users` ...;
db.Clauses(hints.CommentAfter("select", "node2")).Create(&user)
// /*node2*/ INSERT INTO `users` ...;
db.Clauses(hints.CommentAfter("where", "hint")).Find(&User{}, "id = ?", 1)
// SELECT * FROM `users` WHERE id = ? /* hint */
GORM allows creating database indexes with the tags `index` and `uniqueIndex`; those indexes will be created when using AutoMigrate or CreateTable with GORM.

GORM accepts lots of index settings, like `class`, `type`, `where`, `comment`, `expression`, `sort`, `collate` and `option`.

Check the following example for how to use them:
type User struct {
Name string `gorm:"index"`
Name2 string `gorm:"index:idx_name,unique"`
Name3 string `gorm:"index:,sort:desc,collate:utf8,type:btree,length:10,where:name3 != 'jinzhu'"`
Name4 string `gorm:"uniqueIndex"`
Age int64 `gorm:"index:,class:FULLTEXT,comment:hello \\, world,where:age > 10"`
Age2 int64 `gorm:"index:,expression:ABS(age)"`
}
// MySQL option
type User struct {
Name string `gorm:"index:,class:FULLTEXT,option:WITH PARSER ngram INVISIBLE"`
}
// PostgreSQL option
type User struct {
Name string `gorm:"index:,option:CONCURRENTLY"`
}
The tag `uniqueIndex` works similarly to `index`; it is equivalent to `index:,unique`.
type User struct {
Name1 string `gorm:"uniqueIndex"`
Name2 string `gorm:"uniqueIndex:idx_name,sort:desc"`
}
Using the same index name for two fields creates a composite index, for example:
type User struct {
Name string `gorm:"index:idx_member"`
Number string `gorm:"index:idx_member"`
}
The column order of a composite index has an impact on its performance, so it must be chosen carefully. You can specify the order with the `priority` option; the default priority value is `10`. If the priority values are the same, the order will be based on the model struct's field index.
type User struct {
Name string `gorm:"index:idx_member"`
Number string `gorm:"index:idx_member"`
}
// column order: name, number
type User struct {
Name string `gorm:"index:idx_member,priority:2"`
Number string `gorm:"index:idx_member,priority:1"`
}
// column order: number, name
type User struct {
Name string `gorm:"index:idx_member,priority:12"`
Number string `gorm:"index:idx_member"`
}
// column order: number, name
A field accepts multiple `index` and `uniqueIndex` tags, which will create multiple indexes on that field:
type UserIndex struct {
OID int64 `gorm:"index:idx_id;index:idx_oid,unique"`
MemberNumber string `gorm:"index:idx_id"`
}
GORM allows creating database constraints with tags; constraints will be created when using AutoMigrate or CreateTable with GORM.

Create CHECK constraints with the tag `check`:
type UserIndex struct {
Name string `gorm:"check:name_checker,name <> 'jinzhu'"`
Name2 string `gorm:"check:name <> 'jinzhu'"`
Name3 string `gorm:"check:,name <> 'jinzhu'"`
}
For index constraints, check out Database Indexes.
GORM will create foreign key constraints for associations; you can disable this feature during initialization:
db, err := gorm.Open(sqlite.Open("gorm.db"), &gorm.Config{
DisableForeignKeyConstraintWhenMigrating: true,
})
GORM allows you to set up FOREIGN KEY constraints' `OnDelete` and `OnUpdate` options with the tag `constraint`, for example:
type User struct {
gorm.Model
CompanyID int
Company Company `gorm:"constraint:OnUpdate:CASCADE,OnDelete:SET NULL;"`
CreditCard CreditCard `gorm:"constraint:OnUpdate:CASCADE,OnDelete:SET NULL;"`
}
type CreditCard struct {
gorm.Model
Number string
UserID uint
}
type Company struct {
ID int
Name string
}
Setting multiple fields as primary key creates a composite primary key, for example:
type Product struct {
ID string `gorm:"primaryKey"`
LanguageCode string `gorm:"primaryKey"`
Code string
Name string
}
Note: an integer `PrioritizedPrimaryField` enables `AutoIncrement` by default; to disable it, you need to turn off `autoIncrement` for the int fields:
type Product struct {
CategoryID uint64 `gorm:"primaryKey;autoIncrement:false"`
TypeID uint64 `gorm:"primaryKey;autoIncrement:false"`
}
GORM uses `database/sql`'s argument placeholders to construct the SQL statement, which will automatically escape arguments to avoid SQL injection.

NOTE: The SQL from the Logger is not fully escaped like the one executed, be careful when copying and executing it in an SQL console.

User input should only be used as an argument, for example:
userInput := "jinzhu;drop table users;"
// safe, will be escaped
db.Where("name = ?", userInput).First(&user)
// SQL injection
db.Where(fmt.Sprintf("name = %v", userInput)).First(&user)
// will be escaped
db.First(&user, "name = ?", userInput)
// SQL injection
db.First(&user, fmt.Sprintf("name = %v", userInput))
To support some features, some inputs are not escaped; be careful when using user input with these methods:
db.Select("name; drop table users;").First(&user)
db.Distinct("name; drop table users;").First(&user)
db.Model(&user).Pluck("name; drop table users;", &names)
db.Group("name; drop table users;").First(&user)
db.Group("name").Having("1 = 1;drop table users;").First(&user)
db.Raw("select name from users; drop table users;").First(&user)
db.Exec("select name from users; drop table users;")
db.Order("name; drop table users;").First(&user)
The general rule to avoid SQL injection is: don't trust user-submitted data. You can perform whitelist validation to test user input against an existing set of known, approved, and defined inputs, and when using user input, only use it as an argument.
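For instance, a minimal sketch of whitelist validation before passing user input to a method that does not escape its arguments (the column names here are illustrative):

```go
// userInput comes from an untrusted source, e.g. a query string parameter
allowedColumns := map[string]bool{"name": true, "created_at": true}

orderBy := "name" // default to a known, approved column
if allowedColumns[userInput] {
	orderBy = userInput
}
db.Order(orderBy).Find(&users)
```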
GORM provides a Config that can be used during initialization:
type Config struct {
SkipDefaultTransaction bool
NamingStrategy schema.Namer
Logger logger.Interface
NowFunc func() time.Time
DryRun bool
PrepareStmt bool
DisableNestedTransaction bool
AllowGlobalUpdate bool
DisableAutomaticPing bool
DisableForeignKeyConstraintWhenMigrating bool
}
GORM runs write (create/update/delete) operations inside a transaction to ensure data consistency; you can disable it during initialization if it is not required:
db, err := gorm.Open(sqlite.Open("gorm.db"), &gorm.Config{
SkipDefaultTransaction: true,
})
GORM allows users to change the naming conventions by overriding the default `NamingStrategy`, which needs to implement the interface `Namer`:
type Namer interface {
TableName(table string) string
SchemaName(table string) string
ColumnName(table, column string) string
JoinTableName(table string) string
RelationshipFKName(Relationship) string
CheckerName(table, column string) string
IndexName(table, column string) string
}
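A minimal sketch of a custom Namer, assuming you only want to special-case one table name; it embeds the default `schema.NamingStrategy` so the other methods are inherited (the `MyNamer` type and the legacy table name are illustrative):

```go
type MyNamer struct {
	schema.NamingStrategy
}

// TableName keeps one legacy table name as-is and falls back to the default
// strategy for everything else.
func (n MyNamer) TableName(table string) string {
	if table == "LegacyUser" {
		return "tbl_users_legacy"
	}
	return n.NamingStrategy.TableName(table)
}

db, err := gorm.Open(sqlite.Open("gorm.db"), &gorm.Config{
	NamingStrategy: MyNamer{},
})
```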
The default `NamingStrategy` also provides a few options, like:
db, err := gorm.Open(sqlite.Open("gorm.db"), &gorm.Config{
NamingStrategy: schema.NamingStrategy{
TablePrefix: "t_", // table name prefix, table for `User` would be `t_users`
SingularTable: true, // use singular table name, table for `User` would be `user` with this option enabled
NameReplacer: strings.NewReplacer("CID", "Cid"), // use name replacer to change struct/field name before convert it to db name
},
})
Allows changing GORM's default logger by overriding this option, refer to Logger for more details.

Change the function to be used when creating a new timestamp:
db, err := gorm.Open(sqlite.Open("gorm.db"), &gorm.Config{
NowFunc: func() time.Time {
return time.Now().Local()
},
})
Generate SQL without executing it; this can be used to prepare or test the generated SQL, refer to Session for details:
db, err := gorm.Open(sqlite.Open("gorm.db"), &gorm.Config{
DryRun: false,
})
`PrepareStmt` creates prepared statements when executing any SQL and caches them to speed up future calls, refer to Session for details:
db, err := gorm.Open(sqlite.Open("gorm.db"), &gorm.Config{
PrepareStmt: false,
})
When using the `Transaction` method inside a DB transaction, GORM will use `SavePoint(savedPointName)` and `RollbackTo(savedPointName)` to give you nested transaction support; you can disable it with the `DisableNestedTransaction` option, refer to Session for details.

`AllowGlobalUpdate` enables global update/delete, refer to Session for details.
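Both are plain `gorm.Config` fields; a minimal sketch of setting them at initialization:

```go
db, err := gorm.Open(sqlite.Open("gorm.db"), &gorm.Config{
	DisableNestedTransaction: true, // nested Transaction calls won't use SavePoint/RollbackTo
	AllowGlobalUpdate:        true, // allow update/delete without a WHERE condition
})
```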
GORM automatically pings the database after initialization to check database availability; disable it by setting this option to `true`:
db, err := gorm.Open(sqlite.Open("gorm.db"), &gorm.Config{
DisableAutomaticPing: true,
})
GORM creates database foreign key constraints automatically when using `AutoMigrate` or `CreateTable`; disable this by setting this option to `true`, refer to Migration for details:
db, err := gorm.Open(sqlite.Open("gorm.db"), &gorm.Config{
DisableForeignKeyConstraintWhenMigrating: true,
})
GORM itself is powered by `Callbacks`; it has callbacks for `Create`, `Query`, `Update`, `Delete`, `Row` and `Raw`, and you can fully customize GORM with them as you want.

Callbacks are registered into the global `*gorm.DB`, not at the session level; if you require a `*gorm.DB` with different callbacks, you need to initialize another `*gorm.DB`.

Register a callback into callbacks:
func cropImage(db *gorm.DB) {
if db.Statement.Schema != nil {
// crop image fields and upload them to CDN, dummy code
for _, field := range db.Statement.Schema.Fields {
switch db.Statement.ReflectValue.Kind() {
case reflect.Slice, reflect.Array:
for i := 0; i < db.Statement.ReflectValue.Len(); i++ {
// Get value from field
if fieldValue, isZero := field.ValueOf(db.Statement.ReflectValue.Index(i)); !isZero {
if crop, ok := fieldValue.(CropInterface); ok {
crop.Crop()
}
}
}
case reflect.Struct:
// Get value from field
if fieldValue, isZero := field.ValueOf(db.Statement.ReflectValue); !isZero {
if crop, ok := fieldValue.(CropInterface); ok {
crop.Crop()
}
}
// Set value to field
err := field.Set(db.Statement.ReflectValue, "newValue")
}
}
// All fields for current model
db.Statement.Schema.Fields
// All primary key fields for current model
db.Statement.Schema.PrimaryFields
// Prioritized primary key field: field with DB name `id` or the first defined primary key
db.Statement.Schema.PrioritizedPrimaryField
// All relationships for current model
db.Statement.Schema.Relationships
// Find field with field name or db name
field := db.Statement.Schema.LookUpField("Name")
// processing
}
}
db.Callback().Create().Register("crop_image", cropImage)
// register a callback for Create process
Delete a callback from callbacks
db.Callback().Create().Remove("gorm:create")
// delete callback `gorm:create` from Create callbacks
Replace a callback having the same name with the new one
db.Callback().Create().Replace("gorm:create", newCreateFunction)
// replace callback `gorm:create` with new function `newCreateFunction` for Create process
Register callbacks with orders
// before gorm:create
db.Callback().Create().Before("gorm:create").Register("update_created_at", updateCreated)
// after gorm:create
db.Callback().Create().After("gorm:create").Register("update_created_at", updateCreated)
// after gorm:query
db.Callback().Query().After("gorm:query").Register("my_plugin:after_query", afterQuery)
// after gorm:delete
db.Callback().Delete().After("gorm:delete").Register("my_plugin:after_delete", afterDelete)
// before gorm:update
db.Callback().Update().Before("gorm:update").Register("my_plugin:before_update", beforeUpdate)
// before gorm:create and after gorm:before_create
db.Callback().Create().Before("gorm:create").After("gorm:before_create").Register("my_plugin:before_create", beforeCreate)
// before any other callbacks
db.Callback().Create().Before("*").Register("update_created_at", updateCreated)
// after any other callbacks
db.Callback().Create().After("*").Register("update_created_at", updateCreated)
GORM has defined some callbacks to power current GORM features, check them out before starting your plugins
GORM provides a `Use` method to register plugins; the plugin needs to implement the `Plugin` interface:
type Plugin interface {
Name() string
Initialize(*gorm.DB) error
}
The `Initialize` method will be invoked when registering the plugin into GORM for the first time, and GORM will save the registered plugins; access them like:
db.Config.Plugins[pluginName]
Check out Prometheus as an example.
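A minimal sketch of a plugin that just registers a callback when it is loaded (the `AuditPlugin` name and its callback are illustrative):

```go
type AuditPlugin struct{}

func (AuditPlugin) Name() string { return "audit" }

func (AuditPlugin) Initialize(db *gorm.DB) error {
	// set up resources, register callbacks, etc.
	return db.Callback().Create().After("gorm:create").
		Register("audit:after_create", func(db *gorm.DB) {
			// record the executed statement for auditing, dummy code
		})
}

db.Use(AuditPlugin{})
// later: db.Config.Plugins["audit"]
```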
GORM provides official support for `sqlite`, `mysql`, `postgres` and `sqlserver`. Some databases may be compatible with the `mysql` or `postgres` dialect, in which case you could just use the dialect for those databases.
For others, you can create a new driver; it needs to implement the `Dialector` interface:
type Dialector interface {
Name() string
Initialize(*DB) error
Migrator(db *DB) Migrator
DataTypeOf(*schema.Field) string
DefaultValueOf(*schema.Field) clause.Expression
BindVarTo(writer clause.Writer, stmt *Statement, v interface{})
QuoteTo(clause.Writer, string)
Explain(sql string, vars ...interface{}) string
}
Check out the MySQL Driver as an example.
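For a MySQL-compatible database, a minimal sketch is to wrap the existing dialector and only override what differs; the `mydb` package name is an assumption, and a real driver usually implements the full interface itself:

```go
package mydb

import (
	"gorm.io/driver/mysql"
	"gorm.io/gorm"
)

// Dialector embeds the mysql dialector, so Initialize, Migrator, DataTypeOf,
// DefaultValueOf, BindVarTo, QuoteTo and Explain are inherited from it.
type Dialector struct {
	gorm.Dialector
}

func Open(dsn string) gorm.Dialector {
	return Dialector{mysql.Open(dsn)}
}

// Name is the only method overridden here.
func (Dialector) Name() string { return "mydb" }
```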
- What is the difference between `Find` and `Scan`? Can I replace `Scan` with `Find`? (gorm issue)
- What is the syntax for a SELECT IN statement for SQLite? (stackoverflow)
- SQLite FULL OUTER JOIN Emulation (sqlitetutorial)