A lightweight Go package for PostgreSQL query insights and performance monitoring. Inspired by PlanetScale's Insights feature.
- Query Performance Tracking: Automatically tracks query execution time, rows affected, and errors
- Query Normalization: Groups similar queries by replacing literals with placeholders
- Latency Percentiles: Calculates P50, P95, P99 latency metrics
- Multiple Export Formats: JSON, Prometheus metrics, or custom callbacks
- Pluggable Adapters: Works with pgx, sqlx, and GORM
- HTTP Handler: Built-in HTTP endpoints for exposing metrics
- Minimal Overhead: Designed for production use with configurable sampling
Install:

```bash
go get github.com/thealish/pginsights
```

Usage with pgx:

```go
package main

import (
	"context"
	"fmt"
	"os"

	"github.com/jackc/pgx/v5"

	"github.com/thealish/pginsights"
	"github.com/thealish/pginsights/adapters/pgxadapter"
)

func main() {
	collector := pginsights.NewCollector(pginsights.DefaultConfig())

	config, _ := pgx.ParseConfig(os.Getenv("DATABASE_URL"))
	config.Tracer = pgxadapter.New(collector)

	conn, _ := pgx.ConnectConfig(context.Background(), config)
	defer conn.Close(context.Background())

	// Your queries are now tracked
	conn.Exec(context.Background(), "SELECT * FROM users WHERE id = $1", 1)

	// Get insights
	snapshot := collector.Snapshot()
	fmt.Printf("Total queries: %d\n", snapshot.TotalQueries)
	fmt.Printf("Avg duration: %v\n", snapshot.AvgDuration)
}
```
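The same tracer can also be attached to a connection pool, since `pgxadapter.New` returns the tracer assigned to `pgx.ConnConfig.Tracer` above. A minimal sketch, assuming pgx v5's `github.com/jackc/pgx/v5/pgxpool` package and the `collector` from the example above:

```go
poolConfig, err := pgxpool.ParseConfig(os.Getenv("DATABASE_URL"))
if err != nil {
	panic(err)
}
// Every connection created by the pool shares this config,
// so all pooled queries are traced.
poolConfig.ConnConfig.Tracer = pgxadapter.New(collector)

pool, err := pgxpool.NewWithConfig(context.Background(), poolConfig)
if err != nil {
	panic(err)
}
defer pool.Close()
```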
Usage with sqlx:

```go
package main

import (
	"context"
	"fmt"

	_ "github.com/lib/pq"

	"github.com/thealish/pginsights"
	"github.com/thealish/pginsights/adapters/sqlxadapter"
)

func main() {
	collector := pginsights.NewCollector(pginsights.DefaultConfig())

	db, _ := sqlxadapter.Open("postgres", "postgres://localhost/mydb", collector)
	defer db.Close()

	// Your queries are now tracked
	db.ExecContext(context.Background(), "INSERT INTO users (name) VALUES ($1)", "Alice")

	// Get insights
	for _, q := range collector.TopQueries(10, pginsights.SortByTotalDuration) {
		fmt.Printf("Query: %s, Calls: %d, Avg: %v\n", q.NormalizedQuery, q.TotalCalls, q.AvgDuration)
	}
}
```

Usage with GORM:

```go
package main
import (
	"fmt"

	"gorm.io/driver/postgres"
	"gorm.io/gorm"

	"github.com/thealish/pginsights"
	"github.com/thealish/pginsights/adapters/gormadapter"
)

// User is a minimal model for this example.
type User struct {
	ID   uint
	Name string
}

func main() {
	collector := pginsights.NewCollector(pginsights.DefaultConfig())

	db, _ := gorm.Open(postgres.Open("postgres://localhost/mydb"), &gorm.Config{})
	db.Use(gormadapter.New(collector))

	// Your queries are now tracked
	var users []User
	db.Find(&users)

	// Get insights
	snapshot := collector.Snapshot()
	fmt.Printf("Queries per second: %.2f\n", snapshot.QueriesPerSec)
}
```

Expose metrics via HTTP:

```go
package main

import (
	"net/http"

	"github.com/thealish/pginsights"
)

func main() {
	collector := pginsights.NewCollector(pginsights.DefaultConfig())

	// ... set up your database with an adapter ...

	http.Handle("/insights/", pginsights.NewHandler(collector))
	http.ListenAndServe(":8080", nil)
}
```

Available endpoints:
- `GET /insights/` - Full snapshot with all metrics
- `GET /insights/top?n=10&sort=avg_duration` - Top N queries (sort: `avg_duration`, `calls`, `errors`, `rows`)
- `GET /insights/metrics` - Prometheus-compatible metrics
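To inspect the output quickly, fetch one of the endpoints from the running service. A minimal sketch, assuming the handler above is listening on `:8080` and returns JSON:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Fetch the top queries, sorted by average duration, from the insights handler.
	resp, err := http.Get("http://localhost:8080/insights/top?n=10&sort=avg_duration")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Print the raw response body.
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body))
}
```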
Export a snapshot as JSON:

```go
snapshot := collector.Snapshot()

exporter := pginsights.NewJSONExporter(os.Stdout, true)
exporter.Export(snapshot)
```

Write a snapshot to a file:

```go
exporter := pginsights.NewFileExporter("/tmp/insights.json", true)
exporter.Export(collector.Snapshot())
```

Export Prometheus-compatible metrics:

```go
exporter := pginsights.NewPrometheusExporter(os.Stdout, "myapp")
exporter.Export(collector.Snapshot())
```

Run an exporter periodically:

```go
exporter := pginsights.NewFileExporter("/tmp/insights.json", true)

periodic := pginsights.NewPeriodicExporter(collector, exporter, 5*time.Minute)
periodic.Start()
defer periodic.Stop()
```

Configure the collector:

```go
config := pginsights.Config{
	MaxSamples:      1000,           // Max samples for percentile calculation
	RetentionPeriod: 24 * time.Hour, // Data retention period
	EnableNormalize: true,           // Normalize queries for grouping
	SampleRate:      1.0,            // Sample rate (1.0 = 100%)
}

collector := pginsights.NewCollector(config)
```
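For high-throughput services, collection overhead can be reduced by sampling only a fraction of queries. A minimal sketch, assuming `DefaultConfig()` returns a config whose fields can be adjusted before it is passed to `NewCollector`:

```go
cfg := pginsights.DefaultConfig()
// Track roughly 10% of executed queries to keep overhead low.
cfg.SampleRate = 0.1
collector := pginsights.NewCollector(cfg)
```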
Each query provides the following metrics:

| Metric | Description |
|---|---|
| `TotalCalls` | Number of times the query was executed |
| `TotalDuration` | Cumulative execution time |
| `AvgDuration` | Average execution time |
| `MinDuration` | Minimum execution time |
| `MaxDuration` | Maximum execution time |
| `P50Duration` | 50th percentile latency |
| `P95Duration` | 95th percentile latency |
| `P99Duration` | 99th percentile latency |
| `TotalRowsRead` | Total rows affected/read |
| `AvgRowsRead` | Average rows per query |
| `ErrorCount` | Number of errors |
| `ErrorRate` | Error rate (0-1) |
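These metrics are available on the entries returned by `TopQueries` (see the sqlx example above). A minimal sketch that flags query groups with slow tails or elevated error rates, assuming the duration fields are `time.Duration` values and `ErrorRate` is a float; the thresholds are arbitrary:

```go
for _, q := range collector.TopQueries(20, pginsights.SortByTotalDuration) {
	// Flag query groups whose tail latency or error rate looks unhealthy.
	if q.P95Duration > 250*time.Millisecond || q.ErrorRate > 0.01 {
		fmt.Printf("%s: p95=%v errors=%.2f%%\n", q.NormalizedQuery, q.P95Duration, q.ErrorRate*100)
	}
}
```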
Queries are normalized by replacing literals with placeholders:
```sql
-- Original
SELECT * FROM users WHERE id = 123 AND name = 'Alice'

-- Normalized
SELECT * FROM USERS WHERE ID = ? AND NAME = '?'
```

This allows grouping of similar queries for aggregated statistics.
MIT License