Commit

Merge branch 'master' into patch-1
NikolayS authored Nov 6, 2023
2 parents 8b3c160 + 4f85773 commit a65a996
Showing 19 changed files with 762 additions and 268 deletions.
10 changes: 5 additions & 5 deletions .github/workflows/default.yml
@@ -10,10 +10,10 @@ on:
jobs:
test:
runs-on: ubuntu-latest
container: golang:1.16
container: golang:1.19
services:
postgres:
image: postgres:13-alpine
image: postgres:15-alpine
env:
POSTGRES_DB: noisia_fixtures
POSTGRES_USER: noisia
@@ -27,9 +27,9 @@ jobs:
steps:
- name: Set up golangci-lint
run: |
wget -q https://github.com/golangci/golangci-lint/releases/download/v1.33.0/golangci-lint-1.33.0-linux-amd64.tar.gz
tar xvzf golangci-lint-1.33.0-linux-amd64.tar.gz
mv golangci-lint-1.33.0-linux-amd64/golangci-lint /usr/local/bin
wget -q https://github.com/golangci/golangci-lint/releases/download/v1.50.0/golangci-lint-1.50.0-linux-amd64.tar.gz
tar xvzf golangci-lint-1.50.0-linux-amd64.tar.gz
mv golangci-lint-1.50.0-linux-amd64/golangci-lint /usr/local/bin
- name: Checkout code
uses: actions/checkout@v2
- name: Run lint
10 changes: 5 additions & 5 deletions .github/workflows/release.yml
@@ -8,10 +8,10 @@ on:
jobs:
test:
runs-on: ubuntu-latest
container: golang:1.16
container: golang:1.19
services:
postgres:
image: postgres:13-alpine
image: postgres:15-alpine
env:
POSTGRES_DB: noisia_fixtures
POSTGRES_USER: noisia
@@ -25,9 +25,9 @@ jobs:
steps:
- name: Set up golangci-lint
run: |
wget -q https://github.com/golangci/golangci-lint/releases/download/v1.33.0/golangci-lint-1.33.0-linux-amd64.tar.gz
tar xvzf golangci-lint-1.33.0-linux-amd64.tar.gz
mv golangci-lint-1.33.0-linux-amd64/golangci-lint /usr/local/bin
wget -q https://github.com/golangci/golangci-lint/releases/download/v1.50.0/golangci-lint-1.50.0-linux-amd64.tar.gz
tar xvzf golangci-lint-1.50.0-linux-amd64.tar.gz
mv golangci-lint-1.50.0-linux-amd64/golangci-lint /usr/local/bin
- name: Checkout code
uses: actions/checkout@v2
- name: Run lint
67 changes: 42 additions & 25 deletions README.md
@@ -7,11 +7,12 @@
#### Supported workloads:
- `idle transactions` - active transactions on hot-write tables that do nothing during their lifetime.
- `rollbacks` - fake invalid queries that generate errors and increase rollbacks counter.
- `waiting transactions` - transactions that lock hot-write tables and then idle, that lead to stuck other transactions.
- `deadlocks` - simultaneous transactions where each hold locks that the other transactions want.
- `waiting transactions` - transactions that lock hot-write tables and then idle, leading to other transactions getting stuck.
- `deadlocks` - simultaneous transactions where each holds locks that the other transactions want.
- `temporary files` - queries that produce on-disk temporary files due to lack of `work_mem`.
- `terminate backends` - terminate random backends (or queries) using `pg_terminate_backend()`, `pg_cancel_backend()`.
- `failed connections` - exhaust all available connections (other clients unable to connect to Postgres)
- `failed connections` - exhaust all available connections (other clients unable to connect to Postgres).
- `fork connections` - execute a single short query in a dedicated connection (leads to excessive forking of Postgres backends).
- ...see built-in help for more runtime options.
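
For illustration, the first workload boils down to something like the following at the driver level. This is only a sketch, not noisia's own code (the table name and connection string are made up), but it shows why an idle transaction hurts: it takes row locks and a snapshot on a hot-write table and then simply does nothing.

```go
package main

import (
	"context"
	"database/sql"
	"log"
	"time"

	_ "github.com/jackc/pgx/v4/stdlib" // any Postgres driver would do here
)

func main() {
	db, err := sql.Open("pgx", "host=127.0.0.1 dbname=example") // hypothetical conninfo
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	ctx := context.Background()

	tx, err := db.BeginTx(ctx, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Touch a hot-write table ("app_orders" is hypothetical) so the transaction
	// acquires row locks and holds back vacuum with its snapshot.
	if _, err := tx.ExecContext(ctx, "UPDATE app_orders SET updated_at = now() WHERE id = 1"); err != nil {
		log.Fatal(err)
	}

	// ...and then do nothing: writers queue up behind the row lock and dead rows
	// cannot be cleaned up until the transaction ends.
	time.Sleep(30 * time.Second)

	_ = tx.Rollback()
}
```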

#### Disclaimer
@@ -32,43 +33,59 @@ docker run --rm -ti lesovsky/noisia:latest noisia --help

#### Using in your own code
You can import `noisia` and use the necessary workloads in your code. Always use contexts to avoid an infinite run. See the tiny example below:

```go
package main

import (
"context"
"fmt"
"github.com/lesovsky/noisia/waitxacts"
"time"
"context"
"fmt"
"github.com/lesovsky/noisia/waitxacts"
"github.com/rs/zerolog"
"log"
"os"
"time"
)

func main() {
config := &waitxacts.Config{
PostgresConninfo: "host=127.0.0.1",
Jobs: 2,
}

ctx, cancel := context.WithTimeout(context.Background(), 4*time.Second)
defer cancel()

w := waitxacts.NewWorkload(config)
if err := w.Run(ctx); err != nil {
fmt.Println(err)
}
config := waitxacts.Config{
Conninfo: "host=127.0.0.1",
Jobs: 2,
LocktimeMin: 5*time.Second,
LocktimeMax: 20*time.Second,
}

logger := zerolog.New(zerolog.ConsoleWriter{Out: os.Stdout, TimeFormat: time.RFC3339}).Level(zerolog.InfoLevel).With().Timestamp().Logger()

ctx, cancel := context.WithTimeout(context.Background(), 4*time.Second)
defer cancel()

w, err := waitxacts.NewWorkload(config, logger)
if err != nil {
log.Panicln(err)
}

err = w.Run(ctx)
if err != nil {
fmt.Println(err)
}
}
```
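
Other workload packages follow the same `Config` / `NewWorkload` / `Run` pattern (see `cmd/app.go` later in this commit). Below is a rough sketch, assuming `rollbacks.NewWorkload` accepts the same logger and that its config uses the `Conninfo`, `Jobs` and `Rate` fields shown there, of driving two workloads concurrently from one program:

```go
package main

import (
	"context"
	"fmt"
	"os"
	"sync"
	"time"

	"github.com/lesovsky/noisia/rollbacks"
	"github.com/lesovsky/noisia/waitxacts"
	"github.com/rs/zerolog"
)

func main() {
	logger := zerolog.New(zerolog.ConsoleWriter{Out: os.Stdout, TimeFormat: time.RFC3339}).Level(zerolog.InfoLevel).With().Timestamp().Logger()

	// Bound the whole run with a single context; both workloads stop when it expires.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	wx, err := waitxacts.NewWorkload(waitxacts.Config{
		Conninfo:    "host=127.0.0.1",
		Jobs:        2,
		LocktimeMin: 5 * time.Second,
		LocktimeMax: 20 * time.Second,
	}, logger)
	if err != nil {
		fmt.Println(err)
		return
	}

	rb, err := rollbacks.NewWorkload(rollbacks.Config{
		Conninfo: "host=127.0.0.1",
		Jobs:     2,
		Rate:     1, // rollbacks per second, per worker
	}, logger)
	if err != nil {
		fmt.Println(err)
		return
	}

	var wg sync.WaitGroup
	wg.Add(2)
	go func() {
		defer wg.Done()
		if err := wx.Run(ctx); err != nil {
			fmt.Println(err)
		}
	}()
	go func() {
		defer wg.Done()
		if err := rb.Run(ctx); err != nil {
			fmt.Println(err)
		}
	}()
	wg.Wait()
}
```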

#### Workload impact

Running workloads could impact on already running workload produced by other applications. This impact might be expressed as performance degradation, transactions stuck, canceled queries, disconnected clients, etc.
Running workloads could impact already running workloads produced by other applications. This impact might be expressed as performance degradation, transactions getting stuck, cancelled queries, disconnected clients, etc.

| Workload | Impact? |
| :--- | :---: |
| deadlocks | No |
| failconns | **Yes** |
| idlexacts | **Yes** |
| failconns | **Yes**: exhausts the `max_connections` limit; this leaves other clients unable to connect to Postgres |
| forkconns | **Yes**: excessive creation of Postgres child processes; might lead to `max_connections` exhaustion |
| idlexacts | **Yes**: might lead to table and index bloat |
| rollbacks | No |
| tempfiles | No |
| terminate | **Yes** |
| waitxacts | **Yes** |
| tempfiles | **Yes**: might increase storage utilization and degrade storage performance |
| terminate | **Yes**: already established database connections could be terminated accidentally |
| waitxacts | **Yes**: locks hot-write tables; this blocks concurrently executed queries |
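
To see some of this impact while a workload is running, one option (again only a sketch, not part of noisia) is to ask `pg_stat_activity` which sessions are currently blocked by somebody else's locks:

```go
package main

import (
	"context"
	"database/sql"
	"fmt"
	"log"

	_ "github.com/jackc/pgx/v4/stdlib"
)

func main() {
	db, err := sql.Open("pgx", "host=127.0.0.1") // hypothetical conninfo
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Sessions blocked by somebody else's locks, e.g. while waitxacts or idlexacts run.
	rows, err := db.QueryContext(context.Background(), `
		SELECT pid, pg_blocking_pids(pid)::text, coalesce(state, ''), coalesce(left(query, 60), '')
		FROM pg_stat_activity
		WHERE cardinality(pg_blocking_pids(pid)) > 0`)
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	for rows.Next() {
		var pid int
		var blockedBy, state, query string
		if err := rows.Scan(&pid, &blockedBy, &state, &query); err != nil {
			log.Fatal(err)
		}
		fmt.Printf("pid=%d blocked_by=%s state=%q query=%q\n", pid, blockedBy, state, query)
	}
	if err := rows.Err(); err != nil {
		log.Fatal(err)
	}
}
```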

#### Contribution
- PRs are welcome.
48 changes: 37 additions & 11 deletions cmd/app.go
@@ -4,6 +4,7 @@ import (
"context"
"github.com/lesovsky/noisia/deadlocks"
"github.com/lesovsky/noisia/failconns"
"github.com/lesovsky/noisia/forkconns"
"github.com/lesovsky/noisia/idlexacts"
"github.com/lesovsky/noisia/log"
"github.com/lesovsky/noisia/rollbacks"
@@ -23,16 +24,14 @@ type config struct {
idleXactsNaptimeMin time.Duration
idleXactsNaptimeMax time.Duration
rollbacks bool
rollbacksMinRate uint16
rollbacksMaxRate uint16
rollbacksRate float64
waitXacts bool
waitXactsFixture bool
waitXactsLocktimeMin time.Duration
waitXactsLocktimeMax time.Duration
deadlocks bool
tempFiles bool
tempFilesRate uint16
tempFilesScaleFactor uint16
tempFilesRate float64
terminate bool
terminateInterval time.Duration
terminateRate uint16
@@ -43,6 +42,8 @@ type config struct {
terminateDatabase string
terminateAppName string
failconns bool
forkconns bool
forkconnsRate uint16
}

func runApplication(ctx context.Context, c config, log log.Logger) error {
@@ -64,7 +65,7 @@ func runApplication(ctx context.Context, c config, log log.Logger) error {
}

if c.rollbacks {
log.Info("start rollbacks workload")
log.Infof("start rollbacks workload for %s", c.duration)
wg.Add(1)
go func() {
err := startRollbacksWorkload(ctx, c, log)
@@ -135,6 +136,18 @@
}()
}

if c.forkconns {
log.Info("start fork connections workload")
wg.Add(1)
go func() {
err := startForkconnsWorkload(ctx, c, log)
if err != nil {
log.Errorf("fork connections workload failed: %s", err)
}
wg.Done()
}()
}

wg.Wait()

return nil
@@ -162,8 +175,7 @@ func startRollbacksWorkload(ctx context.Context, c config, logger log.Logger) er
rollbacks.Config{
Conninfo: c.postgresConninfo,
Jobs: c.jobs,
MinRate: c.rollbacksMinRate,
MaxRate: c.rollbacksMaxRate,
Rate: c.rollbacksRate,
}, logger,
)
if err != nil {
@@ -207,10 +219,9 @@ func startDeadlocksWorkload(ctx context.Context, c config, logger log.Logger) er
func startTempFilesWorkload(ctx context.Context, c config, logger log.Logger) error {
workload, err := tempfiles.NewWorkload(
tempfiles.Config{
Conninfo: c.postgresConninfo,
Jobs: c.jobs,
Rate: c.tempFilesRate,
ScaleFactor: c.tempFilesScaleFactor,
Conninfo: c.postgresConninfo,
Jobs: c.jobs,
Rate: c.tempFilesRate,
}, logger,
)
if err != nil {
@@ -253,3 +264,18 @@ func startFailconnsWorkload(ctx context.Context, c config, logger log.Logger) er

return workload.Run(ctx)
}

func startForkconnsWorkload(ctx context.Context, c config, logger log.Logger) error {
workload, err := forkconns.NewWorkload(
forkconns.Config{
Conninfo: c.postgresConninfo,
Rate: c.forkconnsRate,
Jobs: c.jobs,
}, logger,
)
if err != nil {
return err
}

return workload.Run(ctx)
}
16 changes: 8 additions & 8 deletions cmd/main.go
@@ -26,16 +26,14 @@ func main() {
idleXactsNaptimeMin = kingpin.Flag("idle-xacts.naptime-min", "Min transactions naptime").Default("5s").Envar("NOISIA_IDLE_XACTS_NAPTIME_MIN").Duration()
idleXactsNaptimeMax = kingpin.Flag("idle-xacts.naptime-max", "Max transactions naptime").Default("20s").Envar("NOISIA_IDLE_XACTS_NAPTIME_MAX").Duration()
rollbacks = kingpin.Flag("rollbacks", "Run rollbacks workload").Default("false").Envar("NOISIA_ROLLBACKS").Bool()
rollbacksMinRate = kingpin.Flag("rollbacks.min-rate", "Approximate minimum number of rollbacks per second (per worker)").Default("10").Envar("NOISIA_ROLLBACKS_MIN_RATE").Uint16()
rollbacksMaxRate = kingpin.Flag("rollbacks.max-rate", "Approximate maximum number of rollbacks per second (per worker)").Default("10").Envar("NOISIA_ROLLBACKS_MAX_RATE").Uint16()
rollbacksRate = kingpin.Flag("rollbacks.rate", "Rollbacks rate per second (per worker)").Default("1").Envar("NOISIA_ROLLBACKS_RATE").Float64()
waitXacts = kingpin.Flag("wait-xacts", "Run waiting transactions workload").Default("false").Envar("NOISIA_IDLE_XACTS").Bool()
waitXactsFixture = kingpin.Flag("wait-xacts.fixture", "Run workload using fixture table").Default("false").Envar("NOISIA_WAIT_XACTS_FIXTURE").Bool()
waitXactsLocktimeMin = kingpin.Flag("wait-xacts.locktime-min", "Min transactions locking time").Default("5s").Envar("NOISIA_WAIT_XACTS_LOCKTIME_MIN").Duration()
waitXactsLocktimeMax = kingpin.Flag("wait-xacts.locktime-max", "Max transactions locking time").Default("20s").Envar("NOISIA_WAIT_XACTS_LOCKTIME_MAX").Duration()
deadlocks = kingpin.Flag("deadlocks", "Run deadlocks workload").Default("false").Envar("NOISIA_DEADLOCKS").Bool()
tempFiles = kingpin.Flag("temp-files", "Run temporary files workload").Default("false").Envar("NOISIA_TEMP_FILES").Bool()
tempFilesRate = kingpin.Flag("temp-files.rate", "Number of queries per second (per worker)").Default("10").Envar("NOISIA_TEMP_FILES_RATE").Uint16()
tempFilesScaleFactor = kingpin.Flag("temp-files.scale-factor", "Test data multiplier, 1 = 1000 rows").Default("10").Envar("NOISIA_TEMP_FILES_SCALE_FACTOR").Uint16()
tempFiles = kingpin.Flag("tempfiles", "Run temporary files workload").Default("false").Envar("NOISIA_TEMP_FILES").Bool()
tempFilesRate = kingpin.Flag("tempfiles.rate", "Number of queries per second (per worker)").Default("1").Envar("NOISIA_TEMP_FILES_RATE").Float64()
terminate = kingpin.Flag("terminate", "Run terminate workload").Default("false").Envar("NOISIA_TERMINATE").Bool()
terminateRate = kingpin.Flag("terminate.rate", "Number of backends/queries terminate per interval").Default("1").Envar("NOISIA_TERMINATE_RATE").Uint16()
terminateInterval = kingpin.Flag("terminate.interval", "Time interval of single round of termination").Default("1s").Envar("NOISIA_TERMINATE_INTERVAL").Duration()
@@ -46,6 +44,8 @@ func main() {
terminateDatabase = kingpin.Flag("terminate.database", "Terminate backends connected to specific database").Default("").Envar("NOISIA_TERMINATE_DATABASE").String()
terminateAppName = kingpin.Flag("terminate.appname", "Terminate backends created from specific applications").Default("").Envar("NOISIA_TERMINATE_APPNAME").String()
failconns = kingpin.Flag("failconns", "Run connections exhaustion workload").Default("false").Envar("NOISIA_FAILCONNS").Bool()
forkconns = kingpin.Flag("forkconns", "Run queries in dedicated connections").Default("false").Envar("NOISIA_FORKCONNS").Bool()
forkconnsRate = kingpin.Flag("forkconns.rate", "Number of connections made per second").Default("1").Envar("NOISIA_FORKCONNS_RATE").Uint16()
)
kingpin.Parse()

@@ -65,16 +65,14 @@
idleXactsNaptimeMin: *idleXactsNaptimeMin,
idleXactsNaptimeMax: *idleXactsNaptimeMax,
rollbacks: *rollbacks,
rollbacksMinRate: *rollbacksMinRate,
rollbacksMaxRate: *rollbacksMaxRate,
rollbacksRate: *rollbacksRate,
waitXacts: *waitXacts,
waitXactsFixture: *waitXactsFixture,
waitXactsLocktimeMin: *waitXactsLocktimeMin,
waitXactsLocktimeMax: *waitXactsLocktimeMax,
deadlocks: *deadlocks,
tempFiles: *tempFiles,
tempFilesRate: *tempFilesRate,
tempFilesScaleFactor: *tempFilesScaleFactor,
terminate: *terminate,
terminateRate: *terminateRate,
terminateInterval: *terminateInterval,
Expand All @@ -85,6 +83,8 @@ func main() {
terminateDatabase: *terminateDatabase,
terminateAppName: *terminateAppName,
failconns: *failconns,
forkconns: *forkconns,
forkconnsRate: *forkconnsRate,
}

ctx, cancel := context.WithCancel(context.Background())
26 changes: 25 additions & 1 deletion deadlocks/deadlocks.go
@@ -1,3 +1,22 @@
// Copyright 2021 The Noisia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package deadlocks implements a workload which creates deadlocks between
// several concurrent transactions, which finally forces Postgres to resolve
// the deadlock and terminate one of its participants.
//
// Before starting the workload, some preparation has to be done - a special
// working table has to be created. When the workload is finished this table should
// be dropped. For more info see the prepare and cleanup methods.
// Once the working table is created, the workload is allowed to start. The necessary
// number of workers is started (according to Config.Jobs). Each worker calls
// a deadlock routine in a separate goroutine. The deadlock routine inserts two unique
// rows into the working table and then starts two transactions which try to make a
// cross-update of these rows. Obviously, this update fails with a deadlock, which
// forces Postgres to resolve it. Postgres resolves the deadlock by terminating a
// single participant of the deadlock. As a result, the surviving transaction
// can continue its work and return.
package deadlocks

import (
@@ -13,7 +32,7 @@ import (

// Config defines configuration settings for deadlocks workload.
type Config struct {
// Conninfo defines connections string used for connecting to Postgres.
// Conninfo defines connection string used for connecting to Postgres.
Conninfo string
// Jobs defines how many workers should be created for producing deadlocks.
Jobs uint16
@@ -89,6 +108,7 @@ func (w *workload) Run(ctx context.Context) error {
}
}

// prepare method creates the working table required for the deadlocks workload.
func (w *workload) prepare(ctx context.Context) error {
_, _, err := w.pool.Exec(ctx, "CREATE TABLE IF NOT EXISTS _noisia_deadlocks_workload (id bigint, payload text)")
if err != nil {
@@ -97,6 +117,7 @@ func (w *workload) prepare(ctx context.Context) error {
return nil
}

// cleanup method drops the working table after the workload has finished.
func (w *workload) cleanup() error {
_, _, err := w.pool.Exec(context.Background(), "DROP TABLE IF EXISTS _noisia_deadlocks_workload")
if err != nil {
@@ -105,6 +126,8 @@ func (w *workload) cleanup() error {
return nil
}

// executeDeadlock makes two database connections, inserts the necessary rows into the working
// table and executes transactions which update the rows and collide in a deadlock.
func executeDeadlock(ctx context.Context, log log.Logger, conninfo string) error {
conn1, err := db.Connect(ctx, conninfo)
if err != nil {
@@ -156,6 +179,7 @@ func executeDeadlock(ctx context.Context, log log.Logger, conninfo string) error
return nil
}

// runUpdateXact receives row IDs and tries to update these rows inside the transaction.
func runUpdateXact(ctx context.Context, conn db.Conn, id1 int, id2 int) error {
tx, err := conn.Begin(ctx)
if err != nil {
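
The bodies of `executeDeadlock` and `runUpdateXact` are collapsed in the diff above, but the cross-update described in the package comment can be sketched with plain `database/sql` roughly as follows. This is an illustration of the idea, not the package's actual code; it assumes the two rows already exist in the `_noisia_deadlocks_workload` table created by `prepare`.

```go
package main

import (
	"context"
	"database/sql"
	"log"

	_ "github.com/jackc/pgx/v4/stdlib" // any Postgres driver works for this sketch
)

// crossUpdate makes two transactions update the same pair of rows in opposite
// order, so Postgres detects a deadlock and aborts one of them.
func crossUpdate(ctx context.Context, db *sql.DB, id1, id2 int64) {
	tx1, err := db.BeginTx(ctx, nil)
	if err != nil {
		log.Fatal(err)
	}
	tx2, err := db.BeginTx(ctx, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Each transaction locks "its own" row first.
	const q = "UPDATE _noisia_deadlocks_workload SET payload = md5(random()::text) WHERE id = $1"
	if _, err := tx1.ExecContext(ctx, q, id1); err != nil {
		log.Fatal(err)
	}
	if _, err := tx2.ExecContext(ctx, q, id2); err != nil {
		log.Fatal(err)
	}

	// Now each transaction tries to update the row held by the other one. Both block,
	// Postgres detects the cycle after deadlock_timeout and terminates one participant;
	// the survivor's update then succeeds.
	errc := make(chan error, 2)
	go func() { _, err := tx1.ExecContext(ctx, q, id2); errc <- err }()
	go func() { _, err := tx2.ExecContext(ctx, q, id1); errc <- err }()
	err1, err2 := <-errc, <-errc
	log.Printf("participant 1: %v, participant 2: %v", err1, err2)

	// One of the transactions is already aborted by the server; rolling back both is safe.
	_ = tx1.Rollback()
	_ = tx2.Rollback()
}

func main() {
	db, err := sql.Open("pgx", "host=127.0.0.1") // hypothetical conninfo
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Assumes rows with id=1 and id=2 already exist in _noisia_deadlocks_workload.
	crossUpdate(context.Background(), db, 1, 2)
}
```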
(Diffs for the remaining changed files are not shown.)
