Refactor stats to use atomics (#375)
* Refactor stats to use atomics

When we are dealing with a high number of connections, generated
stats cannot be consumed fast enough by the stats collector loop.
This makes the stats subsystem inconsistent, and a lot of warning
messages are thrown due to unregistered servers/clients.

This change refactors the stats subsystem so it uses atomics:

- Counters are now handled using u64 atomics.
- The event system is dropped; averages are calculated by a loop
  that runs every 15 seconds.
- Instead of snapshots being generated every second, we keep track of servers/clients
  that have registered. Each pool/server/client has its own instance of the counter and
  updates it directly, instead of adding an event that gets processed later (see the
  sketch after this list).

* Manually implement Hash/Eq in `config::Address`, ignoring stats (sketched below)

* Add tests for client connection counters

* Allow connecting to dockerized dev pgcat from the host

* stats: Decrease cl_idle when idle socket disconnects
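As referenced above, here is a minimal sketch of the per-entity counter approach the commit describes; the field and method names are illustrative, not the actual definitions in `src/stats`. Each client/server/pool owns its counters as plain atomics shared via `Arc` and bumps them in place, so no event channel is needed between the connection task and the reporter.

```rust
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;

// Hypothetical counter block; the real ClientStats in src/stats has more fields.
#[derive(Default, Debug)]
pub struct ClientStats {
    pub transaction_count: AtomicU64,
    pub query_count: AtomicU64,
    pub error_count: AtomicU64,
}

impl ClientStats {
    // Each client bumps its own counters directly; Relaxed ordering is enough
    // because the counters are independent and only read for reporting.
    pub fn query(&self) {
        self.query_count.fetch_add(1, Ordering::Relaxed);
    }
}

fn main() {
    // The client and the reporter (e.g. SHOW CLIENTS) share the same instance via Arc.
    let stats = Arc::new(ClientStats::default());
    stats.query();
    assert_eq!(stats.query_count.load(Ordering::Relaxed), 1);
}
```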
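And a hedged sketch of why Hash/Eq end up hand-written once a stats field lives inside the address: atomics implement neither trait, so the derive is dropped and the stats field is skipped from identity. Field names here are illustrative, not the actual `config::Address` definition.

```rust
use std::collections::HashSet;
use std::hash::{Hash, Hasher};
use std::sync::atomic::AtomicU64;
use std::sync::Arc;

// Illustrative fields only; the real config::Address carries more data.
#[derive(Clone, Debug, Default)]
pub struct Address {
    pub host: String,
    pub port: u16,
    pub stats: Arc<AddressStats>, // runtime-only, must not affect identity
}

#[derive(Debug, Default)]
pub struct AddressStats {
    pub total_xact_count: AtomicU64,
}

// Deriving Hash/Eq would require AddressStats to implement them too, which
// atomics cannot, so both are written by hand and the stats field is skipped.
impl Hash for Address {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.host.hash(state);
        self.port.hash(state);
    }
}

impl PartialEq for Address {
    fn eq(&self, other: &Self) -> bool {
        self.host == other.host && self.port == other.port
    }
}

impl Eq for Address {}

fn main() {
    let mut set = HashSet::new();
    set.insert(Address { host: "db1".into(), port: 5432, ..Default::default() });
    // Same host/port hashes and compares equal even though the stats instances differ.
    assert!(set.contains(&Address { host: "db1".into(), port: 5432, ..Default::default() }));
}
```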
magec authored Mar 28, 2023
1 parent 9a2076a commit 58ce76d
Showing 19 changed files with 1,299 additions and 1,178 deletions.
12 changes: 12 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default.

1 change: 1 addition & 0 deletions Cargo.toml
@@ -37,6 +37,7 @@ exitcode = "1.1.2"
futures = "0.3"
socket2 = { version = "0.4.7", features = ["all"] }
nix = "0.26.2"
atomic_enum = "0.2.0"

[target.'cfg(not(target_env = "msvc"))'.dependencies]
jemallocator = "0.5.0"
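The `atomic_enum` dependency added above is presumably what backs the atomic `ClientState`/`ServerState` fields that the `src/admin.rs` hunks below read with `load(Ordering::Relaxed)`. A minimal sketch of how that crate's attribute macro is typically used, with an abbreviated enum rather than the real one:

```rust
use atomic_enum::atomic_enum;
use std::sync::atomic::Ordering;

// Illustrative only: the real ClientState/ServerState enums in src/stats
// have more variants.
#[atomic_enum]
#[derive(PartialEq)]
pub enum ClientState {
    Idle,
    Active,
}

fn main() {
    // The attribute macro generates an `AtomicClientState` wrapper with
    // load/store/swap methods that take an `Ordering`.
    let state = AtomicClientState::new(ClientState::Idle);
    state.store(ClientState::Active, Ordering::Relaxed);
    assert!(state.load(Ordering::Relaxed) == ClientState::Active);
}
```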
2 changes: 2 additions & 0 deletions dev/docker-compose.yaml
@@ -26,6 +26,8 @@ x-common-env-pg:
services:
main:
image: kubernetes/pause
ports:
- 6432

pg1:
<<: *common-definition-pg
121 changes: 65 additions & 56 deletions src/admin.rs
@@ -1,20 +1,19 @@
use crate::pool::BanReason;
/// Admin database.
use bytes::{Buf, BufMut, BytesMut};
use log::{error, info, trace};
use nix::sys::signal::{self, Signal};
use nix::unistd::Pid;
use std::collections::HashMap;
/// Admin database.
use std::sync::atomic::Ordering;
use std::time::{SystemTime, UNIX_EPOCH};
use tokio::time::Instant;

use crate::config::{get_config, reload_config, VERSION};
use crate::errors::Error;
use crate::messages::*;
use crate::pool::{get_all_pools, get_pool};
use crate::stats::{
get_address_stats, get_client_stats, get_pool_stats, get_server_stats, ClientState, ServerState,
};
use crate::stats::{get_client_stats, get_pool_stats, get_server_stats, ClientState, ServerState};
use crate::ClientServerMap;

pub fn generate_server_info_for_admin() -> BytesMut {
@@ -158,15 +157,29 @@ where
"free_clients".to_string(),
client_stats
.keys()
.filter(|client_id| client_stats.get(client_id).unwrap().state == ClientState::Idle)
.filter(|client_id| {
client_stats
.get(client_id)
.unwrap()
.state
.load(Ordering::Relaxed)
== ClientState::Idle
})
.count()
.to_string(),
]));
res.put(data_row(&vec![
"used_clients".to_string(),
client_stats
.keys()
.filter(|client_id| client_stats.get(client_id).unwrap().state == ClientState::Active)
.filter(|client_id| {
client_stats
.get(client_id)
.unwrap()
.state
.load(Ordering::Relaxed)
== ClientState::Active
})
.count()
.to_string(),
]));
@@ -178,15 +191,29 @@ where
"free_servers".to_string(),
server_stats
.keys()
.filter(|server_id| server_stats.get(server_id).unwrap().state == ServerState::Idle)
.filter(|server_id| {
server_stats
.get(server_id)
.unwrap()
.state
.load(Ordering::Relaxed)
== ServerState::Idle
})
.count()
.to_string(),
]));
res.put(data_row(&vec![
"used_servers".to_string(),
server_stats
.keys()
.filter(|server_id| server_stats.get(server_id).unwrap().state == ServerState::Active)
.filter(|server_id| {
server_stats
.get(server_id)
.unwrap()
.state
.load(Ordering::Relaxed)
== ServerState::Active
})
.count()
.to_string(),
]));
@@ -248,28 +275,15 @@ where

let mut res = BytesMut::new();
res.put(row_description(&columns));
for (user_pool, pool) in get_all_pools() {
let def = HashMap::default();
let pool_stats = all_pool_stats
.get(&(user_pool.db.clone(), user_pool.user.clone()))
.unwrap_or(&def);

let pool_config = &pool.settings;
for ((_user_pool, _pool), pool_stats) in all_pool_stats {
let mut row = vec![
user_pool.db.clone(),
user_pool.user.clone(),
pool_config.pool_mode.to_string(),
pool_stats.database(),
pool_stats.user(),
pool_stats.pool_mode().to_string(),
];
for column in &columns[3..columns.len()] {
let value = match column.0 {
"maxwait" => (pool_stats.get("maxwait_us").unwrap_or(&0) / 1_000_000).to_string(),
"maxwait_us" => {
(pool_stats.get("maxwait_us").unwrap_or(&0) % 1_000_000).to_string()
}
_other_values => pool_stats.get(column.0).unwrap_or(&0).to_string(),
};
row.push(value);
}
pool_stats.populate_row(&mut row);
pool_stats.clear_maxwait();
res.put(data_row(&row));
}

@@ -400,7 +414,7 @@ where
for (id, pool) in get_all_pools().iter() {
for address in pool.get_addresses_from_host(host) {
if !pool.is_banned(&address) {
pool.ban(&address, BanReason::AdminBan(duration_seconds), -1);
pool.ban(&address, BanReason::AdminBan(duration_seconds), None);
res.put(data_row(&vec![
id.db.clone(),
id.user.clone(),
@@ -617,23 +631,17 @@ where
("avg_wait_time", DataType::Numeric),
];

let all_stats = get_address_stats();
let mut res = BytesMut::new();
res.put(row_description(&columns));

for (user_pool, pool) in get_all_pools() {
for shard in 0..pool.shards() {
for server in 0..pool.servers(shard) {
let address = pool.address(shard, server);
let stats = match all_stats.get(&address.id) {
Some(stats) => stats.clone(),
None => HashMap::new(),
};

let mut row = vec![address.name(), user_pool.db.clone(), user_pool.user.clone()];
for column in &columns[3..] {
row.push(stats.get(column.0).unwrap_or(&0).to_string());
}
let stats = address.stats.clone();
stats.populate_row(&mut row);

res.put(data_row(&row));
}
@@ -673,16 +681,16 @@ where

for (_, client) in new_map {
let row = vec![
format!("{:#010X}", client.client_id),
client.pool_name,
client.username,
client.application_name.clone(),
client.state.to_string(),
client.transaction_count.to_string(),
client.query_count.to_string(),
client.error_count.to_string(),
format!("{:#010X}", client.client_id()),
client.pool_name(),
client.username(),
client.application_name(),
client.state.load(Ordering::Relaxed).to_string(),
client.transaction_count.load(Ordering::Relaxed).to_string(),
client.query_count.load(Ordering::Relaxed).to_string(),
client.error_count.load(Ordering::Relaxed).to_string(),
Instant::now()
.duration_since(client.connect_time)
.duration_since(client.connect_time())
.as_secs()
.to_string(),
];
@@ -724,19 +732,20 @@ where
res.put(row_description(&columns));

for (_, server) in new_map {
let application_name = server.application_name.read();
let row = vec![
format!("{:#010X}", server.server_id),
server.pool_name,
server.username,
server.address_name,
server.application_name,
server.state.to_string(),
server.transaction_count.to_string(),
server.query_count.to_string(),
server.bytes_sent.to_string(),
server.bytes_received.to_string(),
format!("{:#010X}", server.server_id()),
server.pool_name(),
server.username(),
server.address_name(),
application_name.clone(),
server.state.load(Ordering::Relaxed).to_string(),
server.transaction_count.load(Ordering::Relaxed).to_string(),
server.query_count.load(Ordering::Relaxed).to_string(),
server.bytes_sent.load(Ordering::Relaxed).to_string(),
server.bytes_received.load(Ordering::Relaxed).to_string(),
Instant::now()
.duration_since(server.connect_time)
.duration_since(server.connect_time())
.as_secs()
.to_string(),
];