Skip to content

Commit

Permalink
Remove target RSS_BYTE tracking
Browse files Browse the repository at this point in the history
We previously had a mechanism that would allow the generators to back off
if the target was determined to be nearing a preconfigured memory consumption.
We decoupled this at some point -- I'm not sure when -- and then forgot to
remove the interface. It does not appear this was used in practice.

Signed-off-by: Brian L. Troutwine <brian.troutwine@datadoghq.com>
  • Loading branch information
blt committed Dec 6, 2024
1 parent 291b710 commit 21e0c34
Show file tree
Hide file tree
Showing 5 changed files with 7 additions and 68 deletions.
2 changes: 2 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,8 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [Unreleased]
## Removed
- Removed the unused `--target-rss-bytes-limit` option from the command line, along with its internal implementation stub.
## Changed
- logrotate_fs is now behind a feature flag and not enabled in the default
build. It remains enabled in the release artifact.
Expand Down
8 changes: 0 additions & 8 deletions lading/src/bin/lading.rs
Original file line number Diff line number Diff line change
Expand Up @@ -50,8 +50,6 @@ enum Error {
SerdeYaml(#[from] serde_yaml::Error),
#[error("Lading failed to sync servers {0}")]
Send(#[from] tokio::sync::broadcast::error::SendError<Option<i32>>),
#[error("Maximum RSS bytes limit exceeds u64::MAX {0}")]
Meta(#[from] lading::target::MetaError),
#[error("Parsing Prometheus address failed: {0}")]
PrometheusAddr(#[from] std::net::AddrParseError),
#[error("Invalid capture path")]
Expand Down Expand Up @@ -179,9 +177,6 @@ struct Opts {
/// the path to write target's stderr
#[clap(long, default_value_t = default_target_behavior(), requires = "binary-target")]
target_stderr_path: Behavior,
/// the maximum amount of RSS bytes the target may consume before lading backs off load
#[clap(long)]
target_rss_bytes_limit: Option<byte_unit::Byte>,
/// path on disk to write captures, exclusive of prometheus-path and
/// prometheus-addr
#[clap(long)]
Expand Down Expand Up @@ -261,9 +256,6 @@ fn get_config(ops: &Opts, config: Option<String>) -> Result<Config, Error> {

let mut config: Config = serde_yaml::from_str(&contents)?;

if let Some(rss_bytes_limit) = ops.target_rss_bytes_limit {
target::Meta::set_rss_bytes_limit(rss_bytes_limit)?;
}
let target = if ops.no_target {
None
} else if let Some(pid) = ops.target_pid {
Expand Down
7 changes: 1 addition & 6 deletions lading/src/observer.rs
Original file line number Diff line number Diff line change
Expand Up @@ -8,19 +8,14 @@
//! writes out key details about memory and CPU consumption into the capture
//! data. On non-Linux systems the observer, if enabled, will emit a warning.
use std::{io, sync::atomic::AtomicU64};
use std::io;

use crate::target::TargetPidReceiver;
use serde::Deserialize;

#[cfg(target_os = "linux")]
mod linux;

#[allow(dead_code)] // used on Linux
/// Expose the process' current RSS consumption, allowing abstractions to be
/// built on top in the Target implementation.
pub(crate) static RSS_BYTES: AtomicU64 = AtomicU64::new(0);

#[derive(thiserror::Error, Debug)]
/// Errors produced by [`Server`]
pub enum Error {
Expand Down
6 changes: 3 additions & 3 deletions lading/src/observer/linux/procfs.rs
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
/// Sampler implementation for procfs filesystems
mod memory;

use std::{collections::VecDeque, io, sync::atomic::Ordering};
use std::{collections::VecDeque, io};

use metrics::gauge;
use nix::errno::Errno;
Expand All @@ -10,7 +10,6 @@ use procfs::{process::Process, Current};
use rustc_hash::{FxHashMap, FxHashSet};
use tracing::{error, warn};

use crate::observer::RSS_BYTES;
use memory::{Regions, Rollup};

const BYTES_PER_KIBIBYTE: u64 = 1024;
Expand Down Expand Up @@ -332,6 +331,7 @@ impl Sampler {
// then we assume smaps operations will fail as well.
if has_ptrace_perm {
joinset.spawn(async move {
// TODO this code reads smaps
let memory_regions = match Regions::from_pid(pid) {
Ok(memory_regions) => memory_regions,
Err(e) => {
Expand Down Expand Up @@ -369,6 +369,7 @@ impl Sampler {
gauge!("smaps.size.sum", &labels).set(measures.size as f64);
gauge!("smaps.swap.sum", &labels).set(measures.swap as f64);

// This code reads smaps_rollup
let rollup = match Rollup::from_pid(pid) {
Ok(rollup) => rollup,
Err(e) => {
Expand Down Expand Up @@ -399,7 +400,6 @@ impl Sampler {
}

gauge!("num_processes").set(total_processes as f64);
RSS_BYTES.store(total_rss, Ordering::Relaxed); // stored for the purposes of throttling

// Now we loop through our just collected samples and calculate CPU
// utilization. This require memory and we will now reference -- and
Expand Down
52 changes: 1 addition & 51 deletions lading/src/target.rs
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,6 @@ use std::{
num::NonZeroI32,
path::PathBuf,
process::{ExitStatus, Stdio},
sync::atomic::{AtomicU64, Ordering},
time::Duration,
};

Expand All @@ -36,12 +35,8 @@ use rustc_hash::FxHashMap;
use tokio::{process::Command, time};
use tracing::{error, info};

use crate::common::stdio;
pub use crate::common::{Behavior, Output};
use crate::{common::stdio, observer::RSS_BYTES};

/// Expose the process' current RSS consumption, allowing abstractions to be
/// built on top in the Target implementation.
pub(crate) static RSS_BYTES_LIMIT: AtomicU64 = AtomicU64::new(u64::MAX);

/// Type used to receive the target PID once it is running.
#[allow(clippy::module_name_repetitions)]
Expand All @@ -50,51 +45,6 @@ pub type TargetPidReceiver = tokio::sync::broadcast::Receiver<Option<i32>>;
#[allow(clippy::module_name_repetitions)]
type TargetPidSender = tokio::sync::broadcast::Sender<Option<i32>>;

/// Errors produced by [`Meta`]
#[derive(thiserror::Error, Debug, Clone, Copy)]
pub enum MetaError {
/// Unable to support byte limits greater than `u64::MAX`
#[error("unable to support bytes greater than u64::MAX")]
ByteLimitTooLarge,
}

/// Source for live metadata about the running target.
#[derive(Debug, Clone, Copy)]
pub struct Meta {}

impl Meta {
/// Set the maximum RSS bytes limit
///
/// # Errors
///
/// Will fail if the `byte_unit::Byte` value given is larger than `u64::MAX`
/// bytes.
#[inline]
#[allow(clippy::cast_possible_truncation)]
pub fn set_rss_bytes_limit(limit: byte_unit::Byte) -> Result<(), MetaError> {
let raw_limit: u128 = limit.get_bytes();
if raw_limit > u128::from(u64::MAX) {
return Err(MetaError::ByteLimitTooLarge);
}

gauge!("rss_bytes_limit").set(raw_limit as f64);

RSS_BYTES_LIMIT.store(raw_limit as u64, Ordering::Relaxed);
Ok(())
}

#[allow(dead_code)] // used on Linux
#[inline]
pub(crate) fn rss_bytes_limit_exceeded() -> bool {
let limit: u64 = RSS_BYTES_LIMIT.load(Ordering::Relaxed);
let current: u64 = RSS_BYTES.load(Ordering::Relaxed);

gauge!("rss_bytes_limit_overage").set(current.saturating_sub(limit) as f64);

current > limit
}
}

/// Errors produced by [`Server`]
#[derive(thiserror::Error, Debug)]
pub enum Error {
Expand Down

0 comments on commit 21e0c34

Please sign in to comment.