feat: Add results_kafka_topic config (#51)
evanpurkhiser authored Jun 13, 2024
1 parent c976d19 commit 0aebeee
Showing 2 changed files with 9 additions and 3 deletions.
src/config.rs: 10 changes (8 additions & 2 deletions)
@@ -34,6 +34,9 @@ pub struct Config {
         serialize_with = "list_serializer"
     )]
     pub results_kafka_cluster: Vec<String>,
+
+    /// The topic to produce uptime checks into.
+    pub results_kafka_topic: String,
 }
 
 impl Default for Config {
@@ -44,6 +47,7 @@ impl Default for Config {
             log_level: logging::Level::Warn,
             log_format: logging::LogFormat::Auto,
             results_kafka_cluster: vec![],
+            results_kafka_topic: "uptime-results".to_owned(),
         }
     }
 }
@@ -131,7 +135,8 @@ mod tests {
                 sentry_env: Some(Cow::from("my_env")),
                 log_level: logging::Level::Warn,
                 log_format: logging::LogFormat::Auto,
-                results_kafka_cluster: vec!["10.0.0.1".to_owned(), "10.0.0.2:9000".to_owned()]
+                results_kafka_cluster: vec!["10.0.0.1".to_owned(), "10.0.0.2:9000".to_owned()],
+                results_kafka_topic: "uptime-results".to_owned(),
             }
         );
         Ok(())
@@ -172,7 +177,8 @@ mod tests {
                 sentry_env: Some(Cow::from("my_env_override")),
                 log_level: logging::Level::Trace,
                 log_format: logging::LogFormat::Json,
-                results_kafka_cluster: vec!["10.0.0.1".to_owned(), "10.0.0.2:7000".to_owned()]
+                results_kafka_cluster: vec!["10.0.0.1".to_owned(), "10.0.0.2:7000".to_owned()],
+                results_kafka_topic: "uptime-results".to_owned(),
             }
         );
         Ok(())
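
For reference, a minimal, self-contained sketch of how the new field behaves. This is not code from the commit: it mirrors only the parts of Config touched here (the real struct has additional fields such as sentry_env, log_level, and log_format), and the topic name "uptime-results-staging" is purely illustrative.

// Minimal sketch, not code from this commit: only the fields touched by the
// change are reproduced, to show how the new default interacts with overrides.
#[derive(Debug, Clone)]
struct Config {
    results_kafka_cluster: Vec<String>,
    results_kafka_topic: String,
}

impl Default for Config {
    fn default() -> Self {
        Self {
            results_kafka_cluster: vec![],
            results_kafka_topic: "uptime-results".to_owned(),
        }
    }
}

fn main() {
    // Without an override, the previously hard-coded topic name is kept.
    assert_eq!(Config::default().results_kafka_topic, "uptime-results");

    // A deployment can now point results at a different topic; the name
    // used here is illustrative only.
    let config = Config {
        results_kafka_topic: "uptime-results-staging".to_owned(),
        ..Config::default()
    };
    println!(
        "cluster: {:?}, topic: {}",
        config.results_kafka_cluster, config.results_kafka_topic
    );
}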
src/scheduler.rs: 2 changes (1 addition & 1 deletion)
@@ -14,7 +14,7 @@ pub async fn run_scheduler(config: &Config) -> Result<(), JobSchedulerError> {
     let checker = Arc::new(Checker::new(Default::default()));
 
     let producer = Arc::new(ResultProducer::new(
-        "uptime-results",
+        &config.results_kafka_topic,
         KafkaConfig::new_config(config.results_kafka_cluster.to_owned(), None),
     ));
 
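
The scheduler change is the consumer side of the new setting: the topic now flows from Config into the producer instead of being hard-coded. A minimal sketch of that wiring follows; ResultProducer here is a hypothetical stub, not the type from this repository.

// Hypothetical stub standing in for the repository's ResultProducer; it only
// demonstrates the wiring pattern from this commit: the topic is borrowed
// from config rather than written as a string literal.
struct ResultProducer {
    topic: String,
}

impl ResultProducer {
    fn new(topic: &str) -> Self {
        Self { topic: topic.to_owned() }
    }
}

struct Config {
    results_kafka_topic: String,
}

fn main() {
    let config = Config {
        results_kafka_topic: "uptime-results".to_owned(),
    };
    // Mirrors the scheduler change: &config.results_kafka_topic replaces the
    // old "uptime-results" literal.
    let producer = ResultProducer::new(&config.results_kafka_topic);
    println!("producing results to {}", producer.topic);
}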
