diff --git a/Cargo.lock b/Cargo.lock
index 210f76025d..d44ab75db0 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -461,7 +461,7 @@ dependencies = [
  "diesel",
  "diesel_full_text_search",
  "diesel_migrations",
- "dotenv",
+ "dotenvy",
  "flate2",
  "futures-channel",
  "futures-util",
@@ -516,7 +516,7 @@ dependencies = [
  "anyhow",
  "base64 0.13.1",
  "claims",
- "dotenv",
+ "dotenvy",
  "git2",
  "serde",
  "serde_json",
@@ -967,10 +967,10 @@ dependencies = [
 ]

 [[package]]
-name = "dotenv"
-version = "0.15.0"
+name = "dotenvy"
+version = "0.15.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "77c90badedccf4105eca100756a0b1289e191f6fcbdadd3cee1d2f614f97da8f"
+checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b"

 [[package]]
 name = "either"
diff --git a/Cargo.toml b/Cargo.toml
index 7ec24ac5d3..a97b6b1f44 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -47,7 +47,7 @@ dialoguer = "=0.10.4"
 diesel = { version = "=2.0.4", features = ["postgres", "serde_json", "chrono", "r2d2"] }
 diesel_full_text_search = "=2.0.0"
 diesel_migrations = { version = "=2.0.0", features = ["postgres"] }
-dotenv = "=0.15.0"
+dotenvy = "=0.15.7"
 flate2 = "=1.0.26"
 futures-channel = { version = "=0.3.28", default-features = false }
 futures-util = "=0.3.28"
@@ -102,4 +102,4 @@ tower-service = "=0.3.2"
 [build-dependencies]
 diesel = { version = "=2.0.4", features = ["postgres"] }
 diesel_migrations = { version = "=2.0.0", features = ["postgres"] }
-dotenv = "=0.15.0"
+dotenvy = "=0.15.7"
diff --git a/build.rs b/build.rs
index 7cf3f4caf1..7516e7cb1c 100644
--- a/build.rs
+++ b/build.rs
@@ -7,7 +7,7 @@ fn main() {
     println!("cargo:rerun-if-changed=.env");
     println!("cargo:rerun-if-changed=migrations/");
     if env::var("PROFILE") == Ok("debug".into()) {
-        if let Ok(database_url) = dotenv::var("TEST_DATABASE_URL") {
+        if let Ok(database_url) = dotenvy::var("TEST_DATABASE_URL") {
             let connection = &mut PgConnection::establish(&database_url)
                 .expect("Could not connect to TEST_DATABASE_URL");
             let migrations = FileBasedMigrations::find_migrations_directory()
diff --git a/cargo-registry-index/Cargo.toml b/cargo-registry-index/Cargo.toml
index 7009e564b1..e7f0906356 100644
--- a/cargo-registry-index/Cargo.toml
+++ b/cargo-registry-index/Cargo.toml
@@ -15,7 +15,7 @@ testing = []
 [dependencies]
 anyhow = "=1.0.71"
 base64 = "=0.13.1"
-dotenv = "=0.15.0"
+dotenvy = "=0.15.7"
 git2 = "=0.17.1"
 serde = { version = "=1.0.162", features = ["derive"] }
 serde_json = "=1.0.96"
diff --git a/cargo-registry-index/lib.rs b/cargo-registry-index/lib.rs
index bb165f580e..fda32846fb 100644
--- a/cargo-registry-index/lib.rs
+++ b/cargo-registry-index/lib.rs
@@ -230,12 +230,12 @@ pub struct RepositoryConfig {

 impl RepositoryConfig {
     pub fn from_environment() -> Self {
-        let username = dotenv::var("GIT_HTTP_USER");
-        let password = dotenv::var("GIT_HTTP_PWD");
-        let http_url = dotenv::var("GIT_REPO_URL");
+        let username = dotenvy::var("GIT_HTTP_USER");
+        let password = dotenvy::var("GIT_HTTP_PWD");
+        let http_url = dotenvy::var("GIT_REPO_URL");

-        let ssh_key = dotenv::var("GIT_SSH_KEY");
-        let ssh_url = dotenv::var("GIT_SSH_REPO_URL");
+        let ssh_key = dotenvy::var("GIT_SSH_KEY");
+        let ssh_url = dotenvy::var("GIT_SSH_REPO_URL");

         match (username, password, http_url, ssh_key, ssh_url) {
             (extra_user, extra_pass, extra_http_url, Ok(encoded_key), Ok(ssh_url)) => {
diff --git a/src/admin/on_call.rs b/src/admin/on_call.rs
index 28f953ac29..0f7493a099 100644
--- a/src/admin/on_call.rs
+++ b/src/admin/on_call.rs
@@ -25,8 +25,8 @@ impl Event {
     /// If the variant is `Trigger`, this will page whoever is on call
     /// (potentially waking them up at 3 AM).
     pub fn send(self) -> Result<()> {
-        let api_token = dotenv::var("PAGERDUTY_API_TOKEN")?;
-        let service_key = dotenv::var("PAGERDUTY_INTEGRATION_KEY")?;
+        let api_token = dotenvy::var("PAGERDUTY_API_TOKEN")?;
+        let service_key = dotenvy::var("PAGERDUTY_INTEGRATION_KEY")?;

         let response = Client::new()
             .post("https://events.pagerduty.com/generic/2010-04-15/create_event.json")
diff --git a/src/app.rs b/src/app.rs
index aa36f3fc8e..8787d2acaf 100644
--- a/src/app.rs
+++ b/src/app.rs
@@ -92,14 +92,14 @@ impl App {
             ),
         );

-        let db_helper_threads = match (dotenv::var("DB_HELPER_THREADS"), config.env()) {
+        let db_helper_threads = match (dotenvy::var("DB_HELPER_THREADS"), config.env()) {
             (Ok(num), _) => num.parse().expect("couldn't parse DB_HELPER_THREADS"),
             (_, Env::Production) => 3,
             _ => 1,
         };

         // Used as the connection and statement timeout value for the database pool(s)
-        let db_connection_timeout = match (dotenv::var("DB_TIMEOUT"), config.env()) {
+        let db_connection_timeout = match (dotenvy::var("DB_TIMEOUT"), config.env()) {
             (Ok(num), _) => num.parse().expect("couldn't parse DB_TIMEOUT"),
             (_, Env::Production) => 10,
             (_, Env::Test) => 1,
@@ -170,7 +170,7 @@ impl App {
             .time_to_live(config.version_id_cache_ttl)
             .build();

-        let fastboot_client = match dotenv::var("USE_FASTBOOT") {
+        let fastboot_client = match dotenvy::var("USE_FASTBOOT") {
             Ok(val) if val == "staging-experimental" => Some(reqwest::Client::new()),
             _ => None,
         };
diff --git a/src/bin/background-worker.rs b/src/bin/background-worker.rs
index 2c1bfc2308..21eaed4dbb 100644
--- a/src/bin/background-worker.rs
+++ b/src/bin/background-worker.rs
@@ -51,14 +51,14 @@ fn main() {

     let db_url = db::connection_url(&config.db, &config.db.primary.url);

-    let job_start_timeout = dotenv::var("BACKGROUND_JOB_TIMEOUT")
+    let job_start_timeout = dotenvy::var("BACKGROUND_JOB_TIMEOUT")
         .unwrap_or_else(|_| "30".into())
         .parse()
         .expect("Invalid value for `BACKGROUND_JOB_TIMEOUT`");

     info!("Cloning index");

-    if dotenv::var("HEROKU").is_ok() {
+    if dotenvy::var("HEROKU").is_ok() {
         ssh::write_known_hosts_file().unwrap();
     }
diff --git a/src/bin/monitor.rs b/src/bin/monitor.rs
index 8c4494dc77..0c1ab0a123 100644
--- a/src/bin/monitor.rs
+++ b/src/bin/monitor.rs
@@ -37,7 +37,7 @@ fn check_failing_background_jobs(conn: &mut PgConnection) -> Result<()> {
     println!("Checking for failed background jobs");

     // Max job execution time in minutes
-    let max_job_time = dotenv::var("MAX_JOB_TIME")
+    let max_job_time = dotenvy::var("MAX_JOB_TIME")
         .map(|s| s.parse::<u32>().unwrap())
         .unwrap_or(15);

@@ -78,7 +78,7 @@ fn check_stalled_update_downloads(conn: &mut PgConnection) -> Result<()> {
     println!("Checking for stalled background jobs");

     // Max job execution time in minutes
-    let max_job_time = dotenv::var("MONITOR_MAX_UPDATE_DOWNLOADS_TIME")
+    let max_job_time = dotenvy::var("MONITOR_MAX_UPDATE_DOWNLOADS_TIME")
         .map(|s| s.parse::<u32>().unwrap() as i64)
         .unwrap_or(120);

@@ -113,7 +113,7 @@ fn check_spam_attack(conn: &mut PgConnection) -> Result<()> {
     println!("Checking for crates indicating someone is spamming us");

-    let bad_crate_names = dotenv::var("SPAM_CRATE_NAMES");
+    let bad_crate_names = dotenvy::var("SPAM_CRATE_NAMES");
     let bad_crate_names: Vec<_> = bad_crate_names
         .as_ref()
         .map(|s| s.split(',').collect())
diff --git a/src/bin/server.rs b/src/bin/server.rs
index dab1c17e71..a209d126fc 100644
--- a/src/bin/server.rs
+++ b/src/bin/server.rs
@@ -43,9 +43,9 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
     let normalize_path = axum::middleware::from_fn(normalize_path);
     let axum_router = normalize_path.layer(axum_router);

-    let heroku = dotenv::var("HEROKU").is_ok();
-    let fastboot = dotenv::var("USE_FASTBOOT").is_ok();
-    let dev_docker = dotenv::var("DEV_DOCKER").is_ok();
+    let heroku = dotenvy::var("HEROKU").is_ok();
+    let fastboot = dotenvy::var("USE_FASTBOOT").is_ok();
+    let dev_docker = dotenvy::var("DEV_DOCKER").is_ok();
     let ip = if dev_docker {
         [0, 0, 0, 0]
@@ -57,7 +57,7 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
         _ => 8888,
     };

-    let threads = dotenv::var("SERVER_THREADS")
+    let threads = dotenvy::var("SERVER_THREADS")
         .map(|s| s.parse().expect("SERVER_THREADS was not a valid number"))
         .unwrap_or_else(|_| match env {
             Env::Development => 5,
diff --git a/src/config.rs b/src/config.rs
index 287dacb7d4..7cad0c2c3c 100644
--- a/src/config.rs
+++ b/src/config.rs
@@ -128,7 +128,7 @@ impl Default for Server {
             excluded_crate_names,
             domain_name: domain_name(),
             allowed_origins,
-            downloads_persist_interval_ms: dotenv::var("DOWNLOADS_PERSIST_INTERVAL_MS")
+            downloads_persist_interval_ms: dotenvy::var("DOWNLOADS_PERSIST_INTERVAL_MS")
                 .map(|interval| {
                     interval
                         .parse()
@@ -136,10 +136,10 @@ impl Default for Server {
                 })
                 .unwrap_or(60_000), // 1 minute
             ownership_invitations_expiration_days: 30,
-            metrics_authorization_token: dotenv::var("METRICS_AUTHORIZATION_TOKEN").ok(),
+            metrics_authorization_token: dotenvy::var("METRICS_AUTHORIZATION_TOKEN").ok(),
             use_test_database_pool: false,
             instance_metrics_log_every_seconds: env_optional("INSTANCE_METRICS_LOG_EVERY_SECONDS"),
-            force_unconditional_redirects: dotenv::var("FORCE_UNCONDITIONAL_REDIRECTS").is_ok(),
+            force_unconditional_redirects: dotenvy::var("FORCE_UNCONDITIONAL_REDIRECTS").is_ok(),
             blocked_routes: env_optional("BLOCKED_ROUTES")
                 .map(|routes: String| routes.split(',').map(|s| s.into()).collect())
                 .unwrap_or_else(HashSet::new),
@@ -148,7 +148,7 @@ impl Default for Server {
             version_id_cache_ttl: Duration::from_secs(
                 env_optional("VERSION_ID_CACHE_TTL").unwrap_or(DEFAULT_VERSION_ID_CACHE_TTL),
             ),
-            cdn_user_agent: dotenv::var("WEB_CDN_USER_AGENT")
+            cdn_user_agent: dotenvy::var("WEB_CDN_USER_AGENT")
                 .unwrap_or_else(|_| "Amazon CloudFront".into()),
             balance_capacity: BalanceCapacityConfig::from_environment(),
         }
@@ -166,7 +166,7 @@ impl Server {
 }

 pub(crate) fn domain_name() -> String {
-    dotenv::var("DOMAIN_NAME").unwrap_or_else(|_| "crates.io".into())
+    dotenvy::var("DOMAIN_NAME").unwrap_or_else(|_| "crates.io".into())
 }

 /// Parses a CIDR block string to a valid `IpNetwork` struct.
@@ -196,10 +196,10 @@ fn parse_cidr_block(block: &str) -> anyhow::Result<IpNetwork> {
 }

 fn blocked_traffic() -> Vec<(String, Vec<String>)> {
-    let pattern_list = dotenv::var("BLOCKED_TRAFFIC").unwrap_or_default();
+    let pattern_list = dotenvy::var("BLOCKED_TRAFFIC").unwrap_or_default();
     parse_traffic_patterns(&pattern_list)
         .map(|(header, value_env_var)| {
-            let value_list = dotenv::var(value_env_var).unwrap_or_default();
+            let value_list = dotenvy::var(value_env_var).unwrap_or_default();
             let values = value_list.split(',').map(String::from).collect();
             (header.into(), values)
         })
diff --git a/src/config/base.rs b/src/config/base.rs
index 6ef3eec58d..bc44ec9597 100644
--- a/src/config/base.rs
+++ b/src/config/base.rs
@@ -17,7 +17,7 @@ pub struct Base {

 impl Base {
     pub fn from_environment() -> Self {
-        let heroku = dotenv::var("HEROKU").is_ok();
+        let heroku = dotenvy::var("HEROKU").is_ok();
         let env = if heroku {
             Env::Production
         } else {
@@ -33,7 +33,7 @@ impl Base {
             }
             // In Development mode, either running as a primary instance or a read-only mirror
             _ => {
-                if dotenv::var("S3_BUCKET").is_ok() {
+                if dotenvy::var("S3_BUCKET").is_ok() {
                     // If we've set the `S3_BUCKET` variable to any value, use all of the values
                     // for the related S3 environment variables and configure the app to upload to
                     // and read from S3 like production does. All values except for bucket are
@@ -60,8 +60,8 @@ impl Base {
                 bucket: Box::new(s3::Bucket::new(
                     String::from("alexcrichton-test"),
                     None,
-                    dotenv::var("AWS_ACCESS_KEY").unwrap_or_default(),
-                    dotenv::var("AWS_SECRET_KEY").unwrap_or_default(),
+                    dotenvy::var("AWS_ACCESS_KEY").unwrap_or_default(),
+                    dotenvy::var("AWS_SECRET_KEY").unwrap_or_default(),
                     // When testing we route all API traffic over HTTP so we can
                     // sniff/record it, but everywhere else we use https
                     "http",
                 )),
@@ -69,8 +69,8 @@ impl Base {
                 index_bucket: Some(Box::new(s3::Bucket::new(
                     String::from("alexcrichton-test"),
                     None,
-                    dotenv::var("AWS_ACCESS_KEY").unwrap_or_default(),
-                    dotenv::var("AWS_SECRET_KEY").unwrap_or_default(),
+                    dotenvy::var("AWS_ACCESS_KEY").unwrap_or_default(),
+                    dotenvy::var("AWS_SECRET_KEY").unwrap_or_default(),
                     // When testing we route all API traffic over HTTP so we can
                     // sniff/record it, but everywhere else we use https
                     "http",
@@ -88,10 +88,10 @@ impl Base {
     }

     fn s3_panic_if_missing_keys() -> Uploader {
-        let index_bucket = match dotenv::var("S3_INDEX_BUCKET") {
+        let index_bucket = match dotenvy::var("S3_INDEX_BUCKET") {
             Ok(name) => Some(Box::new(s3::Bucket::new(
                 name,
-                dotenv::var("S3_INDEX_REGION").ok(),
+                dotenvy::var("S3_INDEX_REGION").ok(),
                 env("AWS_ACCESS_KEY"),
                 env("AWS_SECRET_KEY"),
                 "https",
@@ -101,23 +101,23 @@ impl Base {
         Uploader::S3 {
             bucket: Box::new(s3::Bucket::new(
                 env("S3_BUCKET"),
-                dotenv::var("S3_REGION").ok(),
+                dotenvy::var("S3_REGION").ok(),
                 env("AWS_ACCESS_KEY"),
                 env("AWS_SECRET_KEY"),
                 "https",
             )),
             index_bucket,
-            cdn: dotenv::var("S3_CDN").ok(),
+            cdn: dotenvy::var("S3_CDN").ok(),
         }
     }

     fn s3_maybe_read_only() -> Uploader {
-        let index_bucket = match dotenv::var("S3_INDEX_BUCKET") {
+        let index_bucket = match dotenvy::var("S3_INDEX_BUCKET") {
             Ok(name) => Some(Box::new(s3::Bucket::new(
                 name,
-                dotenv::var("S3_INDEX_REGION").ok(),
-                dotenv::var("AWS_ACCESS_KEY").unwrap_or_default(),
-                dotenv::var("AWS_SECRET_KEY").unwrap_or_default(),
+                dotenvy::var("S3_INDEX_REGION").ok(),
+                dotenvy::var("AWS_ACCESS_KEY").unwrap_or_default(),
+                dotenvy::var("AWS_SECRET_KEY").unwrap_or_default(),
                 "https",
             ))),
             Err(_) => None,
@@ -125,13 +125,13 @@ impl Base {
         Uploader::S3 {
             bucket: Box::new(s3::Bucket::new(
                 env("S3_BUCKET"),
-                dotenv::var("S3_REGION").ok(),
-                dotenv::var("AWS_ACCESS_KEY").unwrap_or_default(),
-                dotenv::var("AWS_SECRET_KEY").unwrap_or_default(),
+                dotenvy::var("S3_REGION").ok(),
+                dotenvy::var("AWS_ACCESS_KEY").unwrap_or_default(),
+                dotenvy::var("AWS_SECRET_KEY").unwrap_or_default(),
                 "https",
             )),
             index_bucket,
-            cdn: dotenv::var("S3_CDN").ok(),
+            cdn: dotenvy::var("S3_CDN").ok(),
         }
     }
 }
diff --git a/src/config/database_pools.rs b/src/config/database_pools.rs
index a28b0cb8b8..e5654c8a41 100644
--- a/src/config/database_pools.rs
+++ b/src/config/database_pools.rs
@@ -54,37 +54,37 @@ impl DatabasePools {
     /// This function panics if `DB_OFFLINE=leader` but `READ_ONLY_REPLICA_URL` is unset.
     pub fn full_from_environment(base: &Base) -> Self {
         let leader_url = env("DATABASE_URL");
-        let follower_url = dotenv::var("READ_ONLY_REPLICA_URL").ok();
-        let read_only_mode = dotenv::var("READ_ONLY_MODE").is_ok();
+        let follower_url = dotenvy::var("READ_ONLY_REPLICA_URL").ok();
+        let read_only_mode = dotenvy::var("READ_ONLY_MODE").is_ok();

-        let primary_pool_size = match dotenv::var("DB_PRIMARY_POOL_SIZE") {
+        let primary_pool_size = match dotenvy::var("DB_PRIMARY_POOL_SIZE") {
             Ok(num) => num.parse().expect("couldn't parse DB_PRIMARY_POOL_SIZE"),
             _ => Self::DEFAULT_POOL_SIZE,
         };

-        let replica_pool_size = match dotenv::var("DB_REPLICA_POOL_SIZE") {
+        let replica_pool_size = match dotenvy::var("DB_REPLICA_POOL_SIZE") {
             Ok(num) => num.parse().expect("couldn't parse DB_REPLICA_POOL_SIZE"),
             _ => Self::DEFAULT_POOL_SIZE,
         };

-        let primary_min_idle = match dotenv::var("DB_PRIMARY_MIN_IDLE") {
+        let primary_min_idle = match dotenvy::var("DB_PRIMARY_MIN_IDLE") {
             Ok(num) => Some(num.parse().expect("couldn't parse DB_PRIMARY_MIN_IDLE")),
             _ => None,
         };

-        let replica_min_idle = match dotenv::var("DB_REPLICA_MIN_IDLE") {
+        let replica_min_idle = match dotenvy::var("DB_REPLICA_MIN_IDLE") {
             Ok(num) => Some(num.parse().expect("couldn't parse DB_REPLICA_MIN_IDLE")),
             _ => None,
         };

-        let tcp_timeout_ms = match dotenv::var("DB_TCP_TIMEOUT_MS") {
+        let tcp_timeout_ms = match dotenvy::var("DB_TCP_TIMEOUT_MS") {
             Ok(num) => num.parse().expect("couldn't parse DB_TCP_TIMEOUT_MS"),
             Err(_) => 15 * 1000, // 15 seconds
         };

         let enforce_tls = base.env == Env::Production;

-        match dotenv::var("DB_OFFLINE").as_deref() {
+        match dotenvy::var("DB_OFFLINE").as_deref() {
             // The actual leader is down, use the follower in read-only mode as the primary and
             // don't configure a replica.
Ok("leader") => Self { diff --git a/src/controllers/site_metadata.rs b/src/controllers/site_metadata.rs index 4d784a188d..b55bd58d71 100644 --- a/src/controllers/site_metadata.rs +++ b/src/controllers/site_metadata.rs @@ -10,7 +10,7 @@ pub async fn show_deployed_sha(state: AppState) -> impl IntoResponse { let read_only = state.config.db.are_all_read_only(); let deployed_sha = - dotenv::var("HEROKU_SLUG_COMMIT").unwrap_or_else(|_| String::from("unknown")); + dotenvy::var("HEROKU_SLUG_COMMIT").unwrap_or_else(|_| String::from("unknown")); Json(json!({ "deployed_sha": &deployed_sha[..], diff --git a/src/controllers/user/session.rs b/src/controllers/user/session.rs index 96341fa5e0..abc6a414b9 100644 --- a/src/controllers/user/session.rs +++ b/src/controllers/user/session.rs @@ -156,7 +156,7 @@ mod tests { fn pg_connection() -> PgConnection { let database_url = - dotenv::var("TEST_DATABASE_URL").expect("TEST_DATABASE_URL must be set to run tests"); + dotenvy::var("TEST_DATABASE_URL").expect("TEST_DATABASE_URL must be set to run tests"); PgConnection::establish(&database_url).unwrap() } diff --git a/src/email.rs b/src/email.rs index ac953336bb..3b30e6d1d3 100644 --- a/src/email.rs +++ b/src/email.rs @@ -22,9 +22,9 @@ impl Emails { /// to a SMTP server or store the emails on the local filesystem. pub fn from_environment(config: &config::Server) -> Self { let backend = match ( - dotenv::var("MAILGUN_SMTP_LOGIN"), - dotenv::var("MAILGUN_SMTP_PASSWORD"), - dotenv::var("MAILGUN_SMTP_SERVER"), + dotenvy::var("MAILGUN_SMTP_LOGIN"), + dotenvy::var("MAILGUN_SMTP_PASSWORD"), + dotenvy::var("MAILGUN_SMTP_SERVER"), ) { (Ok(login), Ok(password), Ok(server)) => EmailBackend::Smtp { server, diff --git a/src/lib.rs b/src/lib.rs index b5ecbe055c..1b08d8ea0a 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -88,7 +88,7 @@ pub fn build_handler(app: Arc) -> axum::Router { /// Convenience function requiring that an environment variable is set. /// -/// Ensures that we've initialized the dotenv crate in order to read environment variables +/// Ensures that we've initialized the dotenvy crate in order to read environment variables /// from a *.env* file if present. Don't use this for optionally set environment variables. /// /// # Panics @@ -97,12 +97,12 @@ pub fn build_handler(app: Arc) -> axum::Router { /// in the current environment. #[track_caller] pub fn env(s: &str) -> String { - dotenv::var(s).unwrap_or_else(|_| panic!("must have `{s}` defined")) + dotenvy::var(s).unwrap_or_else(|_| panic!("must have `{s}` defined")) } /// Parse an optional environment variable /// -/// Ensures that we've initialized the dotenv crate in order to read environment variables +/// Ensures that we've initialized the dotenvy crate in order to read environment variables /// from a *.env* file if present. A variable that is set to invalid unicode will be handled /// as if it was unset. /// @@ -111,7 +111,7 @@ pub fn env(s: &str) -> String { /// Panics if the environment variable is set but cannot be parsed as the requested type. 
 #[track_caller]
 pub fn env_optional<T: FromStr>(s: &str) -> Option<T> {
-    dotenv::var(s).ok().map(|s| {
+    dotenvy::var(s).ok().map(|s| {
         s.parse()
             .unwrap_or_else(|_| panic!("`{s}` was defined but could not be parsed"))
     })
diff --git a/src/models/keyword.rs b/src/models/keyword.rs
index e64fbb8705..58d9121321 100644
--- a/src/models/keyword.rs
+++ b/src/models/keyword.rs
@@ -90,7 +90,7 @@ mod tests {

     fn pg_connection() -> PgConnection {
         let database_url =
-            dotenv::var("TEST_DATABASE_URL").expect("TEST_DATABASE_URL must be set to run tests");
+            dotenvy::var("TEST_DATABASE_URL").expect("TEST_DATABASE_URL must be set to run tests");
         let mut conn = PgConnection::establish(&database_url).unwrap();
         // These tests deadlock if run concurrently
         conn.batch_execute("BEGIN;").unwrap();
diff --git a/src/publish_rate_limit.rs b/src/publish_rate_limit.rs
index d01f362870..0e269e4b85 100644
--- a/src/publish_rate_limit.rs
+++ b/src/publish_rate_limit.rs
@@ -16,12 +16,12 @@ pub struct PublishRateLimit {

 impl Default for PublishRateLimit {
     fn default() -> Self {
-        let minutes = dotenv::var("WEB_NEW_PKG_RATE_LIMIT_RATE_MINUTES")
+        let minutes = dotenvy::var("WEB_NEW_PKG_RATE_LIMIT_RATE_MINUTES")
             .unwrap_or_default()
             .parse()
             .ok()
             .unwrap_or(10);
-        let burst = dotenv::var("WEB_NEW_PKG_RATE_LIMIT_BURST")
+        let burst = dotenvy::var("WEB_NEW_PKG_RATE_LIMIT_BURST")
             .unwrap_or_default()
             .parse()
             .ok()
diff --git a/src/sentry/mod.rs b/src/sentry/mod.rs
index 601a93c2fc..cf8d6da271 100644
--- a/src/sentry/mod.rs
+++ b/src/sentry/mod.rs
@@ -11,18 +11,18 @@ use std::sync::Arc;
 /// `HEROKU_SLUG_COMMIT`, if present, will be used as the `release` property
 /// on all events.
 pub fn init() -> ClientInitGuard {
-    let dsn = dotenv::var("SENTRY_DSN_API")
+    let dsn = dotenvy::var("SENTRY_DSN_API")
         .ok()
         .into_dsn()
         .expect("SENTRY_DSN_API is not a valid Sentry DSN value");

     let environment = dsn.as_ref().map(|_| {
-        dotenv::var("SENTRY_ENV_API")
+        dotenvy::var("SENTRY_ENV_API")
             .expect("SENTRY_ENV_API must be set when using SENTRY_DSN_API")
             .into()
     });

-    let release = dotenv::var("HEROKU_SLUG_COMMIT").ok().map(Into::into);
+    let release = dotenvy::var("HEROKU_SLUG_COMMIT").ok().map(Into::into);

     let traces_sample_rate = env_optional("SENTRY_TRACES_SAMPLE_RATE").unwrap_or(0.0);
diff --git a/src/swirl/runner.rs b/src/swirl/runner.rs
index 1f4100b0c7..85140879f8 100644
--- a/src/swirl/runner.rs
+++ b/src/swirl/runner.rs
@@ -426,7 +426,7 @@ mod tests {

     fn runner() -> Runner {
         let database_url =
-            dotenv::var("TEST_DATABASE_URL").expect("TEST_DATABASE_URL must be set to run tests");
+            dotenvy::var("TEST_DATABASE_URL").expect("TEST_DATABASE_URL must be set to run tests");
         super::Runner::internal_test_runner(None, database_url)
     }
diff --git a/src/test_util.rs b/src/test_util.rs
index 0d1bbf214b..80dc5cfa53 100644
--- a/src/test_util.rs
+++ b/src/test_util.rs
@@ -4,7 +4,7 @@ use diesel::prelude::*;

 pub fn pg_connection_no_transaction() -> PgConnection {
     let database_url =
-        dotenv::var("TEST_DATABASE_URL").expect("TEST_DATABASE_URL must be set to run tests");
+        dotenvy::var("TEST_DATABASE_URL").expect("TEST_DATABASE_URL must be set to run tests");
     PgConnection::establish(&database_url).unwrap()
 }
diff --git a/src/tests/all.rs b/src/tests/all.rs
index 64dccc5093..854feb9ca8 100644
--- a/src/tests/all.rs
+++ b/src/tests/all.rs
@@ -102,7 +102,7 @@ pub struct OkBool {

 // Return the environment variable only if it has been defined
 #[track_caller]
 fn env(var: &str) -> String {
-    match dotenv::var(var) {
+    match dotenvy::var(var) {
         Ok(ref s) if s.is_empty() => panic!("environment variable `{var}` must not be empty"),
         Ok(s) => s,
         _ => panic!("environment variable `{var}` must be defined and valid unicode"),
diff --git a/src/tests/categories.rs b/src/tests/categories.rs
index 1f65df66fe..58b89c1536 100644
--- a/src/tests/categories.rs
+++ b/src/tests/categories.rs
@@ -39,7 +39,7 @@ description = "Another category ho hum"

 fn pg_connection() -> PgConnection {
     let database_url =
-        dotenv::var("TEST_DATABASE_URL").expect("TEST_DATABASE_URL must be set to run tests");
+        dotenvy::var("TEST_DATABASE_URL").expect("TEST_DATABASE_URL must be set to run tests");
     let mut conn = PgConnection::establish(&database_url).unwrap();
     conn.begin_test_transaction().unwrap();
     conn
diff --git a/src/tests/record.rs b/src/tests/record.rs
index d4b7bef3da..4235b79c78 100644
--- a/src/tests/record.rs
+++ b/src/tests/record.rs
@@ -90,7 +90,7 @@ enum Record {

 pub fn proxy() -> (String, Bomb) {
     let me = thread::current().name().unwrap().to_string();
-    let record_env = dotenv::var("RECORD").ok();
+    let record_env = dotenvy::var("RECORD").ok();

     let (url_tx, url_rx) = mpsc::channel();

@@ -280,7 +280,7 @@ async fn record_http(req: Request<Body>, client: Client) -> Result<Response<Bod
 ().unwrap();
diff --git a/src/tests/server_binary.rs b/src/tests/server_binary.rs
index cd199b2016..30f5074728 100644
--- a/src/tests/server_binary.rs
+++ b/src/tests/server_binary.rs
@@ -88,7 +88,7 @@ struct ServerBin {

 impl ServerBin {
     fn prepare() -> Result<Self> {
-        let mut env = dotenv::vars().collect::<HashMap<_, _>>();
+        let mut env = dotenvy::vars().collect::<HashMap<_, _>>();
         // Bind a random port every time the server is started.
         env.insert("PORT".into(), "0".into());
         // Avoid creating too many database connections.
diff --git a/src/tests/util/test_app.rs b/src/tests/util/test_app.rs
index 4808c0940d..d0c6e662e1 100644
--- a/src/tests/util/test_app.rs
+++ b/src/tests/util/test_app.rs
@@ -335,8 +335,8 @@ fn simple_config() -> config::Server {
         base: config::Base::test(),
         db: config::DatabasePools::test_from_environment(),
         session_key: cookie::Key::derive_from("test this has to be over 32 bytes long".as_bytes()),
-        gh_client_id: ClientId::new(dotenv::var("GH_CLIENT_ID").unwrap_or_default()),
-        gh_client_secret: ClientSecret::new(dotenv::var("GH_CLIENT_SECRET").unwrap_or_default()),
+        gh_client_id: ClientId::new(dotenvy::var("GH_CLIENT_ID").unwrap_or_default()),
+        gh_client_secret: ClientSecret::new(dotenvy::var("GH_CLIENT_SECRET").unwrap_or_default()),
         max_upload_size: 3000,
         max_unpack_size: 2000,
         publish_rate_limit: Default::default(),
diff --git a/src/worker/cloudfront.rs b/src/worker/cloudfront.rs
index 5439852162..f3987cf22e 100644
--- a/src/worker/cloudfront.rs
+++ b/src/worker/cloudfront.rs
@@ -18,9 +18,9 @@ pub struct CloudFront {

 impl CloudFront {
     pub fn from_environment() -> Option<Self> {
-        let distribution_id = dotenv::var("CLOUDFRONT_DISTRIBUTION").ok()?;
-        let access_key = dotenv::var("AWS_ACCESS_KEY").expect("missing AWS_ACCESS_KEY");
-        let secret_key = dotenv::var("AWS_SECRET_KEY").expect("missing AWS_SECRET_KEY");
+        let distribution_id = dotenvy::var("CLOUDFRONT_DISTRIBUTION").ok()?;
+        let access_key = dotenvy::var("AWS_ACCESS_KEY").expect("missing AWS_ACCESS_KEY");
+        let secret_key = dotenvy::var("AWS_SECRET_KEY").expect("missing AWS_SECRET_KEY");

         Some(Self {
             distribution_id,
             access_key,
diff --git a/src/worker/dump_db.rs b/src/worker/dump_db.rs
index f865aa2349..7a7b469771 100644
--- a/src/worker/dump_db.rs
+++ b/src/worker/dump_db.rs
@@ -95,7 +95,7 @@ impl DumpDirectory {
         }

         let metadata = Metadata {
             timestamp: &self.timestamp,
-            crates_io_commit: dotenv::var("HEROKU_SLUG_COMMIT")
+            crates_io_commit: dotenvy::var("HEROKU_SLUG_COMMIT")
                 .unwrap_or_else(|_| "unknown".to_owned()),
         };
         let path = self.export_dir.join("metadata.json");
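
dotenvy 0.15 is a maintained, API-compatible fork of the dotenv crate (which is effectively unmaintained; see RUSTSEC-2021-0141), so this migration is a mechanical rename of the crate path: every `dotenv::var` / `dotenv::vars` call becomes `dotenvy::var` / `dotenvy::vars` with no behavior change. As with the old crate, `dotenvy::var` loads the *.env* file on first use and otherwise reads the process environment, returning `Result<String, dotenvy::Error>`. A minimal sketch of the call pattern this diff relies on (the variable name below is illustrative, not one crates.io defines):

    // Read an optional environment variable via dotenvy, falling back to a default.
    // `EXAMPLE_TIMEOUT_SECONDS` is a hypothetical name used only for illustration.
    fn example_timeout_seconds() -> u64 {
        dotenvy::var("EXAMPLE_TIMEOUT_SECONDS")
            .ok()
            .and_then(|s| s.parse().ok())
            .unwrap_or(30)
    }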