From 35dc714f1b3cfa12517ed79ac2e38b986e94d28f Mon Sep 17 00:00:00 2001
From: Mark Rousskov <mark.simulacrum@gmail.com>
Date: Thu, 18 Jul 2019 12:18:22 -0400
Subject: [PATCH] Fix uploads of large crates to S3 breaking the client

Currently, we reuse the client for all files that are uploaded per call
to the relevant code. Unfortunately, S3 will prevent connections from
being long-lived (it's unknown what the exact timeout is, but we hit it
relatively rarely). This code attempts to retry S3 uploads up to 3 times
while replacing the client each time.
---
 src/db/file.rs | 47 ++++++++++++++++++++++++++++++-----------------
 1 file changed, 30 insertions(+), 17 deletions(-)

diff --git a/src/db/file.rs b/src/db/file.rs
index 5e0eee1e6..060431618 100644
--- a/src/db/file.rs
+++ b/src/db/file.rs
@@ -121,7 +121,7 @@ pub fn add_path_into_database<P: AsRef<Path>>(conn: &Connection,
     try!(cookie.load::<&str>(&[]));
 
     let trans = try!(conn.transaction());
-    let client = s3_client();
+    let mut client = s3_client();
     let mut file_list_with_mimes: Vec<(String, PathBuf)> = Vec::new();
 
     for file_path in try!(get_file_list(&path)) {
@@ -158,22 +158,35 @@ pub fn add_path_into_database<P: AsRef<Path>>(conn: &Connection,
             }
         };
 
-        let content: Option<Vec<u8>> = if let Some(client) = &client {
-            let s3_res = client.put_object(PutObjectRequest {
-                bucket: "rust-docs-rs".into(),
-                key: bucket_path.clone(),
-                body: Some(content.clone().into()),
-                content_type: Some(mime.clone()),
-                ..Default::default()
-            }).sync();
-            match s3_res {
-                // we've successfully uploaded the content, so steal it;
-                // we don't want to put it in the DB
-                Ok(_) => None,
-                // Since s3 was configured, we want to panic on failure to upload.
-                Err(e) => {
-                    panic!("failed to upload to {}: {:?}", bucket_path, e)
-                },
+        let content: Option<Vec<u8>> = if let Some(client) = &mut client {
+            let mut attempts = 0;
+            loop {
+                let s3_res = client.put_object(PutObjectRequest {
+                    bucket: "rust-docs-rs".into(),
+                    key: bucket_path.clone(),
+                    body: Some(content.clone().into()),
+                    content_type: Some(mime.clone()),
+                    ..Default::default()
+                }).sync();
+                attempts += 1;
+                match s3_res {
+                    // we've successfully uploaded the content, so steal it;
+                    // we don't want to put it in the DB
+                    Ok(_) => break None,
+                    // Since s3 was configured, we want to panic on failure to upload.
+                    Err(e) => {
+                        log::error!("failed to upload to {}: {:?}", bucket_path, e);
+                        // Get a new client, in case the old one's connection is stale.
+                        // AWS will kill our connection if it's alive for too long; this avoids
+                        // that preventing us from building the crate entirely.
+                        *client = s3_client().unwrap();
+                        if attempts > 3 {
+                            panic!("failed to upload 3 times, exiting");
+                        } else {
+                            continue;
+                        }
+                    },
+                }
             }
         } else {
             Some(content.clone().into())