store & show build logs for non-default targets #2427

Merged (2 commits) on Feb 29, 2024

2 changes: 1 addition & 1 deletion .github/workflows/ci.yml
@@ -106,7 +106,7 @@ jobs:

       - name: run slow tests
         env:
-          DOCSRS_INCLUDE_DEFAULT_TARGETS: false
+          DOCSRS_INCLUDE_DEFAULT_TARGETS: true
         run: |
           cargo test --locked -- --ignored --test-threads=1
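Presumably the slow-test CI job now needs DOCSRS_INCLUDE_DEFAULT_TARGETS enabled so the builder actually produces documentation (and therefore logs) for targets beyond the host, which the new per-target build-log assertion further down exercises.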


23 changes: 23 additions & 0 deletions Cargo.lock


1 change: 1 addition & 0 deletions Cargo.toml
Expand Up @@ -74,6 +74,7 @@ hex = "0.4.3"
# Async
tokio = { version = "1.0", features = ["rt-multi-thread", "signal", "macros"] }
futures-util = "0.3.5"
async-stream = "0.3.5"
aws-config = "1.0.0"
aws-sdk-s3 = "1.3.0"
aws-sdk-cloudfront = "1.3.0"
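The new async-stream dependency is presumably used by the S3 backend's list_prefix, which is not shown in this excerpt. A minimal sketch of what that adapter might look like, assuming aws-sdk-s3's ListObjectsV2 paginator; the function name, signature, and error handling here are illustrative assumptions, not the actual implementation:

use async_stream::try_stream;
use futures_util::stream::Stream;

// Hypothetical sketch only: flatten paginated ListObjectsV2 responses into
// a stream of object keys under `prefix`.
fn list_prefix<'a>(
    client: &'a aws_sdk_s3::Client,
    bucket: &'a str,
    prefix: &'a str,
) -> impl Stream<Item = anyhow::Result<String>> + 'a {
    try_stream! {
        let mut pages = client
            .list_objects_v2()
            .bucket(bucket)
            .prefix(prefix)
            .into_paginator()
            .send();
        // Each page is a Result; `?` inside try_stream! forwards the error
        // to the stream's consumer instead of returning from the function.
        while let Some(page) = pages.next().await {
            let page = page?;
            for object in page.contents() {
                if let Some(key) = object.key() {
                    yield key.to_owned();
                }
            }
        }
    }
}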
28 changes: 21 additions & 7 deletions src/docbuilder/rustwide_builder.rs
@@ -478,6 +478,7 @@ impl RustwideBuilder {
         }

         let mut algs = HashSet::new();
+        let mut target_build_logs = HashMap::new();
         if has_docs {
             debug!("adding documentation for the default target to the database");
             self.copy_docs(
@@ -493,14 +494,15 @@ impl RustwideBuilder {
             // Limit the number of targets so that no one can try to build all 200000 possible targets
             for target in other_targets.into_iter().take(limits.targets()) {
                 debug!("building package {} {} for {}", name, version, target);
-                self.build_target(
+                let target_res = self.build_target(
                     target,
                     build,
                     &limits,
                     local_storage.path(),
                     &mut successful_targets,
                     &metadata,
                 )?;
+                target_build_logs.insert(target, target_res.build_log);
             }
             let (_, new_alg) = self.runtime.block_on(add_path_into_remote_archive(
                 &self.async_storage,
@@ -588,6 +590,10 @@ impl RustwideBuilder {
         ))?;
         let build_log_path = format!("build-logs/{build_id}/{default_target}.txt");
         self.storage.store_one(build_log_path, res.build_log)?;
+        for (target, log) in target_build_logs {
+            let build_log_path = format!("build-logs/{build_id}/{target}.txt");
+            self.storage.store_one(build_log_path, log)?;
+        }

         // Some crates.io crate data is mutable, so we proactively update it during a release
         if !is_local {
@@ -640,7 +646,7 @@ impl RustwideBuilder {
         local_storage: &Path,
         successful_targets: &mut Vec<String>,
         metadata: &Metadata,
-    ) -> Result<()> {
+    ) -> Result<FullBuildResult> {
         let target_res = self.execute_build(target, false, build, limits, metadata, false)?;
         if target_res.result.successful {
             // Cargo is not giving any error and not generating documentation of some crates
@@ -652,7 +658,7 @@ impl RustwideBuilder {
                 successful_targets.push(target.to_string());
             }
         }
-        Ok(())
+        Ok(target_res)
     }

     fn get_coverage(
@@ -981,25 +987,26 @@ mod tests {

         // check release record in the db (default and other targets)
         let mut conn = env.db().conn();
-        let rows = conn
-            .query(
+        let row = conn
+            .query_one(
                 "SELECT
                     r.rustdoc_status,
                     r.default_target,
                     r.doc_targets,
                     r.archive_storage,
-                    cov.total_items
+                    cov.total_items,
+                    b.id as build_id
                 FROM
                     crates as c
                     INNER JOIN releases AS r ON c.id = r.crate_id
+                    INNER JOIN builds as b ON r.id = b.rid
                     LEFT OUTER JOIN doc_coverage AS cov ON r.id = cov.release_id
                 WHERE
                     c.name = $1 AND
                     r.version = $2",
                 &[&crate_, &version],
             )
             .unwrap();
-        let row = rows.first().unwrap();

         assert!(row.get::<_, bool>("rustdoc_status"));
         assert_eq!(row.get::<_, String>("default_target"), default_target);
@@ -1086,6 +1093,13 @@ mod tests {

             assert!(target_docs_present);
             assert_success(&target_url, web)?;
+
+            assert!(storage
+                .exists(&format!(
+                    "build-logs/{}/{target}.txt",
+                    row.get::<_, i32>("build_id")
+                ))
+                .unwrap());
         }
     }

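With this change every target's log lands under one common build-logs/{build_id}/ prefix: the default target keeps its existing {default_target}.txt entry and each extra target gets its own {target}.txt. A sketch of how the per-build logs could then be enumerated with the list_prefix plumbing added below; this is illustrative only (list_prefix is pub(super), so such a helper would live inside the storage module, and build id 42 is made up):

use futures_util::stream::TryStreamExt;

// Hypothetical: collect every log path stored for build 42.
async fn logs_for_build(storage: &AsyncStorage) -> anyhow::Result<Vec<String>> {
    storage.list_prefix("build-logs/42/").await.try_collect().await
}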
24 changes: 19 additions & 5 deletions src/storage/database.rs
@@ -1,10 +1,8 @@
+use super::{Blob, FileRange};
+use crate::{db::Pool, error::Result, InstanceMetrics};
 use chrono::{DateTime, Utc};
+use futures_util::stream::{Stream, TryStreamExt};
 use sqlx::Acquire;
-
-use super::{Blob, FileRange};
-use crate::db::Pool;
-use crate::error::Result;
-use crate::InstanceMetrics;
 use std::{convert::TryFrom, sync::Arc};

 pub(crate) struct DatabaseBackend {
@@ -167,6 +165,22 @@ impl DatabaseBackend {
         Ok(())
     }

+    pub(super) async fn list_prefix<'a>(
+        &'a self,
+        prefix: &'a str,
+    ) -> impl Stream<Item = Result<String>> + 'a {
+        sqlx::query!(
+            "SELECT path
+             FROM files
+             WHERE path LIKE $1
+             ORDER BY path;",
+            format!("{}%", prefix.replace('%', "\\%"))
+        )
+        .fetch(&self.pool)
+        .map_err(Into::into)
+        .map_ok(|row| row.path)
+    }
+
     pub(crate) async fn delete_prefix(&self, prefix: &str) -> Result<()> {
         sqlx::query!(
             "DELETE FROM files WHERE path LIKE $1;",
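One subtlety worth noting: the prefix is turned into a LIKE pattern by escaping % and appending a trailing %. The _ single-character wildcard is not escaped, so a _ in a prefix would match any one character; that is harmless for the path layouts used here, but the pattern construction is easy to check in isolation:

// How the LIKE pattern is built: `%` in the prefix is escaped, then a
// trailing `%` (match anything) is appended.
assert_eq!(format!("{}%", "50%-off/".replace('%', "\\%")), "50\\%-off/%");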
70 changes: 65 additions & 5 deletions src/storage/mod.rs
@@ -6,18 +6,20 @@ mod s3;
 pub use self::compression::{compress, decompress, CompressionAlgorithm, CompressionAlgorithms};
 use self::database::DatabaseBackend;
 use self::s3::S3Backend;
-use crate::error::Result;
-use crate::web::metrics::RenderingTimesRecorder;
-use crate::{db::Pool, utils::spawn_blocking, Config, InstanceMetrics};
+use crate::{
+    db::Pool, error::Result, utils::spawn_blocking, web::metrics::RenderingTimesRecorder, Config,
+    InstanceMetrics,
+};
 use anyhow::{anyhow, ensure};
 use chrono::{DateTime, Utc};
 use fn_error_context::context;
+use futures_util::stream::BoxStream;
 use path_slash::PathExt;
-use std::io::BufReader;
 use std::{
     collections::{HashMap, HashSet},
     ffi::OsStr,
-    fmt, fs, io,
+    fmt, fs,
+    io::{self, BufReader},
     ops::RangeInclusive,
     path::{Path, PathBuf},
     sync::Arc,
@@ -558,6 +560,16 @@ impl AsyncStorage {
         }
     }

+    pub(super) async fn list_prefix<'a>(
+        &'a self,
+        prefix: &'a str,
+    ) -> BoxStream<'a, Result<String>> {
+        match &self.backend {
+            StorageBackend::Database(db) => Box::pin(db.list_prefix(prefix).await),
+            StorageBackend::S3(s3) => Box::pin(s3.list_prefix(prefix).await),
+        }
+    }
+
     pub(crate) async fn delete_prefix(&self, prefix: &str) -> Result<()> {
         match &self.backend {
             StorageBackend::Database(db) => db.delete_prefix(prefix).await,
@@ -752,6 +764,22 @@ impl Storage {
         self.runtime.block_on(self.inner.store_one(path, content))
     }

+    /// sync wrapper for the list_prefix function
+    /// purely for testing purposes since it collects all files into a Vec.
+    #[cfg(test)]
+    pub(crate) fn list_prefix(&self, prefix: &str) -> impl Iterator<Item = Result<String>> {
+        use futures_util::stream::StreamExt;
+        self.runtime
+            .block_on(async {
+                self.inner
+                    .list_prefix(prefix)
+                    .await
+                    .collect::<Vec<_>>()
+                    .await
+            })
+            .into_iter()
+    }
+
     pub(crate) fn delete_prefix(&self, prefix: &str) -> Result<()> {
         self.runtime.block_on(self.inner.delete_prefix(prefix))
     }
@@ -964,6 +992,37 @@ mod backend_tests {
         Ok(())
     }

+    fn test_list_prefix(storage: &Storage) -> Result<()> {
+        static FILENAMES: &[&str] = &["baz.txt", "some/bar.txt"];
+
+        storage.store_blobs(
+            FILENAMES
+                .iter()
+                .map(|&filename| Blob {
+                    path: filename.into(),
+                    mime: "text/plain".into(),
+                    date_updated: Utc::now(),
+                    compression: None,
+                    content: b"test content\n".to_vec(),
+                })
+                .collect(),
+        )?;
+
+        assert_eq!(
+            storage.list_prefix("").collect::<Result<Vec<String>>>()?,
+            FILENAMES
+        );
+
+        assert_eq!(
+            storage
+                .list_prefix("some/")
+                .collect::<Result<Vec<String>>>()?,
+            &["some/bar.txt"]
+        );
+
+        Ok(())
+    }
+
     fn test_too_long_filename(storage: &Storage) -> Result<()> {
         // minio returns ErrKeyTooLongError when the key is over 1024 bytes long.
         // When testing, minio just gave me `XMinioInvalidObjectName`, so I'll check that too.
@@ -1321,6 +1380,7 @@ mod backend_tests {
         test_get_range,
         test_get_too_big,
         test_too_long_filename,
+        test_list_prefix,
         test_delete_prefix,
         test_delete_prefix_without_matches,
         test_delete_percent,
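Taken together, the builder half stores one log per target, and the storage half (list_prefix across both backends) gives callers a way to discover which logs exist for a build; presumably the web UI's build-details page, which is not part of this excerpt, uses that listing to link to the non-default targets' logs.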