Skip to content

Commit

Permalink
Merge torrust#117: Clear clippy warnings
Browse files Browse the repository at this point in the history
f0d3da5 dev: fix remaining clippy warnings 2 (Cameron Garnham)
da91f97 dev: fix remaining clippy warnings (Cameron Garnham)
493adbb dev: fix clippy warnings for: src/tracker.rs (Cameron Garnham)
6d5e2ed dev: fix clippy warnings for: src/routes/user.rs (Cameron Garnham)
33c3218 dev: fix clippy warnings for: src/routes/torrent.rs (Cameron Garnham)
c20608f dev: fix clippy warnings for: src/routes/settings.rs (Cameron Garnham)
7277e4e dev: fix clippy warnings for: src/routes/mod.rs (Cameron Garnham)
fd75fd4 dev: fix clippy warnings for: src/routes/category.rs (Cameron Garnham)
21493b0 dev: fix clippy warnings for: src/routes/about.rs (Cameron Garnham)
f1b3663 dev: fix clippy warnings for: src/models/user.rs (Cameron Garnham)
a0947d0 dev: fix clippy warnings for: src/models/torrent.rs (Cameron Garnham)
269cd28 dev: fix clippy warnings for: src/models/torrent_file.rs (Cameron Garnham)
eb3dd11 dev: fix clippy warnings for: src/models/response.rs (Cameron Garnham)
b737f10 dev: fix clippy warnings for: src/mailer.rs (Cameron Garnham)
3e7a917 dev: fix clippy warnings for: src/lib.rs (Cameron Garnham)
7f79fa9 dev: fix clippy warnings for: src/errors.rs (Cameron Garnham)
578b213 dev: fix clippy warnings for: src/databases/sqlite.rs (Cameron Garnham)
593ac6f dev: fix clippy warnings for: src/databases/mysql.rs (Cameron Garnham)
ebc360e dev: fix clippy warnings for: src/databases/database.rs (Cameron Garnham)
7b28120 dev: fix clippy warnings for: src/console/commands/import_tracker_statistics.rs (Cameron Garnham)
836d53f dev: fix clippy warnings for: src/config.rs (Cameron Garnham)
a741a22 dev: fix clippy warnings for: src/auth.rs (Cameron Garnham)
4a70ee0 dev: apply clippy auto-fixes (Cameron Garnham)

Pull request description:

ACKs for top commit:
  josecelano:
    ACK f0d3da5
  da2ce7:
    ACK f0d3da5

Tree-SHA512: 5deb188c95e40272f168124460843456c54ff947e44861b45f40614f4b5ddc16d2e47e28b298d0e9eb0898d0c9021853607d5848a2f4b2a71740a4bd3c5362fa
  • Loading branch information
da2ce7 committed May 10, 2023
2 parents f1270ff + f0d3da5 commit d3762ff
Show file tree
Hide file tree
Showing 54 changed files with 1,240 additions and 701 deletions.
1 change: 1 addition & 0 deletions project-words.txt
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@ hexlify
httpseeds
imagoodboy
imdl
indexmap
infohash
jsonwebtoken
leechers
Expand Down
14 changes: 6 additions & 8 deletions src/app.rs
Original file line number Diff line number Diff line change
Expand Up @@ -11,11 +11,9 @@ use crate::bootstrap::logging;
use crate::cache::image::manager::ImageCacheService;
use crate::common::AppData;
use crate::config::Configuration;
use crate::databases::database::connect_database;
use crate::mailer::MailerService;
use crate::routes;
use crate::tracker::service::Service;
use crate::databases::database;
use crate::tracker::statistics_importer::StatisticsImporter;
use crate::{mailer, routes, tracker};

pub struct Running {
pub api_server: Server,
Expand Down Expand Up @@ -43,12 +41,12 @@ pub async fn run(configuration: Configuration) -> Running {

// Build app dependencies

let database = Arc::new(connect_database(&database_connect_url).await.expect("Database error."));
let database = Arc::new(database::connect(&database_connect_url).await.expect("Database error."));
let auth = Arc::new(AuthorizationService::new(cfg.clone(), database.clone()));
let tracker_service = Arc::new(Service::new(cfg.clone(), database.clone()).await);
let tracker_service = Arc::new(tracker::service::Service::new(cfg.clone(), database.clone()).await);
let tracker_statistics_importer =
Arc::new(StatisticsImporter::new(cfg.clone(), tracker_service.clone(), database.clone()).await);
let mailer_service = Arc::new(MailerService::new(cfg.clone()).await);
let mailer_service = Arc::new(mailer::Service::new(cfg.clone()).await);
let image_cache_service = Arc::new(ImageCacheService::new(cfg.clone()).await);

// Build app container
Expand Down Expand Up @@ -92,7 +90,7 @@ pub async fn run(configuration: Configuration) -> Running {
.wrap(Cors::permissive())
.app_data(web::Data::new(app_data.clone()))
.wrap(middleware::Logger::default())
.configure(routes::init_routes)
.configure(routes::init)
})
.bind((ip, net_port))
.expect("can't bind server to socket address");
Expand Down
38 changes: 29 additions & 9 deletions src/auth.rs
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ use crate::config::Configuration;
use crate::databases::database::Database;
use crate::errors::ServiceError;
use crate::models::user::{UserClaims, UserCompact};
use crate::utils::clock::current_time;
use crate::utils::clock;

pub struct AuthorizationService {
cfg: Arc<Configuration>,
Expand All @@ -19,19 +19,25 @@ impl AuthorizationService {
AuthorizationService { cfg, database }
}

/// Create Json Web Token
pub async fn sign_jwt(&self, user: UserCompact) -> String {
let settings = self.cfg.settings.read().await;

// create JWT that expires in two weeks
let key = settings.auth.secret_key.as_bytes();
// TODO: create config option for setting the token validity in seconds
let exp_date = current_time() + 1_209_600; // two weeks from now
let exp_date = clock::now() + 1_209_600; // two weeks from now

let claims = UserClaims { user, exp: exp_date };

encode(&Header::default(), &claims, &EncodingKey::from_secret(key)).unwrap()
encode(&Header::default(), &claims, &EncodingKey::from_secret(key)).expect("argument `Header` should match `EncodingKey`")
}

/// Verify Json Web Token
///
/// # Errors
///
/// This function will return an error if the JWT is not good or expired.
pub async fn verify_jwt(&self, token: &str) -> Result<UserClaims, ServiceError> {
let settings = self.cfg.settings.read().await;

Expand All @@ -41,7 +47,7 @@ impl AuthorizationService {
&Validation::new(Algorithm::HS256),
) {
Ok(token_data) => {
if token_data.claims.exp < current_time() {
if token_data.claims.exp < clock::now() {
return Err(ServiceError::TokenExpired);
}
Ok(token_data.claims)
Expand All @@ -50,12 +56,21 @@ impl AuthorizationService {
}
}

/// Get Claims from Request
///
/// # Errors
///
/// This function will return a `ServiceError::TokenNotFound` if `HeaderValue` is `None`
/// This function will pass through the `ServiceError::TokenInvalid` if unable to verify the JWT.
pub async fn get_claims_from_request(&self, req: &HttpRequest) -> Result<UserClaims, ServiceError> {
let _auth = req.headers().get("Authorization");
match _auth {
Some(_) => {
let _split: Vec<&str> = _auth.unwrap().to_str().unwrap().split("Bearer").collect();
let token = _split[1].trim();
match req.headers().get("Authorization") {
Some(auth) => {
let split: Vec<&str> = auth
.to_str()
.expect("variable `auth` contains data that is not visible ASCII chars.")
.split("Bearer")
.collect();
let token = split[1].trim();

match self.verify_jwt(token).await {
Ok(claims) => Ok(claims),
Expand All @@ -66,6 +81,11 @@ impl AuthorizationService {
}
}

/// Get User (in compact form) from Request
///
/// # Errors
///
/// This function will return a `ServiceError::UserNotFound` if unable to get user from database.
pub async fn get_user_compact_from_request(&self, req: &HttpRequest) -> Result<UserCompact, ServiceError> {
let claims = self.get_claims_from_request(req).await?;

Expand Down
4 changes: 2 additions & 2 deletions src/bin/upgrade.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2,9 +2,9 @@
//! It updates the application from version v1.0.0 to v2.0.0.
//! You can execute it with: `cargo run --bin upgrade ./data.db ./data_v2.db ./uploads`

use torrust_index_backend::upgrades::from_v1_0_0_to_v2_0_0::upgrader::run_upgrader;
use torrust_index_backend::upgrades::from_v1_0_0_to_v2_0_0::upgrader::run;

#[actix_web::main]
async fn main() {
run_upgrader().await;
run().await;
}
4 changes: 4 additions & 0 deletions src/cache/cache.rs
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@ pub struct BytesCache {
}

impl BytesCache {
#[must_use]
pub fn new() -> Self {
Self {
bytes_table: IndexMap::new(),
Expand All @@ -36,6 +37,7 @@ impl BytesCache {
}

// With a total capacity in bytes.
#[must_use]
pub fn with_capacity(capacity: usize) -> Self {
let mut new = Self::new();

Expand All @@ -45,6 +47,7 @@ impl BytesCache {
}

// With a limit for individual entry sizes.
#[must_use]
pub fn with_entry_size_limit(entry_size_limit: usize) -> Self {
let mut new = Self::new();

Expand Down Expand Up @@ -77,6 +80,7 @@ impl BytesCache {
}

// Size of all the entry bytes combined.
#[must_use]
pub fn total_size(&self) -> usize {
let mut size: usize = 0;

Expand Down
60 changes: 36 additions & 24 deletions src/cache/image/manager.rs
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ use std::time::{Duration, SystemTime};
use bytes::Bytes;
use tokio::sync::RwLock;

use crate::cache::cache::BytesCache;
use crate::cache::BytesCache;
use crate::config::Configuration;
use crate::models::user::UserCompact;

Expand All @@ -19,11 +19,12 @@ pub enum Error {

type UserQuotas = HashMap<i64, ImageCacheQuota>;

#[must_use]
pub fn now_in_secs() -> u64 {
match SystemTime::now().duration_since(SystemTime::UNIX_EPOCH) {
Ok(n) => n.as_secs(),
Err(_) => panic!("SystemTime before UNIX EPOCH!"),
}
SystemTime::now()
.duration_since(SystemTime::UNIX_EPOCH)
.expect("SystemTime before UNIX EPOCH!")
.as_secs()
}

#[derive(Clone)]
Expand All @@ -36,6 +37,7 @@ pub struct ImageCacheQuota {
}

impl ImageCacheQuota {
#[must_use]
pub fn new(user_id: i64, max_usage: usize, period_secs: u64) -> Self {
Self {
user_id,
Expand All @@ -46,14 +48,19 @@ impl ImageCacheQuota {
}
}

pub fn add_usage(&mut self, amount: usize) -> Result<(), ()> {
/// Add Usage Quota
///
/// # Errors
///
/// This function will return a `Error::UserQuotaMet` if user quota has been met.
pub fn add_usage(&mut self, amount: usize) -> Result<(), Error> {
// Check if quota needs to be reset.
if now_in_secs() - self.date_start_secs > self.period_secs {
self.reset();
}

if self.is_reached() {
return Err(());
return Err(Error::UserQuotaMet);
}

self.usage = self.usage.saturating_add(amount);
Expand All @@ -66,6 +73,7 @@ impl ImageCacheQuota {
self.date_start_secs = now_in_secs();
}

#[must_use]
pub fn is_reached(&self) -> bool {
self.usage >= self.max_usage
}
Expand All @@ -89,7 +97,7 @@ impl ImageCacheService {
let reqwest_client = reqwest::Client::builder()
.timeout(Duration::from_millis(settings.image_cache.max_request_timeout_ms))
.build()
.unwrap();
.expect("unable to build client request");

drop(settings);

Expand All @@ -103,33 +111,37 @@ impl ImageCacheService {

/// Get an image from the url and insert it into the cache if it isn't cached already.
/// Unauthenticated users can only get already cached images.
///
/// # Errors
///
/// Return a `Error::Unauthenticated` if the user has not been authenticated.
pub async fn get_image_by_url(&self, url: &str, opt_user: Option<UserCompact>) -> Result<Bytes, Error> {
if let Some(entry) = self.image_cache.read().await.get(url).await {
return Ok(entry.bytes);
}

if opt_user.is_none() {
return Err(Error::Unauthenticated);
}

let user = opt_user.unwrap();
match opt_user {
None => Err(Error::Unauthenticated),

self.check_user_quota(&user).await?;
Some(user) => {
self.check_user_quota(&user).await?;

let image_bytes = self.get_image_from_url_as_bytes(url).await?;
let image_bytes = self.get_image_from_url_as_bytes(url).await?;

self.check_image_size(&image_bytes).await?;
self.check_image_size(&image_bytes).await?;

// These two functions could be executed after returning the image to the client,
// but then we would need a dedicated task or thread that executes these functions.
// This can be problematic if a task is spawned after every user request.
// Since these functions execute very fast, I don't see a reason to further optimize this.
// For now.
self.update_image_cache(url, &image_bytes).await?;
// These two functions could be executed after returning the image to the client,
// but then we would need a dedicated task or thread that executes these functions.
// This can be problematic if a task is spawned after every user request.
// Since these functions execute very fast, I don't see a reason to further optimize this.
// For now.
self.update_image_cache(url, &image_bytes).await?;

self.update_user_quota(&user, image_bytes.len()).await?;
self.update_user_quota(&user, image_bytes.len()).await?;

Ok(image_bytes)
Ok(image_bytes)
}
}
}

async fn get_image_from_url_as_bytes(&self, url: &str) -> Result<Bytes, Error> {
Expand Down
Loading

0 comments on commit d3762ff

Please sign in to comment.