diff --git a/Cargo.lock b/Cargo.lock index 5d10e92..d68eff9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -23,6 +23,21 @@ dependencies = [ "futures-core", ] +[[package]] +name = "async-compression" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b0122885821398cc923ece939e24d1056a2384ee719432397fa9db87230ff11" +dependencies = [ + "futures-core", + "futures-io", + "memchr", + "pin-project-lite", + "tokio", + "zstd", + "zstd-safe", +] + [[package]] name = "async-executor" version = "1.5.1" @@ -141,8 +156,10 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" name = "bincache" version = "0.2.0" dependencies = [ + "async-compression", "async-std", "async-trait", + "futures-util", "paste", "thiserror", "tokio", @@ -187,6 +204,9 @@ name = "cc" version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" +dependencies = [ + "jobserver", +] [[package]] name = "cfg-if" @@ -284,11 +304,44 @@ dependencies = [ "waker-fn", ] +[[package]] +name = "futures-macro" +version = "0.3.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.18", +] + +[[package]] +name = "futures-task" +version = "0.3.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" + +[[package]] +name = "futures-util" +version = "0.3.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533" +dependencies = [ + "futures-core", + "futures-io", + "futures-macro", + "futures-task", + "memchr", + "pin-project-lite", + "pin-utils", + "slab", +] + [[package]] name = "getrandom" -version = "0.2.9" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c85e1d9ab2eadba7e5040d4e09cbd6d072b76a557ad64e797c2cb9d4da21d7e4" +checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" dependencies = [ "cfg-if", "libc", @@ -342,6 +395,15 @@ dependencies = [ "windows-sys", ] +[[package]] +name = "jobserver" +version = "0.1.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "936cfd212a0155903bcbc060e316fb6cc7cbf2e1907329391ebadc1fe0ce77c2" +dependencies = [ + "libc", +] + [[package]] name = "js-sys" version = "0.3.63" @@ -362,9 +424,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.144" +version = "0.2.146" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b00cc1c228a6782d0f076e7b232802e0c5689d41bb5df366f2a6b6621cfdfe1" +checksum = "f92be4933c13fd498862a9e02a3055f8a8d9c039ce33db97306fd5a6caa7f29b" [[package]] name = "linux-raw-sys" @@ -374,9 +436,9 @@ checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] name = "log" -version = "0.4.18" +version = "0.4.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "518ef76f2f87365916b142844c16d8fefd85039bc5699050210a7778ee1cd1de" +checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4" dependencies = [ "value-bag", ] @@ -427,6 +489,12 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" 
+[[package]] +name = "pkg-config" +version = "0.3.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" + [[package]] name = "polling" version = "2.8.0" @@ -445,9 +513,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.59" +version = "1.0.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6aeca18b86b413c660b781aa319e4e2648a3e6f9eadc9b47e9038e6fe9f3451b" +checksum = "dec2b086b7a862cf4de201096214fa870344cf922b2b30c167badb3af3195406" dependencies = [ "unicode-ident", ] @@ -463,9 +531,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.37.19" +version = "0.37.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acf8729d8542766f1b2cf77eb034d52f40d375bb8b615d0b147089946e16613d" +checksum = "b96e891d04aa506a6d1f318d2771bcb1c7dfda84e126660ace067c9b474bb2c0" dependencies = [ "bitflags", "errno", @@ -757,3 +825,33 @@ name = "windows_x86_64_msvc" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" + +[[package]] +name = "zstd" +version = "0.12.3+zstd.1.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76eea132fb024e0e13fd9c2f5d5d595d8a967aa72382ac2f9d39fcc95afd0806" +dependencies = [ + "zstd-safe", +] + +[[package]] +name = "zstd-safe" +version = "6.0.5+zstd.1.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d56d9e60b4b1758206c238a10165fbcae3ca37b01744e394c463463f6529d23b" +dependencies = [ + "libc", + "zstd-sys", +] + +[[package]] +name = "zstd-sys" +version = "2.0.8+zstd.1.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5556e6ee25d32df2586c098bbfa278803692a20d0ab9565e049480d52707ec8c" +dependencies = [ + "cc", + "libc", + "pkg-config", +] diff --git a/README.md b/README.md index 4c4e8b0..bbb596c 100644 --- a/README.md +++ b/README.md @@ -39,7 +39,7 @@ Bincache uses a strategy pattern to allow for different caching strategies: ```rust #[tokio::main(flavor = "current_thread")] async fn main() -> Result<(), Box> { - let mut cache = bincache::MemoryCacheBuilder::new().build()?; + let mut cache = bincache::MemoryCacheBuilder::default().build()?; // Put a key-value pair into the cache cache.put(&"foo", b"foo".to_vec()).await?; diff --git a/bincache/Cargo.toml b/bincache/Cargo.toml index b34edab..0a2bd3b 100644 --- a/bincache/Cargo.toml +++ b/bincache/Cargo.toml @@ -6,10 +6,11 @@ description = "ZitaneLabs binary cache." 
[features]
default = ["implicit-blocking"]
-implicit-blocking = []
-blocking = []
-rt_tokio_1 = ["dep:tokio"]
-rt_async-std_1 = ["dep:async-std"]
+implicit-blocking = ["dep:futures-util", "async-compression/futures-io"]
+blocking = ["dep:futures-util", "async-compression/futures-io"]
+rt_tokio_1 = ["dep:tokio", "async-compression/tokio"]
+rt_async-std_1 = ["dep:async-std", "async-compression/futures-io"]
+comp_zstd = ["async-compression/zstd"]
[dependencies]
paste = "1.0.12"
@@ -17,6 +18,8 @@ thiserror = "1.0.40"
async-trait = { version = "0.1" }
tokio = { version = "1", features = ["rt", "fs", "io-util"], optional = true }
async-std = { version = "1", optional = true }
+async-compression = { version = "0.4.0" }
+futures-util = { version = "0.3", features = ["io"], optional = true }
[dev-dependencies]
uuid = { version = "1.3.3", features = ["v4"] }
diff --git a/bincache/src/builder.rs b/bincache/src/builder.rs
index 9d4ea22..afae57d 100644
--- a/bincache/src/builder.rs
+++ b/bincache/src/builder.rs
@@ -1,5 +1,5 @@
mod cache_builder;
mod error;
-pub use cache_builder::CacheBuilder;
+pub use cache_builder::*;
pub use error::Error;
diff --git a/bincache/src/builder/cache_builder.rs b/bincache/src/builder/cache_builder.rs
index 4cfb409..c59dc0d 100644
--- a/bincache/src/builder/cache_builder.rs
+++ b/bincache/src/builder/cache_builder.rs
@@ -1,9 +1,8 @@
use std::hash::Hash;
-use super::Error;
-
use crate::cache::Cache;
-use crate::traits::{CacheKey, CacheStrategy};
+use crate::compression::{MaybeCompressor, Noop};
+use crate::traits::{CacheKey, CacheStrategy, CompressionStrategy};
use crate::Result;
/// A builder for creating a new [Cache].
@@ -22,51 +21,128 @@ use crate::Result;
/// cache.put("key", b"value".to_vec()).await.unwrap();
/// }
/// ```
-#[derive(Debug)]
-pub struct CacheBuilder<S> {
- strategy: Option<S>,
+#[derive(Debug, Default)]
+pub struct CacheBuilder;
+
+pub struct CacheBuilderWithStrategy<S> {
+ strategy: S,
}
-impl<S> CacheBuilder<S> {
- /// Set the [CacheStrategy].
- pub fn with_strategy<_S>(self, strategy: _S) -> CacheBuilder<_S>
- where
- _S: CacheStrategy,
- {
- CacheBuilder {
- strategy: Some(strategy),
+impl<S> Default for CacheBuilderWithStrategy<S>
+where
+ S: Default + CacheStrategy,
+{
+ fn default() -> Self {
+ CacheBuilderWithStrategy {
+ strategy: S::default(),
+ }
+ }
+}
+
+pub struct CacheBuilderWithCompression<C> {
+ compressor: C,
+}
+
+impl<C> Default for CacheBuilderWithCompression<C>
+where
+ C: Default + CompressionStrategy,
+{
+ fn default() -> Self {
+ CacheBuilderWithCompression {
+ compressor: C::default(),
}
}
}
-impl<S> CacheBuilder<S>
+pub struct CacheBuilderWithCompressionAndStrategy<S, C> {
+ strategy: S,
+ compressor: C,
+}
+
+impl<S, C> Default for CacheBuilderWithCompressionAndStrategy<S, C>
where
- S: CacheStrategy + Default,
+ S: Default + CacheStrategy,
+ C: Default + CompressionStrategy,
{
- pub fn new() -> CacheBuilder<S> {
- CacheBuilder {
- strategy: Some(S::default()),
+ fn default() -> Self {
+ CacheBuilderWithCompressionAndStrategy {
+ strategy: S::default(),
+ compressor: C::default(),
+ }
+ }
+}
+
+impl CacheBuilder {
+ /// Add a strategy to the cache
+ pub fn with_strategy<S>(self, strategy: S) -> CacheBuilderWithStrategy<S>
+ where
+ S: CacheStrategy,
+ {
+ CacheBuilderWithStrategy { strategy }
+ }
+
+ /// Add a compression algorithm to the cache
+ pub fn with_compression<C>(self, compressor: C) -> CacheBuilderWithCompression<C>
+ where
+ C: CompressionStrategy,
+ {
+ CacheBuilderWithCompression { compressor }
+ }
+}
+
+impl<C> CacheBuilderWithCompression<C> {
+ /// Add a strategy to the cache
+ pub fn with_strategy<S>(self, strategy: S) -> CacheBuilderWithCompressionAndStrategy<S, C> {
+ {
+ CacheBuilderWithCompressionAndStrategy {
+ strategy,
+ compressor: self.compressor,
+ }
+ }
+ }
}
-impl<S> CacheBuilder<S>
+impl<S> CacheBuilderWithStrategy<S>
where
S: CacheStrategy,
{
- /// Build the [Cache].
- pub fn build<K>(self) -> Result<Cache<K, S>>
+ /// Add a compression algorithm to the cache
+ pub fn with_compression<C>(self, compressor: C) -> CacheBuilderWithCompressionAndStrategy<S, C>
+ where
+ C: CompressionStrategy,
+ {
+ CacheBuilderWithCompressionAndStrategy {
+ strategy: self.strategy,
+ compressor,
+ }
+ }
+
+ /// Build the cache without using compression
+ pub fn build<K>(self) -> Result<Cache<K, S, Noop>>
where
K: CacheKey + Eq + Hash + Sync + Send,
{
- Ok(Cache::new(self.strategy.ok_or(Error::NoStrategy)?))
+ Ok(Cache::new(
+ self.strategy,
+ MaybeCompressor::<Noop>::Passthrough,
+ ))
}
}
-impl Default for CacheBuilder<()> {
- /// Create a new [CacheBuilder] with the default configuration.
- fn default() -> CacheBuilder<()> {
- CacheBuilder { strategy: None }
+impl<S, C> CacheBuilderWithCompressionAndStrategy<S, C>
+where
+ S: CacheStrategy,
+ C: CompressionStrategy,
+{
+ pub fn build<K>(self) -> Result<Cache<K, S, C>>
+ where
+ K: CacheKey + Eq + Hash + Sync + Send,
+ C: CompressionStrategy + Sync + Send,
+ {
+ Ok(Cache::new(
+ self.strategy,
+ MaybeCompressor::Compressor(self.compressor),
+ ))
}
}
@@ -82,8 +158,8 @@ mod tests {
}
async fn test_type_aliased() {
- type NoopCacheBuilder = CacheBuilder<Noop>;
- _ = NoopCacheBuilder::new().build::().unwrap();
+ type NoopCacheBuilder = CacheBuilderWithStrategy<Noop>;
+ _ = NoopCacheBuilder::default().build::().unwrap();
}
async fn test_key_inference() {
diff --git a/bincache/src/cache.rs b/bincache/src/cache.rs
index 003c1aa..0fee412 100644
--- a/bincache/src/cache.rs
+++ b/bincache/src/cache.rs
@@ -1,5 +1,8 @@
use crate::{
- traits::{CacheKey, CacheStrategy, FlushableStrategy, RecoverableStrategy},
+ compression::MaybeCompressor,
+ traits::{
+ CacheKey, CacheStrategy, CompressionStrategy, FlushableStrategy, RecoverableStrategy,
+ },
Result,
};
@@ -7,25 +10,32 @@ use std::{borrow::Cow, collections::HashMap, hash::Hash};
/// Binary cache.
#[derive(Debug)]
-pub struct Cache<K, S>
+pub struct Cache<K, S, C>
where
K: CacheKey + Eq + Hash,
S: CacheStrategy,
+ C: CompressionStrategy + Sync + Send,
{
data: HashMap<K, S::CacheEntry>,
strategy: S,
+ compressor: MaybeCompressor<C>,
}
-impl<K, S> Cache<K, S>
+impl<K, S, C> Cache<K, S, C>
where
K: CacheKey + Eq + Hash + Sync + Send,
S: CacheStrategy,
+ C: CompressionStrategy + Sync + Send,
{
/// Create a new [Cache].
- pub fn new(strategy: S) -> Cache<K, S> {
+ pub fn new(strategy: S, compressor: MaybeCompressor<C>) -> Cache<K, S, C>
+ where
+ C: CompressionStrategy + Sync + Send,
+ {
Cache {
data: HashMap::new(),
strategy,
+ compressor,
}
}
@@ -34,6 +44,8 @@ where
where
V: Into<Cow<'a, [u8]>> + Send,
{
+ let value: Cow<'_, [u8]> = self.compressor.compress(value.into()).await?;
+
let entry = self.strategy.put(&key, value).await?;
self.data.insert(key, entry);
Ok(())
@@ -42,13 +54,15 @@ where
/// Get an entry from the cache.
pub async fn get(&self, key: K) -> Result<Cow<'_, [u8]>> {
let entry = self.data.get(&key).ok_or(crate::Error::KeyNotFound)?;
- self.strategy.get(entry).await
+ let value = self.strategy.get(entry).await?;
+ self.compressor.decompress(value).await
}
/// Take an entry from the cache, removing it.
pub async fn take(&mut self, key: K) -> Result<Vec<u8>> {
let entry = self.data.remove(&key).ok_or(crate::Error::KeyNotFound)?;
- self.strategy.take(entry).await
+ let value = self.strategy.take(entry).await?;
+ Ok(self.compressor.decompress(value.into()).await?.into_owned())
}
/// Delete an entry from the cache.
@@ -63,10 +77,11 @@ where
}
}
-impl<K, S> Cache<K, S>
+impl<K, S, C> Cache<K, S, C>
where
K: CacheKey + Eq + Hash + Send,
S: RecoverableStrategy + Send,
+ C: CompressionStrategy + Sync + Send,
{
/// Recover the cache from a previous state.
/// Returns the number of recovered items.
@@ -94,10 +109,11 @@ where
}
}
-impl<K, S> Cache<K, S>
+impl<K, S, C> Cache<K, S, C>
where
K: CacheKey + Eq + Hash + ToOwned + Sync + Send,
S: FlushableStrategy,
+ C: CompressionStrategy + Sync + Send,
{
/// Flush entries to an underlying non-volatile storage.
/// Returns the number of flushed items.
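Reviewer note: with the `cache.rs` changes above, compression becomes a second, orthogonal axis of the builder. Below is a minimal usage sketch of the new API, not code from this patch. It assumes the `comp_zstd` and `rt_tokio_1` features are enabled and that `CacheBuilder` and the `MemoryStrategy` re-export remain reachable from the crate root (those re-exports are outside this diff).

```rust
use bincache::compression::{CompressionLevel, Zstd};
use bincache::{CacheBuilder, MemoryStrategy};

// Assumes features = ["comp_zstd", "rt_tokio_1"] on the bincache dependency.
#[tokio::main(flavor = "current_thread")]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Pick a storage strategy, then layer an optional compression strategy on top.
    let mut cache = CacheBuilder::default()
        .with_strategy(MemoryStrategy::default())
        .with_compression(Zstd::new(CompressionLevel::Default))
        .build()?;

    // Values are compressed on `put` and transparently decompressed on `get`.
    cache.put("key", b"value".to_vec()).await?;
    assert_eq!(cache.get("key").await?.as_ref(), b"value".as_slice());
    Ok(())
}
```

Omitting `with_compression` keeps the `MaybeCompressor::<Noop>::Passthrough` path, so existing callers of `build()` keep their current behavior.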
diff --git a/bincache/src/compression.rs b/bincache/src/compression.rs
new file mode 100644
index 0000000..1ba5752
--- /dev/null
+++ b/bincache/src/compression.rs
@@ -0,0 +1,11 @@
+mod compression_level;
+mod maybe_compressor;
+mod noop_compressor;
+#[cfg(feature = "comp_zstd")]
+mod zstd_compressor;
+
+pub use compression_level::CompressionLevel;
+pub(crate) use maybe_compressor::MaybeCompressor;
+pub(crate) use noop_compressor::Noop;
+#[cfg(feature = "comp_zstd")]
+pub use zstd_compressor::Zstd;
diff --git a/bincache/src/compression/compression_level.rs b/bincache/src/compression/compression_level.rs
new file mode 100644
index 0000000..937b7c1
--- /dev/null
+++ b/bincache/src/compression/compression_level.rs
@@ -0,0 +1,25 @@
+/// Compression level variants
+#[derive(Debug, Clone, Copy)]
+pub enum CompressionLevel {
+ /// Best compression level for the given compression algorithm
+ Best,
+ /// Default compression level for the given compression algorithm
+ Default,
+ /// Fastest compression level for the given compression algorithm
+ Fastest,
+ /// Specify a custom compression level, which will be clamped to the values
+ /// accepted by the underlying compression library.
+ Precise(i32),
+}
+
+impl From<CompressionLevel> for async_compression::Level {
+ fn from(val: CompressionLevel) -> Self {
+ use CompressionLevel::*;
+ match val {
+ Best => async_compression::Level::Best,
+ Default => async_compression::Level::Default,
+ Fastest => async_compression::Level::Fastest,
+ Precise(level) => async_compression::Level::Precise(level),
+ }
+ }
+}
diff --git a/bincache/src/compression/maybe_compressor.rs b/bincache/src/compression/maybe_compressor.rs
new file mode 100644
index 0000000..44e7c31
--- /dev/null
+++ b/bincache/src/compression/maybe_compressor.rs
@@ -0,0 +1,41 @@
+use crate::Result;
+use async_trait::async_trait;
+use std::borrow::Cow;
+
+use crate::{compression::Noop, traits::CompressionStrategy};
+
+/// Workaround for optional compression
+#[derive(Debug)]
+pub enum MaybeCompressor<T>
+where
+ T: CompressionStrategy + Sync + Send,
+{
+ Compressor(T),
+ Passthrough,
+}
+
+#[async_trait]
+impl<T> CompressionStrategy for MaybeCompressor<T>
+where
+ T: CompressionStrategy + Sync + Send,
+{
+ async fn compress<'a>(&self, data: Cow<'a, [u8]>) -> Result<Cow<'a, [u8]>> {
+ match self {
+ Self::Compressor(compressor) => compressor.compress(data).await,
+ Self::Passthrough => Noop::default().compress(data).await,
+ }
+ }
+
+ async fn decompress<'a>(&self, data: Cow<'a, [u8]>) -> Result<Cow<'a, [u8]>> {
+ match self {
+ Self::Compressor(compressor) => compressor.decompress(data).await,
+ Self::Passthrough => Noop::default().decompress(data).await,
+ }
+ }
+}
+
+impl MaybeCompressor<Noop> {
+ pub fn noop() -> Self {
+ MaybeCompressor::<Noop>::Passthrough
+ }
+}
diff --git a/bincache/src/compression/noop_compressor.rs b/bincache/src/compression/noop_compressor.rs
new file mode 100644
index 0000000..ea35450
--- /dev/null
+++ b/bincache/src/compression/noop_compressor.rs
@@ -0,0 +1,42 @@
+use crate::traits::CompressionStrategy;
+use crate::Result;
+use async_trait::async_trait;
+use std::borrow::Cow;
+
+#[derive(Default, Debug)]
+pub struct Noop;
+
+#[async_trait]
+impl CompressionStrategy for Noop {
+ async fn compress<'a>(&self, data: Cow<'a, [u8]>) -> Result<Cow<'a, [u8]>> {
+ Ok(data)
+ }
+
+ async fn decompress<'a>(&self, data: Cow<'a, [u8]>) -> Result<Cow<'a, [u8]>> {
+ Ok(data)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::Noop;
+ use crate::{async_test, traits::CompressionStrategy};
+
+ fn create_arb_data(range: usize) -> Vec<u8> {
+ let mut vec = Vec::with_capacity(range);
+ for i in 0..range {
+ vec.push((i % 255) as u8);
+ }
+ vec
+ }
+
+ async_test! {
+ async fn test_compression() {
+ let data = create_arb_data(1024);
+ let noop = Noop;
+ let compressed = noop.compress(data.clone().into()).await.unwrap();
+ let decompressed = noop.decompress(compressed).await.unwrap();
+ assert_eq!(data.as_slice(), decompressed.as_ref());
+ }
+ }
+}
diff --git a/bincache/src/compression/zstd_compressor.rs b/bincache/src/compression/zstd_compressor.rs
new file mode 100644
index 0000000..257cea1
--- /dev/null
+++ b/bincache/src/compression/zstd_compressor.rs
@@ -0,0 +1,117 @@
+use super::compression_level::CompressionLevel;
+use crate::traits::CompressionStrategy;
+use crate::Result;
+use async_trait::async_trait;
+use std::borrow::Cow;
+
+/// A Compressor using Zstd
+#[derive(Debug)]
+pub struct Zstd {
+ level: CompressionLevel,
+}
+
+impl Zstd {
+ /// Creates a new Zstd Compressor with the given compression level
+ pub fn new(level: CompressionLevel) -> Self {
+ Zstd { level }
+ }
+}
+
+impl Default for Zstd {
+ /// Creates a new Zstd Compressor with the default compression level
+ fn default() -> Self {
+ Zstd {
+ level: CompressionLevel::Default,
+ }
+ }
+}
+
+#[async_trait]
+impl CompressionStrategy for Zstd {
+ async fn compress<'a>(&self, data: Cow<'a, [u8]>) -> Result<Cow<'a, [u8]>> {
+ #[cfg(feature = "rt_tokio_1")]
+ {
+ use async_compression::tokio::write;
+ use tokio::io::AsyncWriteExt;
+ let mut encoder =
+ write::ZstdEncoder::with_quality(Vec::with_capacity(data.len()), self.level.into());
+ encoder.write_all(data.as_ref()).await?;
+ encoder.shutdown().await?;
+ return Ok(encoder.into_inner().into());
+ }
+ #[cfg(any(feature = "blocking", feature = "implicit-blocking"))]
+ {
+ use async_compression::futures::write;
+ use futures_util::AsyncWriteExt;
+ let mut encoder =
+ write::ZstdEncoder::with_quality(Vec::with_capacity(data.len()), self.level.into());
+ encoder.write_all(data.as_ref()).await?;
+ encoder.close().await?;
+ return Ok(encoder.into_inner().into());
+ }
+ #[cfg(feature = "rt_async-std_1")]
+ {
+ use async_compression::futures::write;
+ use async_std::io::WriteExt;
+ let mut encoder =
+ write::ZstdEncoder::with_quality(Vec::with_capacity(data.len()), self.level.into());
+ encoder.write_all(data.as_ref()).await?;
+ encoder.flush().await?;
+ return Ok(encoder.into_inner().into());
+ }
+ }
+
+ async fn decompress<'a>(&self, data: Cow<'a, [u8]>) -> Result<Cow<'a, [u8]>> {
+ #[cfg(feature = "rt_tokio_1")]
+ {
+ use async_compression::tokio::write;
+ use tokio::io::AsyncWriteExt;
+ let mut decoder = write::ZstdDecoder::new(Vec::with_capacity(data.len()));
+ decoder.write_all(data.as_ref()).await?;
+ decoder.shutdown().await?;
+ return Ok(decoder.into_inner().into());
+ }
+ #[cfg(any(feature = "blocking", feature = "implicit-blocking"))]
+ {
+ use async_compression::futures::write;
+ use futures_util::AsyncWriteExt;
+ let mut decoder = write::ZstdDecoder::new(Vec::with_capacity(data.len()));
+ decoder.write_all(data.as_ref()).await?;
+ decoder.close().await?;
+ return Ok(decoder.into_inner().into());
+ }
+ #[cfg(feature = "rt_async-std_1")]
+ {
+ use async_compression::futures::write;
+ use async_std::io::WriteExt;
+ let mut decoder = write::ZstdDecoder::new(Vec::with_capacity(data.len()));
+ decoder.write_all(data.as_ref()).await?;
+ decoder.flush().await?;
+ return Ok(decoder.into_inner().into());
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::Zstd;
+ use crate::{async_test, traits::CompressionStrategy};
+
+ fn create_arb_data(range: usize) -> Vec<u8> {
+ let mut vec =
Vec::with_capacity(range); + for i in 0..range { + vec.push((i % 255) as u8); + } + vec + } + + async_test! { + async fn test_compression() { + let data = create_arb_data(1024); + let zstd = Zstd::default(); + let compressed = zstd.compress(data.clone().into()).await.unwrap(); + let decompressed = zstd.decompress(compressed).await.unwrap(); + assert_eq!(data.as_slice(), decompressed.as_ref()); + } + } +} diff --git a/bincache/src/lib.rs b/bincache/src/lib.rs index d5a78ac..ed30f62 100644 --- a/bincache/src/lib.rs +++ b/bincache/src/lib.rs @@ -27,7 +27,7 @@ //! //! # #[tokio::main(flavor = "current_thread")] //! # async fn main() -> Result<(), Box> { -//! let mut cache = MemoryCacheBuilder::new().build()?; +//! let mut cache = MemoryCacheBuilder::default().build()?; //! cache.put("key", b"value".to_vec()).await?; //! # Ok(()) //! # } @@ -72,6 +72,7 @@ compile_error!("Cannot enable multiple async runtime features at the same time." mod builder; mod cache; +pub mod compression; pub mod error; mod macros; pub mod strategies; diff --git a/bincache/src/macros.rs b/bincache/src/macros.rs index 4bc0e61..3268977 100644 --- a/bincache/src/macros.rs +++ b/bincache/src/macros.rs @@ -2,9 +2,9 @@ macro_rules! reexport_strategy { ($strategy:ident) => { paste::paste! { #[doc = concat!("A [Cache] using the [", stringify!($strategy), "Strategy].")] - pub type [<$strategy Cache>] = $crate::Cache; + pub type [<$strategy Cache>] = $crate::Cache; #[doc = concat!("A [CacheBuilder] using the [", stringify!($strategy), "Strategy].")] - pub type [<$strategy CacheBuilder>] = $crate::CacheBuilder<$crate::strategies::$strategy>; + pub type [<$strategy CacheBuilder>] = $crate::builder::CacheBuilderWithStrategy<$crate::strategies::$strategy>; pub use $crate::strategies::$strategy as [<$strategy Strategy>]; const _: () = { diff --git a/bincache/src/strategies/disk.rs b/bincache/src/strategies/disk.rs index b288c75..52ff5f8 100644 --- a/bincache/src/strategies/disk.rs +++ b/bincache/src/strategies/disk.rs @@ -178,12 +178,12 @@ impl RecoverableStrategy for Disk { #[cfg(test)] mod tests { use super::{Disk, LIMIT_KIND_BYTE, LIMIT_KIND_ENTRY}; - use crate::{async_test, utils::test::TempDir, Cache, Error}; + use crate::{async_test, compression::MaybeCompressor, utils::test::TempDir, Cache, Error}; async_test! 
{ async fn test_default() { let temp_dir = TempDir::new(); - let mut cache = Cache::new(Disk::new(temp_dir.as_ref(), None, None)); + let mut cache = Cache::new(Disk::new(temp_dir.as_ref(), None, None), MaybeCompressor::noop()); cache.put("foo", b"foo".to_vec()).await.unwrap(); @@ -213,7 +213,7 @@ mod tests { async fn test_strategy_with_byte_limit() { let temp_dir = TempDir::new(); - let mut cache = Cache::new(Disk::new(temp_dir.as_ref(), Some(6), None)); + let mut cache = Cache::new(Disk::new(temp_dir.as_ref(), Some(6), None), MaybeCompressor::noop()); let foo_data = b"foo".to_vec(); let bar_data = b"bar".to_vec(); @@ -242,7 +242,7 @@ mod tests { async fn test_strategy_with_entry_limit() { let temp_dir = TempDir::new(); - let mut cache = Cache::new(Disk::new(temp_dir.as_ref(), None, Some(3))); + let mut cache = Cache::new(Disk::new(temp_dir.as_ref(), None, Some(3)), MaybeCompressor::noop()); cache.put("foo", b"foo".to_vec()).await.unwrap(); cache.put("bar", b"bar".to_vec()).await.unwrap(); @@ -266,7 +266,7 @@ mod tests { // populate cache { - let mut cache = Cache::new(Disk::new(temp_dir.as_ref(), None, None)); + let mut cache = Cache::new(Disk::new(temp_dir.as_ref(), None, None), MaybeCompressor::noop()); cache.put("foo", b"foo".to_vec()).await.unwrap(); cache.put("bar", b"bar".to_vec()).await.unwrap(); @@ -274,7 +274,7 @@ mod tests { // recover cache { - let mut cache = Cache::new(Disk::new(temp_dir.as_ref(), None, None)); + let mut cache = Cache::new(Disk::new(temp_dir.as_ref(), None, None), MaybeCompressor::noop()); let recovered_items = cache .recover(|k| Some(k.to_string())) .await diff --git a/bincache/src/strategies/hybrid.rs b/bincache/src/strategies/hybrid.rs index 30b029f..9d693e6 100644 --- a/bincache/src/strategies/hybrid.rs +++ b/bincache/src/strategies/hybrid.rs @@ -321,12 +321,12 @@ mod tests { use std::fs::metadata; use super::{Hybrid, Limits, LIMIT_KIND_BYTE_DISK, LIMIT_KIND_ENTRY_DISK}; - use crate::{async_test, utils::test::TempDir, Cache, Error}; + use crate::{async_test, compression::MaybeCompressor, utils::test::TempDir, Cache, Error}; async_test! 
{ async fn test_default_strategy() { // We don't need a temp dir here, because we don't write to disk - let mut cache = Cache::new(Hybrid::default()); + let mut cache = Cache::new(Hybrid::default(), MaybeCompressor::noop()); cache.put("foo", b"foo".to_vec()).await.unwrap(); @@ -369,7 +369,7 @@ mod tests { temp_dir.as_ref(), Limits::new(Some(6), None), Limits::default(), - )); + ), MaybeCompressor::noop()); cache.put("foo", b"foo".to_vec()).await.unwrap(); cache.put("bar", b"bar".to_vec()).await.unwrap(); @@ -389,7 +389,7 @@ mod tests { temp_dir.as_ref(), Limits::new(None, Some(2)), Limits::default(), - )); + ), MaybeCompressor::noop()); cache.put("foo", b"foo".to_vec()).await.unwrap(); cache.put("bar", b"bar".to_vec()).await.unwrap(); @@ -409,7 +409,7 @@ mod tests { temp_dir.as_ref(), Limits::new(Some(6), None), Limits::new(Some(6), None), - )); + ), MaybeCompressor::noop()); cache.put("foo", b"foo".to_vec()).await.unwrap(); cache.put("bar", b"bar".to_vec()).await.unwrap(); @@ -443,7 +443,7 @@ mod tests { temp_dir.as_ref(), Limits::new(None, Some(2)), Limits::new(None, Some(2)), - )); + ), MaybeCompressor::noop()); cache.put("foo", b"foo".to_vec()).await.unwrap(); cache.put("bar", b"bar".to_vec()).await.unwrap(); @@ -479,7 +479,7 @@ mod tests { temp_dir.as_ref(), Limits::new(None, Some(1)), Limits::default(), - )); + ), MaybeCompressor::noop()); cache.put("foo", b"foo".to_vec()).await.unwrap(); cache.put("bar", b"bar".to_vec()).await.unwrap(); @@ -492,7 +492,7 @@ mod tests { temp_dir.as_ref(), Limits::default(), Limits::default(), - )); + ), MaybeCompressor::noop()); let recovered_items = cache .recover(|k| Some(k.to_string())) .await @@ -510,7 +510,7 @@ mod tests { temp_dir.as_ref(), Limits::default(), Limits::default(), - )); + ), MaybeCompressor::noop()); cache.put("foo", b"foo".as_slice()).await.unwrap(); cache.put("bar", b"bar".as_slice()).await.unwrap(); diff --git a/bincache/src/strategies/memory.rs b/bincache/src/strategies/memory.rs index dff458b..1fb78ae 100644 --- a/bincache/src/strategies/memory.rs +++ b/bincache/src/strategies/memory.rs @@ -101,11 +101,11 @@ impl CacheStrategy for Memory { #[cfg(test)] mod tests { use super::{Memory, LIMIT_KIND_BYTE, LIMIT_KIND_ENTRY}; - use crate::{async_test, Cache, Error}; + use crate::{async_test, compression::MaybeCompressor, Cache, Error}; async_test! 
{
async fn test_default_strategy() {
- let mut cache = Cache::new(Memory::default());
+ let mut cache = Cache::new(Memory::default(), MaybeCompressor::noop());
cache.put("foo", b"foo".to_vec()).await.unwrap();
@@ -134,7 +134,7 @@
}
async fn test_strategy_with_byte_limit() {
- let mut cache = Cache::new(Memory::new(Some(6), None));
+ let mut cache = Cache::new(Memory::new(Some(6), None), MaybeCompressor::noop());
cache.put("foo", b"foo".to_vec()).await.unwrap();
cache.put("bar", b"bar".to_vec()).await.unwrap();
@@ -153,7 +153,7 @@
}
async fn test_strategy_with_entry_limit() {
- let mut cache = Cache::new(Memory::new(None, Some(3)));
+ let mut cache = Cache::new(Memory::new(None, Some(3)), MaybeCompressor::noop());
cache.put("foo", b"foo".to_vec()).await.unwrap();
cache.put("bar", b"bar".to_vec()).await.unwrap();
diff --git a/bincache/src/traits.rs b/bincache/src/traits.rs
index cc24ddd..550ac0a 100644
--- a/bincache/src/traits.rs
+++ b/bincache/src/traits.rs
@@ -1,9 +1,11 @@
mod cache_key;
mod cache_strategy;
+mod compression_strategy;
mod flushable_strategy;
mod recoverable_strategy;
pub use cache_key::CacheKey;
pub use cache_strategy::CacheStrategy;
+pub use compression_strategy::CompressionStrategy;
pub use flushable_strategy::FlushableStrategy;
pub use recoverable_strategy::RecoverableStrategy;
diff --git a/bincache/src/traits/compression_strategy.rs b/bincache/src/traits/compression_strategy.rs
new file mode 100644
index 0000000..20e4809
--- /dev/null
+++ b/bincache/src/traits/compression_strategy.rs
@@ -0,0 +1,13 @@
+use std::borrow::Cow;
+
+use crate::Result;
+use async_trait::async_trait;
+
+/// A compression strategy.
+#[async_trait]
+pub trait CompressionStrategy: std::fmt::Debug {
+ /// Compress binary data
+ async fn compress<'a>(&self, data: Cow<'a, [u8]>) -> Result<Cow<'a, [u8]>>;
+ /// Decompress binary data
+ async fn decompress<'a>(&self, value: Cow<'a, [u8]>) -> Result<Cow<'a, [u8]>>;
+}
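Reviewer note: the `CompressionStrategy` trait above is the extension point for codecs other than the bundled `Zstd`. Below is a sketch of a downstream implementation, not part of this patch. It assumes `bincache::traits` and `bincache::Result` are publicly reachable (their visibility is not shown in this diff) and that the implementor depends on `async-trait`; the `Tagged` type and its marker byte are purely illustrative.

```rust
use std::borrow::Cow;

use async_trait::async_trait;
use bincache::traits::CompressionStrategy;

/// Illustrative codec: prepends a marker byte on compress and strips it on decompress.
#[derive(Debug, Default)]
struct Tagged;

#[async_trait]
impl CompressionStrategy for Tagged {
    async fn compress<'a>(&self, data: Cow<'a, [u8]>) -> bincache::Result<Cow<'a, [u8]>> {
        let mut out = Vec::with_capacity(data.len() + 1);
        out.push(0xB1); // hypothetical format marker
        out.extend_from_slice(&data);
        Ok(Cow::Owned(out))
    }

    async fn decompress<'a>(&self, value: Cow<'a, [u8]>) -> bincache::Result<Cow<'a, [u8]>> {
        // Drop the marker; a real codec would call into its compression library here.
        Ok(Cow::Owned(value.get(1..).unwrap_or_default().to_vec()))
    }
}
```

Because `Cache` routes every `put` and `get` through `MaybeCompressor`, such a type can be passed to `with_compression` exactly like `Zstd`.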