Release version 0.7.0 (rust-ml#320)
* Update CHANGELOG

* (cargo-release) version 0.7.0

* Add news entry

* Fix docs

* Add date to changelog
YuhanLiin authored Oct 16, 2023
1 parent f9f7dd4 commit 34d1c84
Showing 24 changed files with 106 additions and 73 deletions.
15 changes: 15 additions & 0 deletions CHANGELOG.md
@@ -1,3 +1,18 @@
+Version 0.7.0 - 2023-10-15
+========================
+Changes
+-----------
+* add `array_from_gz_csv` and `array_from_csv` in `linfa-datasets`
+* make Serde support in `linfa-linear`, `linfa-logistic`, and `linfa-ftrl` optional
+* bump `argmin` to 0.8.1
+* add Serde support to `linfa-preprocessing` and `linfa-bayes`
+* make licenses follow SPDX 2.1 license expression standard
+
+Removals
+-----------
+* Removed Approximate DBSCAN from `linfa-clustering` due to performance issues. It's now an alias to regular DBSCAN.
+* Removed `partitions` dependency, which breaks in current versions of Rust.
+
Version 0.6.1 - 2022-12-03
========================
New Algorithms
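The `array_from_csv` and `array_from_gz_csv` helpers listed in the changelog above load a plain or gzipped CSV file straight into an `ndarray` array. A minimal sketch of how that might look; the parameter list (reader, header flag, separator byte) and the example file path are assumptions, not confirmed 0.7.0 API details:

```rust
// Sketch only: loads a CSV into an ndarray matrix with the new linfa-datasets helper.
// Assumed here (not verified against the released docs): the (reader, has_headers,
// separator) parameter order and that the error type can be boxed. The file path is
// purely illustrative.
use std::fs::File;

use linfa_datasets::array_from_csv;
use ndarray::Array2;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let file = File::open("data/winequality-red.csv")?;
    let records: Array2<f64> = array_from_csv(file, true, b',')?;
    println!("loaded {} rows x {} columns", records.nrows(), records.ncols());
    Ok(())
}
```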
2 changes: 1 addition & 1 deletion Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "linfa"
version = "0.6.1"
version = "0.7.0"
authors = [
"Luca Palmieri <rust@lpalmieri.com>",
"Lorenz Schmidt <bytesnake@mailbox.org>",
6 changes: 3 additions & 3 deletions algorithms/linfa-bayes/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "linfa-bayes"
version = "0.6.1"
version = "0.7.0"
authors = ["VasanthakumarV <vasanth260m12@gmail.com>"]
description = "Collection of Naive Bayes Algorithms"
edition = "2018"
Expand All @@ -25,8 +25,8 @@ ndarray = { version = "0.15" , features = ["approx"]}
ndarray-stats = "0.5"
thiserror = "1.0"

linfa = { version = "0.6.1", path = "../.." }
linfa = { version = "0.7.0", path = "../.." }

[dev-dependencies]
approx = "0.4"
linfa-datasets = { version = "0.6.1", path = "../../datasets", features = ["winequality"] }
linfa-datasets = { version = "0.7.0", path = "../../datasets", features = ["winequality"] }
10 changes: 5 additions & 5 deletions algorithms/linfa-clustering/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "linfa-clustering"
version = "0.6.1"
version = "0.7.0"
edition = "2018"
authors = [
"Luca Palmieri <rust@lpalmieri.com>",
Expand Down Expand Up @@ -38,18 +38,18 @@ rand_xoshiro = "0.6"
space = "0.12"
thiserror = "1.0"
#partitions = "0.2.4" This one will break in a future version of Rust and has no replacement
linfa = { version = "0.6.1", path = "../.." }
linfa-nn = { version = "0.6.1", path = "../linfa-nn" }
linfa = { version = "0.7.0", path = "../.." }
linfa-nn = { version = "0.7.0", path = "../linfa-nn" }
noisy_float = "0.2.0"

[dev-dependencies]
ndarray-npy = { version = "0.8", default-features = false }
linfa-datasets = { version = "0.6.1", path = "../../datasets", features = ["generate"] }
linfa-datasets = { version = "0.7.0", path = "../../datasets", features = ["generate"] }
criterion = "0.4.0"
serde_json = "1"
approx = "0.4"
lax = "0.15.0"
linfa = { version = "0.6.0", path = "../..", features = ["benchmarks"] }
linfa = { version = "0.7.0", path = "../..", features = ["benchmarks"] }

[[bench]]
name = "k_means"
6 changes: 3 additions & 3 deletions algorithms/linfa-elasticnet/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "linfa-elasticnet"
version = "0.6.1"
version = "0.7.0"
authors = [
"Paul Körbitz / Google <koerbitz@google.com>",
"Lorenz Schmidt <bytesnake@mailbox.org>"
Expand Down Expand Up @@ -37,9 +37,9 @@ num-traits = "0.2"
approx = "0.4"
thiserror = "1.0"

linfa = { version = "0.6.1", path = "../.." }
linfa = { version = "0.7.0", path = "../.." }

[dev-dependencies]
linfa-datasets = { version = "0.6.1", path = "../../datasets", features = ["diabetes", "linnerud"] }
linfa-datasets = { version = "0.7.0", path = "../../datasets", features = ["diabetes", "linnerud"] }
ndarray-rand = "0.14"
rand_xoshiro = "0.6"
8 changes: 4 additions & 4 deletions algorithms/linfa-ftrl/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "linfa-ftrl"
version = "0.6.1"
version = "0.7.0"
authors = ["Liudmyla Kyrashchuk <himila@tutanota.com>"]

description = "A Machine Learning framework for Rust"
Expand Down Expand Up @@ -31,13 +31,13 @@ thiserror = "1.0"
rand = "0.8.5"
rand_xoshiro = "0.6.0"

linfa = { version = "0.6.1", path = "../.."}
linfa = { version = "0.7.0", path = "../.."}

[dev-dependencies]
criterion = "0.4.0"
approx = "0.4"
linfa-datasets = { version = "0.6.1", path = "../../datasets", features = ["winequality"] }
linfa = { version = "0.6.1", path = "../..", features = ["benchmarks"] }
linfa-datasets = { version = "0.7.0", path = "../../datasets", features = ["winequality"] }
linfa = { version = "0.7.0", path = "../..", features = ["benchmarks"] }

[[bench]]
name = "ftrl"
8 changes: 4 additions & 4 deletions algorithms/linfa-hierarchical/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "linfa-hierarchical"
version = "0.6.1"
version = "0.7.0"
authors = ["Lorenz Schmidt <lorenz.schmidt@mailbox.org>"]
edition = "2018"

Expand All @@ -18,10 +18,10 @@ ndarray = { version = "0.15" }
kodama = "0.2"
thiserror = "1.0.25"

linfa = { version = "0.6.1", path = "../.." }
linfa-kernel = { version = "0.6.1", path = "../linfa-kernel" }
linfa = { version = "0.7.0", path = "../.." }
linfa-kernel = { version = "0.7.0", path = "../linfa-kernel" }

[dev-dependencies]
rand = "0.8"
ndarray-rand = "0.14"
linfa-datasets = { version = "0.6.1", path = "../../datasets", features = ["iris"] }
linfa-datasets = { version = "0.7.0", path = "../../datasets", features = ["iris"] }
6 changes: 3 additions & 3 deletions algorithms/linfa-ica/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "linfa-ica"
version = "0.6.1"
version = "0.7.0"
authors = ["VasanthakumarV <vasanth260m12@gmail.com>"]
description = "A collection of Independent Component Analysis (ICA) algorithms"
edition = "2018"
Expand Down Expand Up @@ -34,13 +34,13 @@ num-traits = "0.2"
rand_xoshiro = "0.6"
thiserror = "1.0"

linfa = { version = "0.6.1", path = "../.." }
linfa = { version = "0.7.0", path = "../.." }

[dev-dependencies]
ndarray-npy = { version = "0.8", default-features = false }
paste = "1.0"
criterion = "0.4.0"
linfa = { version = "0.6.0", path = "../..", features = ["benchmarks"] }
linfa = { version = "0.7.0", path = "../..", features = ["benchmarks"] }

[[bench]]
name = "fast_ica"
6 changes: 3 additions & 3 deletions algorithms/linfa-kernel/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "linfa-kernel"
version = "0.6.1"
version = "0.7.0"
authors = ["Lorenz Schmidt <bytesnake@mailbox.org>"]
description = "Kernel methods for non-linear algorithms"
edition = "2018"
Expand Down Expand Up @@ -28,5 +28,5 @@ ndarray = "0.15"
num-traits = "0.2"
sprs = { version="0.11", default-features = false }

linfa = { version = "0.6.1", path = "../.." }
linfa-nn = { version = "0.6.1", path = "../linfa-nn" }
linfa = { version = "0.7.0", path = "../.." }
linfa-nn = { version = "0.7.0", path = "../linfa-nn" }
8 changes: 4 additions & 4 deletions algorithms/linfa-linear/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "linfa-linear"
version = "0.6.1"
version = "0.7.0"
authors = [
"Paul Körbitz / Google <koerbitz@google.com>",
"VasanthakumarV <vasanth260m12@gmail.com>"
Expand Down Expand Up @@ -36,14 +36,14 @@ argmin = { version = "0.8.1", default-features = false }
argmin-math = { version = "0.3", features = ["ndarray_v0_15-nolinalg"] }
thiserror = "1.0"

linfa = { version = "0.6.1", path = "../.." }
linfa = { version = "0.7.0", path = "../.." }

[dev-dependencies]
linfa-datasets = { version = "0.6.1", path = "../../datasets", features = ["diabetes"] }
linfa-datasets = { version = "0.7.0", path = "../../datasets", features = ["diabetes"] }
approx = "0.4"
criterion = "0.4.0"
statrs = "0.16.0"
linfa = { version = "0.6.0", path = "../..", features = ["benchmarks"] }
linfa = { version = "0.7.0", path = "../..", features = ["benchmarks"] }

[[bench]]
name = "ols_bench"
6 changes: 3 additions & 3 deletions algorithms/linfa-logistic/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "linfa-logistic"
version = "0.6.1"
version = "0.7.0"
authors = ["Paul Körbitz / Google <koerbitz@google.com>"]

description = "A Machine Learning framework for Rust"
Expand Down Expand Up @@ -30,9 +30,9 @@ argmin-math = { version = "0.3", features = ["ndarray_v0_15-nolinalg"] }
thiserror = "1.0"


linfa = { version = "0.6.1", path = "../.." }
linfa = { version = "0.7.0", path = "../.." }

[dev-dependencies]
approx = "0.4"
linfa-datasets = { version = "0.6.1", path = "../../datasets", features = ["winequality"] }
linfa-datasets = { version = "0.7.0", path = "../../datasets", features = ["winequality"] }
rmp-serde = "1"
6 changes: 3 additions & 3 deletions algorithms/linfa-nn/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "linfa-nn"
version = "0.6.1"
version = "0.7.0"
authors = ["YuhanLiin <yuhanliin+github@protonmail.com>"]
edition = "2018"
description = "A collection of nearest neighbour algorithms"
Expand Down Expand Up @@ -33,14 +33,14 @@ thiserror = "1.0"

kdtree = "0.6.0"

linfa = { version = "0.6.1", path = "../.." }
linfa = { version = "0.7.0", path = "../.." }

[dev-dependencies]
approx = "0.4"
criterion = "0.4.0"
rand_xoshiro = "0.6"
ndarray-rand = "0.14"
linfa = { version = "0.6.0", path = "../..", features = ["benchmarks"] }
linfa = { version = "0.7.0", path = "../..", features = ["benchmarks"] }

[[bench]]
name = "nn"
8 changes: 4 additions & 4 deletions algorithms/linfa-pls/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "linfa-pls"
version = "0.6.1"
version = "0.7.0"
edition = "2018"
authors = ["relf <remi.lafage@onera.fr>"]
description = "Partial Least Squares family methods"
Expand Down Expand Up @@ -33,11 +33,11 @@ ndarray-rand = "0.14"
num-traits = "0.2"
paste = "1.0"
thiserror = "1.0"
linfa = { version = "0.6.1", path = "../.." }
linfa = { version = "0.7.0", path = "../.." }

[dev-dependencies]
linfa = { version = "0.6.1", path = "../..", features = ["benchmarks"] }
linfa-datasets = { version = "0.6.1", path = "../../datasets", features = ["linnerud"] }
linfa = { version = "0.7.0", path = "../..", features = ["benchmarks"] }
linfa-datasets = { version = "0.7.0", path = "../../datasets", features = ["linnerud"] }
approx = "0.4"
rand_xoshiro = "0.6"
criterion = "0.4.0"
8 changes: 4 additions & 4 deletions algorithms/linfa-preprocessing/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "linfa-preprocessing"
version = "0.6.1"
version = "0.7.0"
authors = ["Sauro98 <ivadonadi98@gmail.com>"]

description = "A Machine Learning framework for Rust"
Expand All @@ -18,7 +18,7 @@ blas = ["ndarray-linalg", "linfa/ndarray-linalg"]
serde = ["serde_crate", "ndarray/serde", "serde_regex"]

[dependencies]
linfa = { version = "0.6.1", path = "../.." }
linfa = { version = "0.7.0", path = "../.." }
ndarray = { version = "0.15", features = ["approx"] }
ndarray-linalg = { version = "0.15", optional = true }
linfa-linalg = { version = "0.1", default-features = false }
Expand All @@ -41,8 +41,8 @@ default-features = false
features = ["std", "derive"]

[dev-dependencies]
linfa-datasets = { version = "0.6.1", path = "../../datasets", features = ["diabetes", "winequality"] }
linfa-bayes = { version = "0.6.1", path = "../linfa-bayes" }
linfa-datasets = { version = "0.7.0", path = "../../datasets", features = ["diabetes", "winequality"] }
linfa-bayes = { version = "0.7.0", path = "../linfa-bayes" }
iai = "0.1"
curl = "0.4.35"
flate2 = "1.0.20"
4 changes: 2 additions & 2 deletions algorithms/linfa-preprocessing/src/linear_scaling.rs
@@ -19,7 +19,7 @@ use serde_crate::{Deserialize, Serialize};
serde(crate = "serde_crate")
)]
#[derive(Clone, Debug, PartialEq, Eq)]
-/// Possible scaling methods for [LinearScaler](LinearScaler)
+/// Possible scaling methods for [LinearScaler]
///
/// * Standard (with mean, with std): subtracts the mean from each feature and scales it by the inverse of its standard deviation
/// * MinMax (min, max): scales each feature to fit in the range `min..=max`, default values are
@@ -271,7 +271,7 @@ impl<F: Float> LinearScaler<F> {
&self.scales
}

-/// Returns the method used for fitting. Useful for printing, since [ScalingMethod](ScalingMethod) implements `Display`
+/// Returns the method used for fitting. Useful for printing, since [ScalingMethod] implements `Display`
pub fn method(&self) -> &ScalingMethod<F> {
&self.method
}
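The doc comment above describes the two scaling formulas in words. As a standalone illustration with plain `ndarray` (not the `LinearScaler` API itself): standard scaling subtracts each column's mean and divides by its standard deviation, while min-max scaling squeezes each column into a target range (here `0..=1`):

```rust
// Illustrates the Standard and MinMax formulas from the doc comment above,
// using plain ndarray operations rather than LinearScaler itself.
use ndarray::{array, Array2, Axis};

fn main() {
    let x: Array2<f64> = array![[1.0, 10.0], [2.0, 20.0], [3.0, 30.0]];

    // Standard: subtract each feature's mean, then scale by the inverse of its std.
    let mean = x.mean_axis(Axis(0)).unwrap();
    let std = x.std_axis(Axis(0), 0.0);
    let standardized = (&x - &mean) / &std;

    // MinMax: squeeze each feature into the 0..=1 range.
    let col_min = x.fold_axis(Axis(0), f64::INFINITY, |m, &v| m.min(v));
    let col_max = x.fold_axis(Axis(0), f64::NEG_INFINITY, |m, &v| m.max(v));
    let range = &col_max - &col_min;
    let min_max = (&x - &col_min) / &range;

    println!("standardized:\n{}\nmin-max:\n{}", standardized, min_max);
}
```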
10 changes: 5 additions & 5 deletions algorithms/linfa-preprocessing/src/tf_idf_vectorization.rs
@@ -40,12 +40,12 @@ impl TfIdfMethod {
}
}

-/// Simlar to [`CountVectorizer`](CountVectorizer) but instead of
+/// Similar to [`CountVectorizer`] but instead of
/// just counting the term frequency of each vocabulary entry in each given document,
/// it computes the term frequency times the inverse document frequency, thus giving more importance
/// to entries that appear many times but only on some documents. The weight function can be adjusted
/// by setting the appropriate [method](TfIdfMethod). This struct provides the same string
-/// processing customizations described in [`CountVectorizer`](CountVectorizer).
+/// processing customizations described in [`CountVectorizer`].
#[cfg_attr(
feature = "serde",
derive(Serialize, Deserialize),
@@ -123,7 +123,7 @@ impl TfIdfVectorizer {
}

/// Learns a vocabulary from the texts in `x`, according to the specified attributes and maps each
-/// vocabulary entry to an integer value, producing a [FittedTfIdfVectorizer](FittedTfIdfVectorizer).
+/// vocabulary entry to an integer value, producing a [FittedTfIdfVectorizer].
///
/// Returns an error if:
/// * one of the `n_gram` boundaries is set to zero or the minimum value is greater than the maximum value
@@ -140,8 +140,8 @@ })
})
}

-/// Produces a [FittedTfIdfVectorizer](FittedTfIdfVectorizer) with the input vocabulary.
-/// All struct attributes are ignored in the fitting but will be used by the [FittedTfIdfVectorizer](FittedTfIdfVectorizer)
+/// Produces a [FittedTfIdfVectorizer] with the input vocabulary.
+/// All struct attributes are ignored in the fitting but will be used by the [FittedTfIdfVectorizer]
/// to transform any text to be examined. As such this will return an error in the same cases as the `fit` method.
pub fn fit_vocabulary<T: ToString>(&self, words: &[T]) -> Result<FittedTfIdfVectorizer> {
let fitted_vectorizer = self.count_vectorizer.fit_vocabulary(words)?;
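The `fit_vocabulary` signature shown in this diff suggests a simple flow: fix a vocabulary, obtain a `FittedTfIdfVectorizer`, then weight documents with it. A rough sketch of that flow; the module path, `Default` constructor, and what `transform` accepts and returns are assumptions, not taken from the released docs:

```rust
// Sketch only: the fit_vocabulary -> FittedTfIdfVectorizer -> transform flow
// described in the doc comments above. Assumed (not verified): the module path,
// TfIdfVectorizer::default(), and transform taking a 1-D array of documents.
use linfa_preprocessing::tf_idf_vectorization::TfIdfVectorizer;
use ndarray::array;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Fix the vocabulary up front instead of learning it from a corpus.
    let vocabulary = ["linear", "regression", "clustering"];
    let vectorizer = TfIdfVectorizer::default().fit_vocabulary(&vocabulary)?;

    // Weight each document's vocabulary entries by tf-idf.
    let documents = array![
        "linear regression and linear models".to_string(),
        "clustering with k-means".to_string(),
    ];
    let weights = vectorizer.transform(&documents);
    println!("{:?}", weights);
    Ok(())
}
```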
4 changes: 2 additions & 2 deletions algorithms/linfa-preprocessing/src/whitening.rs
@@ -39,7 +39,7 @@ pub enum WhiteningMethod {
}

/// Struct that can be fitted to the input data to obtain the related whitening matrix.
-/// Fitting returns a [FittedWhitener](FittedWhitener) struct that can be used to
+/// Fitting returns a [FittedWhitener] struct that can be used to
/// apply the whitening transformation to the input data.
#[cfg_attr(
feature = "serde",
@@ -157,7 +157,7 @@ impl<F: Float, D: Data<Elem = F>, T: AsTargets> Fit<ArrayBase<D, Ix2>, T, Prepro

/// Struct that can be used to whiten data. Data will be scaled according to the whitening matrix learned
/// during fitting.
-/// Obtained by fitting a [Whitener](Whitener).
+/// Obtained by fitting a [Whitener].
///
/// Transforming the data used during fitting will yield a scaled data matrix with
/// unit diagonal covariance matrix.
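The whitening docs above describe a two-step pattern: fit a `Whitener` to learn the whitening matrix, then apply the resulting `FittedWhitener` to data so the transformed records have (approximately) unit diagonal covariance. A minimal sketch of that pattern; the constructor name `Whitener::pca()`, the module path, and the exact `Transformer` signature are assumptions based on linfa's usual conventions rather than verified 0.7.0 API:

```rust
// Sketch only: the fit -> FittedWhitener -> transform flow described above.
// Assumed (not verified): Whitener::pca(), the whitening module path, and
// transform taking the dataset by value.
use linfa::traits::{Fit, Transformer};
use linfa::DatasetBase;
use linfa_preprocessing::whitening::Whitener;
use ndarray::array;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let records = array![[1.0, 2.0], [2.0, 1.0], [3.0, 4.0], [4.0, 3.0], [5.0, 6.0]];
    let dataset = DatasetBase::from(records);

    // Fitting learns the whitening matrix and returns a FittedWhitener.
    let fitted = Whitener::pca().fit(&dataset)?;

    // Transforming the training data should yield records whose covariance
    // matrix has a unit diagonal.
    let _whitened = fitted.transform(dataset);
    Ok(())
}
```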
8 changes: 4 additions & 4 deletions algorithms/linfa-reduction/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "linfa-reduction"
version = "0.6.1"
version = "0.7.0"
authors = ["Lorenz Schmidt <bytesnake@mailbox.org>"]
description = "A collection of dimensionality reduction techniques"
edition = "2018"
Expand Down Expand Up @@ -33,10 +33,10 @@ num-traits = "0.2"
thiserror = "1.0"
rand = { version = "0.8", features = ["small_rng"] }

linfa = { version = "0.6.1", path = "../.." }
linfa-kernel = { version = "0.6.1", path = "../linfa-kernel" }
linfa = { version = "0.7.0", path = "../.." }
linfa-kernel = { version = "0.7.0", path = "../linfa-kernel" }

[dev-dependencies]
ndarray-npy = { version = "0.8", default-features = false }
linfa-datasets = { version = "0.6.1", path = "../../datasets", features = ["iris", "generate"] }
linfa-datasets = { version = "0.7.0", path = "../../datasets", features = ["iris", "generate"] }
approx = { version = "0.4" }