feat/features: change meaning of framework features
hobofan committed Mar 15, 2016
1 parent c36149e commit 58d72f5
Showing 9 changed files with 206 additions and 31 deletions.
8 changes: 4 additions & 4 deletions Cargo.toml
@@ -14,8 +14,8 @@ keywords = ["deep-learning", "neural-networks", "machine-learning", "framework"]
license = "MIT OR Apache-2.0"

[dependencies]
collenchyma = { version = "0.0.8", default-features = false }
collenchyma-blas = { version = "0.2.0", default-features = false }
collenchyma = { version = "0.0.8", default-features = false, features = ["native"] } # native feature to read/write data into tensors
collenchyma-blas = { version = "0.2.0", default-features = false, features = ["native"] } # only compiles with native feature
collenchyma-nn = { version = "0.3.2", default-features = false }

log = "0.3.2"
@@ -30,8 +30,8 @@ timeit = "0.1.2"
env_logger = "0.3"

[features]
default = ["native", "cuda", "opencl"]
native = ["collenchyma/native", "collenchyma-blas/native", "collenchyma-nn/native"]
default = ["native"]
native = ["collenchyma-blas/native", "collenchyma-nn/native"]
cuda = ["collenchyma/cuda", "collenchyma-blas/cuda", "collenchyma-nn/cuda"]
opencl = ["collenchyma/opencl", "collenchyma-blas/opencl", "collenchyma-nn/opencl"]

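For orientation, the effect of the new feature layout can be sketched with a small, self-contained Rust example. This is a hypothetical illustration, not Leaf source: with `native` as the only default feature, CUDA-only code has to be selected explicitly, which the rest of this commit does with the mutually exclusive `all(feature = "cuda", not(feature = "native"))` and `feature = "native"` cfg conditions.

// Minimal sketch (hypothetical, not Leaf code) of the cfg pattern used in
// this commit. Building with default features takes the native path; building
// with `--no-default-features --features cuda` takes the CUDA-only path.
// As in the crate itself, at least one of the two features must be enabled.

#[cfg(all(feature = "cuda", not(feature = "native")))]
fn backend_name() -> &'static str {
    "cuda"
}

#[cfg(feature = "native")]
fn backend_name() -> &'static str {
    "native"
}

fn main() {
    println!("compiled for the {} backend", backend_name());
}

Note that enabling both features resolves to the native path, because the CUDA condition explicitly requires `not(feature = "native")`.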
18 changes: 9 additions & 9 deletions examples/benchmarks.rs
@@ -112,12 +112,12 @@ fn get_time_scale<'a>(sec: f64) -> (f64, &'a str) {
}
}

#[cfg(not(feature = "cuda"))]
#[cfg(feature="native")]
fn bench_alexnet() {
println!("Examples run only with CUDA support at the moment, because of missing native convolution implementation for the Collenchyma NN Plugin.");
println!("Try compiling with the \"cuda\" feature flag.");
println!("Try running with `cargo run --no-default-features --features cuda --example benchmarks alexnet`.");
}
#[cfg(feature = "cuda")]
#[cfg(all(feature="cuda", not(feature="native")))]
fn bench_alexnet() {
let mut cfg = SequentialConfig::default();
cfg.add_input("data", &vec![128, 3, 224, 224]);
@@ -194,12 +194,12 @@ fn bench_alexnet() {
}
}

#[cfg(not(feature = "cuda"))]
#[cfg(feature="native")]
fn bench_overfeat() {
println!("Examples run only with CUDA support at the moment, because of missing native convolution implementation for the Collenchyma NN Plugin.");
println!("Try compiling with the \"cuda\" feature flag.");
println!("Try running with `cargo run --no-default-features --features cuda --example benchmarks overfeat`.");
}
#[cfg(feature = "cuda")]
#[cfg(all(feature="cuda", not(feature="native")))]
fn bench_overfeat() {
let mut cfg = SequentialConfig::default();
cfg.add_input("data", &vec![128, 3, 231, 231]);
@@ -276,12 +276,12 @@ fn bench_overfeat() {
}
}

#[cfg(not(feature = "cuda"))]
#[cfg(feature="native")]
fn bench_vgg_a() {
println!("Examples run only with CUDA support at the moment, because of missing native convolution implementation for the Collenchyma NN Plugin.");
println!("Try compiling with the \"cuda\" feature flag.");
println!("Try running with `cargo run --no-default-features --features cuda --example benchmarks vgg`.");
}
#[cfg(feature = "cuda")]
#[cfg(all(feature="cuda", not(feature="native")))]
fn bench_vgg_a() {
let mut cfg = SequentialConfig::default();
cfg.add_input("data", &vec![64, 3, 224, 224]);
12 changes: 12 additions & 0 deletions src/layer.rs
@@ -687,9 +687,11 @@ impl<B: IBackend + LayerOps<f32> + 'static> Layer<B> {
/// [3]: ../layers/index.html
fn worker_from_config(backend: Rc<B>, config: &LayerConfig) -> Box<ILayer<B>> {
match config.layer_type.clone() {
#[cfg(all(feature="cuda", not(feature="native")))]
LayerType::Convolution(layer_config) => Box::new(Convolution::from_config(&layer_config)),
LayerType::Linear(layer_config) => Box::new(Linear::from_config(&layer_config)),
LayerType::LogSoftmax => Box::new(LogSoftmax::default()),
#[cfg(all(feature="cuda", not(feature="native")))]
LayerType::Pooling(layer_config) => Box::new(Pooling::from_config(&layer_config)),
LayerType::Sequential(layer_config) => Box::new(Sequential::from_config(backend, &layer_config)),
LayerType::Softmax => Box::new(Softmax::default()),
@@ -1103,12 +1105,14 @@ pub struct LayerConfig {
pub enum LayerType {
// Common layers
/// Convolution Layer
#[cfg(all(feature="cuda", not(feature="native")))]
Convolution(ConvolutionConfig),
/// Linear Layer
Linear(LinearConfig),
/// LogSoftmax Layer
LogSoftmax,
/// Pooling Layer
#[cfg(all(feature="cuda", not(feature="native")))]
Pooling(PoolingConfig),
/// Sequential Layer
Sequential(SequentialConfig),
@@ -1131,14 +1135,22 @@ impl LayerType {
/// Returns whether the LayerType supports in-place operations.
pub fn supports_in_place(&self) -> bool {
match *self {
#[cfg(all(feature="cuda", not(feature="native")))]
LayerType::Convolution(_) => false,
LayerType::Linear(_) => false,
LayerType::LogSoftmax => false,
#[cfg(all(feature="cuda", not(feature="native")))]
LayerType::Pooling(_) => false,
LayerType::Sequential(_) => false,
LayerType::Softmax => false,
#[cfg(all(feature="cuda", not(feature="native")))]
LayerType::ReLU => true,
#[cfg(feature="native")]
LayerType::ReLU => false,
#[cfg(all(feature="cuda", not(feature="native")))]
LayerType::Sigmoid => true,
#[cfg(feature="native")]
LayerType::Sigmoid => false,
LayerType::NegativeLogLikelihood(_) => false,
LayerType::Reshape(_) => true,
}
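The LayerType changes above lean on the fact that cfg attributes apply both to enum variants and to individual match arms, so a CUDA-only variant and its arms disappear together and the match stays exhaustive under every feature combination. A stripped-down sketch with hypothetical names (not Leaf's actual types):

// Hypothetical sketch: a cfg-gated enum variant paired with cfg-gated match
// arms. The arm vanishes together with the variant, so the match compiles
// under both the native and the CUDA-only feature selection.

#[allow(dead_code)]
enum Kind {
    Linear,
    #[cfg(all(feature = "cuda", not(feature = "native")))]
    Convolution,
}

fn supports_in_place(kind: &Kind) -> bool {
    match *kind {
        Kind::Linear => false,
        #[cfg(all(feature = "cuda", not(feature = "native")))]
        Kind::Convolution => false,
    }
}

fn main() {
    assert!(!supports_in_place(&Kind::Linear));
}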
71 changes: 70 additions & 1 deletion src/layers/activation/relu.rs
@@ -7,7 +7,9 @@
//! needed in a Sigmoid layer.
use co::{IBackend,SharedTensor};
use conn::{Relu, ReluPointwise};
use conn::Relu;
#[cfg(all(feature="cuda", not(feature="native")))]
use conn::ReluPointwise;
use layer::*;
use util::ArcLock;

@@ -16,6 +18,11 @@ use util::ArcLock;
/// ReLU Activation Layer
pub struct ReLU;

//
// ReLU + ReLUPointwise
// Only on CUDA
//
#[cfg(all(feature="cuda", not(feature="native")))]
impl<B: IBackend + Relu<f32> + ReluPointwise<f32>> ILayer<B> for ReLU {
impl_ilayer_activation!();

@@ -41,6 +48,7 @@ impl<B: IBackend + Relu<f32> + ReluPointwise<f32>> ILayer<B> for ReLU {
}
}

#[cfg(all(feature="cuda", not(feature="native")))]
impl<B: IBackend + Relu<f32> + ReluPointwise<f32>> ComputeOutput<f32, B> for ReLU {
fn compute_output(&self,
backend: &B,
@@ -54,6 +62,7 @@ impl<B: IBackend + Relu<f32> + ReluPointwise<f32>> ComputeOutput<f32, B> for ReLU {
}
}

#[cfg(all(feature="cuda", not(feature="native")))]
impl<B: IBackend + Relu<f32> + ReluPointwise<f32>> ComputeInputGradient<f32, B> for ReLU {
fn compute_input_gradient(&self,
backend: &B,
@@ -69,4 +78,64 @@ impl<B: IBackend + Relu<f32> + ReluPointwise<f32>> ComputeInputGradient<f32, B> for ReLU {
}
}

#[cfg(all(feature="cuda", not(feature="native")))]
impl<B: IBackend + Relu<f32> + ReluPointwise<f32>> ComputeParametersGradient<f32, B> for ReLU {}

//
// ReLU without ReLUPointwise
// Only on native
//
#[cfg(feature="native")]
impl<B: IBackend + Relu<f32>> ILayer<B> for ReLU {
impl_ilayer_activation!();

fn reshape(&mut self,
backend: ::std::rc::Rc<B>,
input_data: &mut Vec<ArcLock<SharedTensor<f32>>>,
input_gradient: &mut Vec<ArcLock<SharedTensor<f32>>>,
weights_data: &mut Vec<ArcLock<SharedTensor<f32>>>,
weights_gradient: &mut Vec<ArcLock<SharedTensor<f32>>>,
output_data: &mut Vec<ArcLock<SharedTensor<f32>>>,
output_gradient: &mut Vec<ArcLock<SharedTensor<f32>>>) {
if let Some(inp) = input_data.get(0) {
let read_inp = inp.read().unwrap();
let input_desc = read_inp.desc();
input_gradient[0].write().unwrap().resize(input_desc).unwrap();
output_data[0].write().unwrap().resize(input_desc).unwrap();
output_gradient[0].write().unwrap().resize(input_desc).unwrap();
}
}
}

#[cfg(feature="native")]
impl<B: IBackend + Relu<f32>> ComputeOutput<f32, B> for ReLU {
fn compute_output(&self,
backend: &B,
_weights: &[&SharedTensor<f32>],
input_data: &[&SharedTensor<f32>],
output_data: &mut [&mut SharedTensor<f32>]) {
match input_data.get(0) {
Some(input) => backend.relu_plain(input, output_data[0]).unwrap(),
None => panic!("No input provided for ReLU layer."),
}
}
}

#[cfg(feature="native")]
impl<B: IBackend + Relu<f32>> ComputeInputGradient<f32, B> for ReLU {
fn compute_input_gradient(&self,
backend: &B,
weights_data: &[&SharedTensor<f32>],
output_data: &[&SharedTensor<f32>],
output_gradients: &[&SharedTensor<f32>],
input_data: &[&SharedTensor<f32>],
input_gradients: &mut [&mut SharedTensor<f32>]) {
match output_data.get(0) {
Some(_) => backend.relu_grad_plain(output_data[0], output_gradients[0], input_data[0], input_gradients[0]).unwrap(),
None => panic!("No output_data provided for ReLU layer backward."),
}
}
}

#[cfg(feature="native")]
impl<B: IBackend + Relu<f32>> ComputeParametersGradient<f32, B> for ReLU {}
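The two ReLU blocks above show the pattern this commit applies to every activation layer: one impl with the richer pointwise bound for CUDA-only builds, and a fallback impl that only needs the plain operation for native builds. Because the cfg conditions are mutually exclusive, the two impls never coexist. A self-contained sketch of that shape, using toy traits rather than the real collenchyma-nn API:

// Toy stand-ins for conn::Relu / conn::ReluPointwise; the real collenchyma-nn
// signatures are not reproduced here.
trait Relu { fn relu(&self); }
trait ReluPointwise { fn relu_pointwise(&self); }

trait ILayer<B> { fn forward(&self, backend: &B); }

struct ReLULayer;

// CUDA-only build: require the pointwise capability and use it.
#[cfg(all(feature = "cuda", not(feature = "native")))]
impl<B: Relu + ReluPointwise> ILayer<B> for ReLULayer {
    fn forward(&self, backend: &B) { backend.relu_pointwise(); }
}

// Native build: only the plain ReLU operation is required.
#[cfg(feature = "native")]
impl<B: Relu> ILayer<B> for ReLULayer {
    fn forward(&self, backend: &B) { backend.relu(); }
}

// A dummy backend so the sketch compiles and runs under either feature
// selection (one of the two features must be enabled, as in the crate).
struct DummyBackend;
impl Relu for DummyBackend { fn relu(&self) { println!("relu"); } }
impl ReluPointwise for DummyBackend { fn relu_pointwise(&self) { println!("relu (pointwise)"); } }

fn main() {
    ReLULayer.forward(&DummyBackend);
}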
67 changes: 67 additions & 0 deletions src/layers/activation/sigmoid.rs
@@ -22,6 +22,11 @@ use util::ArcLock;
/// Sigmoid Activation Layer
pub struct Sigmoid;

//
// Sigmoid + SigmoidPointwise
// Only on CUDA
//
#[cfg(all(feature="cuda", not(feature="native")))]
impl<B: IBackend + conn::Sigmoid<f32> + conn::SigmoidPointwise<f32>> ILayer<B> for Sigmoid {
impl_ilayer_activation!();

@@ -47,6 +52,7 @@ impl<B: IBackend + conn::Sigmoid<f32> + conn::SigmoidPointwise<f32>> ILayer<B> for Sigmoid {
}
}

#[cfg(all(feature="cuda", not(feature="native")))]
impl<B: IBackend + conn::Sigmoid<f32> + conn::SigmoidPointwise<f32>> ComputeOutput<f32, B> for Sigmoid {
fn compute_output(&self,
backend: &B,
@@ -60,6 +66,7 @@ impl<B: IBackend + conn::Sigmoid<f32> + conn::SigmoidPointwise<f32>> ComputeOutput<f32, B> for Sigmoid {
}
}

#[cfg(all(feature="cuda", not(feature="native")))]
impl<B: IBackend + conn::Sigmoid<f32> + conn::SigmoidPointwise<f32>> ComputeInputGradient<f32, B> for Sigmoid {
fn compute_input_gradient(&self,
backend: &B,
@@ -75,4 +82,64 @@ impl<B: IBackend + conn::Sigmoid<f32> + conn::SigmoidPointwise<f32>> ComputeInputGradient<f32, B> for Sigmoid {
}
}

#[cfg(all(feature="cuda", not(feature="native")))]
impl<B: IBackend + conn::Sigmoid<f32> + conn::SigmoidPointwise<f32>> ComputeParametersGradient<f32, B> for Sigmoid {}

//
// Sigmoid without SigmoidPointwise
// Only on native
//
#[cfg(feature="native")]
impl<B: IBackend + conn::Sigmoid<f32>> ILayer<B> for Sigmoid {
impl_ilayer_activation!();

fn reshape(&mut self,
backend: ::std::rc::Rc<B>,
input_data: &mut Vec<ArcLock<SharedTensor<f32>>>,
input_gradient: &mut Vec<ArcLock<SharedTensor<f32>>>,
weights_data: &mut Vec<ArcLock<SharedTensor<f32>>>,
weights_gradient: &mut Vec<ArcLock<SharedTensor<f32>>>,
output_data: &mut Vec<ArcLock<SharedTensor<f32>>>,
output_gradient: &mut Vec<ArcLock<SharedTensor<f32>>>) {
if let Some(inp) = input_data.get(0) {
let read_inp = inp.read().unwrap();
let input_desc = read_inp.desc();
input_gradient[0].write().unwrap().resize(input_desc).unwrap();
output_data[0].write().unwrap().resize(input_desc).unwrap();
output_gradient[0].write().unwrap().resize(input_desc).unwrap();
}
}
}

#[cfg(feature="native")]
impl<B: IBackend + conn::Sigmoid<f32>> ComputeOutput<f32, B> for Sigmoid {
fn compute_output(&self,
backend: &B,
_weights: &[&SharedTensor<f32>],
input_data: &[&SharedTensor<f32>],
output_data: &mut [&mut SharedTensor<f32>]) {
match input_data.get(0) {
Some(input) => backend.sigmoid_plain(input, output_data[0]).unwrap(),
None => panic!("No input provided for Sigmoid layer."),
}
}
}

#[cfg(feature="native")]
impl<B: IBackend + conn::Sigmoid<f32>> ComputeInputGradient<f32, B> for Sigmoid {
fn compute_input_gradient(&self,
backend: &B,
weights_data: &[&SharedTensor<f32>],
output_data: &[&SharedTensor<f32>],
output_gradients: &[&SharedTensor<f32>],
input_data: &[&SharedTensor<f32>],
input_gradients: &mut [&mut SharedTensor<f32>]) {
match output_data.get(0) {
Some(_) => backend.sigmoid_grad_plain(output_data[0], output_gradients[0], input_data[0], input_gradients[0]).unwrap(),
None => panic!("No output_data provided for Sigmoid layer backward."),
}
}
}

#[cfg(feature="native")]
impl<B: IBackend + conn::Sigmoid<f32>> ComputeParametersGradient<f32, B> for Sigmoid {}
4 changes: 4 additions & 0 deletions src/layers/common/mod.rs
@@ -10,16 +10,20 @@ macro_rules! impl_ilayer_common {
)
}

#[cfg(all(feature="cuda", not(feature="native")))]
pub use self::convolution::{Convolution, ConvolutionConfig};
pub use self::linear::{Linear, LinearConfig};
pub use self::log_softmax::LogSoftmax;
#[cfg(all(feature="cuda", not(feature="native")))]
pub use self::pooling::{Pooling, PoolingConfig, PoolingMode};
pub use self::sequential::{Sequential, SequentialConfig};
pub use self::softmax::Softmax;

#[cfg(all(feature="cuda", not(feature="native")))]
pub mod convolution;
pub mod linear;
pub mod log_softmax;
#[cfg(all(feature="cuda", not(feature="native")))]
pub mod pooling;
pub mod sequential;
pub mod softmax;
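As with the layer code itself, the module declarations and their re-exports are gated on the same condition, so CUDA-only layers are not even compiled, let alone exported, in a native build. A minimal sketch of the idea with hypothetical inline modules (the real crate uses separate files):

// Hypothetical lib.rs-style sketch: gate a module and its re-export on the
// same cfg so the item is completely absent from native builds. Any caller
// that names `Convolution` must sit behind the same cfg, which is what the
// layer.rs changes in this commit do.

#[cfg(all(feature = "cuda", not(feature = "native")))]
pub mod convolution {
    pub struct Convolution;
}

pub mod linear {
    pub struct Linear;
}

#[cfg(all(feature = "cuda", not(feature = "native")))]
pub use self::convolution::Convolution;
pub use self::linear::Linear;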
10 changes: 5 additions & 5 deletions src/layers/mod.rs
@@ -46,28 +46,28 @@
/// [1]: ./layer/trait.ILayer.html
/// [2]: ./layers/activation/index.html
#[allow(unused_import_braces)]
pub use self::activation::{
ReLU,
Sigmoid,
};

#[allow(unused_import_braces)]
#[cfg(all(feature="cuda", not(feature="native")))]
pub use self::common::{
Convolution, ConvolutionConfig,
Pooling, PoolingConfig, PoolingMode,
};

pub use self::common::{
Linear, LinearConfig,
LogSoftmax,
Pooling, PoolingConfig, PoolingMode,
Sequential, SequentialConfig,
Softmax,
};

#[allow(unused_import_braces)]
pub use self::loss::{
NegativeLogLikelihood, NegativeLogLikelihoodConfig,
};

#[allow(unused_import_braces)]
pub use self::utility::{
Flatten,
Reshape, ReshapeConfig,

