
Commit

refactor/tests: convert tests and benches to the new memory access API [SKIP_CHANGELOG]

REFERENCE: autumnai/collenchyma#37, autumnai/collenchyma#62
alexandermorozov committed Apr 30, 2016
1 parent e20fc95 commit f506a2c
Showing 3 changed files with 25 additions and 24 deletions.
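
For orientation, here is a minimal sketch of the access pattern this commit migrates to, assuming collenchyma's interface after autumnai/collenchyma#62; the backend setup and data values are illustrative and not taken from this repository (`write_to_memory` in the old-API comment is leaf's own test helper).

    // Sketch under the assumed post-refactor collenchyma API;
    // not code from the commit itself.
    extern crate collenchyma as co;

    use co::prelude::*;

    fn main() {
        let backend = Backend::<Native>::default().unwrap();

        // Old API (removed by this commit): construction took a device
        // and returned a Result, writes went through `get_mut`, reads
        // through `get`, and cross-device copies needed explicit
        // `add_device`/`sync` calls:
        //
        //     let mut x = SharedTensor::<f32>::new(backend.device(), &vec![3]).unwrap();
        //     write_to_memory(x.get_mut(backend.device()).unwrap(), &input);
        //     let mem = x.get(backend.device()).unwrap();

        // New API: construction is infallible and device-agnostic.
        let mut x = SharedTensor::<f32>::new(&[3]);

        // `write_only` hands out memory on the requested device without
        // first copying possibly stale data to it.
        x.write_only(backend.device()).unwrap()
            .as_mut_native().unwrap()
            .as_mut_slice::<f32>()
            .clone_from_slice(&[1.0f32, 1.0, 2.0]);

        // `read` returns a valid copy on the requested device.
        let mem = x.read(backend.device()).unwrap();
        assert_eq!(&[1.0f32, 1.0, 2.0],
                   mem.as_native().unwrap().as_slice::<f32>());
    }

The diffs below apply exactly this substitution: `new(device, dims).unwrap()` becomes `new(dims)`, `get_mut` becomes `write_only`, and `get` becomes `read`.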
8 changes: 4 additions & 4 deletions benches/network_benches.rs
@@ -69,7 +69,7 @@ mod cuda {
     #[bench]
     #[ignore]
     #[cfg(feature = "cuda")]
-    fn bench_mnsit_forward_1(b: &mut Bencher) {
+    fn bench_mnsit_forward_1(_b: &mut Bencher) {
         let mut cfg = SequentialConfig::default();
         // set up input
         cfg.add_input("in", &vec![1, 30, 30]);
@@ -96,7 +96,7 @@ mod cuda {
             backend.clone(), &LayerConfig::new("network", LayerType::Sequential(cfg)));

         let _ = timeit_loops!(10, {
-            let inp = SharedTensor::<f32>::new(backend.device(), &vec![1, 30, 30]).unwrap();
+            let inp = SharedTensor::<f32>::new(&[1, 30, 30]);
             let inp_lock = Arc::new(RwLock::new(inp));

             network.forward(&[inp_lock]);
@@ -260,7 +260,7 @@ mod cuda {

         let func = || {
             let forward_time = timeit_loops!(1, {
-                let inp = SharedTensor::<f32>::new(backend.device(), &vec![128, 3, 112, 112]).unwrap();
+                let inp = SharedTensor::new(&[128, 3, 112, 112]);

                 let inp_lock = Arc::new(RwLock::new(inp));
                 network.forward(&[inp_lock]);
@@ -416,7 +416,7 @@ mod cuda {
             backend.clone(), &LayerConfig::new("network", LayerType::Sequential(cfg)));

         let mut func = || {
-            let inp = SharedTensor::<f32>::new(backend.device(), &vec![128, 3, 112, 112]).unwrap();
+            let inp = SharedTensor::<f32>::new(&[128, 3, 112, 112]);

             let inp_lock = Arc::new(RwLock::new(inp));
             network.forward(&[inp_lock]);
8 changes: 3 additions & 5 deletions examples/benchmarks.rs
@@ -160,8 +160,7 @@ fn bench_alexnet() {
     let func = || {
         let forward_time = timeit_loops!(1, {
             {
-                let inp = SharedTensor::<f32>::new(backend.device(), &vec![128, 3, 224, 224]).unwrap();
-
+                let inp = SharedTensor::<f32>::new(&[128, 3, 224, 224]);
                 let inp_lock = Arc::new(RwLock::new(inp));
                 network.forward(&[inp_lock.clone()]);
             }
@@ -242,8 +241,7 @@ fn bench_overfeat() {
     let func = || {
         let forward_time = timeit_loops!(1, {
             {
-                let inp = SharedTensor::<f32>::new(backend.device(), &vec![128, 3, 231, 231]).unwrap();
-
+                let inp = SharedTensor::new(&[128, 3, 231, 231]);
                 let inp_lock = Arc::new(RwLock::new(inp));
                 network.forward(&[inp_lock.clone()]);
             }
@@ -339,7 +337,7 @@ fn bench_vgg_a() {
     let func = || {
         let forward_time = timeit_loops!(1, {
             {
-                let inp = SharedTensor::<f32>::new(backend.device(), &vec![64, 3, 224, 224]).unwrap();
+                let inp = SharedTensor::new(&[64, 3, 224, 224]);

                 let inp_lock = Arc::new(RwLock::new(inp));
                 network.forward(&[inp_lock.clone()]);
33 changes: 18 additions & 15 deletions tests/layer_specs.rs
@@ -66,8 +66,10 @@ mod layer_spec {
         let loaded_weights = loaded_layer.learnable_weights_data();
         let loaded_weight_lock = loaded_weights[0].read().unwrap();

-        let original_weight = original_weight_lock.get(native_backend().device()).unwrap().as_native().unwrap().as_slice::<f32>();
-        let loaded_weight = loaded_weight_lock.get(native_backend().device()).unwrap().as_native().unwrap().as_slice::<f32>();
+        let original_weight = original_weight_lock.read(native_backend().device())
+            .unwrap().as_native().unwrap().as_slice::<f32>();
+        let loaded_weight = loaded_weight_lock.read(native_backend().device())
+            .unwrap().as_native().unwrap().as_slice::<f32>();

         assert_eq!(original_weight, loaded_weight);
     }
@@ -131,27 +133,28 @@ mod layer_spec {
         let mut reshape_network = Layer::from_config(cuda_backend.clone(), &LayerConfig::new("reshape_model", LayerType::Sequential(reshape_model)));

         let input = vec![1f32, 1f32, 2f32];
-        let mut normal_tensor = SharedTensor::<f32>::new(native_backend.device(), &(3)).unwrap();
+        let mut normal_tensor = SharedTensor::<f32>::new(&[3]);
         // let mut normal_tensor_output = SharedTensor::<f32>::new(native_backend.device(), &(3)).unwrap();
-        let mut reshape_tensor = SharedTensor::<f32>::new(native_backend.device(), &(3)).unwrap();
+        let mut reshape_tensor = SharedTensor::<f32>::new(&[3]);
         // let mut reshape_tensor_output = SharedTensor::<f32>::new(native_backend.device(), &(3)).unwrap();
-        write_to_memory(normal_tensor.get_mut(native_backend.device()).unwrap(), &input);
-        write_to_memory(reshape_tensor.get_mut(native_backend.device()).unwrap(), &input);
+        write_to_memory(normal_tensor.write_only(native_backend.device()).unwrap(), &input);
+        write_to_memory(reshape_tensor.write_only(native_backend.device()).unwrap(), &input);

         let normal_tensor_output = normal_network.forward(&[Arc::new(RwLock::new(normal_tensor))])[0].clone();
-        let _ = normal_tensor_output.write().unwrap().add_device(native_backend.device());
-        normal_tensor_output.write().unwrap().sync(native_backend.device()).unwrap();
         let normal_tensor_output_native_ = normal_tensor_output.read().unwrap();
-        let normal_tensor_output_native = normal_tensor_output_native_.get(native_backend.device()).unwrap().as_native().unwrap();
-        assert_eq!(&[0.7310585786f32, 0.7310586f32, 0.880797f32], normal_tensor_output_native.as_slice::<f32>());
+        let normal_tensor_output_native = normal_tensor_output_native_
+            .read(native_backend.device()).unwrap().as_native().unwrap();
+        assert_eq!(&[0.7310585786f32, 0.7310586f32, 0.880797f32],
+                   normal_tensor_output_native.as_slice::<f32>());

         let reshape_tensor_output = reshape_network.forward(&[Arc::new(RwLock::new(reshape_tensor))])[0].clone();
-        let _ = reshape_tensor_output.write().unwrap().add_device(native_backend.device());
-        reshape_tensor_output.write().unwrap().sync(native_backend.device()).unwrap();
         let reshape_tensor_output_native_ = reshape_tensor_output.read().unwrap();
-        let reshape_tensor_output_native = reshape_tensor_output_native_.get(native_backend.device()).unwrap().as_native().unwrap();
-        assert_eq!(&[0.7310585786f32, 0.7310586f32, 0.880797f32], reshape_tensor_output_native.as_slice::<f32>());
-        assert_eq!(normal_tensor_output_native.as_slice::<f32>(), reshape_tensor_output_native.as_slice::<f32>());
+        let reshape_tensor_output_native = reshape_tensor_output_native_
+            .read(native_backend.device()).unwrap().as_native().unwrap();
+        assert_eq!(&[0.7310585786f32, 0.7310586f32, 0.880797f32],
+                   reshape_tensor_output_native.as_slice::<f32>());
+        assert_eq!(normal_tensor_output_native.as_slice::<f32>(),
+                   reshape_tensor_output_native.as_slice::<f32>());
     }
 }
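Note the deletions on the output path in tests/layer_specs.rs: the explicit add_device/sync calls that previously pulled the CUDA output back to the native backend are simply dropped, because under the new API `read` is expected to locate, or synchronize on demand, a valid copy on the requested device (see the collenchyma issues referenced in the commit message for the design discussion).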
