refactor/tensor: return SharedTensor from new instead of `Result<..>`

Allocation of `SharedTensor` may fail only on OOM, so returning a `Result` type is redundant.
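
The change at every call site below is mechanical; here is a minimal before/after sketch, with both signatures taken from the src/tensor.rs hunk in this commit:

    // Before: construction returned a Result, so every call site unwrapped it.
    // pub fn new<D: IntoTensorDesc>(desc: &D) -> Result<SharedTensor<T>, Error>
    let t = SharedTensor::<f32>::new(&10).unwrap();

    // After: construction is infallible and returns the tensor directly.
    // pub fn new<D: IntoTensorDesc>(desc: &D) -> SharedTensor<T>
    let t = SharedTensor::<f32>::new(&10);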
alexandermorozov committed Apr 23, 2016
1 parent 753a773 commit 6a21c7f
Showing 11 changed files with 24 additions and 24 deletions.
6 changes: 3 additions & 3 deletions benches/shared_tensor.rs
@@ -112,7 +112,7 @@ fn bench_256_sync_1mb_native_opencl(b: &mut Bencher) {
     // if let &DeviceType::OpenCL(ref cl_d) = cl_device {
     //     println!("{:?}", cl_d.hardwares()[0].clone().load_name());
     // }
-    let mem = &mut SharedTensor::<u8>::new(&1_048_576).unwrap();
+    let mem = &mut SharedTensor::<u8>::new(&1_048_576);
     mem.write_only(&cl_device);
     bench_256_sync_1mb_native_opencl_profile(b, nt_device, cl_device, mem);
 }
@@ -133,7 +133,7 @@ fn bench_256_sync_1mb_native_cuda(b: &mut Bencher) {
     // if let &DeviceType::Cuda(ref cl_d) = cl_device {
     //     println!("{:?}", cl_d.hardwares()[0].clone().load_name());
     // }
-    let mem = &mut SharedTensor::<u8>::new(&1_048_576).unwrap();
+    let mem = &mut SharedTensor::<u8>::new(&1_048_576);
     mem.write_only(&cl_device);
     bench_256_sync_1mb_native_cuda_profile(b, nt_device, cl_device, mem);
 }
@@ -154,7 +154,7 @@ fn bench_2_sync_128mb_native_cuda(b: &mut Bencher) {
     // if let &DeviceType::Cuda(ref cl_d) = cl_device {
     //     println!("{:?}", cl_d.hardwares()[0].clone().load_name());
     // }
-    let mem = &mut SharedTensor::<u8>::new(&(128 * 1_048_576)).unwrap();
+    let mem = &mut SharedTensor::<u8>::new(&(128 * 1_048_576));
     mem.write_only(&cl_device);
     bench_2_sync_128mb_native_cuda_profile(b, nt_device, cl_device, mem);
 }
8 changes: 4 additions & 4 deletions src/tensor.rs
@@ -40,7 +40,7 @@
 //! // allocate memory
 //! let native = Native::new();
 //! let device = native.new_device(native.hardwares()).unwrap();
-//! let shared_data = &mut SharedTensor::<i32>::new(&5).unwrap();
+//! let shared_data = &mut SharedTensor::<i32>::new(&5);
 //! // fill memory with some numbers
 //! let mut mem = shared_data.write_only(&device).unwrap().as_mut_native().unwrap();
 //! mem.as_mut_slice::<i32>().clone_from_slice(&[0, 1, 2, 3, 4]);
@@ -266,13 +266,13 @@ impl <T> fmt::Debug for SharedTensor<T> {
 impl<T> SharedTensor<T> {
     /// Create new Tensor by allocating [Memory][1] on a Device.
     /// [1]: ../memory/index.html
-    pub fn new<D: IntoTensorDesc>(desc: &D) -> Result<SharedTensor<T>, Error> {
-        Ok(SharedTensor {
+    pub fn new<D: IntoTensorDesc>(desc: &D) -> SharedTensor<T> {
+        SharedTensor {
             desc: desc.into(),
             locations: RefCell::new(Vec::new()),
             up_to_date: Cell::new(0),
             phantom: PhantomData,
-        })
+        }
     }

     /// Change the shape of the Tensor.
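
As the hunk shows, `new` now only builds an empty `locations` list; no device memory is touched in the constructor, so there is nothing left that can fail there. Actual allocation is deferred to the access methods, which still return `Result`. A minimal sketch of the resulting flow, using the method names as they appear in the tests below:

    let mut x = SharedTensor::<f32>::new(&10); // infallible: no memory allocated yet
    let mem = x.write_only(&dev).unwrap();     // first allocation happens here, still fallible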
2 changes: 1 addition & 1 deletion tests/compile-fail/drop_live_memory.rs
@@ -5,7 +5,7 @@ fn main() {
     let ntv = Native::new();
     let dev = ntv.new_device(ntv.hardwares()).unwrap();

-    let x = &mut SharedTensor::<f32>::new(&10).unwrap();
+    let x = &mut SharedTensor::<f32>::new(&10);
     let m = x.write_only(&dev).unwrap();
     x.drop_device(&dev);
     //~^ ERROR error: cannot borrow `*x` as mutable more than once at a time
2 changes: 1 addition & 1 deletion tests/compile-fail/leak_read_reference.rs
@@ -6,7 +6,7 @@ fn main() {
     let dev = ntv.new_device(ntv.hardwares()).unwrap();

     let mem = {
-        let x = &mut SharedTensor::<f32>::new(&10).unwrap();
+        let x = &mut SharedTensor::<f32>::new(&10);
         //~^ ERROR error: borrowed value does not live long enough
         x.write_only(&dev).unwrap();
         let m = x.read(&dev).unwrap();
2 changes: 1 addition & 1 deletion tests/compile-fail/leak_write_reference.rs
@@ -6,7 +6,7 @@ fn main() {
     let dev = ntv.new_device(ntv.hardwares()).unwrap();

     let mem = {
-        let x = &mut SharedTensor::<f32>::new(&10).unwrap();
+        let x = &mut SharedTensor::<f32>::new(&10);
         //~^ ERROR error: borrowed value does not live long enough
         let m = x.write_only(&dev).unwrap();
         m
2 changes: 1 addition & 1 deletion tests/compile-fail/read_write_borrows.rs
@@ -5,7 +5,7 @@ fn main() {
     let ntv = Native::new();
     let dev = ntv.new_device(ntv.hardwares()).unwrap();

-    let x = &mut SharedTensor::<f32>::new(&10).unwrap();
+    let x = &mut SharedTensor::<f32>::new(&10);
     let m1 = x.write_only(&dev).unwrap();
     let m2 = x.read(&dev).unwrap();
     //~^ ERROR cannot borrow `*x` as immutable because it is also borrowed as mutable
2 changes: 1 addition & 1 deletion tests/compile-fail/two_write_borrows.rs
@@ -5,7 +5,7 @@ fn main() {
     let ntv = Native::new();
     let dev = ntv.new_device(ntv.hardwares()).unwrap();

-    let x = &mut SharedTensor::<f32>::new(&10).unwrap();
+    let x = &mut SharedTensor::<f32>::new(&10);
     let m1 = x.write_only(&dev).unwrap();
     let m2 = x.write_only(&dev).unwrap();
     //~^ ERROR error: cannot borrow `*x` as mutable more than once at a time
2 changes: 1 addition & 1 deletion tests/framework_cuda_specs.rs
@@ -48,7 +48,7 @@ mod framework_cuda_spec {
         let cuda = Cuda::new();
         let device = cuda.new_device(&cuda.hardwares()[0..1]).unwrap();
         for _ in 0..256 {
-            let x = &mut SharedTensor::<f32>::new(&vec![256, 1024, 128]).unwrap();
+            let mut x = SharedTensor::<f32>::new(&vec![256, 1024, 128]);
             x.write_only(&device).unwrap();
         }
     }
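
Two binding styles show up at the updated call sites: where a caller previously took a mutable borrow of the unwrapped temporary, hunks like the one above switch to an owned `mut` binding instead. Both give a mutable handle; a short sketch, assuming the usual device setup from these tests:

    let ntv = Native::new();
    let device = ntv.new_device(ntv.hardwares()).unwrap();
    let a = &mut SharedTensor::<f32>::new(&10); // mutable borrow of a temporary
    let mut b = SharedTensor::<f32>::new(&10);  // owned, mutable binding
    a.write_only(&device).unwrap();
    b.write_only(&device).unwrap();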
2 changes: 1 addition & 1 deletion tests/run-pass/multiple_read_only_borrows.rs
@@ -5,7 +5,7 @@ fn main() {
     let ntv = Native::new();
     let dev = ntv.new_device(ntv.hardwares()).unwrap();

-    let x = &mut SharedTensor::<f32>::new(&10).unwrap();
+    let x = &mut SharedTensor::<f32>::new(&10);
     x.write_only(&dev).unwrap();

     let m1 = x.read(&dev);
18 changes: 9 additions & 9 deletions tests/shared_memory_specs.rs
@@ -24,7 +24,7 @@ mod shared_memory_spec {
     fn it_creates_new_shared_memory_for_native() {
         let ntv = Native::new();
         let cpu = ntv.new_device(ntv.hardwares()).unwrap();
-        let shared_data = &mut SharedTensor::<f32>::new(&10).unwrap();
+        let mut shared_data = SharedTensor::<f32>::new(&10);
         match shared_data.write_only(&cpu).unwrap() {
             &mut MemoryType::Native(ref dat) => {
                 let data = dat.as_slice::<f32>();
@@ -40,7 +40,7 @@ mod shared_memory_spec {
     fn it_creates_new_shared_memory_for_cuda() {
         let ntv = Cuda::new();
         let device = ntv.new_device(&ntv.hardwares()[0..1]).unwrap();
-        let shared_data = &mut SharedTensor::<f32>::new(&10).unwrap();
+        let mut shared_data = SharedTensor::<f32>::new(&10);
         match shared_data.write_only(&device) {
             Ok(&mut MemoryType::Cuda(_)) => {},
             #[cfg(any(feature = "cuda", feature = "opencl"))]
@@ -53,7 +53,7 @@ mod shared_memory_spec {
     fn it_creates_new_shared_memory_for_opencl() {
         let ntv = OpenCL::new();
         let device = ntv.new_device(&ntv.hardwares()[0..1]).unwrap();
-        let shared_data = &mut SharedTensor::<f32>::new(&10).unwrap();
+        let mut shared_data = SharedTensor::<f32>::new(&10);
         match shared_data.write_only(&device) {
             Ok(&mut MemoryType::OpenCL(_)) => {},
             _ => assert!(false),
@@ -65,7 +65,7 @@ mod shared_memory_spec {
     fn it_fails_on_initialized_memory_read() {
         let ntv = Native::new();
         let cpu = ntv.new_device(ntv.hardwares()).unwrap();
-        let shared_data = &mut SharedTensor::<f32>::new(&10).unwrap();
+        let mut shared_data = SharedTensor::<f32>::new(&10);
         assert_eq!(shared_data.read(&cpu).unwrap_err(),
                    Error::UninitializedMemory);
         assert_eq!(shared_data.read_write(&cpu).unwrap_err(),
@@ -85,7 +85,7 @@ mod shared_memory_spec {
         let nt = Native::new();
         let cu_device = cu.new_device(&cu.hardwares()[0..1]).unwrap();
         let nt_device = nt.new_device(nt.hardwares()).unwrap();
-        let mem = &mut SharedTensor::<f64>::new(&3).unwrap();
+        let mut mem = SharedTensor::<f64>::new(&3);
         write_to_memory(mem.write_only(&nt_device).unwrap(),
                         &[1.0f64, 2.0, 123.456]);
         match mem.read(&cu_device) {
@@ -115,7 +115,7 @@ mod shared_memory_spec {
         let nt = Native::new();
         let cl_device = cl.new_device(&cl.hardwares()[0..1]).unwrap();
         let nt_device = nt.new_device(nt.hardwares()).unwrap();
-        let mem = &mut SharedTensor::<f64>::new(&3).unwrap();
+        let mut mem = SharedTensor::<f64>::new(&3);
         write_to_memory(mem.write_only(&nt_device).unwrap(),
                         &[1.0f64, 2.0, 123.456]);
         match mem.read(&cl_device) {
@@ -127,7 +127,7 @@ mod shared_memory_spec {
         }
         // It has not successfully synced to the device.
         // Not the other way around.
-        mem.drop_device(&nt_device);
+        mem.drop_device(&nt_device).unwrap();
         match mem.read(&nt_device) {
             Ok(m) => assert_eq!(m.as_native().unwrap().as_slice::<f64>(),
                                 [1.0, 2.0, 123.456]),
@@ -140,13 +140,13 @@ mod shared_memory_spec {

     #[test]
     fn it_reshapes_correctly() {
-        let mut shared_data = &mut SharedTensor::<f32>::new(&10).unwrap();
+        let mut shared_data = SharedTensor::<f32>::new(&10);
         assert!(shared_data.reshape(&vec![5, 2]).is_ok());
     }

     #[test]
     fn it_returns_err_for_invalid_size_reshape() {
-        let mut shared_data = &mut SharedTensor::<f32>::new(&10).unwrap();
+        let mut shared_data = SharedTensor::<f32>::new(&10);
         assert!(shared_data.reshape(&vec![10, 2]).is_err());
     }
 }
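
One hunk above also stops discarding the `Result` of `drop_device`: construction is now infallible, but freeing a device copy still is not, so the test unwraps it explicitly. A sketch of where fallibility remains after this commit, using only calls that appear in these tests:

    let mut mem = SharedTensor::<f64>::new(&3); // infallible
    mem.write_only(&nt_device).unwrap();        // fallible: allocates on the device
    mem.drop_device(&nt_device).unwrap();       // fallible: frees the device copy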
2 changes: 1 addition & 1 deletion tests/tensor_specs.rs
@@ -31,7 +31,7 @@ mod tensor_spec {

     #[test]
     fn it_resizes_tensor() {
-        let mut tensor = SharedTensor::<f32>::new(&(10, 20, 30)).unwrap();
+        let mut tensor = SharedTensor::<f32>::new(&(10, 20, 30));
         assert_eq!(tensor.desc(), &[10, 20, 30]);
         tensor.resize(&(2, 3, 4, 5)).unwrap();
         assert_eq!(tensor.desc(), &[2, 3, 4, 5]);
