From 6a21c7fd3821bd5cf4a55d0419d986980ebbc81d Mon Sep 17 00:00:00 2001
From: Alexander Morozov
Date: Sat, 23 Apr 2016 16:18:28 +0300
Subject: [PATCH] refactor/tensor: return `SharedTensor` from `new` instead of
 `Result<..>`

Allocation of `SharedTensor` may fail only on OOM, so returning a `Result`
type is redundant.
---
 benches/shared_tensor.rs                     |  6 +++---
 src/tensor.rs                                |  8 ++++----
 tests/compile-fail/drop_live_memory.rs       |  2 +-
 tests/compile-fail/leak_read_reference.rs    |  2 +-
 tests/compile-fail/leak_write_reference.rs   |  2 +-
 tests/compile-fail/read_write_borrows.rs     |  2 +-
 tests/compile-fail/two_write_borrows.rs      |  2 +-
 tests/framework_cuda_specs.rs                |  2 +-
 tests/run-pass/multiple_read_only_borrows.rs |  2 +-
 tests/shared_memory_specs.rs                 | 18 +++++++++---------
 tests/tensor_specs.rs                        |  2 +-
 11 files changed, 24 insertions(+), 24 deletions(-)

diff --git a/benches/shared_tensor.rs b/benches/shared_tensor.rs
index 0f3f7c85..4791d8e0 100644
--- a/benches/shared_tensor.rs
+++ b/benches/shared_tensor.rs
@@ -112,7 +112,7 @@ fn bench_256_sync_1mb_native_opencl(b: &mut Bencher) {
     // if let &DeviceType::OpenCL(ref cl_d) = cl_device {
     //     println!("{:?}", cl_d.hardwares()[0].clone().load_name());
     // }
-    let mem = &mut SharedTensor::<u8>::new(&1_048_576).unwrap();
+    let mem = &mut SharedTensor::<u8>::new(&1_048_576);
     mem.write_only(&cl_device);
     bench_256_sync_1mb_native_opencl_profile(b, nt_device, cl_device, mem);
 }
@@ -133,7 +133,7 @@ fn bench_256_sync_1mb_native_cuda(b: &mut Bencher) {
     // if let &DeviceType::Cuda(ref cl_d) = cl_device {
     //     println!("{:?}", cl_d.hardwares()[0].clone().load_name());
     // }
-    let mem = &mut SharedTensor::<u8>::new(&1_048_576).unwrap();
+    let mem = &mut SharedTensor::<u8>::new(&1_048_576);
     mem.write_only(&cl_device);
     bench_256_sync_1mb_native_cuda_profile(b, nt_device, cl_device, mem);
 }
@@ -154,7 +154,7 @@ fn bench_2_sync_128mb_native_cuda(b: &mut Bencher) {
     // if let &DeviceType::Cuda(ref cl_d) = cl_device {
     //     println!("{:?}", cl_d.hardwares()[0].clone().load_name());
     // }
-    let mem = &mut SharedTensor::<u8>::new(&(128 * 1_048_576)).unwrap();
+    let mem = &mut SharedTensor::<u8>::new(&(128 * 1_048_576));
     mem.write_only(&cl_device);
     bench_2_sync_128mb_native_cuda_profile(b, nt_device, cl_device, mem);
 }
diff --git a/src/tensor.rs b/src/tensor.rs
index cc1234da..93062513 100644
--- a/src/tensor.rs
+++ b/src/tensor.rs
@@ -40,7 +40,7 @@
 //! // allocate memory
 //! let native = Native::new();
 //! let device = native.new_device(native.hardwares()).unwrap();
-//! let shared_data = &mut SharedTensor::<i32>::new(&5).unwrap();
+//! let shared_data = &mut SharedTensor::<i32>::new(&5);
 //! // fill memory with some numbers
 //! let mut mem = shared_data.write_only(&device).unwrap().as_mut_native().unwrap();
 //! mem.as_mut_slice::<i32>().clone_from_slice(&[0, 1, 2, 3, 4]);
@@ -266,13 +266,13 @@ impl<T> fmt::Debug for SharedTensor<T> {
 impl<T> SharedTensor<T> {
     /// Create new Tensor by allocating [Memory][1] on a Device.
     /// [1]: ../memory/index.html
-    pub fn new<D: IntoTensorDesc>(desc: &D) -> Result<SharedTensor<T>, Error> {
-        Ok(SharedTensor {
+    pub fn new<D: IntoTensorDesc>(desc: &D) -> SharedTensor<T> {
+        SharedTensor {
             desc: desc.into(),
             locations: RefCell::new(Vec::new()),
             up_to_date: Cell::new(0),
             phantom: PhantomData,
-        })
+        }
     }
 
     /// Change the shape of the Tensor.
diff --git a/tests/compile-fail/drop_live_memory.rs b/tests/compile-fail/drop_live_memory.rs
index d7a96f36..86abfa07 100644
--- a/tests/compile-fail/drop_live_memory.rs
+++ b/tests/compile-fail/drop_live_memory.rs
@@ -5,7 +5,7 @@ fn main() {
     let ntv = Native::new();
     let dev = ntv.new_device(ntv.hardwares()).unwrap();
 
-    let x = &mut SharedTensor::<f32>::new(&10).unwrap();
+    let x = &mut SharedTensor::<f32>::new(&10);
     let m = x.write_only(&dev).unwrap();
     x.drop_device(&dev);
     //~^ ERROR error: cannot borrow `*x` as mutable more than once at a time
diff --git a/tests/compile-fail/leak_read_reference.rs b/tests/compile-fail/leak_read_reference.rs
index 4578693a..7f7c41dd 100644
--- a/tests/compile-fail/leak_read_reference.rs
+++ b/tests/compile-fail/leak_read_reference.rs
@@ -6,7 +6,7 @@ fn main() {
     let dev = ntv.new_device(ntv.hardwares()).unwrap();
 
     let mem = {
-        let x = &mut SharedTensor::<f32>::new(&10).unwrap();
+        let x = &mut SharedTensor::<f32>::new(&10);
         //~^ ERROR error: borrowed value does not live long enough
         x.write_only(&dev).unwrap();
         let m = x.read(&dev).unwrap();
diff --git a/tests/compile-fail/leak_write_reference.rs b/tests/compile-fail/leak_write_reference.rs
index 881865da..07a8fae2 100644
--- a/tests/compile-fail/leak_write_reference.rs
+++ b/tests/compile-fail/leak_write_reference.rs
@@ -6,7 +6,7 @@ fn main() {
     let dev = ntv.new_device(ntv.hardwares()).unwrap();
 
     let mem = {
-        let x = &mut SharedTensor::<f32>::new(&10).unwrap();
+        let x = &mut SharedTensor::<f32>::new(&10);
         //~^ ERROR error: borrowed value does not live long enough
         let m = x.write_only(&dev).unwrap();
         m
diff --git a/tests/compile-fail/read_write_borrows.rs b/tests/compile-fail/read_write_borrows.rs
index f83831a1..e7573600 100644
--- a/tests/compile-fail/read_write_borrows.rs
+++ b/tests/compile-fail/read_write_borrows.rs
@@ -5,7 +5,7 @@ fn main() {
     let ntv = Native::new();
     let dev = ntv.new_device(ntv.hardwares()).unwrap();
 
-    let x = &mut SharedTensor::<f32>::new(&10).unwrap();
+    let x = &mut SharedTensor::<f32>::new(&10);
     let m1 = x.write_only(&dev).unwrap();
     let m2 = x.read(&dev).unwrap();
     //~^ ERROR cannot borrow `*x` as immutable because it is also borrowed as mutable
diff --git a/tests/compile-fail/two_write_borrows.rs b/tests/compile-fail/two_write_borrows.rs
index 2edff8a6..ddbb17a1 100644
--- a/tests/compile-fail/two_write_borrows.rs
+++ b/tests/compile-fail/two_write_borrows.rs
@@ -5,7 +5,7 @@ fn main() {
     let ntv = Native::new();
     let dev = ntv.new_device(ntv.hardwares()).unwrap();
 
-    let x = &mut SharedTensor::<f32>::new(&10).unwrap();
+    let x = &mut SharedTensor::<f32>::new(&10);
     let m1 = x.write_only(&dev).unwrap();
     let m2 = x.write_only(&dev).unwrap();
     //~^ ERROR error: cannot borrow `*x` as mutable more than once at a time
diff --git a/tests/framework_cuda_specs.rs b/tests/framework_cuda_specs.rs
index b52ea360..26e543b9 100644
--- a/tests/framework_cuda_specs.rs
+++ b/tests/framework_cuda_specs.rs
@@ -48,7 +48,7 @@ mod framework_cuda_spec {
         let cuda = Cuda::new();
        let device = cuda.new_device(&cuda.hardwares()[0..1]).unwrap();
         for _ in 0..256 {
-            let x = &mut SharedTensor::<f32>::new(&vec![256, 1024, 128]).unwrap();
+            let mut x = SharedTensor::<f32>::new(&vec![256, 1024, 128]);
             x.write_only(&device).unwrap();
         }
     }
diff --git a/tests/run-pass/multiple_read_only_borrows.rs b/tests/run-pass/multiple_read_only_borrows.rs
index daa0f2ea..ce3015ff 100644
--- a/tests/run-pass/multiple_read_only_borrows.rs
+++ b/tests/run-pass/multiple_read_only_borrows.rs
@@ -5,7 +5,7 @@ fn main() {
     let ntv = Native::new();
     let dev = ntv.new_device(ntv.hardwares()).unwrap();
 
-    let x = &mut SharedTensor::<f32>::new(&10).unwrap();
+    let x = &mut SharedTensor::<f32>::new(&10);
     x.write_only(&dev).unwrap();
 
     let m1 = x.read(&dev);
diff --git a/tests/shared_memory_specs.rs b/tests/shared_memory_specs.rs
index 23a89bb1..e263c3ee 100644
--- a/tests/shared_memory_specs.rs
+++ b/tests/shared_memory_specs.rs
@@ -24,7 +24,7 @@ mod shared_memory_spec {
     fn it_creates_new_shared_memory_for_native() {
         let ntv = Native::new();
         let cpu = ntv.new_device(ntv.hardwares()).unwrap();
-        let shared_data = &mut SharedTensor::<f32>::new(&10).unwrap();
+        let mut shared_data = SharedTensor::<f32>::new(&10);
         match shared_data.write_only(&cpu).unwrap() {
             &mut MemoryType::Native(ref dat) => {
                 let data = dat.as_slice::<f32>();
@@ -40,7 +40,7 @@
     fn it_creates_new_shared_memory_for_cuda() {
         let ntv = Cuda::new();
         let device = ntv.new_device(&ntv.hardwares()[0..1]).unwrap();
-        let shared_data = &mut SharedTensor::<f32>::new(&10).unwrap();
+        let mut shared_data = SharedTensor::<f32>::new(&10);
         match shared_data.write_only(&device) {
             Ok(&mut MemoryType::Cuda(_)) => {},
             #[cfg(any(feature = "cuda", feature = "opencl"))]
@@ -53,7 +53,7 @@
     fn it_creates_new_shared_memory_for_opencl() {
         let ntv = OpenCL::new();
         let device = ntv.new_device(&ntv.hardwares()[0..1]).unwrap();
-        let shared_data = &mut SharedTensor::<f32>::new(&10).unwrap();
+        let mut shared_data = SharedTensor::<f32>::new(&10);
         match shared_data.write_only(&device) {
             Ok(&mut MemoryType::OpenCL(_)) => {},
             _ => assert!(false),
@@ -65,7 +65,7 @@
     fn it_fails_on_initialized_memory_read() {
         let ntv = Native::new();
         let cpu = ntv.new_device(ntv.hardwares()).unwrap();
-        let shared_data = &mut SharedTensor::<f32>::new(&10).unwrap();
+        let mut shared_data = SharedTensor::<f32>::new(&10);
         assert_eq!(shared_data.read(&cpu).unwrap_err(),
                    Error::UninitializedMemory);
         assert_eq!(shared_data.read_write(&cpu).unwrap_err(),
@@ -85,7 +85,7 @@ mod shared_memory_spec {
         let nt = Native::new();
         let cu_device = cu.new_device(&cu.hardwares()[0..1]).unwrap();
         let nt_device = nt.new_device(nt.hardwares()).unwrap();
-        let mem = &mut SharedTensor::<f64>::new(&3).unwrap();
+        let mut mem = SharedTensor::<f64>::new(&3);
         write_to_memory(mem.write_only(&nt_device).unwrap(),
                         &[1.0f64, 2.0, 123.456]);
         match mem.read(&cu_device) {
@@ -115,7 +115,7 @@ mod shared_memory_spec {
         let nt = Native::new();
         let cl_device = cl.new_device(&cl.hardwares()[0..1]).unwrap();
         let nt_device = nt.new_device(nt.hardwares()).unwrap();
-        let mem = &mut SharedTensor::<f64>::new(&3).unwrap();
+        let mut mem = SharedTensor::<f64>::new(&3);
         write_to_memory(mem.write_only(&nt_device).unwrap(),
                         &[1.0f64, 2.0, 123.456]);
         match mem.read(&cl_device) {
@@ -127,7 +127,7 @@
         }
         // It has not successfully synced to the device.
         // Not the other way around.
-        mem.drop_device(&nt_device);
+        mem.drop_device(&nt_device).unwrap();
         match mem.read(&nt_device) {
             Ok(m) => assert_eq!(m.as_native().unwrap().as_slice::<f64>(),
                                 [1.0, 2.0, 123.456]),
@@ -140,13 +140,13 @@
 
     #[test]
     fn it_reshapes_correctly() {
-        let mut shared_data = &mut SharedTensor::<f32>::new(&10).unwrap();
+        let mut shared_data = SharedTensor::<f32>::new(&10);
         assert!(shared_data.reshape(&vec![5, 2]).is_ok());
     }
 
     #[test]
     fn it_returns_err_for_invalid_size_reshape() {
-        let mut shared_data = &mut SharedTensor::<f32>::new(&10).unwrap();
+        let mut shared_data = SharedTensor::<f32>::new(&10);
         assert!(shared_data.reshape(&vec![10, 2]).is_err());
     }
 }
diff --git a/tests/tensor_specs.rs b/tests/tensor_specs.rs
index 61f7c666..8c85e976 100644
--- a/tests/tensor_specs.rs
+++ b/tests/tensor_specs.rs
@@ -31,7 +31,7 @@ mod tensor_spec {
 
     #[test]
     fn it_resizes_tensor() {
-        let mut tensor = SharedTensor::<f32>::new(&(10, 20, 30)).unwrap();
+        let mut tensor = SharedTensor::<f32>::new(&(10, 20, 30));
         assert_eq!(tensor.desc(), &[10, 20, 30]);
         tensor.resize(&(2, 3, 4, 5)).unwrap();
         assert_eq!(tensor.desc(), &[2, 3, 4, 5]);
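
For a quick caller-side view of the change, a minimal sketch follows. It assumes the collenchyma prelude that the touched tests import (their `extern crate`/`use` lines fall outside these hunks); the element type `f32` and the shape `&10` are placeholder values taken from the tests above.

    // Sketch only: the import lines below are assumed, since the hunks do not
    // show the test files' `extern crate`/`use` statements.
    extern crate collenchyma as co;
    use co::prelude::*;

    fn main() {
        let ntv = Native::new();
        let dev = ntv.new_device(ntv.hardwares()).unwrap();

        // Before this patch, construction itself returned
        // `Result<SharedTensor<T>, Error>` and had to be unwrapped:
        //     let mut x = SharedTensor::<f32>::new(&10).unwrap();

        // After this patch, `new` only fills in the tensor descriptor and
        // returns the tensor directly; calls that actually touch device
        // memory, such as `write_only`, still return `Result`.
        let mut x = SharedTensor::<f32>::new(&10);
        x.write_only(&dev).unwrap();
    }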