diff --git a/.travis.yml b/.travis.yml
index caa9bf8a5..97b4d1fa4 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -30,6 +30,9 @@ addons:
     packages:
       - libopenblas-dev
       - gfortran
+before_script:
+  - rustup component add rustfmt
 script:
   - |
+    cargo fmt --all -- --check &&
     ./scripts/all-tests.sh "$FEATURES" "$IS_NIGHTLY"
diff --git a/benches/bench1.rs b/benches/bench1.rs
index 13bbcb29a..6f47b6b59 100644
--- a/benches/bench1.rs
+++ b/benches/bench1.rs
@@ -1,25 +1,17 @@
 #![feature(test)]
 #![allow(unused_imports)]

-extern crate test;
 extern crate ndarray;
+extern crate test;

-use ndarray::{
-    Array,
-    Axis,
-    Ix,
-    Array1,
-    Array2,
-    Zip,
-};
-use ndarray::{arr0, arr1, arr2, azip, s};
 use ndarray::ShapeBuilder;
+use ndarray::{arr0, arr1, arr2, azip, s};
+use ndarray::{Array, Array1, Array2, Axis, Ix, Zip};

 use test::black_box;

 #[bench]
-fn iter_sum_1d_regular(bench: &mut test::Bencher)
-{
+fn iter_sum_1d_regular(bench: &mut test::Bencher) {
     let a = Array::::zeros(64 * 64);
     let a = black_box(a);
     bench.iter(|| {
@@ -32,8 +24,7 @@ fn iter_sum_1d_regular(bench: &mut test::Bencher)
 }

 #[bench]
-fn iter_sum_1d_raw(bench: &mut test::Bencher)
-{
+fn iter_sum_1d_raw(bench: &mut test::Bencher) {
     // this is autovectorized to death (= great performance)
     let a = Array::::zeros(64 * 64);
     let a = black_box(a);
@@ -47,8 +38,7 @@ fn iter_sum_1d_raw(bench: &mut test::Bencher)
 }

 #[bench]
-fn iter_sum_2d_regular(bench: &mut test::Bencher)
-{
+fn iter_sum_2d_regular(bench: &mut test::Bencher) {
     let a = Array::::zeros((64, 64));
     let a = black_box(a);
     bench.iter(|| {
@@ -61,8 +51,7 @@ fn iter_sum_2d_regular(bench: &mut test::Bencher)
 }

 #[bench]
-fn iter_sum_2d_by_row(bench: &mut test::Bencher)
-{
+fn iter_sum_2d_by_row(bench: &mut test::Bencher) {
     let a = Array::::zeros((64, 64));
     let a = black_box(a);
     bench.iter(|| {
@@ -77,8 +66,7 @@ fn iter_sum_2d_by_row(bench: &mut test::Bencher)
 }

 #[bench]
-fn iter_sum_2d_raw(bench: &mut test::Bencher)
-{
+fn iter_sum_2d_raw(bench: &mut test::Bencher) {
     // this is autovectorized to death (= great performance)
     let a = Array::::zeros((64, 64));
     let a = black_box(a);
@@ -92,8 +80,7 @@ fn iter_sum_2d_raw(bench: &mut test::Bencher)
 }

 #[bench]
-fn iter_sum_2d_cutout(bench: &mut test::Bencher)
-{
+fn iter_sum_2d_cutout(bench: &mut test::Bencher) {
     let a = Array::::zeros((66, 66));
     let av = a.slice(s![1..-1, 1..-1]);
     let a = black_box(av);
@@ -107,8 +94,7 @@ fn iter_sum_2d_cutout(bench: &mut test::Bencher)
 }

 #[bench]
-fn iter_sum_2d_cutout_by_row(bench: &mut test::Bencher)
-{
+fn iter_sum_2d_cutout_by_row(bench: &mut test::Bencher) {
     let a = Array::::zeros((66, 66));
     let av = a.slice(s![1..-1, 1..-1]);
     let a = black_box(av);
@@ -124,8 +110,7 @@ fn iter_sum_2d_cutout_by_row(bench: &mut test::Bencher)
 }

 #[bench]
-fn iter_sum_2d_cutout_outer_iter(bench: &mut test::Bencher)
-{
+fn iter_sum_2d_cutout_outer_iter(bench: &mut test::Bencher) {
     let a = Array::::zeros((66, 66));
     let av = a.slice(s![1..-1, 1..-1]);
     let a = black_box(av);
@@ -141,8 +126,7 @@ fn iter_sum_2d_cutout_outer_iter(bench: &mut test::Bencher)
 }

 #[bench]
-fn iter_sum_2d_transpose_regular(bench: &mut test::Bencher)
-{
+fn iter_sum_2d_transpose_regular(bench: &mut test::Bencher) {
     let mut a = Array::::zeros((64, 64));
     a.swap_axes(0, 1);
     let a = black_box(a);
@@ -156,8 +140,7 @@ fn iter_sum_2d_transpose_regular(bench: &mut test::Bencher)
 }

 #[bench]
-fn iter_sum_2d_transpose_by_row(bench: &mut test::Bencher)
-{
+fn iter_sum_2d_transpose_by_row(bench: &mut test::Bencher) {
     let mut a = Array::::zeros((64, 64));
     a.swap_axes(0, 1);
     let a
= black_box(a); @@ -173,116 +156,85 @@ fn iter_sum_2d_transpose_by_row(bench: &mut test::Bencher) } #[bench] -fn sum_2d_regular(bench: &mut test::Bencher) -{ +fn sum_2d_regular(bench: &mut test::Bencher) { let a = Array::::zeros((64, 64)); let a = black_box(a); - bench.iter(|| { - a.sum() - }); + bench.iter(|| a.sum()); } #[bench] -fn sum_2d_cutout(bench: &mut test::Bencher) -{ +fn sum_2d_cutout(bench: &mut test::Bencher) { let a = Array::::zeros((66, 66)); let av = a.slice(s![1..-1, 1..-1]); let a = black_box(av); - bench.iter(|| { - a.sum() - }); + bench.iter(|| a.sum()); } #[bench] -fn sum_2d_float(bench: &mut test::Bencher) -{ +fn sum_2d_float(bench: &mut test::Bencher) { let a = Array::::zeros((64, 64)); let a = black_box(a.view()); - bench.iter(|| { - a.sum() - }); + bench.iter(|| a.sum()); } #[bench] -fn sum_2d_float_cutout(bench: &mut test::Bencher) -{ +fn sum_2d_float_cutout(bench: &mut test::Bencher) { let a = Array::::zeros((66, 66)); let av = a.slice(s![1..-1, 1..-1]); let a = black_box(av); - bench.iter(|| { - a.sum() - }); + bench.iter(|| a.sum()); } #[bench] -fn sum_2d_float_t_cutout(bench: &mut test::Bencher) -{ +fn sum_2d_float_t_cutout(bench: &mut test::Bencher) { let a = Array::::zeros((66, 66)); let av = a.slice(s![1..-1, 1..-1]).reversed_axes(); let a = black_box(av); - bench.iter(|| { - a.sum() - }); + bench.iter(|| a.sum()); } #[bench] -fn fold_sum_i32_2d_regular(bench: &mut test::Bencher) -{ +fn fold_sum_i32_2d_regular(bench: &mut test::Bencher) { let a = Array::::zeros((64, 64)); - bench.iter(|| { - a.fold(0, |acc, &x| acc + x) - }); + bench.iter(|| a.fold(0, |acc, &x| acc + x)); } #[bench] -fn fold_sum_i32_2d_cutout(bench: &mut test::Bencher) -{ +fn fold_sum_i32_2d_cutout(bench: &mut test::Bencher) { let a = Array::::zeros((66, 66)); let av = a.slice(s![1..-1, 1..-1]); let a = black_box(av); - bench.iter(|| { - a.fold(0, |acc, &x| acc + x) - }); + bench.iter(|| a.fold(0, |acc, &x| acc + x)); } #[bench] -fn fold_sum_i32_2d_stride(bench: &mut test::Bencher) -{ +fn fold_sum_i32_2d_stride(bench: &mut test::Bencher) { let a = Array::::zeros((64, 128)); let av = a.slice(s![.., ..;2]); let a = black_box(av); - bench.iter(|| { - a.fold(0, |acc, &x| acc + x) - }); + bench.iter(|| a.fold(0, |acc, &x| acc + x)); } #[bench] -fn fold_sum_i32_2d_transpose(bench: &mut test::Bencher) -{ +fn fold_sum_i32_2d_transpose(bench: &mut test::Bencher) { let a = Array::::zeros((64, 64)); let a = a.t(); - bench.iter(|| { - a.fold(0, |acc, &x| acc + x) - }); + bench.iter(|| a.fold(0, |acc, &x| acc + x)); } #[bench] -fn fold_sum_i32_2d_cutout_transpose(bench: &mut test::Bencher) -{ +fn fold_sum_i32_2d_cutout_transpose(bench: &mut test::Bencher) { let a = Array::::zeros((66, 66)); let mut av = a.slice(s![1..-1, 1..-1]); av.swap_axes(0, 1); let a = black_box(av); - bench.iter(|| { - a.fold(0, |acc, &x| acc + x) - }); + bench.iter(|| a.fold(0, |acc, &x| acc + x)); } const ADD2DSZ: usize = 64; #[bench] -fn add_2d_regular(bench: &mut test::Bencher) -{ +fn add_2d_regular(bench: &mut test::Bencher) { let mut a = Array::::zeros((ADD2DSZ, ADD2DSZ)); let b = Array::::zeros((ADD2DSZ, ADD2DSZ)); let bv = b.view(); @@ -292,8 +244,7 @@ fn add_2d_regular(bench: &mut test::Bencher) } #[bench] -fn add_2d_zip(bench: &mut test::Bencher) -{ +fn add_2d_zip(bench: &mut test::Bencher) { let mut a = Array::::zeros((ADD2DSZ, ADD2DSZ)); let b = Array::::zeros((ADD2DSZ, ADD2DSZ)); bench.iter(|| { @@ -302,32 +253,25 @@ fn add_2d_zip(bench: &mut test::Bencher) } #[bench] -fn add_2d_alloc(bench: &mut test::Bencher) -{ +fn 
add_2d_alloc(bench: &mut test::Bencher) { let a = Array::::zeros((ADD2DSZ, ADD2DSZ)); let b = Array::::zeros((ADD2DSZ, ADD2DSZ)); - bench.iter(|| { - &a + &b - }); + bench.iter(|| &a + &b); } #[bench] -fn add_2d_zip_alloc(bench: &mut test::Bencher) -{ +fn add_2d_zip_alloc(bench: &mut test::Bencher) { let a = Array::::zeros((ADD2DSZ, ADD2DSZ)); let b = Array::::zeros((ADD2DSZ, ADD2DSZ)); - bench.iter(|| { - unsafe { - let mut c = Array::uninitialized(a.dim()); - azip!(a, b, mut c in { *c = a + b }); - c - } + bench.iter(|| unsafe { + let mut c = Array::uninitialized(a.dim()); + azip!(a, b, mut c in { *c = a + b }); + c }); } #[bench] -fn add_2d_assign_ops(bench: &mut test::Bencher) -{ +fn add_2d_assign_ops(bench: &mut test::Bencher) { let mut a = Array::::zeros((ADD2DSZ, ADD2DSZ)); let b = Array::::zeros((ADD2DSZ, ADD2DSZ)); let bv = b.view(); @@ -339,8 +283,7 @@ fn add_2d_assign_ops(bench: &mut test::Bencher) } #[bench] -fn add_2d_cutout(bench: &mut test::Bencher) -{ +fn add_2d_cutout(bench: &mut test::Bencher) { let mut a = Array::::zeros((ADD2DSZ + 2, ADD2DSZ + 2)); let mut acut = a.slice_mut(s![1..-1, 1..-1]); let b = Array::::zeros((ADD2DSZ, ADD2DSZ)); @@ -351,8 +294,7 @@ fn add_2d_cutout(bench: &mut test::Bencher) } #[bench] -fn add_2d_zip_cutout(bench: &mut test::Bencher) -{ +fn add_2d_zip_cutout(bench: &mut test::Bencher) { let mut a = Array::::zeros((ADD2DSZ + 2, ADD2DSZ + 2)); let mut acut = a.slice_mut(s![1..-1, 1..-1]); let b = Array::::zeros((ADD2DSZ, ADD2DSZ)); @@ -362,8 +304,7 @@ fn add_2d_zip_cutout(bench: &mut test::Bencher) } #[bench] -fn add_2d_cutouts_by_4(bench: &mut test::Bencher) -{ +fn add_2d_cutouts_by_4(bench: &mut test::Bencher) { let mut a = Array::::zeros((64 * 1, 64 * 1)); let b = Array::::zeros((64 * 1, 64 * 1)); let chunksz = (4, 4); @@ -375,8 +316,7 @@ fn add_2d_cutouts_by_4(bench: &mut test::Bencher) } #[bench] -fn add_2d_cutouts_by_16(bench: &mut test::Bencher) -{ +fn add_2d_cutouts_by_16(bench: &mut test::Bencher) { let mut a = Array::::zeros((64 * 1, 64 * 1)); let b = Array::::zeros((64 * 1, 64 * 1)); let chunksz = (16, 16); @@ -388,8 +328,7 @@ fn add_2d_cutouts_by_16(bench: &mut test::Bencher) } #[bench] -fn add_2d_cutouts_by_32(bench: &mut test::Bencher) -{ +fn add_2d_cutouts_by_32(bench: &mut test::Bencher) { let mut a = Array::::zeros((64 * 1, 64 * 1)); let b = Array::::zeros((64 * 1, 64 * 1)); let chunksz = (32, 32); @@ -401,8 +340,7 @@ fn add_2d_cutouts_by_32(bench: &mut test::Bencher) } #[bench] -fn add_2d_broadcast_1_to_2(bench: &mut test::Bencher) -{ +fn add_2d_broadcast_1_to_2(bench: &mut test::Bencher) { let mut a = Array2::::zeros((ADD2DSZ, ADD2DSZ)); let b = Array1::::zeros(ADD2DSZ); let bv = b.view(); @@ -412,8 +350,7 @@ fn add_2d_broadcast_1_to_2(bench: &mut test::Bencher) } #[bench] -fn add_2d_broadcast_0_to_2(bench: &mut test::Bencher) -{ +fn add_2d_broadcast_0_to_2(bench: &mut test::Bencher) { let mut a = Array::::zeros((ADD2DSZ, ADD2DSZ)); let b = Array::::zeros(()); let bv = b.view(); @@ -425,52 +362,40 @@ fn add_2d_broadcast_0_to_2(bench: &mut test::Bencher) #[bench] fn scalar_toowned(bench: &mut test::Bencher) { let a = Array::::zeros((64, 64)); - bench.iter(|| { - a.to_owned() - }); + bench.iter(|| a.to_owned()); } #[bench] fn scalar_add_1(bench: &mut test::Bencher) { let a = Array::::zeros((64, 64)); let n = 1.; - bench.iter(|| { - &a + n - }); + bench.iter(|| &a + n); } #[bench] fn scalar_add_2(bench: &mut test::Bencher) { let a = Array::::zeros((64, 64)); let n = 1.; - bench.iter(|| { - n + &a - }); + bench.iter(|| n + &a); } 
#[bench] fn scalar_sub_1(bench: &mut test::Bencher) { let a = Array::::zeros((64, 64)); let n = 1.; - bench.iter(|| { - &a - n - }); + bench.iter(|| &a - n); } #[bench] fn scalar_sub_2(bench: &mut test::Bencher) { let a = Array::::zeros((64, 64)); let n = 1.; - bench.iter(|| { - n - &a - }); + bench.iter(|| n - &a); } - // This is for comparison with add_2d_broadcast_0_to_2 #[bench] -fn add_2d_0_to_2_iadd_scalar(bench: &mut test::Bencher) -{ +fn add_2d_0_to_2_iadd_scalar(bench: &mut test::Bencher) { let mut a = Array::::zeros((ADD2DSZ, ADD2DSZ)); let n = black_box(0); bench.iter(|| { @@ -479,8 +404,7 @@ fn add_2d_0_to_2_iadd_scalar(bench: &mut test::Bencher) } #[bench] -fn add_2d_strided(bench: &mut test::Bencher) -{ +fn add_2d_strided(bench: &mut test::Bencher) { let mut a = Array::::zeros((ADD2DSZ, ADD2DSZ * 2)); let mut a = a.slice_mut(s![.., ..;2]); let b = Array::::zeros((ADD2DSZ, ADD2DSZ)); @@ -491,8 +415,7 @@ fn add_2d_strided(bench: &mut test::Bencher) } #[bench] -fn add_2d_regular_dyn(bench: &mut test::Bencher) -{ +fn add_2d_regular_dyn(bench: &mut test::Bencher) { let mut a = Array::::zeros(&[ADD2DSZ, ADD2DSZ][..]); let b = Array::::zeros(&[ADD2DSZ, ADD2DSZ][..]); let bv = b.view(); @@ -502,8 +425,7 @@ fn add_2d_regular_dyn(bench: &mut test::Bencher) } #[bench] -fn add_2d_strided_dyn(bench: &mut test::Bencher) -{ +fn add_2d_strided_dyn(bench: &mut test::Bencher) { let mut a = Array::::zeros(&[ADD2DSZ, ADD2DSZ * 2][..]); let mut a = a.slice_mut(s![.., ..;2]); let b = Array::::zeros(&[ADD2DSZ, ADD2DSZ][..]); @@ -513,10 +435,8 @@ fn add_2d_strided_dyn(bench: &mut test::Bencher) }); } - #[bench] -fn add_2d_zip_strided(bench: &mut test::Bencher) -{ +fn add_2d_zip_strided(bench: &mut test::Bencher) { let mut a = Array::::zeros((ADD2DSZ, ADD2DSZ * 2)); let mut a = a.slice_mut(s![.., ..;2]); let b = Array::::zeros((ADD2DSZ, ADD2DSZ)); @@ -526,8 +446,7 @@ fn add_2d_zip_strided(bench: &mut test::Bencher) } #[bench] -fn add_2d_one_transposed(bench: &mut test::Bencher) -{ +fn add_2d_one_transposed(bench: &mut test::Bencher) { let mut a = Array::::zeros((ADD2DSZ, ADD2DSZ)); a.swap_axes(0, 1); let b = Array::::zeros((ADD2DSZ, ADD2DSZ)); @@ -537,8 +456,7 @@ fn add_2d_one_transposed(bench: &mut test::Bencher) } #[bench] -fn add_2d_zip_one_transposed(bench: &mut test::Bencher) -{ +fn add_2d_zip_one_transposed(bench: &mut test::Bencher) { let mut a = Array::::zeros((ADD2DSZ, ADD2DSZ)); a.swap_axes(0, 1); let b = Array::::zeros((ADD2DSZ, ADD2DSZ)); @@ -548,8 +466,7 @@ fn add_2d_zip_one_transposed(bench: &mut test::Bencher) } #[bench] -fn add_2d_both_transposed(bench: &mut test::Bencher) -{ +fn add_2d_both_transposed(bench: &mut test::Bencher) { let mut a = Array::::zeros((ADD2DSZ, ADD2DSZ)); a.swap_axes(0, 1); let mut b = Array::::zeros((ADD2DSZ, ADD2DSZ)); @@ -560,8 +477,7 @@ fn add_2d_both_transposed(bench: &mut test::Bencher) } #[bench] -fn add_2d_zip_both_transposed(bench: &mut test::Bencher) -{ +fn add_2d_zip_both_transposed(bench: &mut test::Bencher) { let mut a = Array::::zeros((ADD2DSZ, ADD2DSZ)); a.swap_axes(0, 1); let mut b = Array::::zeros((ADD2DSZ, ADD2DSZ)); @@ -572,8 +488,7 @@ fn add_2d_zip_both_transposed(bench: &mut test::Bencher) } #[bench] -fn add_2d_f32_regular(bench: &mut test::Bencher) -{ +fn add_2d_f32_regular(bench: &mut test::Bencher) { let mut a = Array::::zeros((ADD2DSZ, ADD2DSZ)); let b = Array::::zeros((ADD2DSZ, ADD2DSZ)); let bv = b.view(); @@ -585,8 +500,7 @@ fn add_2d_f32_regular(bench: &mut test::Bencher) const ADD3DSZ: usize = 16; #[bench] -fn add_3d_strided(bench: 
&mut test::Bencher) -{ +fn add_3d_strided(bench: &mut test::Bencher) { let mut a = Array::::zeros((ADD3DSZ, ADD3DSZ, ADD3DSZ * 2)); let mut a = a.slice_mut(s![.., .., ..;2]); let b = Array::::zeros(a.dim()); @@ -597,8 +511,7 @@ fn add_3d_strided(bench: &mut test::Bencher) } #[bench] -fn add_3d_strided_dyn(bench: &mut test::Bencher) -{ +fn add_3d_strided_dyn(bench: &mut test::Bencher) { let mut a = Array::::zeros(&[ADD3DSZ, ADD3DSZ, ADD3DSZ * 2][..]); let mut a = a.slice_mut(s![.., .., ..;2]); let b = Array::::zeros(a.dim()); @@ -608,12 +521,10 @@ fn add_3d_strided_dyn(bench: &mut test::Bencher) }); } - const ADD1D_SIZE: usize = 64 * 64; #[bench] -fn add_1d_regular(bench: &mut test::Bencher) -{ +fn add_1d_regular(bench: &mut test::Bencher) { let mut a = Array::::zeros(ADD1D_SIZE); let b = Array::::zeros(a.dim()); bench.iter(|| { @@ -622,8 +533,7 @@ fn add_1d_regular(bench: &mut test::Bencher) } #[bench] -fn add_1d_strided(bench: &mut test::Bencher) -{ +fn add_1d_strided(bench: &mut test::Bencher) { let mut a = Array::::zeros(ADD1D_SIZE * 2); let mut av = a.slice_mut(s![..;2]); let b = Array::::zeros(av.dim()); @@ -633,8 +543,7 @@ fn add_1d_strided(bench: &mut test::Bencher) } #[bench] -fn iadd_scalar_2d_regular(bench: &mut test::Bencher) -{ +fn iadd_scalar_2d_regular(bench: &mut test::Bencher) { let mut a = Array::::zeros((ADD2DSZ, ADD2DSZ)); bench.iter(|| { a += 1.; @@ -642,8 +551,7 @@ fn iadd_scalar_2d_regular(bench: &mut test::Bencher) } #[bench] -fn iadd_scalar_2d_strided(bench: &mut test::Bencher) -{ +fn iadd_scalar_2d_strided(bench: &mut test::Bencher) { let mut a = Array::::zeros((ADD2DSZ, ADD2DSZ * 2)); let mut a = a.slice_mut(s![.., ..;2]); bench.iter(|| { @@ -652,8 +560,7 @@ fn iadd_scalar_2d_strided(bench: &mut test::Bencher) } #[bench] -fn iadd_scalar_2d_regular_dyn(bench: &mut test::Bencher) -{ +fn iadd_scalar_2d_regular_dyn(bench: &mut test::Bencher) { let mut a = Array::::zeros(vec![ADD2DSZ, ADD2DSZ]); bench.iter(|| { a += 1.; @@ -661,8 +568,7 @@ fn iadd_scalar_2d_regular_dyn(bench: &mut test::Bencher) } #[bench] -fn iadd_scalar_2d_strided_dyn(bench: &mut test::Bencher) -{ +fn iadd_scalar_2d_strided_dyn(bench: &mut test::Bencher) { let mut a = Array::::zeros(vec![ADD2DSZ, ADD2DSZ * 2]); let mut a = a.slice_mut(s![.., ..;2]); bench.iter(|| { @@ -671,8 +577,7 @@ fn iadd_scalar_2d_strided_dyn(bench: &mut test::Bencher) } #[bench] -fn scaled_add_2d_f32_regular(bench: &mut test::Bencher) -{ +fn scaled_add_2d_f32_regular(bench: &mut test::Bencher) { let mut av = Array::::zeros((ADD2DSZ, ADD2DSZ)); let bv = Array::::zeros((ADD2DSZ, ADD2DSZ)); let scalar = 3.1415926535; @@ -682,8 +587,7 @@ fn scaled_add_2d_f32_regular(bench: &mut test::Bencher) } #[bench] -fn assign_scalar_2d_corder(bench: &mut test::Bencher) -{ +fn assign_scalar_2d_corder(bench: &mut test::Bencher) { let a = Array::zeros((ADD2DSZ, ADD2DSZ)); let mut a = black_box(a); let s = 3.; @@ -691,8 +595,7 @@ fn assign_scalar_2d_corder(bench: &mut test::Bencher) } #[bench] -fn assign_scalar_2d_cutout(bench: &mut test::Bencher) -{ +fn assign_scalar_2d_cutout(bench: &mut test::Bencher) { let mut a = Array::zeros((66, 66)); let a = a.slice_mut(s![1..-1, 1..-1]); let mut a = black_box(a); @@ -701,8 +604,7 @@ fn assign_scalar_2d_cutout(bench: &mut test::Bencher) } #[bench] -fn assign_scalar_2d_forder(bench: &mut test::Bencher) -{ +fn assign_scalar_2d_forder(bench: &mut test::Bencher) { let mut a = Array::zeros((ADD2DSZ, ADD2DSZ)); a.swap_axes(0, 1); let mut a = black_box(a); @@ -711,16 +613,14 @@ fn assign_scalar_2d_forder(bench: 
&mut test::Bencher) } #[bench] -fn assign_zero_2d_corder(bench: &mut test::Bencher) -{ +fn assign_zero_2d_corder(bench: &mut test::Bencher) { let a = Array::zeros((ADD2DSZ, ADD2DSZ)); let mut a = black_box(a); bench.iter(|| a.fill(0.)) } #[bench] -fn assign_zero_2d_cutout(bench: &mut test::Bencher) -{ +fn assign_zero_2d_cutout(bench: &mut test::Bencher) { let mut a = Array::zeros((66, 66)); let a = a.slice_mut(s![1..-1, 1..-1]); let mut a = black_box(a); @@ -728,8 +628,7 @@ fn assign_zero_2d_cutout(bench: &mut test::Bencher) } #[bench] -fn assign_zero_2d_forder(bench: &mut test::Bencher) -{ +fn assign_zero_2d_forder(bench: &mut test::Bencher) { let mut a = Array::zeros((ADD2DSZ, ADD2DSZ)); a.swap_axes(0, 1); let mut a = black_box(a); @@ -737,26 +636,35 @@ fn assign_zero_2d_forder(bench: &mut test::Bencher) } #[bench] -fn bench_iter_diag(bench: &mut test::Bencher) -{ +fn bench_iter_diag(bench: &mut test::Bencher) { let a = Array::::zeros((1024, 1024)); - bench.iter(|| for elt in a.diag() { black_box(elt); }) + bench.iter(|| { + for elt in a.diag() { + black_box(elt); + } + }) } #[bench] -fn bench_row_iter(bench: &mut test::Bencher) -{ +fn bench_row_iter(bench: &mut test::Bencher) { let a = Array::::zeros((1024, 1024)); let it = a.row(17); - bench.iter(|| for elt in it.clone() { black_box(elt); }) + bench.iter(|| { + for elt in it.clone() { + black_box(elt); + } + }) } #[bench] -fn bench_col_iter(bench: &mut test::Bencher) -{ +fn bench_col_iter(bench: &mut test::Bencher) { let a = Array::::zeros((1024, 1024)); let it = a.column(17); - bench.iter(|| for elt in it.clone() { black_box(elt); }) + bench.iter(|| { + for elt in it.clone() { + black_box(elt); + } + }) } macro_rules! mat_mul { @@ -779,7 +687,7 @@ macro_rules! mat_mul { } } -mat_mul!{mat_mul_f32, f32, +mat_mul! {mat_mul_f32, f32, (m004, 4, 4, 4) (m007, 7, 7, 7) (m008, 8, 8, 8) @@ -793,7 +701,7 @@ mat_mul!{mat_mul_f32, f32, (mix10000, 128, 10000, 128) } -mat_mul!{mat_mul_f64, f64, +mat_mul! {mat_mul_f64, f64, (m004, 4, 4, 4) (m007, 7, 7, 7) (m008, 8, 8, 8) @@ -807,7 +715,7 @@ mat_mul!{mat_mul_f64, f64, (mix10000, 128, 10000, 128) } -mat_mul!{mat_mul_i32, i32, +mat_mul! 
{mat_mul_i32, i32, (m004, 4, 4, 4) (m007, 7, 7, 7) (m008, 8, 8, 8) @@ -819,115 +727,96 @@ mat_mul!{mat_mul_i32, i32, } #[bench] -fn create_iter_4d(bench: &mut test::Bencher) -{ +fn create_iter_4d(bench: &mut test::Bencher) { let mut a = Array::from_elem((4, 5, 3, 2), 1.0); a.swap_axes(0, 1); a.swap_axes(2, 1); let v = black_box(a.view()); - bench.iter(|| { - v.into_iter() - }); + bench.iter(|| v.into_iter()); } #[bench] -fn bench_to_owned_n(bench: &mut test::Bencher) -{ +fn bench_to_owned_n(bench: &mut test::Bencher) { let a = Array::::zeros((32, 32)); bench.iter(|| a.to_owned()); } #[bench] -fn bench_to_owned_t(bench: &mut test::Bencher) -{ +fn bench_to_owned_t(bench: &mut test::Bencher) { let mut a = Array::::zeros((32, 32)); a.swap_axes(0, 1); bench.iter(|| a.to_owned()); } #[bench] -fn bench_to_owned_strided(bench: &mut test::Bencher) -{ +fn bench_to_owned_strided(bench: &mut test::Bencher) { let a = Array::::zeros((32, 64)); let a = a.slice(s![.., ..;2]); bench.iter(|| a.to_owned()); } #[bench] -fn equality_i32(bench: &mut test::Bencher) -{ +fn equality_i32(bench: &mut test::Bencher) { let a = Array::::zeros((64, 64)); let b = Array::::zeros((64, 64)); bench.iter(|| a == b); } #[bench] -fn equality_f32(bench: &mut test::Bencher) -{ +fn equality_f32(bench: &mut test::Bencher) { let a = Array::::zeros((64, 64)); let b = Array::::zeros((64, 64)); bench.iter(|| a == b); } #[bench] -fn equality_f32_mixorder(bench: &mut test::Bencher) -{ +fn equality_f32_mixorder(bench: &mut test::Bencher) { let a = Array::::zeros((64, 64)); let b = Array::::zeros((64, 64).f()); bench.iter(|| a == b); } #[bench] -fn dot_f32_16(bench: &mut test::Bencher) -{ +fn dot_f32_16(bench: &mut test::Bencher) { let a = Array::::zeros(16); let b = Array::::zeros(16); bench.iter(|| a.dot(&b)); } #[bench] -fn dot_f32_20(bench: &mut test::Bencher) -{ +fn dot_f32_20(bench: &mut test::Bencher) { let a = Array::::zeros(20); let b = Array::::zeros(20); bench.iter(|| a.dot(&b)); } #[bench] -fn dot_f32_32(bench: &mut test::Bencher) -{ +fn dot_f32_32(bench: &mut test::Bencher) { let a = Array::::zeros(32); let b = Array::::zeros(32); bench.iter(|| a.dot(&b)); } #[bench] -fn dot_f32_256(bench: &mut test::Bencher) -{ +fn dot_f32_256(bench: &mut test::Bencher) { let a = Array::::zeros(256); let b = Array::::zeros(256); bench.iter(|| a.dot(&b)); } #[bench] -fn dot_f32_1024(bench: &mut test::Bencher) -{ +fn dot_f32_1024(bench: &mut test::Bencher) { let av = Array::::zeros(1024); let bv = Array::::zeros(1024); - bench.iter(|| { - av.dot(&bv) - }); + bench.iter(|| av.dot(&bv)); } #[bench] -fn dot_f32_10e6(bench: &mut test::Bencher) -{ +fn dot_f32_10e6(bench: &mut test::Bencher) { let n = 1_000_000; let av = Array::::zeros(n); let bv = Array::::zeros(n); - bench.iter(|| { - av.dot(&bv) - }); + bench.iter(|| av.dot(&bv)); } #[bench] @@ -954,7 +843,9 @@ const MEAN_SUM_N: usize = 127; fn range_mat(m: Ix, n: Ix) -> Array2 { assert!(m * n != 0); - Array::linspace(0., (m * n - 1) as f32, m * n).into_shape((m, n)).unwrap() + Array::linspace(0., (m * n - 1) as f32, m * n) + .into_shape((m, n)) + .unwrap() } #[bench] diff --git a/benches/chunks.rs b/benches/chunks.rs index 61d68d2bb..3bdcb1c59 100644 --- a/benches/chunks.rs +++ b/benches/chunks.rs @@ -8,8 +8,7 @@ use ndarray::prelude::*; use ndarray::NdProducer; #[bench] -fn chunk2x2_iter_sum(bench: &mut Bencher) -{ +fn chunk2x2_iter_sum(bench: &mut Bencher) { let a = Array::::zeros((256, 256)); let chunksz = (2, 2); let mut sum = Array::zeros(a.exact_chunks(chunksz).raw_dim()); @@ -21,8 +20,7 @@ 
fn chunk2x2_iter_sum(bench: &mut Bencher) } #[bench] -fn chunk2x2_sum(bench: &mut Bencher) -{ +fn chunk2x2_sum(bench: &mut Bencher) { let a = Array::::zeros((256, 256)); let chunksz = (2, 2); let mut sum = Array::zeros(a.exact_chunks(chunksz).raw_dim()); @@ -34,8 +32,7 @@ fn chunk2x2_sum(bench: &mut Bencher) } #[bench] -fn chunk2x2_sum_get1(bench: &mut Bencher) -{ +fn chunk2x2_sum_get1(bench: &mut Bencher) { let a = Array::::zeros((256, 256)); let chunksz = (2, 2); let mut sum = Array::::zeros(a.exact_chunks(chunksz).raw_dim()); @@ -43,15 +40,14 @@ fn chunk2x2_sum_get1(bench: &mut Bencher) let (m, n) = a.dim(); for i in 0..m { for j in 0..n { - sum[[i/2, j/2]] += a[[i, j]]; + sum[[i / 2, j / 2]] += a[[i, j]]; } } }); } #[bench] -fn chunk2x2_sum_uget1(bench: &mut Bencher) -{ +fn chunk2x2_sum_uget1(bench: &mut Bencher) { let a = Array::::zeros((256, 256)); let chunksz = (2, 2); let mut sum = Array::::zeros(a.exact_chunks(chunksz).raw_dim()); @@ -60,7 +56,7 @@ fn chunk2x2_sum_uget1(bench: &mut Bencher) for i in 0..m { for j in 0..n { unsafe { - *sum.uget_mut([i/2, j/2]) += *a.uget([i, j]); + *sum.uget_mut([i / 2, j / 2]) += *a.uget([i, j]); } } } @@ -68,8 +64,7 @@ fn chunk2x2_sum_uget1(bench: &mut Bencher) } #[bench] -fn chunk2x2_sum_get2(bench: &mut Bencher) -{ +fn chunk2x2_sum_get2(bench: &mut Bencher) { let a = Array::::zeros((256, 256)); let chunksz = (2, 2); let mut sum = Array::::zeros(a.exact_chunks(chunksz).raw_dim()); @@ -77,10 +72,10 @@ fn chunk2x2_sum_get2(bench: &mut Bencher) let (m, n) = sum.dim(); for i in 0..m { for j in 0..n { - sum[[i, j]] += a[[i*2 + 0, j*2 + 0]]; - sum[[i, j]] += a[[i*2 + 0, j*2 + 1]]; - sum[[i, j]] += a[[i*2 + 1, j*2 + 1]]; - sum[[i, j]] += a[[i*2 + 1, j*2 + 0]]; + sum[[i, j]] += a[[i * 2 + 0, j * 2 + 0]]; + sum[[i, j]] += a[[i * 2 + 0, j * 2 + 1]]; + sum[[i, j]] += a[[i * 2 + 1, j * 2 + 1]]; + sum[[i, j]] += a[[i * 2 + 1, j * 2 + 0]]; } } }); diff --git a/benches/construct.rs b/benches/construct.rs index 6909da5e6..b9a6a4566 100644 --- a/benches/construct.rs +++ b/benches/construct.rs @@ -8,34 +8,23 @@ use ndarray::prelude::*; #[bench] fn default_f64(bench: &mut Bencher) { - bench.iter(|| { - Array::::default((128, 128)) - }) + bench.iter(|| Array::::default((128, 128))) } #[bench] fn zeros_f64(bench: &mut Bencher) { - bench.iter(|| { - Array::::zeros((128, 128)) - }) + bench.iter(|| Array::::zeros((128, 128))) } #[bench] -fn map_regular(bench: &mut test::Bencher) -{ +fn map_regular(bench: &mut test::Bencher) { let a = Array::linspace(0., 127., 128).into_shape((8, 16)).unwrap(); - bench.iter(|| { - a.map(|&x| 2. * x) - }); + bench.iter(|| a.map(|&x| 2. * x)); } - #[bench] -fn map_stride(bench: &mut test::Bencher) -{ +fn map_stride(bench: &mut test::Bencher) { let a = Array::linspace(0., 127., 256).into_shape((8, 32)).unwrap(); let av = a.slice(s![.., ..;2]); - bench.iter(|| { - av.map(|&x| 2. * x) - }); + bench.iter(|| av.map(|&x| 2. * x)); } diff --git a/benches/higher-order.rs b/benches/higher-order.rs index 22886e4dc..95f076700 100644 --- a/benches/higher-order.rs +++ b/benches/higher-order.rs @@ -1,9 +1,8 @@ - #![feature(test)] extern crate test; -use test::Bencher; use test::black_box; +use test::Bencher; extern crate ndarray; use ndarray::prelude::*; @@ -13,77 +12,68 @@ const X: usize = 64; const Y: usize = 16; #[bench] -fn map_regular(bench: &mut Bencher) -{ +fn map_regular(bench: &mut Bencher) { let a = Array::linspace(0., 127., N).into_shape((X, Y)).unwrap(); - bench.iter(|| { - a.map(|&x| 2. * x) - }); + bench.iter(|| a.map(|&x| 2. 
* x)); } - pub fn double_array(mut a: ArrayViewMut2) { a *= 2.0; } #[bench] -fn map_stride_double_f64(bench: &mut Bencher) -{ - let mut a = Array::linspace(0., 127., N * 2).into_shape([X, Y * 2]).unwrap(); +fn map_stride_double_f64(bench: &mut Bencher) { + let mut a = Array::linspace(0., 127., N * 2) + .into_shape([X, Y * 2]) + .unwrap(); let mut av = a.slice_mut(s![.., ..;2]); bench.iter(|| { double_array(av.view_mut()); - }); } #[bench] -fn map_stride_f64(bench: &mut Bencher) -{ - let a = Array::linspace(0., 127., N * 2).into_shape([X, Y * 2]).unwrap(); +fn map_stride_f64(bench: &mut Bencher) { + let a = Array::linspace(0., 127., N * 2) + .into_shape([X, Y * 2]) + .unwrap(); let av = a.slice(s![.., ..;2]); - bench.iter(|| { - av.map(|&x| 2. * x) - }); + bench.iter(|| av.map(|&x| 2. * x)); } #[bench] -fn map_stride_u32(bench: &mut Bencher) -{ - let a = Array::linspace(0., 127., N * 2).into_shape([X, Y * 2]).unwrap(); +fn map_stride_u32(bench: &mut Bencher) { + let a = Array::linspace(0., 127., N * 2) + .into_shape([X, Y * 2]) + .unwrap(); let b = a.mapv(|x| x as u32); let av = b.slice(s![.., ..;2]); - bench.iter(|| { - av.map(|&x| 2 * x) - }); + bench.iter(|| av.map(|&x| 2 * x)); } #[bench] -fn fold_axis(bench: &mut Bencher) -{ - let a = Array::linspace(0., 127., N * 2).into_shape([X, Y * 2]).unwrap(); - bench.iter(|| { - a.fold_axis(Axis(0), 0., |&acc, &elt| acc + elt) - }); +fn fold_axis(bench: &mut Bencher) { + let a = Array::linspace(0., 127., N * 2) + .into_shape([X, Y * 2]) + .unwrap(); + bench.iter(|| a.fold_axis(Axis(0), 0., |&acc, &elt| acc + elt)); } const MA: usize = 64; const MASZ: usize = MA * MA; #[bench] -fn map_axis_0(bench: &mut Bencher) -{ - let a = Array::from_iter(0..MASZ as i32).into_shape([MA, MA]).unwrap(); - bench.iter(|| { - a.map_axis(Axis(0), |lane| black_box(lane)) - }); +fn map_axis_0(bench: &mut Bencher) { + let a = Array::from_iter(0..MASZ as i32) + .into_shape([MA, MA]) + .unwrap(); + bench.iter(|| a.map_axis(Axis(0), |lane| black_box(lane))); } #[bench] -fn map_axis_1(bench: &mut Bencher) -{ - let a = Array::from_iter(0..MASZ as i32).into_shape([MA, MA]).unwrap(); - bench.iter(|| { - a.map_axis(Axis(1), |lane| black_box(lane)) - }); +fn map_axis_1(bench: &mut Bencher) { + let a = Array::from_iter(0..MASZ as i32) + .into_shape([MA, MA]) + .unwrap(); + bench.iter(|| a.map_axis(Axis(1), |lane| black_box(lane))); } diff --git a/benches/iter.rs b/benches/iter.rs index a30c640da..82499f50a 100644 --- a/benches/iter.rs +++ b/benches/iter.rs @@ -1,130 +1,104 @@ #![feature(test)] -extern crate test; extern crate rawpointer; -use test::Bencher; -use test::black_box; +extern crate test; use rawpointer::PointerExt; +use test::black_box; +use test::Bencher; extern crate ndarray; use ndarray::prelude::*; -use ndarray::{Zip, FoldWhile}; use ndarray::Slice; +use ndarray::{FoldWhile, Zip}; #[bench] -fn iter_sum_2d_regular(bench: &mut Bencher) -{ +fn iter_sum_2d_regular(bench: &mut Bencher) { let a = Array::::zeros((64, 64)); - bench.iter(|| { - a.iter().fold(0, |acc, &x| acc + x) - }); + bench.iter(|| a.iter().fold(0, |acc, &x| acc + x)); } #[bench] -fn iter_sum_2d_cutout(bench: &mut Bencher) -{ +fn iter_sum_2d_cutout(bench: &mut Bencher) { let a = Array::::zeros((66, 66)); let av = a.slice(s![1..-1, 1..-1]); let a = av; - bench.iter(|| { - a.iter().fold(0, |acc, &x| acc + x) - }); + bench.iter(|| a.iter().fold(0, |acc, &x| acc + x)); } #[bench] -fn iter_all_2d_cutout(bench: &mut Bencher) -{ +fn iter_all_2d_cutout(bench: &mut Bencher) { let a = Array::::zeros((66, 66)); 
let av = a.slice(s![1..-1, 1..-1]); let a = av; - bench.iter(|| { - a.iter().all(|&x| x >= 0) - }); + bench.iter(|| a.iter().all(|&x| x >= 0)); } #[bench] -fn iter_sum_2d_transpose(bench: &mut Bencher) -{ +fn iter_sum_2d_transpose(bench: &mut Bencher) { let a = Array::::zeros((66, 66)); let a = a.t(); - bench.iter(|| { - a.iter().fold(0, |acc, &x| acc + x) - }); + bench.iter(|| a.iter().fold(0, |acc, &x| acc + x)); } #[bench] -fn iter_filter_sum_2d_u32(bench: &mut Bencher) -{ +fn iter_filter_sum_2d_u32(bench: &mut Bencher) { let a = Array::linspace(0., 1., 256).into_shape((16, 16)).unwrap(); let b = a.mapv(|x| (x * 100.) as u32); - bench.iter(|| { - b.iter().filter(|&&x| x < 75).fold(0, |acc, &x| acc + x) - }); + bench.iter(|| b.iter().filter(|&&x| x < 75).fold(0, |acc, &x| acc + x)); } #[bench] -fn iter_filter_sum_2d_f32(bench: &mut Bencher) -{ +fn iter_filter_sum_2d_f32(bench: &mut Bencher) { let a = Array::linspace(0., 1., 256).into_shape((16, 16)).unwrap(); let b = a * 100.; - bench.iter(|| { - b.iter().filter(|&&x| x < 75.).fold(0., |acc, &x| acc + x) - }); + bench.iter(|| b.iter().filter(|&&x| x < 75.).fold(0., |acc, &x| acc + x)); } #[bench] -fn iter_filter_sum_2d_stride_u32(bench: &mut Bencher) -{ +fn iter_filter_sum_2d_stride_u32(bench: &mut Bencher) { let a = Array::linspace(0., 1., 256).into_shape((16, 16)).unwrap(); let b = a.mapv(|x| (x * 100.) as u32); let b = b.slice(s![.., ..;2]); - bench.iter(|| { - b.iter().filter(|&&x| x < 75).fold(0, |acc, &x| acc + x) - }); + bench.iter(|| b.iter().filter(|&&x| x < 75).fold(0, |acc, &x| acc + x)); } #[bench] -fn iter_filter_sum_2d_stride_f32(bench: &mut Bencher) -{ +fn iter_filter_sum_2d_stride_f32(bench: &mut Bencher) { let a = Array::linspace(0., 1., 256).into_shape((16, 16)).unwrap(); let b = a * 100.; let b = b.slice(s![.., ..;2]); - bench.iter(|| { - b.iter().filter(|&&x| x < 75.).fold(0., |acc, &x| acc + x) - }); + bench.iter(|| b.iter().filter(|&&x| x < 75.).fold(0., |acc, &x| acc + x)); } const ZIPSZ: usize = 10_000; #[bench] -fn sum_3_std_zip1(bench: &mut Bencher) -{ +fn sum_3_std_zip1(bench: &mut Bencher) { let a = vec![1; ZIPSZ]; let b = vec![1; ZIPSZ]; let c = vec![1; ZIPSZ]; bench.iter(|| { - a.iter().zip(b.iter().zip(&c)).fold(0, |acc, (&a, (&b, &c))| { - acc + a + b + c - }) + a.iter() + .zip(b.iter().zip(&c)) + .fold(0, |acc, (&a, (&b, &c))| acc + a + b + c) }); } #[bench] -fn sum_3_std_zip2(bench: &mut Bencher) -{ +fn sum_3_std_zip2(bench: &mut Bencher) { let a = vec![1; ZIPSZ]; let b = vec![1; ZIPSZ]; let c = vec![1; ZIPSZ]; bench.iter(|| { - a.iter().zip(b.iter()).zip(&c).fold(0, |acc, ((&a, &b), &c)| { - acc + a + b + c - }) + a.iter() + .zip(b.iter()) + .zip(&c) + .fold(0, |acc, ((&a, &b), &c)| acc + a + b + c) }); } #[bench] -fn sum_3_std_zip3(bench: &mut Bencher) -{ +fn sum_3_std_zip3(bench: &mut Bencher) { let a = vec![1; ZIPSZ]; let b = vec![1; ZIPSZ]; let c = vec![1; ZIPSZ]; @@ -138,8 +112,7 @@ fn sum_3_std_zip3(bench: &mut Bencher) } #[bench] -fn vector_sum_3_std_zip(bench: &mut Bencher) -{ +fn vector_sum_3_std_zip(bench: &mut Bencher) { let a = vec![1.; ZIPSZ]; let b = vec![1.; ZIPSZ]; let mut c = vec![1.; ZIPSZ]; @@ -151,8 +124,7 @@ fn vector_sum_3_std_zip(bench: &mut Bencher) } #[bench] -fn sum_3_azip(bench: &mut Bencher) -{ +fn sum_3_azip(bench: &mut Bencher) { let a = vec![1; ZIPSZ]; let b = vec![1; ZIPSZ]; let c = vec![1; ZIPSZ]; @@ -166,21 +138,21 @@ fn sum_3_azip(bench: &mut Bencher) } #[bench] -fn sum_3_azip_fold(bench: &mut Bencher) -{ +fn sum_3_azip_fold(bench: &mut Bencher) { let a = vec![1; 
ZIPSZ]; let b = vec![1; ZIPSZ]; let c = vec![1; ZIPSZ]; bench.iter(|| { - Zip::from(&a).and(&b).and(&c).fold_while(0, |acc, &a, &b, &c| { - FoldWhile::Continue(acc + a + b + c) - }).into_inner() + Zip::from(&a) + .and(&b) + .and(&c) + .fold_while(0, |acc, &a, &b, &c| FoldWhile::Continue(acc + a + b + c)) + .into_inner() }); } #[bench] -fn vector_sum_3_azip(bench: &mut Bencher) -{ +fn vector_sum_3_azip(bench: &mut Bencher) { let a = vec![1.; ZIPSZ]; let b = vec![1.; ZIPSZ]; let mut c = vec![1.; ZIPSZ]; @@ -200,8 +172,7 @@ fn vector_sum3_unchecked(a: &[f64], b: &[f64], c: &mut [f64]) { } #[bench] -fn vector_sum_3_zip_unchecked(bench: &mut Bencher) -{ +fn vector_sum_3_zip_unchecked(bench: &mut Bencher) { let a = vec![1.; ZIPSZ]; let b = vec![1.; ZIPSZ]; let mut c = vec![1.; ZIPSZ]; @@ -211,20 +182,17 @@ fn vector_sum_3_zip_unchecked(bench: &mut Bencher) } #[bench] -fn vector_sum_3_zip_unchecked_manual(bench: &mut Bencher) -{ +fn vector_sum_3_zip_unchecked_manual(bench: &mut Bencher) { let a = vec![1.; ZIPSZ]; let b = vec![1.; ZIPSZ]; let mut c = vec![1.; ZIPSZ]; - bench.iter(move || { - unsafe { - let mut ap = a.as_ptr(); - let mut bp = b.as_ptr(); - let mut cp = c.as_mut_ptr(); - let cend = cp.offset(c.len() as isize); - while cp != cend { - *cp.post_inc() += *ap.post_inc() + *bp.post_inc(); - } + bench.iter(move || unsafe { + let mut ap = a.as_ptr(); + let mut bp = b.as_ptr(); + let mut cp = c.as_mut_ptr(); + let cend = cp.offset(c.len() as isize); + while cp != cend { + *cp.post_inc() += *ap.post_inc() + *bp.post_inc(); } }); } @@ -256,11 +224,10 @@ fn indexed_zip_1d_ix1(bench: &mut Bencher) { } bench.iter(|| { - Zip::indexed(&a) - .apply(|i, &_elt| { - black_box(i); - //assert!(a[i] == elt); - }); + Zip::indexed(&a).apply(|i, &_elt| { + black_box(i); + //assert!(a[i] == elt); + }); }) } @@ -286,16 +253,13 @@ fn indexed_zip_2d_ix2(bench: &mut Bencher) { } bench.iter(|| { - Zip::indexed(&a) - .apply(|i, &_elt| { - black_box(i); - //assert!(a[i] == elt); - }); + Zip::indexed(&a).apply(|i, &_elt| { + black_box(i); + //assert!(a[i] == elt); + }); }) } - - #[bench] fn indexed_iter_3d_ix3(bench: &mut Bencher) { let mut a = Array::::zeros((ISZ, ISZ, ISZ)); @@ -319,11 +283,10 @@ fn indexed_zip_3d_ix3(bench: &mut Bencher) { } bench.iter(|| { - Zip::indexed(&a) - .apply(|i, &_elt| { - black_box(i); - //assert!(a[i] == elt); - }); + Zip::indexed(&a).apply(|i, &_elt| { + black_box(i); + //assert!(a[i] == elt); + }); }) } @@ -344,49 +307,41 @@ fn indexed_iter_3d_dyn(bench: &mut Bencher) { } #[bench] -fn iter_sum_1d_strided_fold(bench: &mut Bencher) -{ +fn iter_sum_1d_strided_fold(bench: &mut Bencher) { let mut a = Array::::ones(10240); a.slice_axis_inplace(Axis(0), Slice::new(0, None, 2)); - bench.iter(|| { - a.iter().fold(0, |acc, &x| acc + x) - }); + bench.iter(|| a.iter().fold(0, |acc, &x| acc + x)); } #[bench] -fn iter_sum_1d_strided_rfold(bench: &mut Bencher) -{ +fn iter_sum_1d_strided_rfold(bench: &mut Bencher) { let mut a = Array::::ones(10240); a.slice_axis_inplace(Axis(0), Slice::new(0, None, 2)); - bench.iter(|| { - a.iter().rfold(0, |acc, &x| acc + x) - }); + bench.iter(|| a.iter().rfold(0, |acc, &x| acc + x)); } - #[bench] -fn iter_axis_iter_sum(bench: &mut Bencher) -{ +fn iter_axis_iter_sum(bench: &mut Bencher) { let a = Array::::zeros((64, 64)); - bench.iter(|| { - a.axis_iter(Axis(0)).map(|plane| plane.sum()).sum::() - }); + bench.iter(|| a.axis_iter(Axis(0)).map(|plane| plane.sum()).sum::()); } #[bench] -fn iter_axis_chunks_1_iter_sum(bench: &mut Bencher) -{ +fn 
iter_axis_chunks_1_iter_sum(bench: &mut Bencher) { let a = Array::::zeros((64, 64)); bench.iter(|| { - a.axis_chunks_iter(Axis(0), 1).map(|plane| plane.sum()).sum::() + a.axis_chunks_iter(Axis(0), 1) + .map(|plane| plane.sum()) + .sum::() }); } #[bench] -fn iter_axis_chunks_5_iter_sum(bench: &mut Bencher) -{ +fn iter_axis_chunks_5_iter_sum(bench: &mut Bencher) { let a = Array::::zeros((64, 64)); bench.iter(|| { - a.axis_chunks_iter(Axis(0), 5).map(|plane| plane.sum()).sum::() + a.axis_chunks_iter(Axis(0), 5) + .map(|plane| plane.sum()) + .sum::() }); } diff --git a/benches/numeric.rs b/benches/numeric.rs index db0252c8a..b1d537bf6 100644 --- a/benches/numeric.rs +++ b/benches/numeric.rs @@ -1,4 +1,3 @@ - #![feature(test)] extern crate test; @@ -12,15 +11,20 @@ const X: usize = 64; const Y: usize = 16; #[bench] -fn clip(bench: &mut Bencher) -{ - let mut a = Array::linspace(0., 127., N * 2).into_shape([X, Y * 2]).unwrap(); +fn clip(bench: &mut Bencher) { + let mut a = Array::linspace(0., 127., N * 2) + .into_shape([X, Y * 2]) + .unwrap(); let min = 2.; let max = 5.; bench.iter(|| { a.mapv_inplace(|mut x| { - if x < min { x = min } - if x > max { x = max } + if x < min { + x = min + } + if x > max { + x = max + } x }) }); diff --git a/benches/par_rayon.rs b/benches/par_rayon.rs index e207a65aa..74a5386f4 100644 --- a/benches/par_rayon.rs +++ b/benches/par_rayon.rs @@ -1,13 +1,13 @@ -#![cfg(feature="rayon")] +#![cfg(feature = "rayon")] #![feature(test)] extern crate rayon; -extern crate ndarray; extern crate itertools; +extern crate ndarray; -use ndarray::prelude::*; use ndarray::parallel::prelude::*; +use ndarray::prelude::*; extern crate test; use test::Bencher; @@ -27,8 +27,7 @@ fn set_threads() { } #[bench] -fn map_exp_regular(bench: &mut Bencher) -{ +fn map_exp_regular(bench: &mut Bencher) { let mut a = Array2::::zeros((EXP_N, EXP_N)); a.swap_axes(0, 1); bench.iter(|| { @@ -37,8 +36,7 @@ fn map_exp_regular(bench: &mut Bencher) } #[bench] -fn rayon_exp_regular(bench: &mut Bencher) -{ +fn rayon_exp_regular(bench: &mut Bencher) { set_threads(); let mut a = Array2::::zeros((EXP_N, EXP_N)); a.swap_axes(0, 1); @@ -51,22 +49,18 @@ const FASTEXP: usize = EXP_N; #[inline] fn fastexp(x: f64) -> f64 { - let x = 1. + x/1024.; + let x = 1. 
+ x / 1024.; x.powi(1024) } #[bench] -fn map_fastexp_regular(bench: &mut Bencher) -{ +fn map_fastexp_regular(bench: &mut Bencher) { let mut a = Array2::::zeros((FASTEXP, FASTEXP)); - bench.iter(|| { - a.mapv_inplace(|x| fastexp(x)) - }); + bench.iter(|| a.mapv_inplace(|x| fastexp(x))); } #[bench] -fn rayon_fastexp_regular(bench: &mut Bencher) -{ +fn rayon_fastexp_regular(bench: &mut Bencher) { set_threads(); let mut a = Array2::::zeros((FASTEXP, FASTEXP)); bench.iter(|| { @@ -75,18 +69,14 @@ fn rayon_fastexp_regular(bench: &mut Bencher) } #[bench] -fn map_fastexp_cut(bench: &mut Bencher) -{ +fn map_fastexp_cut(bench: &mut Bencher) { let mut a = Array2::::zeros((FASTEXP, FASTEXP)); let mut a = a.slice_mut(s![.., ..-1]); - bench.iter(|| { - a.mapv_inplace(|x| fastexp(x)) - }); + bench.iter(|| a.mapv_inplace(|x| fastexp(x))); } #[bench] -fn rayon_fastexp_cut(bench: &mut Bencher) -{ +fn rayon_fastexp_cut(bench: &mut Bencher) { set_threads(); let mut a = Array2::::zeros((FASTEXP, FASTEXP)); let mut a = a.slice_mut(s![.., ..-1]); @@ -96,8 +86,7 @@ fn rayon_fastexp_cut(bench: &mut Bencher) } #[bench] -fn map_fastexp_by_axis(bench: &mut Bencher) -{ +fn map_fastexp_by_axis(bench: &mut Bencher) { let mut a = Array2::::zeros((FASTEXP, FASTEXP)); bench.iter(|| { for mut sheet in a.axis_iter_mut(Axis(0)) { @@ -107,29 +96,29 @@ fn map_fastexp_by_axis(bench: &mut Bencher) } #[bench] -fn rayon_fastexp_by_axis(bench: &mut Bencher) -{ +fn rayon_fastexp_by_axis(bench: &mut Bencher) { set_threads(); let mut a = Array2::::zeros((FASTEXP, FASTEXP)); bench.iter(|| { - a.axis_iter_mut(Axis(0)).into_par_iter() + a.axis_iter_mut(Axis(0)) + .into_par_iter() .for_each(|mut sheet| sheet.mapv_inplace(fastexp)); }); } #[bench] -fn rayon_fastexp_zip(bench: &mut Bencher) -{ +fn rayon_fastexp_zip(bench: &mut Bencher) { set_threads(); let mut a = Array2::::zeros((FASTEXP, FASTEXP)); bench.iter(|| { - Zip::from(&mut a).into_par_iter().for_each(|(elt, )| *elt = fastexp(*elt)); + Zip::from(&mut a) + .into_par_iter() + .for_each(|(elt,)| *elt = fastexp(*elt)); }); } #[bench] -fn add(bench: &mut Bencher) -{ +fn add(bench: &mut Bencher) { let mut a = Array2::::zeros((ADDN, ADDN)); let b = Array2::::zeros((ADDN, ADDN)); let c = Array2::::zeros((ADDN, ADDN)); @@ -142,8 +131,7 @@ fn add(bench: &mut Bencher) } #[bench] -fn rayon_add(bench: &mut Bencher) -{ +fn rayon_add(bench: &mut Bencher) { set_threads(); let mut a = Array2::::zeros((ADDN, ADDN)); let b = Array2::::zeros((ADDN, ADDN)); diff --git a/blas-tests/src/lib.rs b/blas-tests/src/lib.rs index e69de29bb..8b1378917 100644 --- a/blas-tests/src/lib.rs +++ b/blas-tests/src/lib.rs @@ -0,0 +1 @@ + diff --git a/blas-tests/tests/oper.rs b/blas-tests/tests/oper.rs index 80526e4d8..5e7c8aa3f 100644 --- a/blas-tests/tests/oper.rs +++ b/blas-tests/tests/oper.rs @@ -2,24 +2,30 @@ extern crate defmac; extern crate ndarray; extern crate num_traits; -use ndarray::prelude::*; -use ndarray::{LinalgScalar, Data}; use ndarray::linalg::general_mat_mul; use ndarray::linalg::general_mat_vec_mul; +use ndarray::prelude::*; +use ndarray::{Data, LinalgScalar}; use ndarray::{Ix, Ixs, SliceInfo, SliceOrIndex}; -use std::fmt; use defmac::defmac; use num_traits::Float; +use std::fmt; fn assert_approx_eq(f: F, g: F, tol: F) -> bool { - assert!((f - g).abs() <= tol, "{:?} approx== {:?} (tol={:?})", - f, g, tol); + assert!( + (f - g).abs() <= tol, + "{:?} approx== {:?} (tol={:?})", + f, + g, + tol + ); true } fn assert_close(a: ArrayView, b: ArrayView) - where D: Dimension, +where + D: Dimension, { let diff = 
(&a - &b).mapv_into(f64::abs); @@ -28,7 +34,7 @@ fn assert_close(a: ArrayView, b: ArrayView) let crtol = b.mapv(|x| x.abs() * rtol); let tol = crtol + atol; let tol_m_diff = &diff - &tol; - let maxdiff = tol_m_diff.fold(0./0., |x, y| f64::max(x, *y)); + let maxdiff = tol_m_diff.fold(0. / 0., |x, y| f64::max(x, *y)); println!("diff offset from tolerance level= {:.2e}", maxdiff); if maxdiff > 0. { println!("{:.4?}", a); @@ -37,16 +43,17 @@ fn assert_close(a: ArrayView, b: ArrayView) } } -fn reference_dot<'a,A, V1, V2>(a: V1, b: V2) -> A - where A: NdFloat, - V1: AsArray<'a, A>, - V2: AsArray<'a, A>, +fn reference_dot<'a, A, V1, V2>(a: V1, b: V2) -> A +where + A: NdFloat, + V1: AsArray<'a, A>, + V2: AsArray<'a, A>, { let a = a.into(); let b = b.into(); - a.iter().zip(b.iter()).fold(A::zero(), |acc, (&x, &y)| { - acc + x * y - }) + a.iter() + .zip(b.iter()) + .fold(A::zero(), |acc, (&x, &y)| acc + x * y) } #[test] @@ -67,7 +74,6 @@ fn dot_product() { assert_approx_eq(a2.dot(&b2), reference_dot(&a2, &b2), 1e-5); } - let a = a.map(|f| *f as f32); let b = b.map(|f| *f as f32); assert_approx_eq(a.dot(&b), dot as f32, 1e-5); @@ -128,11 +134,15 @@ fn dot_product_neg_stride() { } fn range_mat(m: Ix, n: Ix) -> Array2 { - Array::linspace(0., (m * n) as f32 - 1., m * n).into_shape((m, n)).unwrap() + Array::linspace(0., (m * n) as f32 - 1., m * n) + .into_shape((m, n)) + .unwrap() } fn range_mat64(m: Ix, n: Ix) -> Array2 { - Array::linspace(0., (m * n) as f64 - 1., m * n).into_shape((m, n)).unwrap() + Array::linspace(0., (m * n) as f64 - 1., m * n) + .into_shape((m, n)) + .unwrap() } fn range1_mat64(m: Ix) -> Array1 { @@ -140,15 +150,17 @@ fn range1_mat64(m: Ix) -> Array1 { } fn range_i32(m: Ix, n: Ix) -> Array2 { - Array::from_iter(0..(m * n) as i32).into_shape((m, n)).unwrap() + Array::from_iter(0..(m * n) as i32) + .into_shape((m, n)) + .unwrap() } // simple, slow, correct (hopefully) mat mul -fn reference_mat_mul(lhs: &ArrayBase, rhs: &ArrayBase) - -> Array2 - where A: LinalgScalar, - S: Data, - S2: Data, +fn reference_mat_mul(lhs: &ArrayBase, rhs: &ArrayBase) -> Array2 +where + A: LinalgScalar, + S: Data, + S2: Data, { let ((m, k), (k2, n)) = (lhs.dim(), rhs.dim()); assert!(m.checked_mul(n).is_some()); @@ -162,8 +174,9 @@ fn reference_mat_mul(lhs: &ArrayBase, rhs: &ArrayBase let mut j = 0; for rr in &mut res_elems { unsafe { - *rr = (0..k).fold(A::zero(), - move |s, x| s + *lhs.uget((i, x)) * *rhs.uget((x, j))); + *rr = (0..k).fold(A::zero(), move |s, x| { + s + *lhs.uget((i, x)) * *rhs.uget((x, j)) + }); } j += 1; if j == n { @@ -171,33 +184,33 @@ fn reference_mat_mul(lhs: &ArrayBase, rhs: &ArrayBase i += 1; } } - unsafe { - ArrayBase::from_shape_vec_unchecked((m, n), res_elems) - } + unsafe { ArrayBase::from_shape_vec_unchecked((m, n), res_elems) } } // simple, slow, correct (hopefully) mat mul -fn reference_mat_vec_mul(lhs: &ArrayBase, rhs: &ArrayBase) - -> Array1 - where A: LinalgScalar, - S: Data, - S2: Data, +fn reference_mat_vec_mul(lhs: &ArrayBase, rhs: &ArrayBase) -> Array1 +where + A: LinalgScalar, + S: Data, + S2: Data, { let ((m, _), k) = (lhs.dim(), rhs.dim()); reference_mat_mul(lhs, &rhs.to_owned().into_shape((k, 1)).unwrap()) - .into_shape(m).unwrap() + .into_shape(m) + .unwrap() } // simple, slow, correct (hopefully) mat mul -fn reference_vec_mat_mul(lhs: &ArrayBase, rhs: &ArrayBase) - -> Array1 - where A: LinalgScalar, - S: Data, - S2: Data, +fn reference_vec_mat_mul(lhs: &ArrayBase, rhs: &ArrayBase) -> Array1 +where + A: LinalgScalar, + S: Data, + S2: Data, { let (m, (_, n)) = 
(lhs.dim(), rhs.dim()); reference_mat_mul(&lhs.to_owned().into_shape((1, m)).unwrap(), rhs) - .into_shape(n).unwrap() + .into_shape(n) + .unwrap() } #[test] @@ -370,22 +383,22 @@ fn scaled_add() { let d = alpha * &b + &a; assert_eq!(c, d); - } #[test] fn scaled_add_2() { let beta = -2.3; - let sizes = vec![(4, 4, 1, 4), - (8, 8, 1, 8), - (17, 15, 17, 15), - (4, 17, 4, 17), - (17, 3, 1, 3), - (19, 18, 19, 18), - (16, 17, 16, 17), - (15, 16, 15, 16), - (67, 63, 1, 63), - ]; + let sizes = vec![ + (4, 4, 1, 4), + (8, 8, 1, 8), + (17, 15, 17, 15), + (4, 17, 4, 17), + (17, 3, 1, 3), + (19, 18, 19, 18), + (16, 17, 16, 17), + (15, 16, 15, 16), + (67, 63, 1, 63), + ]; // test different strides for &s1 in &[1, 2, -1, -2] { for &s2 in &[1, 2, -1, -2] { @@ -411,27 +424,24 @@ fn scaled_add_2() { #[test] fn scaled_add_3() { let beta = -2.3; - let sizes = vec![(4, 4, 1, 4), - (8, 8, 1, 8), - (17, 15, 17, 15), - (4, 17, 4, 17), - (17, 3, 1, 3), - (19, 18, 19, 18), - (16, 17, 16, 17), - (15, 16, 15, 16), - (67, 63, 1, 63), - ]; + let sizes = vec![ + (4, 4, 1, 4), + (8, 8, 1, 8), + (17, 15, 17, 15), + (4, 17, 4, 17), + (17, 3, 1, 3), + (19, 18, 19, 18), + (16, 17, 16, 17), + (15, 16, 15, 16), + (67, 63, 1, 63), + ]; // test different strides for &s1 in &[1, 2, -1, -2] { for &s2 in &[1, 2, -1, -2] { for &(m, k, n, q) in &sizes { let mut a = range_mat64(m, k); let mut answer = a.clone(); - let cdim = if n == 1 { - vec![q] - } else { - vec![n, q] - }; + let cdim = if n == 1 { vec![q] } else { vec![n, q] }; let cslice = if n == 1 { vec![SliceOrIndex::from(..).step_by(s2)] } else { @@ -457,20 +467,21 @@ fn scaled_add_3() { } } - #[test] fn gen_mat_mul() { let alpha = -2.3; let beta = 3.14; - let sizes = vec![(4, 4, 4), (8, 8, 8), - (17, 15, 16), - (4, 17, 3), - (17, 3, 22), - (19, 18, 2), - (16, 17, 15), - (15, 16, 17), - (67, 63, 62), - ]; + let sizes = vec![ + (4, 4, 4), + (8, 8, 8), + (17, 15, 16), + (4, 17, 3), + (17, 3, 22), + (19, 18, 2), + (16, 17, 15), + (15, 16, 17), + (67, 63, 62), + ]; // test different strides for &s1 in &[1, 2, -1, -2] { for &s2 in &[1, 2, -1, -2] { @@ -496,7 +507,6 @@ fn gen_mat_mul() { } } - // Test y = A x where A is f-order #[test] fn gemm_64_1_f() { @@ -514,15 +524,17 @@ fn gemm_64_1_f() { fn gen_mat_mul_i32() { let alpha = -1; let beta = 2; - let sizes = vec![(4, 4, 4), (8, 8, 8), - (17, 15, 16), - (4, 17, 3), - (17, 3, 22), - (19, 18, 2), - (16, 17, 15), - (15, 16, 17), - (67, 63, 62), - ]; + let sizes = vec![ + (4, 4, 4), + (8, 8, 8), + (17, 15, 16), + (4, 17, 3), + (17, 3, 22), + (19, 18, 2), + (16, 17, 15), + (15, 16, 17), + (67, 63, 62), + ]; for &(m, k, n) in &sizes { let a = range_i32(m, k); let b = range_i32(k, n); @@ -538,16 +550,17 @@ fn gen_mat_mul_i32() { fn gen_mat_vec_mul() { let alpha = -2.3; let beta = 3.14; - let sizes = vec![(4, 4), - (8, 8), - (17, 15), - (4, 17), - (17, 3), - (19, 18), - (16, 17), - (15, 16), - (67, 63), - ]; + let sizes = vec![ + (4, 4), + (8, 8), + (17, 15), + (4, 17), + (17, 3), + (19, 18), + (16, 17), + (15, 16), + (67, 63), + ]; // test different strides for &s1 in &[1, 2, -1, -2] { for &s2 in &[1, 2, -1, -2] { @@ -581,16 +594,17 @@ fn gen_mat_vec_mul() { #[test] fn vec_mat_mul() { - let sizes = vec![(4, 4), - (8, 8), - (17, 15), - (4, 17), - (17, 3), - (19, 18), - (16, 17), - (15, 16), - (67, 63), - ]; + let sizes = vec![ + (4, 4), + (8, 8), + (17, 15), + (4, 17), + (17, 3), + (19, 18), + (16, 17), + (15, 16), + (67, 63), + ]; // test different strides for &s1 in &[1, 2, -1, -2] { for &s2 in &[1, 2, -1, -2] { diff --git a/build.rs 
b/build.rs index 3b163b301..ceeeff389 100644 --- a/build.rs +++ b/build.rs @@ -1,4 +1,3 @@ - /// /// This build script emits the openblas linking directive if requested /// diff --git a/examples/axis_ops.rs b/examples/axis_ops.rs index e1dee54d2..671b3acb8 100644 --- a/examples/axis_ops.rs +++ b/examples/axis_ops.rs @@ -3,13 +3,16 @@ extern crate ndarray; use ndarray::prelude::*; fn regularize(a: &mut Array) -> Result<(), ()> - where D: Dimension, - A: ::std::fmt::Debug, +where + D: Dimension, + A: ::std::fmt::Debug, { println!("Regularize:\n{:?}", a); // reverse all neg axes while let Some(ax) = a.axes().find(|ax| ax.stride() <= 0) { - if ax.stride() == 0 { return Err(()); } + if ax.stride() == 0 { + return Err(()); + } // reverse ax println!("Reverse {:?}", ax.axis()); a.invert_axis(ax.axis()); diff --git a/examples/bounds_check_elim.rs b/examples/bounds_check_elim.rs index dc9e1e22a..fd4eb9fab 100644 --- a/examples/bounds_check_elim.rs +++ b/examples/bounds_check_elim.rs @@ -1,4 +1,4 @@ -#![crate_type="lib"] +#![crate_type = "lib"] // Test cases for bounds check elimination @@ -35,12 +35,20 @@ pub fn testvec_as_slice(a: &Vec) -> f64 { #[no_mangle] pub fn test1d_single(a: &Array1, i: usize) -> f64 { - if i < a.len() { a[i] } else { 0. } + if i < a.len() { + a[i] + } else { + 0. + } } #[no_mangle] pub fn test1d_single_mut(a: &mut Array1, i: usize) -> f64 { - if i < a.len() { *&mut a[i] } else { 0. } + if i < a.len() { + *&mut a[i] + } else { + 0. + } } #[no_mangle] @@ -99,5 +107,4 @@ pub fn test2d_whiles(a: &Array2) -> f64 { sum } -fn main() { -} +fn main() {} diff --git a/examples/column_standardize.rs b/examples/column_standardize.rs index 032c520e3..11ec0d4e1 100644 --- a/examples/column_standardize.rs +++ b/examples/column_standardize.rs @@ -4,7 +4,9 @@ use ndarray::prelude::*; fn std1d(a: ArrayView1) -> f64 { let n = a.len() as f64; - if n == 0. { return 0.; } + if n == 0. { + return 0.; + } let mean = a.sum() / n; (a.fold(0., |acc, &x| acc + (x - mean).powi(2)) / n).sqrt() } @@ -18,9 +20,7 @@ fn main() { // counts -= np.mean(counts, axis=0) // counts /= np.std(counts, axis=0) - let mut data = array![[-1., -2., -3.], - [ 1., -3., 5.], - [ 2., 2., 2.]]; + let mut data = array![[-1., -2., -3.], [1., -3., 5.], [2., 2., 2.]]; println!("{:8.4}", data); println!("{:8.4} (Mean axis=0)", data.mean_axis(Axis(0))); diff --git a/examples/convo.rs b/examples/convo.rs index 1e6ee0ba6..97ae82919 100644 --- a/examples/convo.rs +++ b/examples/convo.rs @@ -7,14 +7,15 @@ use num_traits::Float; use ndarray::prelude::*; const SOBEL_X: [[f32; 3]; 3] = [[-1., 0., 1.], [-2., 0., 2.], [-1., 0., 1.]]; -const SOBEL_Y: [[f32; 3]; 3] = [[ 1., 2., 1.], [ 0., 0., 0.], [-1., -2., -1.]]; -const SHARPEN: [[f32; 3]; 3] = [[0., -1., 0.], [ -1., 5., -1.], [0., -1., 0.]]; +const SOBEL_Y: [[f32; 3]; 3] = [[1., 2., 1.], [0., 0., 0.], [-1., -2., -1.]]; +const SHARPEN: [[f32; 3]; 3] = [[0., -1., 0.], [-1., 5., -1.], [0., -1., 0.]]; type Kernel3x3 = [[A; 3]; 3]; #[inline(never)] fn conv_3x3(a: &ArrayView2, out: &mut ArrayViewMut2, kernel: &Kernel3x3) - where F: Float, +where + F: Float, { let (n, m) = a.dim(); let (np, mp) = out.dim(); diff --git a/examples/life.rs b/examples/life.rs index 2bb1ccca0..c410ef98c 100644 --- a/examples/life.rs +++ b/examples/life.rs @@ -36,31 +36,29 @@ fn iterate(z: &mut Board, scratch: &mut Board) { neigh.fill(0); neigh += &z.slice(s![0..-2, 0..-2]); neigh += &z.slice(s![0..-2, 1..-1]); - neigh += &z.slice(s![0..-2, 2.. 
]); + neigh += &z.slice(s![0..-2, 2..]); neigh += &z.slice(s![1..-1, 0..-2]); - neigh += &z.slice(s![1..-1, 2.. ]); + neigh += &z.slice(s![1..-1, 2..]); - neigh += &z.slice(s![2.. , 0..-2]); - neigh += &z.slice(s![2.. , 1..-1]); - neigh += &z.slice(s![2.. , 2.. ]); + neigh += &z.slice(s![2.., 0..-2]); + neigh += &z.slice(s![2.., 1..-1]); + neigh += &z.slice(s![2.., 2..]); // birth where n = 3 and z[i] = 0, // survive where n = 2 || n = 3 and z[i] = 1 let mut zv = z.slice_mut(s![1..-1, 1..-1]); // this is autovectorized amazingly well! - zv.zip_mut_with(&neigh, |y, &n| { - *y = ((n == 3) || (n == 2 && *y > 0)) as u8 - }); + zv.zip_mut_with(&neigh, |y, &n| *y = ((n == 3) || (n == 2 && *y > 0)) as u8); } fn turn_on_corners(z: &mut Board) { let n = z.rows(); let m = z.cols(); - z[[1 , 1 ]] = 1; - z[[1 , m - 2]] = 1; - z[[n - 2, 1 ]] = 1; + z[[1, 1]] = 1; + z[[1, m - 2]] = 1; + z[[n - 2, 1]] = 1; z[[n - 2, m - 2]] = 1; } diff --git a/examples/rollaxis.rs b/examples/rollaxis.rs index f8c957b66..5c92a2dda 100644 --- a/examples/rollaxis.rs +++ b/examples/rollaxis.rs @@ -4,8 +4,9 @@ use ndarray::prelude::*; use ndarray::Data; pub fn roll_axis(mut a: ArrayBase, to: Axis, from: Axis) -> ArrayBase - where S: Data, - D: Dimension, +where + S: Data, + D: Dimension, { let i = to.index(); let mut j = from.index(); @@ -24,10 +25,12 @@ pub fn roll_axis(mut a: ArrayBase, to: Axis, from: Axis) -> Array } fn main() { - let mut data = array![[[-1., 0., -2.], [1., 7., -3.]], - [[ 1., 0., -3.], [1., 7., 5.]], - [[ 1., 0., -3.], [1., 7., 5.]], - [[ 2., 0., 2.], [1., 7., 2.]]]; + let mut data = array![ + [[-1., 0., -2.], [1., 7., -3.]], + [[1., 0., -3.], [1., 7., 5.]], + [[1., 0., -3.], [1., 7., 5.]], + [[2., 0., 2.], [1., 7., 2.]] + ]; println!("{:8.4?}", data); diff --git a/examples/sort-axis.rs b/examples/sort-axis.rs index 1a23146e9..97429ee70 100644 --- a/examples/sort-axis.rs +++ b/examples/sort-axis.rs @@ -1,12 +1,7 @@ - extern crate ndarray; use ndarray::prelude::*; -use ndarray::{ - Data, - RemoveAxis, - Zip, -}; +use ndarray::{Data, RemoveAxis, Zip}; use std::cmp::Ordering; use std::ptr::copy_nonoverlapping; @@ -34,10 +29,12 @@ impl Permutation { for &i in &self.indices { match seen.get_mut(i) { None => return false, - Some(s) => if *s { - return false; - } else { - *s = true; + Some(s) => { + if *s { + return false; + } else { + *s = true; + } } } } @@ -49,20 +46,23 @@ pub trait SortArray { /// ***Panics*** if `axis` is out of bounds. 
fn identity(&self, axis: Axis) -> Permutation; fn sort_axis_by(&self, axis: Axis, less_than: F) -> Permutation - where F: FnMut(usize, usize) -> bool; + where + F: FnMut(usize, usize) -> bool; } pub trait PermuteArray { type Elem; type Dim; - fn permute_axis(self, axis: Axis, perm: &Permutation) - -> Array - where Self::Elem: Clone, Self::Dim: RemoveAxis; + fn permute_axis(self, axis: Axis, perm: &Permutation) -> Array + where + Self::Elem: Clone, + Self::Dim: RemoveAxis; } impl SortArray for ArrayBase - where S: Data, - D: Dimension, +where + S: Data, + D: Dimension, { fn identity(&self, axis: Axis) -> Permutation { Permutation { @@ -71,29 +71,33 @@ impl SortArray for ArrayBase } fn sort_axis_by(&self, axis: Axis, mut less_than: F) -> Permutation - where F: FnMut(usize, usize) -> bool + where + F: FnMut(usize, usize) -> bool, { let mut perm = self.identity(axis); - perm.indices.sort_by(move |&a, &b| + perm.indices.sort_by(move |&a, &b| { if less_than(a, b) { Ordering::Less } else if less_than(b, a) { Ordering::Greater } else { Ordering::Equal - }); + } + }); perm } } impl PermuteArray for Array - where D: Dimension, +where + D: Dimension, { type Elem = A; type Dim = D; fn permute_axis(self, axis: Axis, perm: &Permutation) -> Array - where D: RemoveAxis, + where + D: RemoveAxis, { let axis = axis; let axis_len = self.len_of(axis); @@ -111,9 +115,7 @@ impl PermuteArray for Array let perm_i = perm.indices[i]; Zip::from(result.index_axis_mut(axis, perm_i)) .and(self.index_axis(axis, i)) - .apply(|to, from| { - copy_nonoverlapping(from, to, 1) - }); + .apply(|to, from| copy_nonoverlapping(from, to, 1)); } // forget moved array elements but not its vec let mut old_storage = self.into_raw_vec(); @@ -125,14 +127,11 @@ impl PermuteArray for Array } } - fn main() { let a = Array::linspace(0., 63., 64).into_shape((8, 8)).unwrap(); let strings = a.map(|x| x.to_string()); - let perm = a.sort_axis_by(Axis(1), |i, j| { - a[[i, 0]] > a[[j, 0]] - }); + let perm = a.sort_axis_by(Axis(1), |i, j| a[[i, 0]] > a[[j, 0]]); println!("{:?}", perm); let b = a.permute_axis(Axis(0), &perm); println!("{:?}", b); diff --git a/examples/zip_many.rs b/examples/zip_many.rs index 5e839d37c..45992c38a 100644 --- a/examples/zip_many.rs +++ b/examples/zip_many.rs @@ -16,7 +16,6 @@ fn main() { { let a = a.view_mut().reversed_axes(); azip!(mut a (a), b (b.t()) in { *a = b }); - } assert_eq!(a, b); @@ -34,7 +33,6 @@ fn main() { let mut sums = Array::zeros(nchunks); azip!(mut sums, ref a (a.exact_chunks(chunk_sz)) in { *sums = a.sum() }); - // Let's imagine we split to parallelize { let (x, y) = Zip::indexed(&mut a).split(); diff --git a/ndarray-rand/benches/bench.rs b/ndarray-rand/benches/bench.rs index 142bce394..8d47b7b3f 100644 --- a/ndarray-rand/benches/bench.rs +++ b/ndarray-rand/benches/bench.rs @@ -1,15 +1,15 @@ #![feature(test)] -extern crate rand; extern crate ndarray; extern crate ndarray_rand; +extern crate rand; extern crate test; -use rand::distributions::Uniform; -use rand::distributions::Normal; use ndarray::Array; use ndarray_rand::RandomExt; use ndarray_rand::F32; +use rand::distributions::Normal; +use rand::distributions::Uniform; use test::Bencher; diff --git a/ndarray-rand/src/lib.rs b/ndarray-rand/src/lib.rs index 8349eb14f..3ac9486f9 100644 --- a/ndarray-rand/src/lib.rs +++ b/ndarray-rand/src/lib.rs @@ -9,19 +9,15 @@ //! Constructors for randomized arrays. `rand` integration for `ndarray`. //! //! See [**`RandomExt`**](trait.RandomExt.html) for usage examples. 
-extern crate rand; extern crate ndarray; +extern crate rand; -use rand::{thread_rng, Rng, SeedableRng}; use rand::distributions::Distribution; use rand::rngs::SmallRng; +use rand::{thread_rng, Rng, SeedableRng}; -use ndarray::{ - ArrayBase, - Dimension, - DataOwned, -}; use ndarray::ShapeBuilder; +use ndarray::{ArrayBase, DataOwned, Dimension}; /// Constructors for n-dimensional arrays with random elements. /// @@ -37,8 +33,9 @@ use ndarray::ShapeBuilder; /// documentation for information. You can select a different RNG with /// [`.random_using()`](#tymethod.random_using). pub trait RandomExt - where S: DataOwned, - D: Dimension, +where + S: DataOwned, + D: Dimension, { /// Create an array with shape `dim` with elements drawn from /// `distribution` using the default RNG. @@ -63,26 +60,30 @@ pub trait RandomExt /// // [ 0.0914, 5.5186, 5.8135, 5.2361, 3.1879]] /// # } fn random(shape: Sh, distribution: IdS) -> ArrayBase - where IdS: Distribution, - Sh: ShapeBuilder; + where + IdS: Distribution, + Sh: ShapeBuilder; /// Create an array with shape `dim` with elements drawn from /// `distribution`, using a specific Rng `rng`. /// /// ***Panics*** if the number of elements overflows usize. fn random_using(shape: Sh, distribution: IdS, rng: &mut R) -> ArrayBase - where IdS: Distribution, - R: Rng + ?Sized, - Sh: ShapeBuilder; + where + IdS: Distribution, + R: Rng + ?Sized, + Sh: ShapeBuilder; } impl RandomExt for ArrayBase - where S: DataOwned, - D: Dimension, +where + S: DataOwned, + D: Dimension, { fn random(shape: Sh, dist: IdS) -> ArrayBase - where IdS: Distribution, - Sh: ShapeBuilder, + where + IdS: Distribution, + Sh: ShapeBuilder, { let mut rng = SmallRng::from_rng(thread_rng()).expect("create SmallRng from thread_rng failed"); @@ -90,9 +91,10 @@ impl RandomExt for ArrayBase } fn random_using(shape: Sh, dist: IdS, rng: &mut R) -> ArrayBase - where IdS: Distribution, - R: Rng + ?Sized, - Sh: ShapeBuilder, + where + IdS: Distribution, + R: Rng + ?Sized, + Sh: ShapeBuilder, { Self::from_shape_fn(shape, |_| dist.sample(rng)) } @@ -120,7 +122,8 @@ impl RandomExt for ArrayBase pub struct F32(pub S); impl Distribution for F32 - where S: Distribution +where + S: Distribution, { fn sample(&self, rng: &mut R) -> f32 { self.0.sample(rng) as f32 diff --git a/ndarray-rand/tests/tests.rs b/ndarray-rand/tests/tests.rs index aae89c543..621bd9d97 100644 --- a/ndarray-rand/tests/tests.rs +++ b/ndarray-rand/tests/tests.rs @@ -1,11 +1,10 @@ - -extern crate rand; extern crate ndarray; extern crate ndarray_rand; +extern crate rand; -use rand::distributions::Uniform; use ndarray::Array; use ndarray_rand::RandomExt; +use rand::distributions::Uniform; #[test] fn test_dim() { diff --git a/parallel/benches/rayon.rs b/parallel/benches/rayon.rs index 76c1789c9..629cb21c4 100644 --- a/parallel/benches/rayon.rs +++ b/parallel/benches/rayon.rs @@ -1,13 +1,12 @@ - #![feature(test)] extern crate num_cpus; extern crate test; use test::Bencher; -extern crate rayon; extern crate ndarray; extern crate ndarray_parallel; +extern crate rayon; use ndarray::prelude::*; use ndarray_parallel::prelude::*; @@ -21,12 +20,13 @@ use std::cmp::max; fn set_threads() { let n = max(1, num_cpus::get() / 2); //println!("Using {} threads", n); - let _ = rayon::ThreadPoolBuilder::new().num_threads(n).build_global(); + let _ = rayon::ThreadPoolBuilder::new() + .num_threads(n) + .build_global(); } #[bench] -fn map_exp_regular(bench: &mut Bencher) -{ +fn map_exp_regular(bench: &mut Bencher) { let mut a = Array2::::zeros((EXP_N, EXP_N)); 
a.swap_axes(0, 1); bench.iter(|| { @@ -35,8 +35,7 @@ fn map_exp_regular(bench: &mut Bencher) } #[bench] -fn rayon_exp_regular(bench: &mut Bencher) -{ +fn rayon_exp_regular(bench: &mut Bencher) { set_threads(); let mut a = Array2::::zeros((EXP_N, EXP_N)); a.swap_axes(0, 1); @@ -49,22 +48,18 @@ const FASTEXP: usize = EXP_N; #[inline] fn fastexp(x: f64) -> f64 { - let x = 1. + x/1024.; + let x = 1. + x / 1024.; x.powi(1024) } #[bench] -fn map_fastexp_regular(bench: &mut Bencher) -{ +fn map_fastexp_regular(bench: &mut Bencher) { let mut a = Array2::::zeros((FASTEXP, FASTEXP)); - bench.iter(|| { - a.mapv_inplace(|x| fastexp(x)) - }); + bench.iter(|| a.mapv_inplace(|x| fastexp(x))); } #[bench] -fn rayon_fastexp_regular(bench: &mut Bencher) -{ +fn rayon_fastexp_regular(bench: &mut Bencher) { set_threads(); let mut a = Array2::::zeros((FASTEXP, FASTEXP)); bench.iter(|| { @@ -73,18 +68,14 @@ fn rayon_fastexp_regular(bench: &mut Bencher) } #[bench] -fn map_fastexp_cut(bench: &mut Bencher) -{ +fn map_fastexp_cut(bench: &mut Bencher) { let mut a = Array2::::zeros((FASTEXP, FASTEXP)); let mut a = a.slice_mut(s![.., ..-1]); - bench.iter(|| { - a.mapv_inplace(|x| fastexp(x)) - }); + bench.iter(|| a.mapv_inplace(|x| fastexp(x))); } #[bench] -fn rayon_fastexp_cut(bench: &mut Bencher) -{ +fn rayon_fastexp_cut(bench: &mut Bencher) { set_threads(); let mut a = Array2::::zeros((FASTEXP, FASTEXP)); let mut a = a.slice_mut(s![.., ..-1]); @@ -94,8 +85,7 @@ fn rayon_fastexp_cut(bench: &mut Bencher) } #[bench] -fn map_fastexp_by_axis(bench: &mut Bencher) -{ +fn map_fastexp_by_axis(bench: &mut Bencher) { let mut a = Array2::::zeros((FASTEXP, FASTEXP)); bench.iter(|| { for mut sheet in a.axis_iter_mut(Axis(0)) { @@ -105,29 +95,29 @@ fn map_fastexp_by_axis(bench: &mut Bencher) } #[bench] -fn rayon_fastexp_by_axis(bench: &mut Bencher) -{ +fn rayon_fastexp_by_axis(bench: &mut Bencher) { set_threads(); let mut a = Array2::::zeros((FASTEXP, FASTEXP)); bench.iter(|| { - a.axis_iter_mut(Axis(0)).into_par_iter() + a.axis_iter_mut(Axis(0)) + .into_par_iter() .for_each(|mut sheet| sheet.mapv_inplace(fastexp)); }); } #[bench] -fn rayon_fastexp_zip(bench: &mut Bencher) -{ +fn rayon_fastexp_zip(bench: &mut Bencher) { set_threads(); let mut a = Array2::::zeros((FASTEXP, FASTEXP)); bench.iter(|| { - Zip::from(&mut a).into_par_iter().for_each(|(elt, )| *elt = fastexp(*elt)); + Zip::from(&mut a) + .into_par_iter() + .for_each(|(elt,)| *elt = fastexp(*elt)); }); } #[bench] -fn add(bench: &mut Bencher) -{ +fn add(bench: &mut Bencher) { let mut a = Array2::::zeros((ADDN, ADDN)); let b = Array2::::zeros((ADDN, ADDN)); let c = Array2::::zeros((ADDN, ADDN)); @@ -140,8 +130,7 @@ fn add(bench: &mut Bencher) } #[bench] -fn rayon_add(bench: &mut Bencher) -{ +fn rayon_add(bench: &mut Bencher) { set_threads(); let mut a = Array2::::zeros((ADDN, ADDN)); let b = Array2::::zeros((ADDN, ADDN)); diff --git a/parallel/src/ext_traits.rs b/parallel/src/ext_traits.rs index 11c79964d..80cb55bee 100644 --- a/parallel/src/ext_traits.rs +++ b/parallel/src/ext_traits.rs @@ -1,11 +1,4 @@ - -use ndarray::{ - Dimension, - NdProducer, - Zip, - ArrayBase, - DataMut, -}; +use ndarray::{ArrayBase, DataMut, Dimension, NdProducer, Zip}; use prelude::*; @@ -15,35 +8,38 @@ use prelude::*; pub trait ParMap { type Item; fn par_map_inplace(&mut self, f: F) - where F: Fn(&mut Self::Item) + Sync + Send; + where + F: Fn(&mut Self::Item) + Sync + Send; fn par_mapv_inplace(&mut self, f: F) - where F: Fn(Self::Item) -> Self::Item + Sync + Send, - Self::Item: Clone; + where + F: 
Fn(Self::Item) -> Self::Item + Sync + Send, + Self::Item: Clone; } impl ParMap for ArrayBase - where S: DataMut, - D: Dimension, - A: Send + Sync, +where + S: DataMut, + D: Dimension, + A: Send + Sync, { type Item = A; fn par_map_inplace(&mut self, f: F) - where F: Fn(&mut Self::Item) + Sync + Send + where + F: Fn(&mut Self::Item) + Sync + Send, { self.view_mut().into_par_iter().for_each(f) } fn par_mapv_inplace(&mut self, f: F) - where F: Fn(Self::Item) -> Self::Item + Sync + Send, - Self::Item: Clone + where + F: Fn(Self::Item) -> Self::Item + Sync + Send, + Self::Item: Clone, { - self.view_mut().into_par_iter() + self.view_mut() + .into_par_iter() .for_each(move |x| *x = f(x.clone())) } } - - - // Zip macro_rules! zip_impl { @@ -73,7 +69,7 @@ macro_rules! zip_impl { } } -zip_impl!{ +zip_impl! { [ParApply1 P1], [ParApply2 P1 P2], [ParApply3 P1 P2 P3], diff --git a/parallel/src/into_impls.rs b/parallel/src/into_impls.rs index 5c46299e4..9ac578d2e 100644 --- a/parallel/src/into_impls.rs +++ b/parallel/src/into_impls.rs @@ -1,11 +1,12 @@ -use ndarray::{Array, RcArray, Dimension, ArrayView, ArrayViewMut}; +use ndarray::{Array, ArrayView, ArrayViewMut, Dimension, RcArray}; use NdarrayIntoParallelIterator; use Parallel; impl<'a, A, D> NdarrayIntoParallelIterator for &'a Array - where D: Dimension, - A: Sync +where + D: Dimension, + A: Sync, { type Item = &'a A; type Iter = Parallel>; @@ -16,8 +17,9 @@ impl<'a, A, D> NdarrayIntoParallelIterator for &'a Array // This is allowed: goes through `.view()` impl<'a, A, D> NdarrayIntoParallelIterator for &'a RcArray - where D: Dimension, - A: Sync +where + D: Dimension, + A: Sync, { type Item = &'a A; type Iter = Parallel>; @@ -27,8 +29,9 @@ impl<'a, A, D> NdarrayIntoParallelIterator for &'a RcArray } impl<'a, A, D> NdarrayIntoParallelIterator for &'a mut Array - where D: Dimension, - A: Sync + Send +where + D: Dimension, + A: Sync + Send, { type Item = &'a mut A; type Iter = Parallel>; @@ -39,8 +42,9 @@ impl<'a, A, D> NdarrayIntoParallelIterator for &'a mut Array // This is allowed: goes through `.view_mut()`, which is unique access impl<'a, A, D> NdarrayIntoParallelIterator for &'a mut RcArray - where D: Dimension, - A: Sync + Send + Clone, +where + D: Dimension, + A: Sync + Send + Clone, { type Item = &'a mut A; type Iter = Parallel>; diff --git a/parallel/src/into_traits.rs b/parallel/src/into_traits.rs index 9abc630ba..0018756cc 100644 --- a/parallel/src/into_traits.rs +++ b/parallel/src/into_traits.rs @@ -1,26 +1,26 @@ - use rayon::iter::ParallelIterator; pub trait NdarrayIntoParallelIterator { - type Iter: ParallelIterator; + type Iter: ParallelIterator; type Item: Send; fn into_par_iter(self) -> Self::Iter; } pub trait NdarrayIntoParallelRefIterator<'x> { - type Iter: ParallelIterator; + type Iter: ParallelIterator; type Item: Send + 'x; fn par_iter(&'x self) -> Self::Iter; } pub trait NdarrayIntoParallelRefMutIterator<'x> { - type Iter: ParallelIterator; + type Iter: ParallelIterator; type Item: Send + 'x; fn par_iter_mut(&'x mut self) -> Self::Iter; } impl<'data, I: 'data + ?Sized> NdarrayIntoParallelRefIterator<'data> for I - where &'data I: NdarrayIntoParallelIterator +where + &'data I: NdarrayIntoParallelIterator, { type Iter = <&'data I as NdarrayIntoParallelIterator>::Iter; type Item = <&'data I as NdarrayIntoParallelIterator>::Item; @@ -31,7 +31,8 @@ impl<'data, I: 'data + ?Sized> NdarrayIntoParallelRefIterator<'data> for I } impl<'data, I: 'data + ?Sized> NdarrayIntoParallelRefMutIterator<'data> for I - where &'data mut I: 
NdarrayIntoParallelIterator +where + &'data mut I: NdarrayIntoParallelIterator, { type Iter = <&'data mut I as NdarrayIntoParallelIterator>::Iter; type Item = <&'data mut I as NdarrayIntoParallelIterator>::Item; diff --git a/parallel/src/lib.rs b/parallel/src/lib.rs index 74755ba21..86b5da4c7 100644 --- a/parallel/src/lib.rs +++ b/parallel/src/lib.rs @@ -94,7 +94,6 @@ //! ``` #![doc(html_root_url = "http://docs.rs/ndarray-parallel/0.9/")] - pub extern crate ndarray; pub extern crate rayon; @@ -108,28 +107,19 @@ pub mod prelude { pub use NdarrayIntoParallelRefMutIterator; #[doc(no_inline)] - pub use rayon::prelude::{ParallelIterator, IndexedParallelIterator}; + pub use rayon::prelude::{IndexedParallelIterator, ParallelIterator}; - pub use ext_traits::{ - ParApply1, - ParApply2, - ParApply3, - ParApply4, - ParApply5, - ParApply6, - }; pub use ext_traits::ParMap; + pub use ext_traits::{ParApply1, ParApply2, ParApply3, ParApply4, ParApply5, ParApply6}; } -pub use par::Parallel; pub use into_traits::{ - NdarrayIntoParallelIterator, - NdarrayIntoParallelRefIterator, - NdarrayIntoParallelRefMutIterator, + NdarrayIntoParallelIterator, NdarrayIntoParallelRefIterator, NdarrayIntoParallelRefMutIterator, }; +pub use par::Parallel; -mod par; mod ext_traits; -mod into_traits; mod into_impls; +mod into_traits; +mod par; mod zipmacro; diff --git a/parallel/src/par.rs b/parallel/src/par.rs index e7d4b2970..04bfdd07e 100644 --- a/parallel/src/par.rs +++ b/parallel/src/par.rs @@ -1,17 +1,16 @@ - -use rayon::iter::ParallelIterator; -use rayon::iter::IndexedParallelIterator; -use rayon::iter::plumbing::{Consumer, UnindexedConsumer}; use rayon::iter::plumbing::bridge; -use rayon::iter::plumbing::ProducerCallback; -use rayon::iter::plumbing::Producer; -use rayon::iter::plumbing::UnindexedProducer; use rayon::iter::plumbing::bridge_unindexed; use rayon::iter::plumbing::Folder; +use rayon::iter::plumbing::Producer; +use rayon::iter::plumbing::ProducerCallback; +use rayon::iter::plumbing::UnindexedProducer; +use rayon::iter::plumbing::{Consumer, UnindexedConsumer}; +use rayon::iter::IndexedParallelIterator; +use rayon::iter::ParallelIterator; use ndarray::iter::AxisIter; use ndarray::iter::AxisIterMut; -use ndarray::{Dimension}; +use ndarray::Dimension; use ndarray::{ArrayView, ArrayViewMut}; use super::NdarrayIntoParallelIterator; @@ -111,12 +110,9 @@ macro_rules! par_iter_wrapper { } } - par_iter_wrapper!(AxisIter, [Sync]); par_iter_wrapper!(AxisIterMut, [Send + Sync]); - - macro_rules! par_iter_view_wrapper { // thread_bounds are either Sync or Send + Sync ($view_name:ident, [$($thread_bounds:tt)*]) => { @@ -190,8 +186,7 @@ macro_rules! par_iter_view_wrapper { par_iter_view_wrapper!(ArrayView, [Sync]); par_iter_view_wrapper!(ArrayViewMut, [Sync + Send]); - -use ndarray::{Zip, NdProducer, FoldWhile}; +use ndarray::{FoldWhile, NdProducer, Zip}; macro_rules! zip_impl { ($([$($p:ident)*],)+) => { @@ -260,7 +255,7 @@ macro_rules! zip_impl { } } -zip_impl!{ +zip_impl! 
{ [P1], [P1 P2], [P1 P2 P3], diff --git a/parallel/tests/azip.rs b/parallel/tests/azip.rs index 32d8f4ba8..487552596 100644 --- a/parallel/tests/azip.rs +++ b/parallel/tests/azip.rs @@ -1,11 +1,10 @@ - +extern crate itertools; extern crate ndarray; extern crate ndarray_parallel; -extern crate itertools; +use itertools::enumerate; use ndarray::prelude::*; use ndarray_parallel::par_azip; -use itertools::{enumerate}; use std::sync::atomic::{AtomicUsize, Ordering}; #[test] @@ -19,7 +18,7 @@ fn test_par_azip1() { #[test] fn test_par_azip2() { let mut a = Array::zeros((5, 7)); - let b = Array::from_shape_fn(a.dim(), |(i, j)| 1. / (i + 2*j) as f32); + let b = Array::from_shape_fn(a.dim(), |(i, j)| 1. / (i + 2 * j) as f32); par_azip!(mut a, b in { *a = b; }); assert_eq!(a, b); } @@ -47,7 +46,7 @@ fn test_zip_dim_mismatch_1() { let mut a = Array::zeros((5, 7)); let mut d = a.raw_dim(); d[0] += 1; - let b = Array::from_shape_fn(d, |(i, j)| 1. / (i + 2*j) as f32); + let b = Array::from_shape_fn(d, |(i, j)| 1. / (i + 2 * j) as f32); par_azip!(mut a, b in { *a = b; }); } diff --git a/parallel/tests/rayon.rs b/parallel/tests/rayon.rs index ccae09f0e..9432f9edf 100644 --- a/parallel/tests/rayon.rs +++ b/parallel/tests/rayon.rs @@ -1,7 +1,6 @@ - -extern crate rayon; extern crate ndarray; extern crate ndarray_parallel; +extern crate rayon; use ndarray::prelude::*; use ndarray_parallel::prelude::*; @@ -23,9 +22,13 @@ fn test_axis_iter() { #[test] fn test_axis_iter_mut() { - let mut a = Array::linspace(0., 1.0f64, M * N).into_shape((M, N)).unwrap(); + let mut a = Array::linspace(0., 1.0f64, M * N) + .into_shape((M, N)) + .unwrap(); let b = a.mapv(|x| x.exp()); - a.axis_iter_mut(Axis(0)).into_par_iter().for_each(|mut v| v.mapv_inplace(|x| x.exp())); + a.axis_iter_mut(Axis(0)) + .into_par_iter() + .for_each(|mut v| v.mapv_inplace(|x| x.exp())); println!("{:?}", a.slice(s![..10, ..5])); assert!(a.all_close(&b, 0.001)); } diff --git a/parallel/tests/zip.rs b/parallel/tests/zip.rs index cca0cacd5..63cdd5e92 100644 --- a/parallel/tests/zip.rs +++ b/parallel/tests/zip.rs @@ -1,4 +1,3 @@ - extern crate ndarray; extern crate ndarray_parallel; @@ -14,20 +13,16 @@ const N: usize = 100; fn test_zip_1() { let mut a = Array2::::zeros((M, N)); - Zip::from(&mut a) - .par_apply(|x| { - *x = x.exp() - }); + Zip::from(&mut a).par_apply(|x| *x = x.exp()); } #[test] fn test_zip_index_1() { let mut a = Array2::default((10, 10)); - Zip::indexed(&mut a) - .par_apply(|i, x| { - *x = i; - }); + Zip::indexed(&mut a).par_apply(|i, x| { + *x = i; + }); for (i, elt) in a.indexed_iter() { assert_eq!(*elt, i); @@ -38,10 +33,9 @@ fn test_zip_index_1() { fn test_zip_index_2() { let mut a = Array2::default((M, N)); - Zip::indexed(&mut a) - .par_apply(|i, x| { - *x = i; - }); + Zip::indexed(&mut a).par_apply(|i, x| { + *x = i; + }); for (i, elt) in a.indexed_iter() { assert_eq!(*elt, i); @@ -52,10 +46,9 @@ fn test_zip_index_2() { fn test_zip_index_3() { let mut a = Array::default((1, 2, 1, 2, 3)); - Zip::indexed(&mut a) - .par_apply(|i, x| { - *x = i; - }); + Zip::indexed(&mut a).par_apply(|i, x| { + *x = i; + }); for (i, elt) in a.indexed_iter() { assert_eq!(*elt, i); @@ -67,12 +60,10 @@ fn test_zip_index_4() { let mut a = Array2::zeros((M, N)); let mut b = Array2::zeros((M, N)); - Zip::indexed(&mut a) - .and(&mut b) - .par_apply(|(i, j), x, y| { - *x = i; - *y = j; - }); + Zip::indexed(&mut a).and(&mut b).par_apply(|(i, j), x, y| { + *x = i; + *y = j; + }); for ((i, _), elt) in a.indexed_iter() { assert_eq!(*elt, i); diff --git 
a/serialization-tests/src/lib.rs b/serialization-tests/src/lib.rs index e69de29bb..8b1378917 100644 --- a/serialization-tests/src/lib.rs +++ b/serialization-tests/src/lib.rs @@ -0,0 +1 @@ + diff --git a/serialization-tests/tests/serialize.rs b/serialization-tests/tests/serialize.rs index d0a2f3b79..398d958a8 100644 --- a/serialization-tests/tests/serialize.rs +++ b/serialization-tests/tests/serialize.rs @@ -9,12 +9,10 @@ extern crate rmp_serde; #[cfg(feature = "ron")] extern crate ron; - -use ndarray::{arr0, arr1, arr2, s, RcArray, RcArray1, RcArray2, ArrayD, IxDyn}; +use ndarray::{arr0, arr1, arr2, s, ArrayD, IxDyn, RcArray, RcArray1, RcArray2}; #[test] -fn serial_many_dim_serde() -{ +fn serial_many_dim_serde() { { let a = arr0::(2.72); let serial = serde_json::to_string(&a).unwrap(); @@ -58,8 +56,7 @@ fn serial_many_dim_serde() } #[test] -fn serial_ixdyn_serde() -{ +fn serial_ixdyn_serde() { { let a = arr0::(2.72).into_dyn(); let serial = serde_json::to_string(&a).unwrap(); @@ -80,7 +77,8 @@ fn serial_ixdyn_serde() { let a = arr2(&[[3., 1., 2.2], [3.1, 4., 7.]]) - .into_shape(IxDyn(&[3, 1, 1, 1, 2, 1])).unwrap(); + .into_shape(IxDyn(&[3, 1, 1, 1, 2, 1])) + .unwrap(); let serial = serde_json::to_string(&a).unwrap(); println!("Serde encode {:?} => {:?}", a, serial); let res = serde_json::from_str::>(&serial); @@ -94,12 +92,10 @@ fn serial_ixdyn_serde() let b = serde_json::from_str::>(text); assert_eq!(a, b.unwrap()); } - } #[test] -fn serial_wrong_count_serde() -{ +fn serial_wrong_count_serde() { // one element too few let text = r##"{"v":1,"dim":[2,3],"data":[3,1,2.2,3.1,4]}"##; let arr = serde_json::from_str::>(text); @@ -114,13 +110,14 @@ fn serial_wrong_count_serde() } #[test] -fn serial_many_dim_serde_msgpack() -{ +fn serial_many_dim_serde_msgpack() { { let a = arr0::(2.72); let mut buf = Vec::new(); - serde::Serialize::serialize(&a, &mut rmp_serde::Serializer::new(&mut buf)).ok().unwrap(); + serde::Serialize::serialize(&a, &mut rmp_serde::Serializer::new(&mut buf)) + .ok() + .unwrap(); let mut deserializer = rmp_serde::Deserializer::new(&buf[..]); let a_de: RcArray = serde::Deserialize::deserialize(&mut deserializer).unwrap(); @@ -132,7 +129,9 @@ fn serial_many_dim_serde_msgpack() let a = arr1::(&[2.72, 1., 2.]); let mut buf = Vec::new(); - serde::Serialize::serialize(&a, &mut rmp_serde::Serializer::new(&mut buf)).ok().unwrap(); + serde::Serialize::serialize(&a, &mut rmp_serde::Serializer::new(&mut buf)) + .ok() + .unwrap(); let mut deserializer = rmp_serde::Deserializer::new(&buf[..]); let a_de: RcArray = serde::Deserialize::deserialize(&mut deserializer).unwrap(); @@ -144,7 +143,9 @@ fn serial_many_dim_serde_msgpack() let a = arr2(&[[3., 1., 2.2], [3.1, 4., 7.]]); let mut buf = Vec::new(); - serde::Serialize::serialize(&a, &mut rmp_serde::Serializer::new(&mut buf)).ok().unwrap(); + serde::Serialize::serialize(&a, &mut rmp_serde::Serializer::new(&mut buf)) + .ok() + .unwrap(); let mut deserializer = rmp_serde::Deserializer::new(&buf[..]); let a_de: RcArray = serde::Deserialize::deserialize(&mut deserializer).unwrap(); @@ -158,7 +159,9 @@ fn serial_many_dim_serde_msgpack() a.slice_collapse(s![..;-1, .., .., ..2]); let mut buf = Vec::new(); - serde::Serialize::serialize(&a, &mut rmp_serde::Serializer::new(&mut buf)).ok().unwrap(); + serde::Serialize::serialize(&a, &mut rmp_serde::Serializer::new(&mut buf)) + .ok() + .unwrap(); let mut deserializer = rmp_serde::Deserializer::new(&buf[..]); let a_de: RcArray = serde::Deserialize::deserialize(&mut deserializer).unwrap(); @@ -169,10 
+172,9 @@ fn serial_many_dim_serde_msgpack() #[test] #[cfg(feature = "ron")] -fn serial_many_dim_ron() -{ - use ron::ser::to_string as ron_serialize; +fn serial_many_dim_ron() { use ron::de::from_str as ron_deserialize; + use ron::ser::to_string as ron_serialize; { let a = arr0::(2.72); diff --git a/src/aliases.rs b/src/aliases.rs index 6cd6f29e2..1c82120a4 100644 --- a/src/aliases.rs +++ b/src/aliases.rs @@ -1,37 +1,40 @@ //! Type aliases for common array sizes //! -#[allow(deprecated)] -use crate::{ - Ix, - Array, - ArrayView, - ArrayViewMut, - RcArray, - IxDynImpl, -}; use crate::dimension::Dim; +#[allow(deprecated)] +use crate::{Array, ArrayView, ArrayViewMut, Ix, IxDynImpl, RcArray}; /// Create a zero-dimensional index #[allow(non_snake_case)] #[inline(always)] -pub fn Ix0() -> Ix0 { Dim::new([]) } +pub fn Ix0() -> Ix0 { + Dim::new([]) +} /// Create a one-dimensional index #[allow(non_snake_case)] #[inline(always)] -pub fn Ix1(i0: Ix) -> Ix1 { Dim::new([i0]) } +pub fn Ix1(i0: Ix) -> Ix1 { + Dim::new([i0]) +} /// Create a two-dimensional index #[allow(non_snake_case)] #[inline(always)] -pub fn Ix2(i0: Ix, i1: Ix) -> Ix2 { Dim::new([i0, i1]) } +pub fn Ix2(i0: Ix, i1: Ix) -> Ix2 { + Dim::new([i0, i1]) +} /// Create a three-dimensional index #[allow(non_snake_case)] #[inline(always)] -pub fn Ix3(i0: Ix, i1: Ix, i2: Ix) -> Ix3 { Dim::new([i0, i1, i2]) } +pub fn Ix3(i0: Ix, i1: Ix, i2: Ix) -> Ix3 { + Dim::new([i0, i1, i2]) +} /// Create a four-dimensional index #[allow(non_snake_case)] #[inline(always)] -pub fn Ix4(i0: Ix, i1: Ix, i2: Ix, i3: Ix) -> Ix4 { Dim::new([i0, i1, i2, i3]) } +pub fn Ix4(i0: Ix, i1: Ix, i2: Ix, i3: Ix) -> Ix4 { + Dim::new([i0, i1, i2, i3]) +} /// Create a five-dimensional index #[allow(non_snake_case)] #[inline(always)] @@ -149,9 +152,9 @@ pub type ArrayViewMutD<'a, A> = ArrayViewMut<'a, A, IxDyn>; /// one-dimensional shared ownership array #[allow(deprecated)] -#[deprecated(note="`RcArray` has been renamed to `ArcArray`")] +#[deprecated(note = "`RcArray` has been renamed to `ArcArray`")] pub type RcArray1 = RcArray; /// two-dimensional shared ownership array #[allow(deprecated)] -#[deprecated(note="`RcArray` has been renamed to `ArcArray`")] +#[deprecated(note = "`RcArray` has been renamed to `ArcArray`")] pub type RcArray2 = RcArray; diff --git a/src/array_serde.rs b/src/array_serde.rs index a3bf5bcc0..468be50ff 100644 --- a/src/array_serde.rs +++ b/src/array_serde.rs @@ -5,9 +5,9 @@ // , at your // option. This file may not be copied, modified, or distributed // except according to those terms. -use serde::{Serialize, Serializer, Deserialize, Deserializer}; -use serde::de::{self, Visitor, SeqAccess, MapAccess}; +use serde::de::{self, MapAccess, SeqAccess, Visitor}; use serde::ser::{SerializeSeq, SerializeStruct}; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; use std::fmt; use std::marker::PhantomData; @@ -21,7 +21,8 @@ use crate::IntoDimension; /// Verifies that the version of the deserialized array matches the current /// `ARRAY_FORMAT_VERSION`. 
pub fn verify_version(v: u8) -> Result<(), E> - where E: de::Error +where + E: de::Error, { if v != ARRAY_FORMAT_VERSION { let err_msg = format!("unknown array version: {}", v); @@ -33,10 +34,12 @@ pub fn verify_version(v: u8) -> Result<(), E> /// **Requires crate feature `"serde-1"`** impl Serialize for Dim - where I: Serialize, +where + I: Serialize, { fn serialize(&self, serializer: Se) -> Result - where Se: Serializer + where + Se: Serializer, { self.ix().serialize(serializer) } @@ -44,30 +47,32 @@ impl Serialize for Dim /// **Requires crate feature `"serde-1"`** impl<'de, I> Deserialize<'de> for Dim - where I: Deserialize<'de>, +where + I: Deserialize<'de>, { fn deserialize(deserializer: D) -> Result - where D: Deserializer<'de> + where + D: Deserializer<'de>, { I::deserialize(deserializer).map(Dim::new) } } /// **Requires crate feature `"serde-1"`** -impl Serialize for IxDyn -{ +impl Serialize for IxDyn { fn serialize(&self, serializer: Se) -> Result - where Se: Serializer + where + Se: Serializer, { self.ix().serialize(serializer) } } /// **Requires crate feature `"serde-1"`** -impl<'de> Deserialize<'de> for IxDyn -{ +impl<'de> Deserialize<'de> for IxDyn { fn deserialize(deserializer: D) -> Result - where D: Deserializer<'de> + where + D: Deserializer<'de>, { let v = Vec::::deserialize(deserializer)?; Ok(v.into_dimension()) @@ -76,13 +81,14 @@ impl<'de> Deserialize<'de> for IxDyn /// **Requires crate feature `"serde-1"`** impl Serialize for ArrayBase - where A: Serialize, - D: Dimension + Serialize, - S: Data - +where + A: Serialize, + D: Dimension + Serialize, + S: Data, { fn serialize(&self, serializer: Se) -> Result - where Se: Serializer + where + Se: Serializer, { let mut state = serializer.serialize_struct("Array", 3)?; state.serialize_field("v", &ARRAY_FORMAT_VERSION)?; @@ -96,11 +102,13 @@ impl Serialize for ArrayBase struct Sequence<'a, A: 'a, D>(Iter<'a, A, D>); impl<'a, A, D> Serialize for Sequence<'a, A, D> - where A: Serialize, - D: Dimension + Serialize +where + A: Serialize, + D: Dimension + Serialize, { fn serialize(&self, serializer: S) -> Result - where S: Serializer + where + S: Serializer, { let iter = &self.0; let mut seq = serializer.serialize_seq(Some(iter.len()))?; @@ -124,7 +132,10 @@ enum ArrayField { impl ArrayVisitor { pub fn new() -> Self { - ArrayVisitor { _marker_a: PhantomData, _marker_b: PhantomData, } + ArrayVisitor { + _marker_a: PhantomData, + _marker_b: PhantomData, + } } } @@ -132,12 +143,14 @@ static ARRAY_FIELDS: &'static [&'static str] = &["v", "dim", "data"]; /// **Requires crate feature `"serde-1"`** impl<'de, A, Di, S> Deserialize<'de> for ArrayBase - where A: Deserialize<'de>, - Di: Deserialize<'de> + Dimension, - S: DataOwned +where + A: Deserialize<'de>, + Di: Deserialize<'de> + Dimension, + S: DataOwned, { fn deserialize(deserializer: D) -> Result, D::Error> - where D: Deserializer<'de> + where + D: Deserializer<'de>, { deserializer.deserialize_struct("Array", ARRAY_FIELDS, ArrayVisitor::new()) } @@ -145,7 +158,8 @@ impl<'de, A, Di, S> Deserialize<'de> for ArrayBase impl<'de> Deserialize<'de> for ArrayField { fn deserialize(deserializer: D) -> Result - where D: Deserializer<'de> + where + D: Deserializer<'de>, { struct ArrayFieldVisitor; @@ -157,7 +171,8 @@ impl<'de> Deserialize<'de> for ArrayField { } fn visit_str(self, value: &str) -> Result - where E: de::Error + where + E: de::Error, { match value { "v" => Ok(ArrayField::Version), @@ -168,13 +183,17 @@ impl<'de> Deserialize<'de> for ArrayField { } fn visit_bytes(self, value: &[u8]) 
-> Result - where E: de::Error + where + E: de::Error, { match value { b"v" => Ok(ArrayField::Version), b"dim" => Ok(ArrayField::Dim), b"data" => Ok(ArrayField::Data), - other => Err(de::Error::unknown_field(&format!("{:?}",other), ARRAY_FIELDS)), + other => Err(de::Error::unknown_field( + &format!("{:?}", other), + ARRAY_FIELDS, + )), } } } @@ -183,10 +202,11 @@ impl<'de> Deserialize<'de> for ArrayField { } } -impl<'de, A, Di, S> Visitor<'de> for ArrayVisitor - where A: Deserialize<'de>, - Di: Deserialize<'de> + Dimension, - S: DataOwned +impl<'de, A, Di, S> Visitor<'de> for ArrayVisitor +where + A: Deserialize<'de>, + Di: Deserialize<'de> + Dimension, + S: DataOwned, { type Value = ArrayBase; @@ -195,7 +215,8 @@ impl<'de, A, Di, S> Visitor<'de> for ArrayVisitor } fn visit_seq(self, mut visitor: V) -> Result, V::Error> - where V: SeqAccess<'de>, + where + V: SeqAccess<'de>, { let v: u8 = match visitor.next_element()? { Some(value) => value, @@ -228,7 +249,8 @@ impl<'de, A, Di, S> Visitor<'de> for ArrayVisitor } fn visit_map(self, mut visitor: V) -> Result, V::Error> - where V: MapAccess<'de>, + where + V: MapAccess<'de>, { let mut v: Option = None; let mut data: Option> = None; @@ -240,13 +262,13 @@ impl<'de, A, Di, S> Visitor<'de> for ArrayVisitor let val = visitor.next_value()?; verify_version(val)?; v = Some(val); - }, + } ArrayField::Data => { data = Some(visitor.next_value()?); - }, + } ArrayField::Dim => { dim = Some(visitor.next_value()?); - }, + } } } diff --git a/src/arrayformat.rs b/src/arrayformat.rs index d13add8f7..075ebc982 100644 --- a/src/arrayformat.rs +++ b/src/arrayformat.rs @@ -5,21 +5,19 @@ // , at your // option. This file may not be copied, modified, or distributed // except according to those terms. -use std::fmt; -use super::{ - ArrayBase, - Data, - Dimension, - NdProducer, -}; +use super::{ArrayBase, Data, Dimension, NdProducer}; use crate::dimension::IntoDimension; +use std::fmt; -fn format_array(view: &ArrayBase, f: &mut fmt::Formatter, - mut format: F) - -> fmt::Result - where F: FnMut(&A, &mut fmt::Formatter) -> fmt::Result, - D: Dimension, - S: Data, +fn format_array( + view: &ArrayBase, + f: &mut fmt::Formatter, + mut format: F, +) -> fmt::Result +where + F: FnMut(&A, &mut fmt::Formatter) -> fmt::Result, + D: Dimension, + S: Data, { let ndim = view.dim.slice().len(); /* private nowadays @@ -42,11 +40,13 @@ fn format_array(view: &ArrayBase, f: &mut fmt::Formatter, let index = index.into_dimension(); let take_n = if ndim == 0 { 1 } else { ndim - 1 }; let mut update_index = false; - for (i, (a, b)) in index.slice() - .iter() - .take(take_n) - .zip(last_index.slice().iter()) - .enumerate() { + for (i, (a, b)) in index + .slice() + .iter() + .take(take_n) + .zip(last_index.slice().iter()) + .enumerate() + { if a != b { // New row. // # of ['s needed @@ -89,7 +89,8 @@ fn format_array(view: &ArrayBase, f: &mut fmt::Formatter, /// /// The array is shown in multiline style. impl<'a, A: fmt::Display, S, D: Dimension> fmt::Display for ArrayBase - where S: Data, +where + S: Data, { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { format_array(self, f, <_>::fmt) @@ -101,13 +102,19 @@ impl<'a, A: fmt::Display, S, D: Dimension> fmt::Display for ArrayBase /// /// The array is shown in multiline style. 
impl<'a, A: fmt::Debug, S, D: Dimension> fmt::Debug for ArrayBase - where S: Data, +where + S: Data, { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { // Add extra information for Debug format_array(self, f, <_>::fmt)?; - write!(f, " shape={:?}, strides={:?}, layout={:?}", - self.shape(), self.strides(), layout=self.view().layout())?; + write!( + f, + " shape={:?}, strides={:?}, layout={:?}", + self.shape(), + self.strides(), + layout = self.view().layout() + )?; match D::NDIM { Some(ndim) => write!(f, ", const ndim={}", ndim)?, None => write!(f, ", dynamic ndim={}", self.ndim())?, @@ -121,7 +128,8 @@ impl<'a, A: fmt::Debug, S, D: Dimension> fmt::Debug for ArrayBase /// /// The array is shown in multiline style. impl<'a, A: fmt::LowerExp, S, D: Dimension> fmt::LowerExp for ArrayBase - where S: Data, +where + S: Data, { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { format_array(self, f, <_>::fmt) @@ -133,7 +141,8 @@ impl<'a, A: fmt::LowerExp, S, D: Dimension> fmt::LowerExp for ArrayBase /// /// The array is shown in multiline style. impl<'a, A: fmt::UpperExp, S, D: Dimension> fmt::UpperExp for ArrayBase - where S: Data, +where + S: Data, { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { format_array(self, f, <_>::fmt) @@ -144,7 +153,8 @@ impl<'a, A: fmt::UpperExp, S, D: Dimension> fmt::UpperExp for ArrayBase /// /// The array is shown in multiline style. impl<'a, A: fmt::LowerHex, S, D: Dimension> fmt::LowerHex for ArrayBase - where S: Data, +where + S: Data, { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { format_array(self, f, <_>::fmt) @@ -156,7 +166,8 @@ impl<'a, A: fmt::LowerHex, S, D: Dimension> fmt::LowerHex for ArrayBase /// /// The array is shown in multiline style. impl<'a, A: fmt::Binary, S, D: Dimension> fmt::Binary for ArrayBase - where S: Data, +where + S: Data, { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { format_array(self, f, <_>::fmt) diff --git a/src/arraytraits.rs b/src/arraytraits.rs index 13b5ce6c8..4877adfe0 100644 --- a/src/arraytraits.rs +++ b/src/arraytraits.rs @@ -10,22 +10,14 @@ use std::hash; use std::iter::FromIterator; use std::iter::IntoIterator; use std::mem; -use std::ops::{ - Index, - IndexMut, -}; +use std::ops::{Index, IndexMut}; use crate::imp_prelude::*; -use crate::iter::{ - Iter, - IterMut, -}; -use crate::{ - NdIndex, -}; +use crate::iter::{Iter, IterMut}; +use crate::NdIndex; use crate::numeric_util; -use crate::{Zip, FoldWhile}; +use crate::{FoldWhile, Zip}; #[cold] #[inline(never)] @@ -35,9 +27,10 @@ pub(crate) fn array_out_of_bounds() -> ! { #[inline(always)] pub fn debug_bounds_check(_a: &ArrayBase, _index: &I) - where D: Dimension, - I: NdIndex, - S: Data, +where + D: Dimension, + I: NdIndex, + S: Data, { debug_bounds_check!(_a, *_index); } @@ -46,17 +39,21 @@ pub fn debug_bounds_check(_a: &ArrayBase, _index: &I) /// /// **Panics** if index is out of bounds. impl Index for ArrayBase - where D: Dimension, - I: NdIndex, - S: Data, +where + D: Dimension, + I: NdIndex, + S: Data, { type Output = S::Elem; #[inline] fn index(&self, index: I) -> &S::Elem { debug_bounds_check!(self, index); unsafe { - &*self.ptr.offset(index.index_checked(&self.dim, &self.strides) - .unwrap_or_else(|| array_out_of_bounds())) + &*self.ptr.offset( + index + .index_checked(&self.dim, &self.strides) + .unwrap_or_else(|| array_out_of_bounds()), + ) } } } @@ -65,16 +62,20 @@ impl Index for ArrayBase /// /// **Panics** if index is out of bounds. 
impl IndexMut for ArrayBase - where D: Dimension, - I: NdIndex, - S: DataMut, +where + D: Dimension, + I: NdIndex, + S: DataMut, { #[inline] fn index_mut(&mut self, index: I) -> &mut S::Elem { debug_bounds_check!(self, index); unsafe { - &mut *self.as_mut_ptr().offset(index.index_checked(&self.dim, &self.strides) - .unwrap_or_else(|| array_out_of_bounds())) + &mut *self.as_mut_ptr().offset( + index + .index_checked(&self.dim, &self.strides) + .unwrap_or_else(|| array_out_of_bounds()), + ) } } } @@ -82,10 +83,11 @@ impl IndexMut for ArrayBase /// Return `true` if the array shapes and all elements of `self` and /// `rhs` are equal. Return `false` otherwise. impl PartialEq> for ArrayBase - where D: Dimension, - S: Data, - S2: Data, - S::Elem: PartialEq +where + D: Dimension, + S: Data, + S2: Data, + S::Elem: PartialEq, { fn eq(&self, rhs: &ArrayBase) -> bool { if self.shape() != rhs.shape() { @@ -98,34 +100,41 @@ impl PartialEq> for ArrayBase } Zip::from(self) .and(rhs) - .fold_while(true, |_, a, b| - if a != b { - FoldWhile::Done(false) - } else { - FoldWhile::Continue(true) - }).into_inner() + .fold_while(true, |_, a, b| { + if a != b { + FoldWhile::Done(false) + } else { + FoldWhile::Continue(true) + } + }) + .into_inner() } } impl Eq for ArrayBase - where D: Dimension, - S: Data, - S::Elem: Eq, -{ } +where + D: Dimension, + S: Data, + S::Elem: Eq, +{ +} impl FromIterator for ArrayBase - where S: DataOwned +where + S: DataOwned, { fn from_iter(iterable: I) -> ArrayBase - where I: IntoIterator, + where + I: IntoIterator, { ArrayBase::from_iter(iterable) } } impl<'a, S, D> IntoIterator for &'a ArrayBase - where D: Dimension, - S: Data, +where + D: Dimension, + S: Data, { type Item = &'a S::Elem; type IntoIter = Iter<'a, S::Elem, D>; @@ -136,8 +145,9 @@ impl<'a, S, D> IntoIterator for &'a ArrayBase } impl<'a, S, D> IntoIterator for &'a mut ArrayBase - where D: Dimension, - S: DataMut +where + D: Dimension, + S: DataMut, { type Item = &'a mut S::Elem; type IntoIter = IterMut<'a, S::Elem, D>; @@ -148,7 +158,8 @@ impl<'a, S, D> IntoIterator for &'a mut ArrayBase } impl<'a, A, D> IntoIterator for ArrayView<'a, A, D> - where D: Dimension +where + D: Dimension, { type Item = &'a A; type IntoIter = Iter<'a, A, D>; @@ -159,7 +170,8 @@ impl<'a, A, D> IntoIterator for ArrayView<'a, A, D> } impl<'a, A, D> IntoIterator for ArrayViewMut<'a, A, D> - where D: Dimension +where + D: Dimension, { type Item = &'a mut A; type IntoIter = IterMut<'a, A, D>; @@ -170,9 +182,10 @@ impl<'a, A, D> IntoIterator for ArrayViewMut<'a, A, D> } impl<'a, S, D> hash::Hash for ArrayBase - where D: Dimension, - S: Data, - S::Elem: hash::Hash +where + D: Dimension, + S: Data, + S::Elem: hash::Hash, { // Note: elements are hashed in the logical order fn hash(&self, state: &mut H) { @@ -200,23 +213,29 @@ impl<'a, S, D> hash::Hash for ArrayBase /// `ArrayBase` is `Sync` when the storage type is. unsafe impl Sync for ArrayBase - where S: Sync + Data, D: Sync -{ } +where + S: Sync + Data, + D: Sync, +{ +} /// `ArrayBase` is `Send` when the storage type is. unsafe impl Send for ArrayBase - where S: Send + Data, D: Send -{ } +where + S: Send + Data, + D: Send, +{ +} #[cfg(any(feature = "serde"))] // Use version number so we can add a packed format later. pub const ARRAY_FORMAT_VERSION: u8 = 1u8; - // use "raw" form instead of type aliases here so that they show up in docs /// Implementation of `ArrayView::from(&S)` where `S` is a slice or slicable. 
impl<'a, A, Slice: ?Sized> From<&'a Slice> for ArrayView<'a, A, Ix1> - where Slice: AsRef<[A]> +where + Slice: AsRef<[A]>, { /// Create a one-dimensional read-only array view of the data in `slice`. /// @@ -229,16 +248,15 @@ impl<'a, A, Slice: ?Sized> From<&'a Slice> for ArrayView<'a, A, Ix1> "Slice length must fit in `isize`.", ); } - unsafe { - Self::from_shape_ptr(xs.len(), xs.as_ptr()) - } + unsafe { Self::from_shape_ptr(xs.len(), xs.as_ptr()) } } } /// Implementation of `ArrayView::from(&A)` where `A` is an array. impl<'a, A, S, D> From<&'a ArrayBase> for ArrayView<'a, A, D> - where S: Data, - D: Dimension, +where + S: Data, + D: Dimension, { /// Create a read-only array view of the array. fn from(array: &'a ArrayBase) -> Self { @@ -248,7 +266,8 @@ impl<'a, A, S, D> From<&'a ArrayBase> for ArrayView<'a, A, D> /// Implementation of `ArrayViewMut::from(&mut S)` where `S` is a slice or slicable. impl<'a, A, Slice: ?Sized> From<&'a mut Slice> for ArrayViewMut<'a, A, Ix1> - where Slice: AsMut<[A]> +where + Slice: AsMut<[A]>, { /// Create a one-dimensional read-write array view of the data in `slice`. /// @@ -261,16 +280,15 @@ impl<'a, A, Slice: ?Sized> From<&'a mut Slice> for ArrayViewMut<'a, A, Ix1> "Slice length must fit in `isize`.", ); } - unsafe { - Self::from_shape_ptr(xs.len(), xs.as_mut_ptr()) - } + unsafe { Self::from_shape_ptr(xs.len(), xs.as_mut_ptr()) } } } /// Implementation of `ArrayViewMut::from(&mut A)` where `A` is an array. impl<'a, A, S, D> From<&'a mut ArrayBase> for ArrayViewMut<'a, A, D> - where S: DataMut, - D: Dimension, +where + S: DataMut, + D: Dimension, { /// Create a read-write array view of the array. fn from(array: &'a mut ArrayBase) -> Self { @@ -299,11 +317,17 @@ impl<'a, A, S, D> From<&'a mut ArrayBase> for ArrayViewMut<'a, A, D> /// ); /// /// ``` -pub trait AsArray<'a, A: 'a, D = Ix1> : Into> where D: Dimension { } +pub trait AsArray<'a, A: 'a, D = Ix1>: Into> +where + D: Dimension, +{ +} impl<'a, A: 'a, D, T> AsArray<'a, A, D> for T - where T: Into>, - D: Dimension, -{ } +where + T: Into>, + D: Dimension, +{ +} /// Create an owned array with a default state. /// @@ -318,9 +342,10 @@ impl<'a, A: 'a, D, T> AsArray<'a, A, D> for T /// Since arrays cannot grow, the intention is to use the default value as /// placeholder. impl Default for ArrayBase - where S: DataOwned, - D: Dimension, - A: Default, +where + S: DataOwned, + D: Dimension, + A: Default, { // NOTE: We can implement Default for non-zero dimensional array views by // using an empty slice, however we need a trait for nonzero Dimension. diff --git a/src/data_traits.rs b/src/data_traits.rs index bd4522968..3ea3125ce 100644 --- a/src/data_traits.rs +++ b/src/data_traits.rs @@ -11,15 +11,7 @@ use std::mem::{self, size_of}; use std::sync::Arc; -use crate::{ - ArrayBase, - Dimension, - RawViewRepr, - ViewRepr, - OwnedRepr, - OwnedRcRepr, - OwnedArcRepr, -}; +use crate::{ArrayBase, Dimension, OwnedArcRepr, OwnedRcRepr, OwnedRepr, RawViewRepr, ViewRepr}; /// Array representation trait. /// @@ -30,7 +22,7 @@ use crate::{ /// ***Note:*** `RawData` is not an extension interface at this point. /// Traits in Rust can serve many different roles. This trait is public because /// it is used as a bound on public methods. -pub unsafe trait RawData : Sized { +pub unsafe trait RawData: Sized { /// The array element type. type Elem; @@ -38,7 +30,7 @@ pub unsafe trait RawData : Sized { // This method is only used for debugging fn _data_slice(&self) -> Option<&[Self::Elem]>; - private_decl!{} + private_decl! 
{} } /// Array representation trait. @@ -46,15 +38,16 @@ pub unsafe trait RawData : Sized { /// For an array with writable elements. /// /// ***Internal trait, see `RawData`.*** -pub unsafe trait RawDataMut : RawData { +pub unsafe trait RawDataMut: RawData { /// If possible, ensures that the array has unique access to its data. /// /// If `Self` provides safe mutable access to array elements, then it /// **must** panic or ensure that the data is unique. #[doc(hidden)] fn try_ensure_unique(_: &mut ArrayBase) - where Self: Sized, - D: Dimension; + where + Self: Sized, + D: Dimension; /// If possible, returns whether the array has unique access to its data. /// @@ -69,13 +62,17 @@ pub unsafe trait RawDataMut : RawData { /// An array representation that can be cloned. /// /// ***Internal trait, see `RawData`.*** -pub unsafe trait RawDataClone : RawData { +pub unsafe trait RawDataClone: RawData { #[doc(hidden)] /// Unsafe because, `ptr` must point inside the current storage. unsafe fn clone_with_ptr(&self, ptr: *mut Self::Elem) -> (Self, *mut Self::Elem); #[doc(hidden)] - unsafe fn clone_from_with_ptr(&mut self, other: &Self, ptr: *mut Self::Elem) -> *mut Self::Elem { + unsafe fn clone_from_with_ptr( + &mut self, + other: &Self, + ptr: *mut Self::Elem, + ) -> *mut Self::Elem { let (data, ptr) = other.clone_with_ptr(ptr); *self = data; ptr @@ -87,7 +84,7 @@ pub unsafe trait RawDataClone : RawData { /// For an array with elements that can be accessed with safe code. /// /// ***Internal trait, see `RawData`.*** -pub unsafe trait Data : RawData { +pub unsafe trait Data: RawData { /// Converts the array to a uniquely owned array, cloning elements if necessary. #[doc(hidden)] fn into_owned(self_: ArrayBase) -> ArrayBase, D> @@ -108,13 +105,14 @@ pub unsafe trait Data : RawData { // `RawDataMut::try_ensure_unique` implementation always panics or ensures that // the data is unique. You are also guaranteeing that `try_is_unique` always // returns `Some(_)`. -pub unsafe trait DataMut : Data + RawDataMut { +pub unsafe trait DataMut: Data + RawDataMut { /// Ensures that the array has unique access to its data. #[doc(hidden)] #[inline] fn ensure_unique(self_: &mut ArrayBase) - where Self: Sized, - D: Dimension + where + Self: Sized, + D: Dimension, { Self::try_ensure_unique(self_) } @@ -133,8 +131,8 @@ pub unsafe trait DataMut : Data + RawDataMut { /// accessed with safe code. /// /// ***Internal trait, see `Data`.*** -#[deprecated(note="use `Data + RawDataClone` instead", since="0.13")] -pub trait DataClone : Data + RawDataClone {} +#[deprecated(note = "use `Data + RawDataClone` instead", since = "0.13")] +pub trait DataClone: Data + RawDataClone {} #[allow(deprecated)] impl DataClone for T where T: Data + RawDataClone {} @@ -144,7 +142,7 @@ unsafe impl RawData for RawViewRepr<*const A> { fn _data_slice(&self) -> Option<&[A]> { None } - private_impl!{} + private_impl! {} } unsafe impl RawDataClone for RawViewRepr<*const A> { @@ -158,15 +156,17 @@ unsafe impl RawData for RawViewRepr<*mut A> { fn _data_slice(&self) -> Option<&[A]> { None } - private_impl!{} + private_impl! {} } unsafe impl RawDataMut for RawViewRepr<*mut A> { #[inline] fn try_ensure_unique(_: &mut ArrayBase) - where Self: Sized, - D: Dimension - {} + where + Self: Sized, + D: Dimension, + { + } #[inline] fn try_is_unique(&mut self) -> Option { @@ -185,7 +185,7 @@ unsafe impl RawData for OwnedArcRepr { fn _data_slice(&self) -> Option<&[A]> { Some(&self.0) } - private_impl!{} + private_impl! 
{} } // NOTE: Copy on write @@ -194,8 +194,9 @@ where A: Clone, { fn try_ensure_unique(self_: &mut ArrayBase) - where Self: Sized, - D: Dimension + where + Self: Sized, + D: Dimension, { if Arc::get_mut(&mut self_.data.0).is_some() { return; @@ -204,10 +205,10 @@ where // Create a new vec if the current view is less than half of // backing data. unsafe { - *self_ = ArrayBase::from_shape_vec_unchecked(self_.dim.clone(), - self_.iter() - .cloned() - .collect()); + *self_ = ArrayBase::from_shape_vec_unchecked( + self_.dim.clone(), + self_.iter().cloned().collect(), + ); } return; } @@ -215,7 +216,9 @@ where let a_size = mem::size_of::() as isize; let our_off = if a_size != 0 { (self_.ptr as isize - rcvec.as_ptr() as isize) / a_size - } else { 0 }; + } else { + 0 + }; let rvec = Arc::make_mut(rcvec); unsafe { self_.ptr = rvec.as_mut_ptr().offset(our_off); @@ -258,15 +261,17 @@ unsafe impl RawData for OwnedRepr { fn _data_slice(&self) -> Option<&[A]> { Some(&self.0) } - private_impl!{} + private_impl! {} } unsafe impl RawDataMut for OwnedRepr { #[inline] fn try_ensure_unique(_: &mut ArrayBase) - where Self: Sized, - D: Dimension - {} + where + Self: Sized, + D: Dimension, + { + } #[inline] fn try_is_unique(&mut self) -> Option { @@ -285,26 +290,29 @@ unsafe impl Data for OwnedRepr { } } -unsafe impl DataMut for OwnedRepr { } +unsafe impl DataMut for OwnedRepr {} unsafe impl RawDataClone for OwnedRepr - where A: Clone +where + A: Clone, { unsafe fn clone_with_ptr(&self, ptr: *mut Self::Elem) -> (Self, *mut Self::Elem) { let mut u = self.clone(); let mut new_ptr = u.0.as_mut_ptr(); if size_of::() != 0 { - let our_off = (ptr as isize - self.0.as_ptr() as isize) / - mem::size_of::() as isize; + let our_off = (ptr as isize - self.0.as_ptr() as isize) / mem::size_of::() as isize; new_ptr = new_ptr.offset(our_off); } (u, new_ptr) } - unsafe fn clone_from_with_ptr(&mut self, other: &Self, ptr: *mut Self::Elem) -> *mut Self::Elem { + unsafe fn clone_from_with_ptr( + &mut self, + other: &Self, + ptr: *mut Self::Elem, + ) -> *mut Self::Elem { let our_off = if size_of::() != 0 { - (ptr as isize - other.0.as_ptr() as isize) / - mem::size_of::() as isize + (ptr as isize - other.0.as_ptr() as isize) / mem::size_of::() as isize } else { 0 }; @@ -318,7 +326,7 @@ unsafe impl<'a, A> RawData for ViewRepr<&'a A> { fn _data_slice(&self) -> Option<&[A]> { None } - private_impl!{} + private_impl! {} } unsafe impl<'a, A> Data for ViewRepr<&'a A> { @@ -342,14 +350,17 @@ unsafe impl<'a, A> RawData for ViewRepr<&'a mut A> { fn _data_slice(&self) -> Option<&[A]> { None } - private_impl!{} + private_impl! {} } unsafe impl<'a, A> RawDataMut for ViewRepr<&'a mut A> { #[inline] fn try_ensure_unique(_: &mut ArrayBase) - where Self: Sized, - D: Dimension {} + where + Self: Sized, + D: Dimension, + { + } #[inline] fn try_is_unique(&mut self) -> Option { @@ -367,14 +378,14 @@ unsafe impl<'a, A> Data for ViewRepr<&'a mut A> { } } -unsafe impl<'a, A> DataMut for ViewRepr<&'a mut A> { } +unsafe impl<'a, A> DataMut for ViewRepr<&'a mut A> {} /// Array representation trait. /// /// A representation that is a unique or shared owner of its data. /// /// ***Internal trait, see `Data`.*** -pub unsafe trait DataOwned : Data { +pub unsafe trait DataOwned: Data { #[doc(hidden)] fn new(elements: Vec) -> Self; @@ -389,7 +400,7 @@ pub unsafe trait DataOwned : Data { /// A representation that is a lightweight view. 
/// /// ***Internal trait, see `Data`.*** -pub unsafe trait DataShared : Clone + Data + RawDataClone { } +pub unsafe trait DataShared: Clone + Data + RawDataClone {} unsafe impl DataShared for OwnedRcRepr {} unsafe impl<'a, A> DataShared for ViewRepr<&'a A> {} diff --git a/src/dimension/axes.rs b/src/dimension/axes.rs index f087eb5c5..4ac61afb2 100644 --- a/src/dimension/axes.rs +++ b/src/dimension/axes.rs @@ -1,9 +1,9 @@ - -use crate::{Dimension, Axis, Ix, Ixs}; +use crate::{Axis, Dimension, Ix, Ixs}; /// Create a new Axes iterator pub fn axes_of<'a, D>(d: &'a D, strides: &'a D) -> Axes<'a, D> - where D: Dimension, +where + D: Dimension, { Axes { dim: d, @@ -49,19 +49,26 @@ copy_and_clone!(AxisDescription); impl AxisDescription { /// Return axis #[inline(always)] - pub fn axis(self) -> Axis { self.0 } + pub fn axis(self) -> Axis { + self.0 + } /// Return length #[inline(always)] - pub fn len(self) -> Ix { self.1 } + pub fn len(self) -> Ix { + self.1 + } /// Return stride #[inline(always)] - pub fn stride(self) -> Ixs { self.2 } + pub fn stride(self) -> Ixs { + self.2 + } } copy_and_clone!(['a, D] Axes<'a, D>); impl<'a, D> Iterator for Axes<'a, D> - where D: Dimension, +where + D: Dimension, { /// Description of the axis, its length and its stride. type Item = AxisDescription; @@ -69,7 +76,11 @@ impl<'a, D> Iterator for Axes<'a, D> fn next(&mut self) -> Option { if self.start < self.end { let i = self.start.post_inc(); - Some(AxisDescription(Axis(i), self.dim[i], self.strides[i] as Ixs)) + Some(AxisDescription( + Axis(i), + self.dim[i], + self.strides[i] as Ixs, + )) } else { None } @@ -91,19 +102,24 @@ impl<'a, D> Iterator for Axes<'a, D> } impl<'a, D> DoubleEndedIterator for Axes<'a, D> - where D: Dimension, +where + D: Dimension, { fn next_back(&mut self) -> Option { if self.start < self.end { let i = self.end.pre_dec(); - Some(AxisDescription(Axis(i), self.dim[i], self.strides[i] as Ixs)) + Some(AxisDescription( + Axis(i), + self.dim[i], + self.strides[i] as Ixs, + )) } else { None } } } -trait IncOps : Copy { +trait IncOps: Copy { fn post_inc(&mut self) -> Self; fn post_dec(&mut self) -> Self; fn pre_dec(&mut self) -> Self; @@ -128,4 +144,3 @@ impl IncOps for usize { *self } } - diff --git a/src/dimension/axis.rs b/src/dimension/axis.rs index f09ee95d8..6fc389988 100644 --- a/src/dimension/axis.rs +++ b/src/dimension/axis.rs @@ -21,10 +21,12 @@ pub struct Axis(pub usize); impl Axis { /// Return the index of the axis. #[inline(always)] - pub fn index(&self) -> usize { self.0 } + pub fn index(&self) -> usize { + self.0 + } } -copy_and_clone!{Axis} +copy_and_clone! {Axis} macro_rules! derive_cmp { ($traitname:ident for $typename:ident, $method:ident -> $ret:ty) => { @@ -34,9 +36,8 @@ macro_rules! derive_cmp { (self.0).$method(&rhs.0) } } - } + }; } -derive_cmp!{PartialEq for Axis, eq -> bool} -derive_cmp!{PartialOrd for Axis, partial_cmp -> Option} - +derive_cmp! {PartialEq for Axis, eq -> bool} +derive_cmp! {PartialOrd for Axis, partial_cmp -> Option} diff --git a/src/dimension/conversion.rs b/src/dimension/conversion.rs index c4e49c778..bf48dae2f 100644 --- a/src/dimension/conversion.rs +++ b/src/dimension/conversion.rs @@ -8,10 +8,10 @@ //! 
Tuple to array conversion, IntoDimension, and related things -use std::ops::{Index, IndexMut}; use num_traits::Zero; +use std::ops::{Index, IndexMut}; -use crate::{Ix, Ix1, IxDyn, Dimension, Dim, IxDynImpl}; +use crate::{Dim, Dimension, Ix, Ix1, IxDyn, IxDynImpl}; /// $m: macro callback /// $m is called with $arg and then the indices corresponding to the size argument @@ -46,25 +46,36 @@ pub trait IntoDimension { impl IntoDimension for Ix { type Dim = Ix1; #[inline(always)] - fn into_dimension(self) -> Ix1 { Ix1(self) } + fn into_dimension(self) -> Ix1 { + Ix1(self) + } } -impl IntoDimension for D where D: Dimension { +impl IntoDimension for D +where + D: Dimension, +{ type Dim = D; #[inline(always)] - fn into_dimension(self) -> Self { self } + fn into_dimension(self) -> Self { + self + } } impl IntoDimension for IxDynImpl { type Dim = IxDyn; #[inline(always)] - fn into_dimension(self) -> Self::Dim { Dim::new(self) } + fn into_dimension(self) -> Self::Dim { + Dim::new(self) + } } impl IntoDimension for Vec { type Dim = IxDyn; #[inline(always)] - fn into_dimension(self) -> Self::Dim { Dim::new(IxDynImpl::from(self)) } + fn into_dimension(self) -> Self::Dim { + Dim::new(IxDynImpl::from(self)) + } } pub trait Convert { @@ -73,7 +84,9 @@ pub trait Convert { } macro_rules! sub { - ($_x:tt $y:tt) => ($y); + ($_x:tt $y:tt) => { + $y + }; } macro_rules! tuple_type { diff --git a/src/dimension/dim.rs b/src/dimension/dim.rs index 4adb71658..62a9e0b1f 100644 --- a/src/dimension/dim.rs +++ b/src/dimension/dim.rs @@ -6,12 +6,12 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use itertools::zip; use std::fmt; use std::hash; -use itertools::zip; -use super::IntoDimension; use super::Dimension; +use super::IntoDimension; use crate::Ix; /// Dimension description. @@ -44,26 +44,30 @@ pub struct Dim { impl Dim { /// Private constructor and accessors for Dim pub(crate) fn new(index: I) -> Dim { - Dim { - index: index, - } + Dim { index: index } } #[inline(always)] - pub(crate) fn ix(&self) -> &I { &self.index } + pub(crate) fn ix(&self) -> &I { + &self.index + } #[inline(always)] - pub(crate) fn ixm(&mut self) -> &mut I { &mut self.index } + pub(crate) fn ixm(&mut self) -> &mut I { + &mut self.index + } } /// Create a new dimension value. #[allow(non_snake_case)] pub fn Dim(index: T) -> T::Dim - where T: IntoDimension +where + T: IntoDimension, { index.into_dimension() } impl PartialEq for Dim - where I: PartialEq, +where + I: PartialEq, { fn eq(&self, rhs: &I) -> bool { self.index == *rhs @@ -71,7 +75,8 @@ impl PartialEq for Dim } impl hash::Hash for Dim - where Dim: Dimension, +where + Dim: Dimension, { fn hash(&self, state: &mut H) { self.slice().hash(state); @@ -79,101 +84,111 @@ impl hash::Hash for Dim } impl fmt::Debug for Dim - where I: fmt::Debug, +where + I: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{:?}", self.index) } } -use std::ops::{Add, Sub, Mul, AddAssign, SubAssign, MulAssign}; +use std::ops::{Add, AddAssign, Mul, MulAssign, Sub, SubAssign}; macro_rules! 
impl_op { ($op:ident, $op_m:ident, $opassign:ident, $opassign_m:ident, $expr:ident) => { - impl $op for Dim - where Dim: Dimension, - { - type Output = Self; - fn $op_m(mut self, rhs: Self) -> Self { - $expr!(self, &rhs); - self + impl $op for Dim + where + Dim: Dimension, + { + type Output = Self; + fn $op_m(mut self, rhs: Self) -> Self { + $expr!(self, &rhs); + self + } } - } - impl $opassign for Dim - where Dim: Dimension, - { - fn $opassign_m(&mut self, rhs: Self) { - $expr!(*self, &rhs); + impl $opassign for Dim + where + Dim: Dimension, + { + fn $opassign_m(&mut self, rhs: Self) { + $expr!(*self, &rhs); + } } - } - impl<'a, I> $opassign<&'a Dim> for Dim - where Dim: Dimension, - { - fn $opassign_m(&mut self, rhs: &Self) { - for (x, &y) in zip(self.slice_mut(), rhs.slice()) { - $expr!(*x, y); + impl<'a, I> $opassign<&'a Dim> for Dim + where + Dim: Dimension, + { + fn $opassign_m(&mut self, rhs: &Self) { + for (x, &y) in zip(self.slice_mut(), rhs.slice()) { + $expr!(*x, y); + } } } - } - - } + }; } macro_rules! impl_single_op { ($op:ident, $op_m:ident, $opassign:ident, $opassign_m:ident, $expr:ident) => { - impl $op for Dim<[Ix; 1]> - { - type Output = Self; - #[inline] - fn $op_m(mut self, rhs: Ix) -> Self { - $expr!(self, rhs); - self + impl $op for Dim<[Ix; 1]> { + type Output = Self; + #[inline] + fn $op_m(mut self, rhs: Ix) -> Self { + $expr!(self, rhs); + self + } } - } - impl $opassign for Dim<[Ix; 1]> { - #[inline] - fn $opassign_m(&mut self, rhs: Ix) { - $expr!((*self)[0], rhs); + impl $opassign for Dim<[Ix; 1]> { + #[inline] + fn $opassign_m(&mut self, rhs: Ix) { + $expr!((*self)[0], rhs); + } } - } }; } macro_rules! impl_scalar_op { ($op:ident, $op_m:ident, $opassign:ident, $opassign_m:ident, $expr:ident) => { - impl $op for Dim - where Dim: Dimension, - { - type Output = Self; - fn $op_m(mut self, rhs: Ix) -> Self { - $expr!(self, rhs); - self + impl $op for Dim + where + Dim: Dimension, + { + type Output = Self; + fn $op_m(mut self, rhs: Ix) -> Self { + $expr!(self, rhs); + self + } } - } - impl $opassign for Dim - where Dim: Dimension, - { - fn $opassign_m(&mut self, rhs: Ix) { - for x in self.slice_mut() { - $expr!(*x, rhs); + impl $opassign for Dim + where + Dim: Dimension, + { + fn $opassign_m(&mut self, rhs: Ix) { + for x in self.slice_mut() { + $expr!(*x, rhs); + } } } - } }; } macro_rules! add { - ($x:expr, $y:expr) => { $x += $y; } + ($x:expr, $y:expr) => { + $x += $y; + }; } macro_rules! sub { - ($x:expr, $y:expr) => { $x -= $y; } + ($x:expr, $y:expr) => { + $x -= $y; + }; } macro_rules! mul { - ($x:expr, $y:expr) => { $x *= $y; } + ($x:expr, $y:expr) => { + $x *= $y; + }; } impl_op!(Add, add, AddAssign, add_assign, add); impl_single_op!(Add, add, AddAssign, add_assign, add); @@ -181,4 +196,3 @@ impl_op!(Sub, sub, SubAssign, sub_assign, sub); impl_single_op!(Sub, sub, SubAssign, sub_assign, sub); impl_op!(Mul, mul, MulAssign, mul_assign, mul); impl_scalar_op!(Mul, mul, MulAssign, mul_assign, mul); - diff --git a/src/dimension/dimension_trait.rs b/src/dimension/dimension_trait.rs index f8357f069..fcf523c3a 100644 --- a/src/dimension/dimension_trait.rs +++ b/src/dimension/dimension_trait.rs @@ -6,24 +6,20 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
- use std::fmt::Debug; +use std::ops::{Add, AddAssign, Mul, MulAssign, Sub, SubAssign}; use std::ops::{Index, IndexMut}; -use std::ops::{Add, Sub, Mul, AddAssign, SubAssign, MulAssign}; use itertools::{enumerate, izip, zip}; -use crate::{Ix, Ixs, Ix0, Ix1, Ix2, Ix3, Ix4, Ix5, Ix6, IxDyn, Dim, SliceOrIndex, IxDynImpl}; +use super::axes_of; +use super::conversion::Convert; +use super::{stride_offset, stride_offset_checked}; +use crate::Axis; use crate::IntoDimension; use crate::RemoveAxis; use crate::{ArrayView1, ArrayViewMut1}; -use crate::Axis; -use super::{ - stride_offset, - stride_offset_checked, -}; -use super::conversion::Convert; -use super::axes_of; +use crate::{Dim, Ix, Ix0, Ix1, Ix2, Ix3, Ix4, Ix5, Ix6, IxDyn, IxDynImpl, Ixs, SliceOrIndex}; /// Array shape and index trait. /// @@ -31,15 +27,25 @@ use super::axes_of; /// dimensions and indices. /// /// **Note:** *This trait can not be implemented outside the crate* -pub trait Dimension : Clone + Eq + Debug + Send + Sync + Default + - IndexMut + - Add + - AddAssign + for<'x> AddAssign<&'x Self> + - Sub + - SubAssign + for<'x> SubAssign<&'x Self> + - Mul + Mul + - MulAssign + for<'x> MulAssign<&'x Self> + MulAssign - +pub trait Dimension: + Clone + + Eq + + Debug + + Send + + Sync + + Default + + IndexMut + + Add + + AddAssign + + for<'x> AddAssign<&'x Self> + + Sub + + SubAssign + + for<'x> SubAssign<&'x Self> + + Mul + + Mul + + MulAssign + + for<'x> MulAssign<&'x Self> + + MulAssign { /// For fixed-size dimension representations (e.g. `Ix2`), this should be /// `Some(ndim)`, and for variable-size dimension representations (e.g. @@ -66,7 +72,7 @@ pub trait Dimension : Clone + Eq + Debug + Send + Sync + Default + /// - For `Ix2`: `(usize, usize)` /// - and so on.. /// - For `IxDyn`: `IxDyn` - type Pattern: IntoDimension; + type Pattern: IntoDimension; /// Next smaller dimension (if applicable) type Smaller: Dimension; /// Next larger dimension @@ -85,7 +91,9 @@ pub trait Dimension : Clone + Eq + Debug + Send + Sync + Default + /// Compute the size while checking for overflow. fn size_checked(&self) -> Option { - self.slice().iter().fold(Some(1), |s, &a| s.and_then(|s_| s_.checked_mul(a))) + self.slice() + .iter() + .fold(Some(1), |s, &a| s.and_then(|s_| s_.checked_mul(a))) } #[doc(hidden)] @@ -224,7 +232,6 @@ pub trait Dimension : Clone + Eq + Debug + Send + Sync + Default + !end_iteration } - #[doc(hidden)] /// Return stride offset for index. fn stride_offset(index: &Self, strides: &Self) -> isize { @@ -243,7 +250,11 @@ pub trait Dimension : Clone + Eq + Debug + Send + Sync + Default + #[doc(hidden)] fn last_elem(&self) -> usize { - if self.ndim() == 0 { 0 } else { self.slice()[self.ndim() - 1] } + if self.ndim() == 0 { + 0 + } else { + self.slice()[self.ndim() - 1] + } } #[doc(hidden)] @@ -258,7 +269,9 @@ pub trait Dimension : Clone + Eq + Debug + Send + Sync + Default + if strides.equal(&defaults) { return true; } - if dim.ndim() == 1 { return false; } + if dim.ndim() == 1 { + return false; + } let order = strides._fastest_varying_stride_order(); let strides = strides.slice(); @@ -313,7 +326,7 @@ pub trait Dimension : Clone + Eq + Debug + Send + Sync + Default + match self.ndim() { 0 => panic!("max_stride_axis: Array must have ndim > 0"), 1 => return Axis(0), - _ => { } + _ => {} } axes_of(self, strides) .filter(|ax| ax.len() > 1) @@ -345,7 +358,7 @@ pub trait Dimension : Clone + Eq + Debug + Send + Sync + Default + #[doc(hidden)] fn try_remove_axis(&self, axis: Axis) -> Self::Smaller; - private_decl!{} + private_decl! 
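// A minimal sketch of the `Dimension` methods declared above, seen through a concrete
// array (assuming the prelude's `Array3` alias and the public `Dimension` re-export).
use ndarray::prelude::*;
use ndarray::Dimension;

fn main() {
    let a = Array3::<f64>::zeros((2, 3, 4));
    assert_eq!(a.dim(), (2, 3, 4));   // the `Pattern` for `Ix3` is a tuple
    let dim = a.raw_dim();            // the `Ix3` dimension value itself
    assert_eq!(dim.ndim(), 3);
    assert_eq!(dim.size(), 2 * 3 * 4);
    assert_eq!(dim.size_checked(), Some(24));
}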
{} } // Dimension impls @@ -370,15 +383,23 @@ impl Dimension for Dim<[Ix; 0]> { type Larger = Ix1; // empty product is 1 -> size is 1 #[inline] - fn ndim(&self) -> usize { 0 } + fn ndim(&self) -> usize { + 0 + } #[inline] - fn slice(&self) -> &[Ix] { &[] } + fn slice(&self) -> &[Ix] { + &[] + } #[inline] - fn slice_mut(&mut self) -> &mut [Ix] { &mut [] } + fn slice_mut(&mut self) -> &mut [Ix] { + &mut [] + } #[inline] - fn _fastest_varying_stride_order(&self) -> Self { Ix0() } + fn _fastest_varying_stride_order(&self) -> Self { + Ix0() + } #[inline] - fn into_pattern(self) -> Self::Pattern { } + fn into_pattern(self) -> Self::Pattern {} #[inline] fn zeros(ndim: usize) -> Self { assert_eq!(ndim, 0); @@ -395,10 +416,9 @@ impl Dimension for Dim<[Ix; 0]> { *self } - private_impl!{} + private_impl! {} } - impl Dimension for Dim<[Ix; 1]> { const NDIM: Option = Some(1); type SliceArg = [SliceOrIndex; 1]; @@ -406,11 +426,17 @@ impl Dimension for Dim<[Ix; 1]> { type Smaller = Ix0; type Larger = Ix2; #[inline] - fn ndim(&self) -> usize { 1 } + fn ndim(&self) -> usize { + 1 + } #[inline] - fn slice(&self) -> &[Ix] { self.ix() } + fn slice(&self) -> &[Ix] { + self.ix() + } #[inline] - fn slice_mut(&mut self) -> &mut [Ix] { self.ixm() } + fn slice_mut(&mut self) -> &mut [Ix] { + self.ixm() + } #[inline] fn into_pattern(self) -> Self::Pattern { get!(&self, 0) @@ -436,9 +462,13 @@ impl Dimension for Dim<[Ix; 1]> { } #[inline] - fn size(&self) -> usize { get!(self, 0) } + fn size(&self) -> usize { + get!(self, 0) + } #[inline] - fn size_checked(&self) -> Option { Some(get!(self, 0)) } + fn size_checked(&self) -> Option { + Some(get!(self, 0)) + } #[inline] fn default_strides(&self) -> Self { @@ -494,7 +524,7 @@ impl Dimension for Dim<[Ix; 1]> { fn try_remove_axis(&self, axis: Axis) -> Self::Smaller { self.remove_axis(axis) } - private_impl!{} + private_impl! {} } impl Dimension for Dim<[Ix; 2]> { @@ -504,15 +534,21 @@ impl Dimension for Dim<[Ix; 2]> { type Smaller = Ix1; type Larger = Ix3; #[inline] - fn ndim(&self) -> usize { 2 } + fn ndim(&self) -> usize { + 2 + } #[inline] fn into_pattern(self) -> Self::Pattern { self.ix().convert() } #[inline] - fn slice(&self) -> &[Ix] { self.ix() } + fn slice(&self) -> &[Ix] { + self.ix() + } #[inline] - fn slice_mut(&mut self) -> &mut [Ix] { self.ixm() } + fn slice_mut(&mut self) -> &mut [Ix] { + self.ixm() + } #[inline] fn zeros(ndim: usize) -> Self { assert_eq!(ndim, 2); @@ -541,7 +577,9 @@ impl Dimension for Dim<[Ix; 2]> { } #[inline] - fn size(&self) -> usize { get!(self, 0) * get!(self, 1) } + fn size(&self) -> usize { + get!(self, 0) * get!(self, 1) + } #[inline] fn size_checked(&self) -> Option { @@ -583,7 +621,11 @@ impl Dimension for Dim<[Ix; 2]> { #[inline] fn _fastest_varying_stride_order(&self) -> Self { - if get!(self, 0) as Ixs <= get!(self, 1) as Ixs { Ix2(0, 1) } else { Ix2(1, 0) } + if get!(self, 0) as Ixs <= get!(self, 1) as Ixs { + Ix2(0, 1) + } else { + Ix2(1, 0) + } } #[inline] @@ -620,8 +662,7 @@ impl Dimension for Dim<[Ix; 2]> { /// Return stride offset for this dimension and index. #[inline] - fn stride_offset_checked(&self, strides: &Self, index: &Self) -> Option - { + fn stride_offset_checked(&self, strides: &Self, index: &Self) -> Option { let m = get!(self, 0); let n = get!(self, 1); let i = get!(index, 0); @@ -640,7 +681,7 @@ impl Dimension for Dim<[Ix; 2]> { fn try_remove_axis(&self, axis: Axis) -> Self::Smaller { self.remove_axis(axis) } - private_impl!{} + private_impl! 
{} } impl Dimension for Dim<[Ix; 3]> { @@ -650,15 +691,21 @@ impl Dimension for Dim<[Ix; 3]> { type Smaller = Ix2; type Larger = Ix4; #[inline] - fn ndim(&self) -> usize { 3 } + fn ndim(&self) -> usize { + 3 + } #[inline] fn into_pattern(self) -> Self::Pattern { self.ix().convert() } #[inline] - fn slice(&self) -> &[Ix] { self.ix() } + fn slice(&self) -> &[Ix] { + self.ix() + } #[inline] - fn slice_mut(&mut self) -> &mut [Ix] { self.ixm() } + fn slice_mut(&mut self) -> &mut [Ix] { + self.ixm() + } #[inline] fn size(&self) -> usize { @@ -711,8 +758,7 @@ impl Dimension for Dim<[Ix; 3]> { /// Return stride offset for this dimension and index. #[inline] - fn stride_offset_checked(&self, strides: &Self, index: &Self) -> Option - { + fn stride_offset_checked(&self, strides: &Self, index: &Self) -> Option { let m = get!(self, 0); let n = get!(self, 1); let l = get!(self, 2); @@ -739,7 +785,7 @@ impl Dimension for Dim<[Ix; 3]> { $stride.swap($x, $y); $order.ixm().swap($x, $y); } - } + }; } { // stable sorting network for 3 elements @@ -756,7 +802,7 @@ impl Dimension for Dim<[Ix; 3]> { fn try_remove_axis(&self, axis: Axis) -> Self::Smaller { self.remove_axis(axis) } - private_impl!{} + private_impl! {} } macro_rules! large_dim { @@ -812,19 +858,24 @@ large_dim!(6, Ix6, (Ix, Ix, Ix, Ix, Ix, Ix), IxDyn, { /// IxDyn is a "dynamic" index, pretty hard to use when indexing, /// and memory wasteful, but it allows an arbitrary and dynamic number of axes. -impl Dimension for IxDyn -{ +impl Dimension for IxDyn { const NDIM: Option = None; type SliceArg = [SliceOrIndex]; type Pattern = Self; type Smaller = Self; type Larger = Self; #[inline] - fn ndim(&self) -> usize { self.ix().len() } + fn ndim(&self) -> usize { + self.ix().len() + } #[inline] - fn slice(&self) -> &[Ix] { self.ix() } + fn slice(&self) -> &[Ix] { + self.ix() + } #[inline] - fn slice_mut(&mut self) -> &mut [Ix] { self.ixm() } + fn slice_mut(&mut self) -> &mut [Ix] { + self.ixm() + } #[inline] fn into_pattern(self) -> Self::Pattern { self @@ -853,19 +904,17 @@ impl Dimension for IxDyn fn from_dimension(d: &D2) -> Option { Some(IxDyn(d.slice())) } - private_impl!{} + private_impl! 
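// Sketch of the dynamic-dimension case handled by the `IxDyn` impl above: the number
// of axes is chosen at run time (assuming the public `ndarray::{Array, IxDyn}` exports).
use ndarray::{Array, IxDyn};

fn main() {
    let a = Array::<f64, _>::zeros(IxDyn(&[2, 3, 4]));
    assert_eq!(a.ndim(), 3);
    assert_eq!(a.shape(), &[2, 3, 4]);
    // Fixed-size index arrays still work, thanks to the `NdIndex` impls for `IxDyn`.
    assert_eq!(a[[1, 2, 3]], 0.0);
}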
{} } -impl Index for Dim -{ +impl Index for Dim { type Output = >::Output; fn index(&self, index: usize) -> &Self::Output { &self.ix()[index] } } -impl IndexMut for Dim -{ +impl IndexMut for Dim { fn index_mut(&mut self, index: usize) -> &mut Self::Output { &mut self.ixm()[index] } diff --git a/src/dimension/dynindeximpl.rs b/src/dimension/dynindeximpl.rs index 4af929c86..17e8d3f89 100644 --- a/src/dimension/dynindeximpl.rs +++ b/src/dimension/dynindeximpl.rs @@ -1,11 +1,5 @@ - -use std::ops::{ - Index, - IndexMut, - Deref, - DerefMut, -}; use crate::imp_prelude::*; +use std::ops::{Deref, DerefMut, Index, IndexMut}; const CAP: usize = 4; @@ -22,9 +16,7 @@ impl Deref for IxDynRepr { match *self { IxDynRepr::Inline(len, ref ar) => { debug_assert!(len as (usize) <= ar.len()); - unsafe { - ar.get_unchecked(..len as usize) - } + unsafe { ar.get_unchecked(..len as usize) } } IxDynRepr::Alloc(ref ar) => &*ar, } @@ -36,9 +28,7 @@ impl DerefMut for IxDynRepr { match *self { IxDynRepr::Inline(len, ref mut ar) => { debug_assert!(len as (usize) <= ar.len()); - unsafe { - ar.get_unchecked_mut(..len as usize) - } + unsafe { ar.get_unchecked_mut(..len as usize) } } IxDynRepr::Alloc(ref mut ar) => &mut *ar, } @@ -52,7 +42,6 @@ impl Default for IxDynRepr { } } - use num_traits::Zero; impl IxDynRepr { @@ -93,25 +82,24 @@ impl IxDynRepr { impl Clone for IxDynRepr { fn clone(&self) -> Self { match *self { - IxDynRepr::Inline(len, arr) => { - IxDynRepr::Inline(len, arr) - } - _ => Self::from(&self[..]) + IxDynRepr::Inline(len, arr) => IxDynRepr::Inline(len, arr), + _ => Self::from(&self[..]), } } } -impl Eq for IxDynRepr { } +impl Eq for IxDynRepr {} impl PartialEq for IxDynRepr { fn eq(&self, rhs: &Self) -> bool { match (self, rhs) { (&IxDynRepr::Inline(slen, ref sarr), &IxDynRepr::Inline(rlen, ref rarr)) => { - slen == rlen && - (0..CAP as usize).filter(|&i| i < slen as usize) + slen == rlen + && (0..CAP as usize) + .filter(|&i| i < slen as usize) .all(|i| sarr[i] == rarr[i]) } - _ => self[..] == rhs[..] + _ => self[..] 
== rhs[..], } } } @@ -130,20 +118,18 @@ impl IxDynImpl { pub(crate) fn insert(&self, i: usize) -> Self { let len = self.len(); debug_assert!(i <= len); - IxDynImpl( - if len < CAP { - let mut out = [1; CAP]; - out[0..i].copy_from_slice(&self[0..i]); - out[i+1..len+1].copy_from_slice(&self[i..len]); - IxDynRepr::Inline((len + 1) as u32, out) - } else { - let mut out = Vec::with_capacity(len + 1); - out.extend_from_slice(&self[0..i]); - out.push(1); - out.extend_from_slice(&self[i..len]); - IxDynRepr::from_vec(out) - } - ) + IxDynImpl(if len < CAP { + let mut out = [1; CAP]; + out[0..i].copy_from_slice(&self[0..i]); + out[i + 1..len + 1].copy_from_slice(&self[i..len]); + IxDynRepr::Inline((len + 1) as u32, out) + } else { + let mut out = Vec::with_capacity(len + 1); + out.extend_from_slice(&self[0..i]); + out.push(1); + out.extend_from_slice(&self[i..len]); + IxDynRepr::from_vec(out) + }) } fn remove(&self, i: usize) -> Self { @@ -182,7 +168,8 @@ impl From> for IxDynImpl { } impl Index for IxDynImpl - where [Ix]: Index, +where + [Ix]: Index, { type Output = <[Ix] as Index>::Output; fn index(&self, index: J) -> &Self::Output { @@ -191,7 +178,8 @@ impl Index for IxDynImpl } impl IndexMut for IxDynImpl - where [Ix]: IndexMut, +where + [Ix]: IndexMut, { fn index_mut(&mut self, index: J) -> &mut Self::Output { &mut self.0[index] diff --git a/src/dimension/macros.rs b/src/dimension/macros.rs index 9f5f681e7..9a576e347 100644 --- a/src/dimension/macros.rs +++ b/src/dimension/macros.rs @@ -1,9 +1,12 @@ - /// Indexing macro for Dim<[usize; N]> this /// gets the index at `$i` in the underlying array macro_rules! get { - ($dim:expr, $i:expr) => { (*$dim.ix())[$i] } + ($dim:expr, $i:expr) => { + (*$dim.ix())[$i] + }; } macro_rules! getm { - ($dim:expr, $i:expr) => { (*$dim.ixm())[$i] } + ($dim:expr, $i:expr) => { + (*$dim.ixm())[$i] + }; } diff --git a/src/dimension/mod.rs b/src/dimension/mod.rs index aebcfc662..cf770c5fb 100644 --- a/src/dimension/mod.rs +++ b/src/dimension/mod.rs @@ -6,24 +6,26 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use crate::{Ix, Ixs, Slice, SliceOrIndex}; use crate::error::{from_kind, ErrorKind, ShapeError}; +use crate::{Ix, Ixs, Slice, SliceOrIndex}; use itertools::izip; use num_integer::div_floor; -pub use self::dim::*; +pub use self::axes::{axes_of, Axes, AxisDescription}; pub use self::axis::Axis; pub use self::conversion::IntoDimension; +pub use self::dim::*; pub use self::dimension_trait::Dimension; +pub use self::dynindeximpl::IxDynImpl; pub use self::ndindex::NdIndex; pub use self::remove_axis::RemoveAxis; -pub use self::axes::{axes_of, Axes, AxisDescription}; -pub use self::dynindeximpl::IxDynImpl; use std::isize; use std::mem; -#[macro_use] mod macros; +#[macro_use] +mod macros; +mod axes; mod axis; mod conversion; pub mod dim; @@ -31,7 +33,6 @@ mod dimension_trait; mod dynindeximpl; mod ndindex; mod remove_axis; -mod axes; /// Calculate offset from `Ix` stride converting sign properly #[inline(always)] @@ -158,7 +159,8 @@ where // Calculate maximum possible absolute movement along this axis. let off = d.saturating_sub(1).checked_mul(s.abs() as usize)?; acc.checked_add(off) - }).ok_or_else(|| from_kind(ErrorKind::Overflow))?; + }) + .ok_or_else(|| from_kind(ErrorKind::Overflow))?; // Condition 2a. 
if max_offset > isize::MAX as usize { return Err(from_kind(ErrorKind::Overflow)); @@ -208,9 +210,11 @@ where /// condition 4 is sufficient to guarantee that the absolute difference in /// units of `A` and in units of bytes between the least address and greatest /// address accessible by moving along all axes does not exceed `isize::MAX`. -pub fn can_index_slice(data: &[A], dim: &D, strides: &D) - -> Result<(), ShapeError> -{ +pub fn can_index_slice( + data: &[A], + dim: &D, + strides: &D, +) -> Result<(), ShapeError> { // Check conditions 1 and 2 and calculate `max_offset`. let max_offset = max_abs_offset_check_overflow::(dim, strides)?; @@ -257,8 +261,8 @@ pub fn stride_offset_checked(dim: &[Ix], strides: &[Ix], index: &[Ix]) -> Option /// Implementation-specific extensions to `Dimension` pub trait DimensionExt { -// note: many extensions go in the main trait if they need to be special- -// cased per dimension + // note: many extensions go in the main trait if they need to be special- + // cased per dimension /// Get the dimension at `axis`. /// /// *Panics* if `axis` is out of bounds. @@ -273,7 +277,8 @@ pub trait DimensionExt { } impl DimensionExt for D - where D: Dimension +where + D: Dimension, { #[inline] fn axis(&self, axis: Axis) -> Ix { @@ -286,8 +291,7 @@ impl DimensionExt for D } } -impl<'a> DimensionExt for [Ix] -{ +impl<'a> DimensionExt for [Ix] { #[inline] fn axis(&self, axis: Axis) -> Ix { self[axis.index()] @@ -312,10 +316,14 @@ pub fn do_collapse_axis( ) -> isize { let dim = dims.slice()[axis]; let stride = strides.slice()[axis]; - ndassert!(index < dim, - "collapse_axis: Index {} must be less than axis length {} for \ - array with shape {:?}", - index, dim, *dims); + ndassert!( + index < dim, + "collapse_axis: Index {} must be less than axis length {} for \ + array with shape {:?}", + index, + dim, + *dims + ); dims.slice_mut()[axis] = 1; stride_offset(index, stride) } @@ -567,8 +575,8 @@ pub fn slices_intersect( return false; } } - (SliceOrIndex::Slice { start, end, step }, SliceOrIndex::Index(ind)) | - (SliceOrIndex::Index(ind), SliceOrIndex::Slice { start, end, step }) => { + (SliceOrIndex::Slice { start, end, step }, SliceOrIndex::Index(ind)) + | (SliceOrIndex::Index(ind), SliceOrIndex::Slice { start, end, step }) => { let ind = abs_index(axis_len, ind); let (min, max) = match slice_min_max(axis_len, Slice::new(start, end, step)) { Some(m) => m, @@ -591,7 +599,8 @@ pub fn slices_intersect( } pub fn merge_axes(dim: &mut D, strides: &mut D, take: Axis, into: Axis) -> bool - where D: Dimension, +where + D: Dimension, { let into_len = dim.axis(into); let into_stride = strides.axis(into) as isize; @@ -616,18 +625,17 @@ pub fn merge_axes(dim: &mut D, strides: &mut D, take: Axis, into: Axis) -> bo } } - // NOTE: These tests are not compiled & tested #[cfg(test)] mod test { use super::{ arith_seq_intersect, can_index_slice, can_index_slice_not_custom, extended_gcd, max_abs_offset_check_overflow, slice_min_max, slices_intersect, - solve_linear_diophantine_eq, IntoDimension + solve_linear_diophantine_eq, IntoDimension, }; - use crate::{Dim, Dimension, Ix0, Ix1, Ix2, Ix3, IxDyn}; use crate::error::{from_kind, ErrorKind}; use crate::slice::Slice; + use crate::{Dim, Dimension, Ix0, Ix1, Ix2, Ix3, IxDyn}; use num_integer::gcd; use quickcheck::{quickcheck, TestResult}; @@ -639,8 +647,10 @@ mod test { assert!(super::can_index_slice(&v, &dim, &strides).is_ok()); let strides = (2, 4, 12).into_dimension(); - assert_eq!(super::can_index_slice(&v, &dim, &strides), - 
Err(from_kind(ErrorKind::OutOfBounds))); + assert_eq!( + super::can_index_slice(&v, &dim, &strides), + Err(from_kind(ErrorKind::OutOfBounds)) + ); } #[test] @@ -875,8 +885,14 @@ mod test { assert_eq!(slice_min_max(10, Slice::new(-8, Some(8), -3)), Some((4, 7))); assert_eq!(slice_min_max(10, Slice::new(1, Some(-2), -3)), Some((1, 7))); assert_eq!(slice_min_max(10, Slice::new(2, Some(-2), -3)), Some((4, 7))); - assert_eq!(slice_min_max(10, Slice::new(-9, Some(-2), -3)), Some((1, 7))); - assert_eq!(slice_min_max(10, Slice::new(-8, Some(-2), -3)), Some((4, 7))); + assert_eq!( + slice_min_max(10, Slice::new(-9, Some(-2), -3)), + Some((1, 7)) + ); + assert_eq!( + slice_min_max(10, Slice::new(-8, Some(-2), -3)), + Some((4, 7)) + ); assert_eq!(slice_min_max(9, Slice::new(2, None, -3)), Some((2, 8))); assert_eq!(slice_min_max(9, Slice::new(-7, None, -3)), Some((2, 8))); assert_eq!(slice_min_max(9, Slice::new(3, None, -3)), Some((5, 8))); diff --git a/src/dimension/ndindex.rs b/src/dimension/ndindex.rs index c0a4f4370..c2d872ec4 100644 --- a/src/dimension/ndindex.rs +++ b/src/dimension/ndindex.rs @@ -1,10 +1,11 @@ - use std::fmt::Debug; use itertools::zip; -use crate::{Ix, Ix0, Ix1, Ix2, Ix3, Ix4, Ix5, Ix6, IxDyn, IxDynImpl, Dim, Dimension, IntoDimension}; use super::{stride_offset, stride_offset_checked}; +use crate::{ + Dim, Dimension, IntoDimension, Ix, Ix0, Ix1, Ix2, Ix3, Ix4, Ix5, Ix6, IxDyn, IxDynImpl, +}; /// Tuple or fixed size arrays that can be used to index an array. /// @@ -18,7 +19,7 @@ use super::{stride_offset, stride_offset_checked}; /// a[[1, 1]] += 1; /// assert_eq!(a[(1, 1)], 4); /// ``` -pub unsafe trait NdIndex : Debug { +pub unsafe trait NdIndex: Debug { #[doc(hidden)] fn index_checked(&self, dim: &E, strides: &E) -> Option; #[doc(hidden)] @@ -26,7 +27,8 @@ pub unsafe trait NdIndex : Debug { } unsafe impl NdIndex for D - where D: Dimension +where + D: Dimension, { fn index_checked(&self, dim: &D, strides: &D) -> Option { dim.stride_offset_checked(strides, self) @@ -54,8 +56,7 @@ unsafe impl NdIndex for (Ix, Ix) { } #[inline] fn index_unchecked(&self, strides: &Ix2) -> isize { - stride_offset(self.0, get!(strides, 0)) + - stride_offset(self.1, get!(strides, 1)) + stride_offset(self.0, get!(strides, 0)) + stride_offset(self.1, get!(strides, 1)) } } unsafe impl NdIndex for (Ix, Ix, Ix) { @@ -66,9 +67,9 @@ unsafe impl NdIndex for (Ix, Ix, Ix) { #[inline] fn index_unchecked(&self, strides: &Ix3) -> isize { - stride_offset(self.0, get!(strides, 0)) + - stride_offset(self.1, get!(strides, 1)) + - stride_offset(self.2, get!(strides, 2)) + stride_offset(self.0, get!(strides, 0)) + + stride_offset(self.1, get!(strides, 1)) + + stride_offset(self.2, get!(strides, 2)) } } @@ -79,7 +80,9 @@ unsafe impl NdIndex for (Ix, Ix, Ix, Ix) { } #[inline] fn index_unchecked(&self, strides: &Ix4) -> isize { - zip(strides.ix(), self.into_dimension().ix()).map(|(&s, &i)| stride_offset(i, s)).sum() + zip(strides.ix(), self.into_dimension().ix()) + .map(|(&s, &i)| stride_offset(i, s)) + .sum() } } unsafe impl NdIndex for (Ix, Ix, Ix, Ix, Ix) { @@ -89,7 +92,9 @@ unsafe impl NdIndex for (Ix, Ix, Ix, Ix, Ix) { } #[inline] fn index_unchecked(&self, strides: &Ix5) -> isize { - zip(strides.ix(), self.into_dimension().ix()).map(|(&s, &i)| stride_offset(i, s)).sum() + zip(strides.ix(), self.into_dimension().ix()) + .map(|(&s, &i)| stride_offset(i, s)) + .sum() } } @@ -183,7 +188,7 @@ macro_rules! ndindex_with_array { }; } -ndindex_with_array!{ +ndindex_with_array! 
{ [0, Ix0] [1, Ix1 0] [2, Ix2 0 1] @@ -214,6 +219,8 @@ unsafe impl<'a> NdIndex for &'a [Ix] { stride_offset_checked(dim.ix(), strides.ix(), *self) } fn index_unchecked(&self, strides: &IxDyn) -> isize { - zip(strides.ix(), *self).map(|(&s, &i)| stride_offset(i, s)).sum() + zip(strides.ix(), *self) + .map(|(&s, &i)| stride_offset(i, s)) + .sum() } } diff --git a/src/dimension/remove_axis.rs b/src/dimension/remove_axis.rs index 67b77c6ce..1706895f7 100644 --- a/src/dimension/remove_axis.rs +++ b/src/dimension/remove_axis.rs @@ -6,14 +6,13 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. - -use crate::{Ix, Ix0, Ix1, Dimension, Dim, Axis}; +use crate::{Axis, Dim, Dimension, Ix, Ix0, Ix1}; /// Array shape with a next smaller dimension. /// /// `RemoveAxis` defines a larger-than relation for array shapes: /// removing one axis from *Self* gives smaller dimension *Smaller*. -pub trait RemoveAxis : Dimension { +pub trait RemoveAxis: Dimension { fn remove_axis(&self, axis: Axis) -> Self::Smaller; } @@ -30,7 +29,11 @@ impl RemoveAxis for Dim<[Ix; 2]> { fn remove_axis(&self, axis: Axis) -> Ix1 { let axis = axis.index(); debug_assert!(axis < self.ndim()); - if axis == 0 { Ix1(get!(self, 1)) } else { Ix1(get!(self, 0)) } + if axis == 0 { + Ix1(get!(self, 1)) + } else { + Ix1(get!(self, 0)) + } } } @@ -63,6 +66,3 @@ macro_rules! impl_remove_axis_array( ); impl_remove_axis_array!(3, 4, 5, 6); - - - diff --git a/src/doc/ndarray_for_numpy_users/mod.rs b/src/doc/ndarray_for_numpy_users/mod.rs index b268c9b24..b2ac70171 100644 --- a/src/doc/ndarray_for_numpy_users/mod.rs +++ b/src/doc/ndarray_for_numpy_users/mod.rs @@ -633,6 +633,6 @@ //! [::zeros()]: ../../struct.ArrayBase.html#method.zeros //! [Zip]: ../../struct.Zip.html -pub mod rk_step; pub mod coord_transform; +pub mod rk_step; pub mod simple_math; diff --git a/src/error.rs b/src/error.rs index 6b13a717d..8e5274889 100644 --- a/src/error.rs +++ b/src/error.rs @@ -5,11 +5,9 @@ // , at your // option. This file may not be copied, modified, or distributed // except according to those terms. -use std::fmt; +use super::Dimension; use std::error::Error; -use super::{ - Dimension, -}; +use std::fmt; /// An error related to array shape or layout. #[derive(Clone)] @@ -55,9 +53,7 @@ pub enum ErrorKind { #[inline(always)] pub fn from_kind(k: ErrorKind) -> ShapeError { - ShapeError { - repr: k - } + ShapeError { repr: k } } impl PartialEq for ErrorKind { @@ -101,8 +97,9 @@ impl fmt::Debug for ShapeError { } pub fn incompatible_shapes(_a: &D, _b: &E) -> ShapeError - where D: Dimension, - E: Dimension +where + D: Dimension, + E: Dimension, { from_kind(ErrorKind::IncompatibleShape) } diff --git a/src/free_functions.rs b/src/free_functions.rs index f289cdffe..89951e90d 100644 --- a/src/free_functions.rs +++ b/src/free_functions.rs @@ -6,8 +6,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use std::mem::{forget, size_of}; use std::slice; -use std::mem::{size_of, forget}; use crate::dimension; use crate::imp_prelude::*; @@ -53,8 +53,7 @@ macro_rules! array { } /// Create a zero-dimensional array with the element `x`. -pub fn arr0(x: A) -> Array0 -{ +pub fn arr0(x: A) -> Array0 { unsafe { ArrayBase::from_shape_vec_unchecked((), vec![x]) } } @@ -93,7 +92,7 @@ pub fn aview1(xs: &[A]) -> ArrayView1 { /// /// **Panics** if the product of non-zero axis lengths overflows `isize`. (This /// can only occur when `V` is zero-sized.) 
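// Sketch of the free constructor/view functions touched in `free_functions.rs`:
// `arr1`/`arr2` copy elements out of slices, `aview1`/`aview2` borrow them without copying.
use ndarray::{arr1, arr2, aview1, aview2};

fn main() {
    let owned = arr2(&[[1., 2., 3.], [4., 5., 6.]]); // owned Array2<f64>
    let view = aview2(&[[1., 2., 3.], [4., 5., 6.]]); // ArrayView2<f64> over the slice
    assert_eq!(owned, view);
    assert_eq!(arr1(&[1., 2., 3.]), aview1(&[1., 2., 3.]));
}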
-pub fn aview2>(xs: &[V]) -> ArrayView2 { +pub fn aview2>(xs: &[V]) -> ArrayView2 { let cols = V::len(); let rows = xs.len(); let dim = Ix2(rows, cols); @@ -159,7 +158,7 @@ pub fn aview_mut1(xs: &mut [A]) -> ArrayViewMut1 { /// assert_eq!(&data[..3], [[1., -1.], [1., -1.], [1., -1.]]); /// } /// ``` -pub fn aview_mut2>(xs: &mut [V]) -> ArrayViewMut2 { +pub fn aview_mut2>(xs: &mut [V]) -> ArrayViewMut2 { let cols = V::len(); let rows = xs.len(); let dim = Ix2(rows, cols); @@ -221,7 +220,8 @@ impl_arr_init!(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,); /// ); /// ``` pub fn arr2>(xs: &[V]) -> Array2 - where V: Clone, +where + V: Clone, { Array2::from(xs.to_vec()) } @@ -233,7 +233,8 @@ impl From> for Array1 { } impl From> for Array2 - where V: FixedInitializer +where + V: FixedInitializer, { /// Converts the `Vec` of arrays to an owned 2-D array. /// @@ -262,8 +263,9 @@ impl From> for Array2 } impl From> for Array3 - where V: FixedInitializer, - U: FixedInitializer +where + V: FixedInitializer, + U: FixedInitializer, { /// Converts the `Vec` of arrays to an owned 3-D array. /// @@ -314,18 +316,23 @@ pub fn rcarr2>(xs: &[V]) -> ArcA /// a.shape() == [3, 2, 2] /// ); /// ``` -pub fn arr3, U: FixedInitializer>(xs: &[V]) - -> Array3 - where V: Clone, - U: Clone, +pub fn arr3, U: FixedInitializer>( + xs: &[V], +) -> Array3 +where + V: Clone, + U: Clone, { Array3::from(xs.to_vec()) } /// Create a three-dimensional array with elements from `xs`. -pub fn rcarr3, U: FixedInitializer>(xs: &[V]) - -> ArcArray - where V: Clone, U: Clone, +pub fn rcarr3, U: FixedInitializer>( + xs: &[V], +) -> ArcArray +where + V: Clone, + U: Clone, { arr3(xs).into_shared() } diff --git a/src/impl_1d.rs b/src/impl_1d.rs index 8e4e11ae4..73d5e837e 100644 --- a/src/impl_1d.rs +++ b/src/impl_1d.rs @@ -6,13 +6,13 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. - //! Methods for one-dimensional arrays. use crate::imp_prelude::*; /// # Methods For 1-D Arrays impl ArrayBase - where S: RawData, +where + S: RawData, { /// Return an vector with the elements of the one-dimensional array. pub fn to_vec(&self) -> Vec @@ -27,4 +27,3 @@ impl ArrayBase } } } - diff --git a/src/impl_2d.rs b/src/impl_2d.rs index 477b65ef0..7731098aa 100644 --- a/src/impl_2d.rs +++ b/src/impl_2d.rs @@ -6,13 +6,13 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. - //! Methods for two-dimensional arrays. use crate::imp_prelude::*; /// # Methods For 2-D Arrays impl ArrayBase - where S: RawData, +where + S: RawData, { /// Return an array view of row `index`. /// @@ -28,7 +28,8 @@ impl ArrayBase /// /// **Panics** if `index` is out of bounds. pub fn row_mut(&mut self, index: Ix) -> ArrayViewMut1 - where S: DataMut + where + S: DataMut, { self.index_axis_mut(Axis(0), index) } @@ -52,7 +53,8 @@ impl ArrayBase /// /// **Panics** if `index` is out of bounds. pub fn column_mut(&mut self, index: Ix) -> ArrayViewMut1 - where S: DataMut + where + S: DataMut, { self.index_axis_mut(Axis(1), index) } @@ -67,4 +69,3 @@ impl ArrayBase self.rows() == self.cols() } } - diff --git a/src/impl_constructors.rs b/src/impl_constructors.rs index 1c2b84439..e0ee07650 100644 --- a/src/impl_constructors.rs +++ b/src/impl_constructors.rs @@ -10,18 +10,18 @@ //! //! 
-use num_traits::{Zero, One, Float}; +use num_traits::{Float, One, Zero}; use std::isize; use std::mem; -use crate::imp_prelude::*; -use crate::StrideShape; use crate::dimension; -use crate::linspace; use crate::error::{self, ShapeError}; -use crate::indices; +use crate::imp_prelude::*; use crate::indexes; +use crate::indices; use crate::iterators::{to_vec, to_vec_mapped}; +use crate::linspace; +use crate::StrideShape; /// # Constructor Methods for Owned Arrays /// @@ -30,7 +30,8 @@ use crate::iterators::{to_vec, to_vec_mapped}; /// /// ## Constructor methods for one-dimensional arrays. impl ArrayBase - where S: DataOwned, +where + S: DataOwned, { /// Create a one-dimensional array from a vector (no copying needed). /// @@ -62,7 +63,8 @@ impl ArrayBase /// assert!(array == arr1(&[0, 1, 4, 9, 16])) /// ``` pub fn from_iter(iterable: I) -> Self - where I: IntoIterator + where + I: IntoIterator, { Self::from_vec(iterable.into_iter().collect()) } @@ -79,7 +81,8 @@ impl ArrayBase /// assert!(array == arr1(&[0.0, 0.25, 0.5, 0.75, 1.0])) /// ``` pub fn linspace(start: A, end: A, n: usize) -> Self - where A: Float, + where + A: Float, { Self::from_vec(to_vec(linspace::linspace(start, end, n))) } @@ -97,7 +100,8 @@ impl ArrayBase /// assert!(array == arr1(&[0., 1., 2., 3., 4.])) /// ``` pub fn range(start: A, end: A, step: A) -> Self - where A: Float, + where + A: Float, { Self::from_vec(to_vec(linspace::range(start, end, step))) } @@ -105,14 +109,16 @@ impl ArrayBase /// ## Constructor methods for two-dimensional arrays. impl ArrayBase - where S: DataOwned, +where + S: DataOwned, { /// Create an identity matrix of size `n` (square 2D array). /// /// **Panics** if `n * n` would overflow `isize`. pub fn eye(n: Ix) -> Self - where S: DataMut, - A: Clone + Zero + One, + where + S: DataMut, + A: Clone + Zero + One, { let mut eye = Self::zeros((n, n)); for a_ii in eye.diag_mut() { @@ -127,9 +133,11 @@ macro_rules! size_of_shape_checked_unwrap { ($dim:expr) => { match dimension::size_of_shape_checked($dim) { Ok(sz) => sz, - Err(_) => panic!("ndarray: Shape too large, product of non-zero axis lengths overflows isize"), + Err(_) => { + panic!("ndarray: Shape too large, product of non-zero axis lengths overflows isize") + } } - } + }; } #[cfg(debug_assertions)] @@ -143,7 +151,7 @@ macro_rules! size_of_shape_checked_unwrap { $dim ), } - } + }; } /// ## Constructor methods for n-dimensional arrays. @@ -163,8 +171,9 @@ macro_rules! size_of_shape_checked_unwrap { /// `Into` argument *optionally* support custom strides, for /// example a shape given like `(10, 2, 2).strides((1, 10, 20))` is valid. impl ArrayBase - where S: DataOwned, - D: Dimension, +where + S: DataOwned, + D: Dimension, { /// Create an array with copies of `elem`, shape `shape`. /// @@ -187,8 +196,9 @@ impl ArrayBase /// assert!(b.strides() == &[1, 2, 4]); /// ``` pub fn from_elem(shape: Sh, elem: A) -> Self - where A: Clone, - Sh: ShapeBuilder, + where + A: Clone, + Sh: ShapeBuilder, { let shape = shape.into_shape(); let size = size_of_shape_checked_unwrap!(&shape.dim); @@ -200,8 +210,9 @@ impl ArrayBase /// /// **Panics** if the product of non-zero axis lengths overflows `isize`. pub fn zeros(shape: Sh) -> Self - where A: Clone + Zero, - Sh: ShapeBuilder, + where + A: Clone + Zero, + Sh: ShapeBuilder, { Self::from_elem(shape, A::zero()) } @@ -210,8 +221,9 @@ impl ArrayBase /// /// **Panics** if the product of non-zero axis lengths overflows `isize`. 
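// Sketch of the constructors above: `linspace`/`range` build 1-D arrays, `eye` an
// identity matrix, and `zeros` a filled n-D shape (values as documented in the hunks).
use ndarray::{arr1, Array, Array2};

fn main() {
    let a = Array::linspace(0., 1., 5);
    assert_eq!(a, arr1(&[0., 0.25, 0.5, 0.75, 1.]));

    let b = Array::range(0., 5., 1.);
    assert_eq!(b, arr1(&[0., 1., 2., 3., 4.]));

    let eye = Array2::<f64>::eye(3);
    assert_eq!(eye.diag().sum(), 3.);

    let zeros = Array2::<f64>::zeros((2, 3));
    assert_eq!(zeros.sum(), 0.);
}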
pub fn ones(shape: Sh) -> Self - where A: Clone + One, - Sh: ShapeBuilder, + where + A: Clone + One, + Sh: ShapeBuilder, { Self::from_elem(shape, A::one()) } @@ -220,8 +232,9 @@ impl ArrayBase /// /// **Panics** if the product of non-zero axis lengths overflows `isize`. pub fn default(shape: Sh) -> Self - where A: Default, - Sh: ShapeBuilder, + where + A: Default, + Sh: ShapeBuilder, { let shape = shape.into_shape(); let size = size_of_shape_checked_unwrap!(&shape.dim); @@ -236,8 +249,9 @@ impl ArrayBase /// /// **Panics** if the product of non-zero axis lengths overflows `isize`. pub fn from_shape_fn(shape: Sh, f: F) -> Self - where Sh: ShapeBuilder, - F: FnMut(D::Pattern) -> A, + where + Sh: ShapeBuilder, + F: FnMut(D::Pattern) -> A, { let shape = shape.into_shape(); let _ = size_of_shape_checked_unwrap!(&shape.dim); @@ -285,14 +299,14 @@ impl ArrayBase /// ); /// ``` pub fn from_shape_vec(shape: Sh, v: Vec) -> Result - where Sh: Into>, + where + Sh: Into>, { // eliminate the type parameter Sh as soon as possible Self::from_shape_vec_impl(shape.into(), v) } - fn from_shape_vec_impl(shape: StrideShape, v: Vec) -> Result - { + fn from_shape_vec_impl(shape: StrideShape, v: Vec) -> Result { let dim = shape.dim; let strides = shape.strides; if shape.custom { @@ -328,22 +342,21 @@ impl ArrayBase /// 5. The strides must not allow any element to be referenced by two different /// indices. pub unsafe fn from_shape_vec_unchecked(shape: Sh, v: Vec) -> Self - where Sh: Into>, + where + Sh: Into>, { let shape = shape.into(); Self::from_vec_dim_stride_unchecked(shape.dim, shape.strides, v) } - unsafe fn from_vec_dim_stride_unchecked(dim: D, strides: D, mut v: Vec) - -> Self - { + unsafe fn from_vec_dim_stride_unchecked(dim: D, strides: D, mut v: Vec) -> Self { // debug check for issues that indicates wrong use of this constructor debug_assert!(dimension::can_index_slice(&v, &dim, &strides).is_ok()); ArrayBase { ptr: v.as_mut_ptr(), data: DataOwned::new(v), strides: strides, - dim: dim + dim: dim, } } @@ -393,8 +406,9 @@ impl ArrayBase /// # } /// ``` pub unsafe fn uninitialized(shape: Sh) -> Self - where A: Copy, - Sh: ShapeBuilder, + where + A: Copy, + Sh: ShapeBuilder, { let shape = shape.into_shape(); let size = size_of_shape_checked_unwrap!(&shape.dim); @@ -402,5 +416,4 @@ impl ArrayBase v.set_len(size); Self::from_shape_vec_unchecked(shape, v) } - } diff --git a/src/impl_methods.rs b/src/impl_methods.rs index de74af795..ea5dd80d1 100644 --- a/src/impl_methods.rs +++ b/src/impl_methods.rs @@ -16,33 +16,19 @@ use crate::imp_prelude::*; use crate::arraytraits; use crate::dimension; -use crate::error::{self, ShapeError, ErrorKind}; use crate::dimension::IntoDimension; -use crate::dimension::{abs_index, axes_of, Axes, do_slice, merge_axes, size_of_shape_checked, stride_offset}; +use crate::dimension::{ + abs_index, axes_of, do_slice, merge_axes, size_of_shape_checked, stride_offset, Axes, +}; +use crate::error::{self, ErrorKind, ShapeError}; use crate::zip::Zip; -use crate::{ - NdIndex, - Slice, - SliceInfo, - SliceOrIndex -}; use crate::iter::{ - AxisChunksIter, - AxisChunksIterMut, - Iter, - IterMut, - IndexedIter, - IndexedIterMut, - Lanes, - LanesMut, - AxisIter, - AxisIterMut, - ExactChunks, - ExactChunksMut, - Windows + AxisChunksIter, AxisChunksIterMut, AxisIter, AxisIterMut, ExactChunks, ExactChunksMut, + IndexedIter, IndexedIterMut, Iter, IterMut, Lanes, LanesMut, Windows, }; use crate::stacking::stack; +use crate::{NdIndex, Slice, SliceInfo, SliceOrIndex}; /// # Methods For All Array Types 
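// Sketch of the n-dimensional constructors above; the strided `from_shape_vec` form is
// what the `can_index_slice` checks in `dimension/mod.rs` validate.
use ndarray::{arr2, Array, Array2, ShapeBuilder};

fn main() {
    // Build from a function of the index pattern.
    let table = Array2::from_shape_fn((3, 3), |(i, j)| (i * j) as i32);
    assert_eq!(table[(2, 2)], 4);

    // Interpret a flat Vec with custom (here column-major) strides.
    let cols = Array::from_shape_vec((2, 3).strides((1, 2)), vec![1, 2, 3, 4, 5, 6]).unwrap();
    assert_eq!(cols, arr2(&[[1, 3, 5], [2, 4, 6]]));
}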
impl ArrayBase @@ -96,9 +82,7 @@ where pub fn strides(&self) -> &[Ixs] { let s = self.strides.slice(); // reinterpret unsigned integer as signed - unsafe { - slice::from_raw_parts(s.as_ptr() as *const _, s.len()) - } + unsafe { slice::from_raw_parts(s.as_ptr() as *const _, s.len()) } } /// Return the stride of `axis`. @@ -118,19 +102,16 @@ where S: Data, { debug_assert!(self.pointer_is_inbounds()); - unsafe { - ArrayView::new_(self.ptr, self.dim.clone(), self.strides.clone()) - } + unsafe { ArrayView::new_(self.ptr, self.dim.clone(), self.strides.clone()) } } /// Return a read-write view of the array pub fn view_mut(&mut self) -> ArrayViewMut - where S: DataMut, + where + S: DataMut, { self.ensure_unique(); - unsafe { - ArrayViewMut::new_(self.ptr, self.dim.clone(), self.strides.clone()) - } + unsafe { ArrayViewMut::new_(self.ptr, self.dim.clone(), self.strides.clone()) } } /// Return an uniquely owned copy of the array. @@ -170,9 +151,10 @@ where { if let Some(slc) = self.as_slice_memory_order() { unsafe { - Array::from_shape_vec_unchecked(self.dim.clone() - .strides(self.strides.clone()), - slc.to_vec()) + Array::from_shape_vec_unchecked( + self.dim.clone().strides(self.strides.clone()), + slc.to_vec(), + ) } } else { self.map(|x| x.clone()) @@ -202,7 +184,8 @@ where /// Turn the array into a shared ownership (copy on write) array, /// without any copying. pub fn into_shared(self) -> ArcArray - where S: DataOwned, + where + S: DataOwned, { let data = self.data.into_shared(); ArrayBase { @@ -260,7 +243,8 @@ where /// /// Iterator element type is `&mut A`. pub fn iter_mut(&mut self) -> IterMut - where S: DataMut, + where + S: DataMut, { self.view_mut().into_iter_() } @@ -287,12 +271,12 @@ where /// /// Iterator element type is `(D::Pattern, &mut A)`. pub fn indexed_iter_mut(&mut self) -> IndexedIterMut - where S: DataMut, + where + S: DataMut, { IndexedIterMut::new(self.view_mut().into_elements_base()) } - /// Return a sliced view of the array. /// /// See [*Slicing*](#slicing) for full documentation. @@ -354,7 +338,7 @@ where let mut new_strides = Do::zeros(out_ndim); izip!(self.dim.slice(), self.strides.slice(), indices) .filter_map(|(d, s, slice_or_index)| match slice_or_index { - &SliceOrIndex::Slice {..} => Some((d, s)), + &SliceOrIndex::Slice { .. } => Some((d, s)), &SliceOrIndex::Index(_) => None, }) .zip(izip!(new_dim.slice_mut(), new_strides.slice_mut())) @@ -406,7 +390,7 @@ where /// /// **Panics** if an index is out of bounds or step size is zero.
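// Sketch of the slicing methods above, using the `s![]` macro to build the `SliceArg`.
use ndarray::{arr2, s};

fn main() {
    let a = arr2(&[[1, 2, 3], [4, 5, 6], [7, 8, 9]]);

    // Rows 1.. and every second column, as a read-only view.
    let v = a.slice(s![1.., ..;2]);
    assert_eq!(v, arr2(&[[4, 6], [7, 9]]));

    // slice_mut returns a read-write view over the same region.
    let mut b = a.clone();
    b.slice_mut(s![1.., ..;2]).fill(0);
    assert_eq!(b, arr2(&[[1, 2, 3], [0, 5, 0], [0, 8, 0]]));
}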
/// (**Panics** if `D` is `IxDyn` and `indices` does not match the number of array axes.) - #[deprecated(note="renamed to `slice_collapse`", since="0.12.1")] + #[deprecated(note = "renamed to `slice_collapse`", since = "0.12.1")] pub fn slice_inplace(&mut self, indices: &D::SliceArg) { self.slice_collapse(indices) } @@ -476,39 +460,40 @@ where I: NdIndex, S: Data, { - unsafe { - self.get_ptr(index).map(|ptr| &*ptr) - } + unsafe { self.get_ptr(index).map(|ptr| &*ptr) } } pub(crate) fn get_ptr(&self, index: I) -> Option<*const A> - where I: NdIndex, + where + I: NdIndex, { let ptr = self.ptr; - index.index_checked(&self.dim, &self.strides) - .map(move |offset| unsafe { ptr.offset(offset) as *const _ }) + index + .index_checked(&self.dim, &self.strides) + .map(move |offset| unsafe { ptr.offset(offset) as *const _ }) } /// Return a mutable reference to the element at `index`, or return `None` /// if the index is out of bounds. pub fn get_mut(&mut self, index: I) -> Option<&mut A> - where S: DataMut, - I: NdIndex, + where + S: DataMut, + I: NdIndex, { - unsafe { - self.get_ptr_mut(index).map(|ptr| &mut *ptr) - } + unsafe { self.get_ptr_mut(index).map(|ptr| &mut *ptr) } } pub(crate) fn get_ptr_mut(&mut self, index: I) -> Option<*mut A> - where S: RawDataMut, - I: NdIndex, + where + S: RawDataMut, + I: NdIndex, { // const and mut are separate to enforce &mutness as well as the // extra code in as_mut_ptr let ptr = self.as_mut_ptr(); - index.index_checked(&self.dim, &self.strides) - .map(move |offset| unsafe { ptr.offset(offset) }) + index + .index_checked(&self.dim, &self.strides) + .map(move |offset| unsafe { ptr.offset(offset) }) } /// Perform *unchecked* array indexing. @@ -535,8 +520,9 @@ where /// **Note:** (For `ArcArray`) The array must be uniquely held when mutating it. #[inline] pub unsafe fn uget_mut(&mut self, index: I) -> &mut A - where S: DataMut, - I: NdIndex, + where + S: DataMut, + I: NdIndex, { debug_assert!(self.data.is_unique()); arraytraits::debug_bounds_check(self, &index); @@ -550,8 +536,9 @@ where /// /// ***Panics*** if an index is out of bounds. pub fn swap(&mut self, index1: I, index2: I) - where S: DataMut, - I: NdIndex, + where + S: DataMut, + I: NdIndex, { let ptr1: *mut _ = &mut self[index1]; let ptr2: *mut _ = &mut self[index2]; @@ -567,8 +554,9 @@ where /// **Note:** only unchecked for non-debug builds of ndarray.
/// **Note:** (For `ArcArray`) The array must be uniquely held. pub unsafe fn uswap(&mut self, index1: I, index2: I) - where S: DataMut, - I: NdIndex, + where + S: DataMut, + I: NdIndex, { debug_assert!(self.data.is_unique()); arraytraits::debug_bounds_check(self, &index1); @@ -585,9 +573,7 @@ where S: Data, { assert!(self.ndim() == 0); - unsafe { - &*self.as_ptr() - } + unsafe { &*self.as_ptr() } } /// Returns a view restricted to `index` along the axis, with the axis @@ -684,7 +670,7 @@ where /// view with that axis removed. /// /// **Panics** if `axis` or `index` is out of bounds. - #[deprecated(note="renamed to `index_axis`", since="0.12.1")] + #[deprecated(note = "renamed to `index_axis`", since = "0.12.1")] pub fn subview(&self, axis: Axis, index: Ix) -> ArrayView where S: Data, @@ -697,11 +683,11 @@ where /// with the axis removed. /// /// **Panics** if `axis` or `index` is out of bounds. - #[deprecated(note="renamed to `index_axis_mut`", since="0.12.1")] - pub fn subview_mut(&mut self, axis: Axis, index: Ix) - -> ArrayViewMut - where S: DataMut, - D: RemoveAxis, + #[deprecated(note = "renamed to `index_axis_mut`", since = "0.12.1")] + pub fn subview_mut(&mut self, axis: Axis, index: Ix) -> ArrayViewMut + where + S: DataMut, + D: RemoveAxis, { self.index_axis_mut(axis, index) } @@ -710,16 +696,17 @@ where /// and select the subview of `index` along that axis. /// /// **Panics** if `index` is past the length of the axis. - #[deprecated(note="renamed to `collapse_axis`", since="0.12.1")] + #[deprecated(note = "renamed to `collapse_axis`", since = "0.12.1")] pub fn subview_inplace(&mut self, axis: Axis, index: Ix) { self.collapse_axis(axis, index) } /// Along `axis`, select the subview `index` and return `self` /// with that axis removed. - #[deprecated(note="renamed to `index_axis_move`", since="0.12.1")] + #[deprecated(note = "renamed to `index_axis_move`", since = "0.12.1")] pub fn into_subview(self, axis: Axis, index: Ix) -> ArrayBase - where D: RemoveAxis, + where + D: RemoveAxis, { self.index_axis_move(axis, index) } @@ -758,9 +745,7 @@ where if subs.is_empty() { let mut dim = self.raw_dim(); dim.set_axis(axis, 0); - unsafe { - Array::from_shape_vec_unchecked(dim, vec![]) - } + unsafe { Array::from_shape_vec_unchecked(dim, vec![]) } } else { stack(axis, &subs).unwrap() } @@ -797,7 +782,9 @@ where S: Data, { let mut n = self.ndim(); - if n == 0 { n += 1; } + if n == 0 { + n += 1; + } Lanes::new(self.view(), Axis(n - 1)) } @@ -806,10 +793,13 @@ where /// /// Iterator element is `ArrayView1
` (1D read-write array view). pub fn genrows_mut(&mut self) -> LanesMut - where S: DataMut + where + S: DataMut, { let mut n = self.ndim(); - if n == 0 { n += 1; } + if n == 0 { + n += 1; + } LanesMut::new(self.view_mut(), Axis(n - 1)) } @@ -851,7 +841,8 @@ where /// /// Iterator element is `ArrayView1` (1D read-write array view). pub fn gencolumns_mut(&mut self) -> LanesMut - where S: DataMut + where + S: DataMut, { LanesMut::new(self.view_mut(), Axis(0)) } @@ -896,12 +887,12 @@ where /// /// Iterator element is `ArrayViewMut1` (1D read-write array view). pub fn lanes_mut(&mut self, axis: Axis) -> LanesMut - where S: DataMut + where + S: DataMut, { LanesMut::new(self.view_mut(), axis) } - /// Return an iterator that traverses over the outermost dimension /// and yields each subview. /// @@ -925,8 +916,9 @@ where /// Iterator element is `ArrayViewMut` (read-write array view). #[allow(deprecated)] pub fn outer_iter_mut(&mut self) -> AxisIterMut - where S: DataMut, - D: RemoveAxis, + where + S: DataMut, + D: RemoveAxis, { self.view_mut().into_outer_iter() } @@ -954,7 +946,6 @@ where AxisIter::new(self.view(), axis) } - /// Return an iterator that traverses over `axis` /// and yields each mutable subview along it. /// @@ -963,13 +954,13 @@ where /// /// **Panics** if `axis` is out of bounds. pub fn axis_iter_mut(&mut self, axis: Axis) -> AxisIterMut - where S: DataMut, - D: RemoveAxis, + where + S: DataMut, + D: RemoveAxis, { AxisIterMut::new(self.view_mut(), axis) } - /// Return an iterator that traverses over `axis` by chunks of `size`, /// yielding non-overlapping views along that axis. /// @@ -1009,9 +1000,9 @@ where /// Iterator element is `ArrayViewMut` /// /// **Panics** if `axis` is out of bounds. - pub fn axis_chunks_iter_mut(&mut self, axis: Axis, size: usize) - -> AxisChunksIterMut - where S: DataMut + pub fn axis_chunks_iter_mut(&mut self, axis: Axis, size: usize) -> AxisChunksIterMut + where + S: DataMut, { AxisChunksIterMut::new(self.view_mut(), axis, size) } @@ -1029,7 +1020,7 @@ where /// number of array axes.) pub fn exact_chunks(&self, chunk_size: E) -> ExactChunks where - E: IntoDimension, + E: IntoDimension, S: Data, { ExactChunks::new(self.view(), chunk_size) @@ -1068,8 +1059,9 @@ where /// [6, 6, 7, 7, 8, 8, 0]])); /// ``` pub fn exact_chunks_mut(&mut self, chunk_size: E) -> ExactChunksMut - where E: IntoDimension, - S: DataMut + where + E: IntoDimension, + S: DataMut, { ExactChunksMut::new(self.view_mut(), chunk_size) } @@ -1090,7 +1082,7 @@ where /// number of array axes.) pub fn windows(&self, window_size: E) -> Windows where - E: IntoDimension, + E: IntoDimension, S: Data, { Windows::new(self.view(), window_size) @@ -1100,9 +1092,7 @@ where fn diag_params(&self) -> (Ix, Ixs) { /* empty shape has len 1 */ let len = self.dim.slice().iter().cloned().min().unwrap_or(1); - let stride = self.strides() - .iter() - .fold(0, |sum, s| sum + s); + let stride = self.strides().iter().fold(0, |sum, s| sum + s); (len, stride) } @@ -1119,7 +1109,8 @@ where /// Return a read-write view over the diagonal elements of the array. pub fn diag_mut(&mut self) -> ArrayViewMut1 - where S: DataMut, + where + S: DataMut, { self.view_mut().into_diag() } @@ -1141,7 +1132,8 @@ where /// /// This method is mostly only useful with unsafe code. fn try_ensure_unique(&mut self) - where S: RawDataMut + where + S: RawDataMut, { debug_assert!(self.pointer_is_inbounds()); S::try_ensure_unique(self); @@ -1152,7 +1144,8 @@ where /// /// This method is mostly only useful with unsafe code. 
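// Sketch of the producer/iterator methods above (`genrows`, `axis_iter`, `windows`).
use ndarray::{arr2, Axis};

fn main() {
    let a = arr2(&[[1, 2, 3], [4, 5, 6]]);

    // genrows() yields each row as a 1-D view (lanes along the last axis).
    assert_eq!(a.genrows().into_iter().count(), 2);

    // axis_iter(Axis(1)) walks the columns as subviews.
    let col_sums: Vec<i32> = a.axis_iter(Axis(1)).map(|col| col.sum()).collect();
    assert_eq!(col_sums, vec![5, 7, 9]);

    // windows((2, 2)) yields the overlapping 2×2 views.
    assert_eq!(a.windows((2, 2)).into_iter().count(), 2);
}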
fn ensure_unique(&mut self) - where S: DataMut + where + S: DataMut, { debug_assert!(self.pointer_is_inbounds()); S::ensure_unique(self); @@ -1168,7 +1161,7 @@ where fn is_standard_layout(dim: &D, strides: &D) -> bool { match D::NDIM { Some(1) => return strides[0] == 1 || dim[0] <= 1, - _ => { } + _ => {} } if dim.slice().iter().any(|&d| d == 0) { return true; @@ -1206,7 +1199,8 @@ where /// Return a mutable pointer to the first element in the array. #[inline(always)] pub fn as_mut_ptr(&mut self) -> *mut A - where S: RawDataMut + where + S: RawDataMut, { self.try_ensure_unique(); // for RcArray self.ptr @@ -1221,7 +1215,8 @@ where /// Return a raw mutable view of the array. #[inline] pub fn raw_view_mut(&mut self) -> RawArrayViewMut - where S: RawDataMut + where + S: RawDataMut, { self.try_ensure_unique(); // for RcArray unsafe { RawArrayViewMut::new_(self.ptr, self.dim.clone(), self.strides.clone()) } @@ -1237,9 +1232,7 @@ where S: Data, { if self.is_standard_layout() { - unsafe { - Some(slice::from_raw_parts(self.ptr, self.len())) - } + unsafe { Some(slice::from_raw_parts(self.ptr, self.len())) } } else { None } @@ -1248,13 +1241,12 @@ where /// Return the array’s data as a slice, if it is contiguous and in standard order. /// Return `None` otherwise. pub fn as_slice_mut(&mut self) -> Option<&mut [A]> - where S: DataMut + where + S: DataMut, { if self.is_standard_layout() { self.ensure_unique(); - unsafe { - Some(slice::from_raw_parts_mut(self.ptr, self.len())) - } + unsafe { Some(slice::from_raw_parts_mut(self.ptr, self.len())) } } else { None } @@ -1272,9 +1264,7 @@ where S: Data, { if self.is_contiguous() { - unsafe { - Some(slice::from_raw_parts(self.ptr, self.len())) - } + unsafe { Some(slice::from_raw_parts(self.ptr, self.len())) } } else { None } @@ -1283,13 +1273,12 @@ where /// Return the array’s data as a slice if it is contiguous, /// return `None` otherwise. 
pub fn as_slice_memory_order_mut(&mut self) -> Option<&mut [A]> - where S: DataMut + where + S: DataMut, { if self.is_contiguous() { self.ensure_unique(); - unsafe { - Some(slice::from_raw_parts_mut(self.ptr, self.len())) - } + unsafe { Some(slice::from_raw_parts_mut(self.ptr, self.len())) } } else { None } @@ -1312,7 +1301,8 @@ where /// ); /// ``` pub fn into_shape(self, shape: E) -> Result, ShapeError> - where E: IntoDimension, + where + E: IntoDimension, { let shape = shape.into_dimension(); if size_of_shape_checked(&shape) != Ok(self.dim.size()) { @@ -1359,15 +1349,18 @@ where /// ); /// ``` pub fn reshape(&self, shape: E) -> ArrayBase - where S: DataShared + DataOwned, - A: Clone, - E: IntoDimension, + where + S: DataShared + DataOwned, + A: Clone, + E: IntoDimension, { let shape = shape.into_dimension(); if size_of_shape_checked(&shape) != Ok(self.dim.size()) { - panic!("ndarray: incompatible shapes in reshape, attempted from: {:?}, to: {:?}", - self.dim.slice(), - shape.slice()) + panic!( + "ndarray: incompatible shapes in reshape, attempted from: {:?}, to: {:?}", + self.dim.slice(), + shape.slice() + ) } // Check if contiguous, if not => copy all, else just adapt strides if self.is_standard_layout() { @@ -1380,9 +1373,7 @@ where } } else { let v = self.iter().map(|x| x.clone()).collect::>(); - unsafe { - ArrayBase::from_shape_vec_unchecked(shape, v) - } + unsafe { ArrayBase::from_shape_vec_unchecked(shape, v) } } } @@ -1418,7 +1409,8 @@ where /// assert!(array.into_dimensionality::().is_ok()); /// ``` pub fn into_dimensionality(self) -> Result, ShapeError> - where D2: Dimension + where + D2: Dimension, { if let Some(dim) = D2::from_dimension(&self.dim) { if let Some(strides) = D2::from_dimension(&self.strides) { @@ -1485,9 +1477,12 @@ where { let mut new_stride_iter = new_stride.slice_mut().iter_mut().rev(); - for ((er, es), dr) in from.slice().iter().rev() - .zip(stride.slice().iter().rev()) - .zip(new_stride_iter.by_ref()) + for ((er, es), dr) in from + .slice() + .iter() + .rev() + .zip(stride.slice().iter().rev()) + .zip(new_stride_iter.by_ref()) { /* update strides */ if *dr == *er { @@ -1706,10 +1701,14 @@ where /// ``` /// /// ***Panics*** if the axis is out of bounds. - pub fn insert_axis(self, axis: Axis) -> ArrayBase - { + pub fn insert_axis(self, axis: Axis) -> ArrayBase { assert!(axis.index() <= self.ndim()); - let ArrayBase { ptr, data, dim, strides } = self; + let ArrayBase { + ptr, + data, + dim, + strides, + } = self; ArrayBase { ptr, data, @@ -1721,9 +1720,10 @@ where /// Remove array axis `axis` and return the result. /// /// **Panics** if the axis is out of bounds or its length is zero. - #[deprecated(note="use `.index_axis_move(Axis(_), 0)` instead", since="0.12.1")] + #[deprecated(note = "use `.index_axis_move(Axis(_), 0)` instead", since = "0.12.1")] pub fn remove_axis(self, axis: Axis) -> ArrayBase - where D: RemoveAxis, + where + D: RemoveAxis, { self.index_axis_move(axis, 0) } @@ -1736,9 +1736,7 @@ where } Some(slc) => { let ptr = slc.as_ptr() as *mut A; - let end = unsafe { - ptr.offset(slc.len() as isize) - }; + let end = unsafe { ptr.offset(slc.len() as isize) }; self.ptr >= ptr && self.ptr <= end } } @@ -1750,25 +1748,29 @@ where /// /// **Panics** if broadcasting isn’t possible. pub fn assign(&mut self, rhs: &ArrayBase) - where S: DataMut, - A: Clone, - S2: Data, + where + S: DataMut, + A: Clone, + S2: Data, { self.zip_mut_with(rhs, |x, y| *x = y.clone()); } /// Perform an elementwise assigment to `self` from element `x`. 
pub fn fill(&mut self, x: A) - where S: DataMut, A: Clone, + where + S: DataMut, + A: Clone, { self.unordered_foreach_mut(move |elt| *elt = x.clone()); } fn zip_mut_with_same_shape(&mut self, rhs: &ArrayBase, mut f: F) - where S: DataMut, - S2: Data, - E: Dimension, - F: FnMut(&mut A, &B) + where + S: DataMut, + S2: Data, + E: Dimension, + F: FnMut(&mut A, &B), { debug_assert_eq!(self.shape(), rhs.shape()); if let Some(self_s) = self.as_slice_mut() { @@ -1789,10 +1791,11 @@ where // zip two arrays where they have different layout or strides #[inline(always)] fn zip_mut_with_by_rows(&mut self, rhs: &ArrayBase, mut f: F) - where S: DataMut, - S2: Data, - E: Dimension, - F: FnMut(&mut A, &B) + where + S: DataMut, + S2: Data, + E: Dimension, + F: FnMut(&mut A, &B), { debug_assert_eq!(self.shape(), rhs.shape()); debug_assert_ne!(self.ndim(), 0); @@ -1802,15 +1805,13 @@ where let dim = self.raw_dim(); Zip::from(LanesMut::new(self.view_mut(), Axis(n - 1))) .and(Lanes::new(rhs.broadcast_assume(dim), Axis(n - 1))) - .apply(move |s_row, r_row| { - Zip::from(s_row).and(r_row).apply(|a, b| f(a, b)) - }); + .apply(move |s_row, r_row| Zip::from(s_row).and(r_row).apply(|a, b| f(a, b))); } - fn zip_mut_with_elem(&mut self, rhs_elem: &B, mut f: F) - where S: DataMut, - F: FnMut(&mut A, &B) + where + S: DataMut, + F: FnMut(&mut A, &B), { self.unordered_foreach_mut(move |elt| f(elt, rhs_elem)); } @@ -1823,10 +1824,11 @@ where /// **Panics** if broadcasting isn’t possible. #[inline] pub fn zip_mut_with(&mut self, rhs: &ArrayBase, f: F) - where S: DataMut, - S2: Data, - E: Dimension, - F: FnMut(&mut A, &B) + where + S: DataMut, + S2: Data, + E: Dimension, + F: FnMut(&mut A, &B), { if rhs.dim.ndim() == 0 { // Skip broadcast from 0-dim array @@ -1897,21 +1899,22 @@ where /// ); /// ``` pub fn map<'a, B, F>(&'a self, f: F) -> Array - where F: FnMut(&'a A) -> B, - A: 'a, - S: Data, + where + F: FnMut(&'a A) -> B, + A: 'a, + S: Data, { if let Some(slc) = self.as_slice_memory_order() { let v = crate::iterators::to_vec_mapped(slc.iter(), f); unsafe { ArrayBase::from_shape_vec_unchecked( - self.dim.clone().strides(self.strides.clone()), v) + self.dim.clone().strides(self.strides.clone()), + v, + ) } } else { let v = crate::iterators::to_vec_mapped(self.iter(), f); - unsafe { - ArrayBase::from_shape_vec_unchecked(self.dim.clone(), v) - } + unsafe { ArrayBase::from_shape_vec_unchecked(self.dim.clone(), v) } } } @@ -1922,24 +1925,20 @@ where /// /// Return an array with the same shape as `self`. pub fn map_mut<'a, B, F>(&'a mut self, f: F) -> Array - where F: FnMut(&'a mut A) -> B, - A: 'a, - S: DataMut + where + F: FnMut(&'a mut A) -> B, + A: 'a, + S: DataMut, { let dim = self.dim.clone(); if self.is_contiguous() { let strides = self.strides.clone(); let slc = self.as_slice_memory_order_mut().unwrap(); let v = crate::iterators::to_vec_mapped(slc.iter_mut(), f); - unsafe { - ArrayBase::from_shape_vec_unchecked( - dim.strides(strides), v) - } + unsafe { ArrayBase::from_shape_vec_unchecked(dim.strides(strides), v) } } else { let v = crate::iterators::to_vec_mapped(self.iter_mut(), f); - unsafe { - ArrayBase::from_shape_vec_unchecked(dim, v) - } + unsafe { ArrayBase::from_shape_vec_unchecked(dim, v) } } } @@ -1961,9 +1960,10 @@ where /// ); /// ``` pub fn mapv(&self, mut f: F) -> Array - where F: FnMut(A) -> B, - A: Clone, - S: Data, + where + F: FnMut(A) -> B, + A: Clone, + S: Data, { self.map(move |x| f(x.clone())) } @@ -1973,9 +1973,10 @@ where /// /// Elements are visited in arbitrary order. 
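// Sketch of the element-wise mapping and assignment methods above.
use ndarray::arr2;

fn main() {
    let a = arr2(&[[1., 2.], [3., 4.]]);

    // map borrows each element; mapv clones it.
    let doubled = a.mapv(|x| 2. * x);
    assert_eq!(doubled, arr2(&[[2., 4.], [6., 8.]]));

    // zip_mut_with mutates self from a second, broadcast-compatible array.
    let mut b = a.clone();
    b.zip_mut_with(&doubled, |x, &y| *x += y);
    assert_eq!(b, arr2(&[[3., 6.], [9., 12.]]));

    // fill assigns a single value to every element.
    b.fill(0.);
    assert_eq!(b.sum(), 0.);
}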
pub fn mapv_into(mut self, f: F) -> Self - where S: DataMut, - F: FnMut(A) -> A, - A: Clone, + where + S: DataMut, + F: FnMut(A) -> A, + A: Clone, { self.mapv_inplace(f); self @@ -1985,8 +1986,9 @@ where /// /// Elements are visited in arbitrary order. pub fn map_inplace(&mut self, f: F) - where S: DataMut, - F: FnMut(&mut A), + where + S: DataMut, + F: FnMut(&mut A), { self.unordered_foreach_mut(f); } @@ -2008,9 +2010,10 @@ where /// ); /// ``` pub fn mapv_inplace(&mut self, mut f: F) - where S: DataMut, - F: FnMut(A) -> A, - A: Clone, + where + S: DataMut, + F: FnMut(A) -> A, + A: Clone, { self.unordered_foreach_mut(move |x| *x = f(x.clone())); } @@ -2020,9 +2023,10 @@ where /// /// Elements are visited in arbitrary order. pub fn visit<'a, F>(&'a self, mut f: F) - where F: FnMut(&'a A), - A: 'a, - S: Data, + where + F: FnMut(&'a A), + A: 'a, + S: Data, { self.fold((), move |(), elt| f(elt)) } @@ -2035,12 +2039,12 @@ where /// Return the result as an `Array`. /// /// **Panics** if `axis` is out of bounds. - pub fn fold_axis(&self, axis: Axis, init: B, mut fold: F) - -> Array - where D: RemoveAxis, - F: FnMut(&B, &A) -> B, - B: Clone, - S: Data, + pub fn fold_axis(&self, axis: Axis, init: B, mut fold: F) -> Array + where + D: RemoveAxis, + F: FnMut(&B, &A) -> B, + B: Clone, + S: Data, { let mut res = Array::from_elem(self.raw_dim().remove_axis(axis), init); for subview in self.axis_iter(axis) { @@ -2057,21 +2061,19 @@ where /// Return the result as an `Array`. /// /// **Panics** if `axis` is out of bounds. - pub fn map_axis<'a, B, F>(&'a self, axis: Axis, mut mapping: F) - -> Array - where D: RemoveAxis, - F: FnMut(ArrayView1<'a, A>) -> B, - A: 'a, - S: Data, + pub fn map_axis<'a, B, F>(&'a self, axis: Axis, mut mapping: F) -> Array + where + D: RemoveAxis, + F: FnMut(ArrayView1<'a, A>) -> B, + A: 'a, + S: Data, { let view_len = self.len_of(axis); let view_stride = self.strides.axis(axis); // use the 0th subview as a map to each 1d array view extended from // the 0th element. - self.index_axis(axis, 0).map(|first_elt| { - unsafe { - mapping(ArrayView::new_(first_elt, Ix1(view_len), Ix1(view_stride))) - } + self.index_axis(axis, 0).map(|first_elt| unsafe { + mapping(ArrayView::new_(first_elt, Ix1(view_len), Ix1(view_stride))) }) } @@ -2085,21 +2087,24 @@ where /// Return the result as an `Array`. /// /// **Panics** if `axis` is out of bounds. - pub fn map_axis_mut<'a, B, F>(&'a mut self, axis: Axis, mut mapping: F) - -> Array - where D: RemoveAxis, - F: FnMut(ArrayViewMut1<'a, A>) -> B, - A: 'a, - S: DataMut, + pub fn map_axis_mut<'a, B, F>(&'a mut self, axis: Axis, mut mapping: F) -> Array + where + D: RemoveAxis, + F: FnMut(ArrayViewMut1<'a, A>) -> B, + A: 'a, + S: DataMut, { let view_len = self.len_of(axis); let view_stride = self.strides.axis(axis); // use the 0th subview as a map to each 1d array view extended from // the 0th element. - self.index_axis_mut(axis, 0).map_mut(|first_elt: &mut A| { - unsafe { - mapping(ArrayViewMut::new_(first_elt, Ix1(view_len), Ix1(view_stride))) - } - }) + self.index_axis_mut(axis, 0) + .map_mut(|first_elt: &mut A| unsafe { + mapping(ArrayViewMut::new_( + first_elt, + Ix1(view_len), + Ix1(view_stride), + )) + }) } } diff --git a/src/impl_ops.rs b/src/impl_ops.rs index 137d0016d..096734144 100644 --- a/src/impl_ops.rs +++ b/src/impl_ops.rs @@ -29,24 +29,24 @@ use num_complex::Complex; /// This trait ***does not*** limit which elements can be stored in an array in general. 
/// Non-`ScalarOperand` types can still participate in arithmetic as array elements in /// in array-array operations. -pub trait ScalarOperand : 'static + Clone { } -impl ScalarOperand for bool { } -impl ScalarOperand for i8 { } -impl ScalarOperand for u8 { } -impl ScalarOperand for i16 { } -impl ScalarOperand for u16 { } -impl ScalarOperand for i32 { } -impl ScalarOperand for u32 { } -impl ScalarOperand for i64 { } -impl ScalarOperand for u64 { } -impl ScalarOperand for i128 { } -impl ScalarOperand for u128 { } -impl ScalarOperand for isize { } -impl ScalarOperand for usize { } -impl ScalarOperand for f32 { } -impl ScalarOperand for f64 { } -impl ScalarOperand for Complex { } -impl ScalarOperand for Complex { } +pub trait ScalarOperand: 'static + Clone {} +impl ScalarOperand for bool {} +impl ScalarOperand for i8 {} +impl ScalarOperand for u8 {} +impl ScalarOperand for i16 {} +impl ScalarOperand for u16 {} +impl ScalarOperand for i32 {} +impl ScalarOperand for u32 {} +impl ScalarOperand for i64 {} +impl ScalarOperand for u64 {} +impl ScalarOperand for i128 {} +impl ScalarOperand for u128 {} +impl ScalarOperand for isize {} +impl ScalarOperand for usize {} +impl ScalarOperand for f32 {} +impl ScalarOperand for f64 {} +impl ScalarOperand for Complex {} +impl ScalarOperand for Complex {} macro_rules! impl_binary_op( ($trt:ident, $operator:tt, $mth:ident, $iop:tt, $doc:expr) => ( @@ -162,8 +162,12 @@ impl<'a, A, S, D, B> $trt for &'a ArrayBase // Pick the expression $a for commutative and $b for ordered binop macro_rules! if_commutative { - (Commute { $a:expr } or { $b:expr }) => ($a); - (Ordered { $a:expr } or { $b:expr }) => ($b); + (Commute { $a:expr } or { $b:expr }) => { + $a + }; + (Ordered { $a:expr } or { $b:expr }) => { + $b + }; } macro_rules! impl_scalar_lhs_op { @@ -211,13 +215,12 @@ impl<'a, S, D> $trt<&'a ArrayBase> for $scalar ); } - mod arithmetic_ops { use super::*; use crate::imp_prelude::*; - use std::ops::*; use num_complex::Complex; + use std::ops::*; impl_binary_op!(Add, +, add, +=, "addition"); impl_binary_op!(Sub, -, sub, -=, "subtraction"); @@ -282,9 +285,10 @@ mod arithmetic_ops { impl_scalar_lhs_op!(Complex, Ordered, /, Div, div, "division"); impl Neg for ArrayBase - where A: Clone + Neg, - S: DataOwned + DataMut, - D: Dimension + where + A: Clone + Neg, + S: DataOwned + DataMut, + D: Dimension, { type Output = Self; /// Perform an elementwise negation of `self` and return the result. @@ -297,9 +301,10 @@ mod arithmetic_ops { } impl<'a, A, S, D> Neg for &'a ArrayBase - where &'a A: 'a + Neg, - S: Data, - D: Dimension + where + &'a A: 'a + Neg, + S: Data, + D: Dimension, { type Output = Array; /// Perform an elementwise negation of reference `self` and return the @@ -310,9 +315,10 @@ mod arithmetic_ops { } impl Not for ArrayBase - where A: Clone + Not, - S: DataOwned + DataMut, - D: Dimension + where + A: Clone + Not, + S: DataOwned + DataMut, + D: Dimension, { type Output = Self; /// Perform an elementwise unary not of `self` and return the result. @@ -325,9 +331,10 @@ mod arithmetic_ops { } impl<'a, A, S, D> Not for &'a ArrayBase - where &'a A: 'a + Not, - S: Data, - D: Dimension + where + &'a A: 'a + Not, + S: Data, + D: Dimension, { type Output = Array; /// Perform an elementwise unary not of reference `self` and return the @@ -344,60 +351,91 @@ mod assign_ops { macro_rules! 
impl_assign_op { ($trt:ident, $method:ident, $doc:expr) => { - use std::ops::$trt; + use std::ops::$trt; - #[doc=$doc] - /// If their shapes disagree, `rhs` is broadcast to the shape of `self`. - /// - /// **Panics** if broadcasting isn’t possible. - impl<'a, A, S, S2, D, E> $trt<&'a ArrayBase> for ArrayBase - where A: Clone + $trt, - S: DataMut, - S2: Data, - D: Dimension, - E: Dimension, - { - fn $method(&mut self, rhs: &ArrayBase) { - self.zip_mut_with(rhs, |x, y| { - x.$method(y.clone()); - }); - } - } - - #[doc=$doc] - impl $trt for ArrayBase - where A: ScalarOperand + $trt, - S: DataMut, - D: Dimension, - { - fn $method(&mut self, rhs: A) { - self.unordered_foreach_mut(move |elt| { - elt.$method(rhs.clone()); - }); - } - } + #[doc=$doc] + /// If their shapes disagree, `rhs` is broadcast to the shape of `self`. + /// + /// **Panics** if broadcasting isn’t possible. + impl<'a, A, S, S2, D, E> $trt<&'a ArrayBase> for ArrayBase + where + A: Clone + $trt, + S: DataMut, + S2: Data, + D: Dimension, + E: Dimension, + { + fn $method(&mut self, rhs: &ArrayBase) { + self.zip_mut_with(rhs, |x, y| { + x.$method(y.clone()); + }); + } + } + #[doc=$doc] + impl $trt for ArrayBase + where + A: ScalarOperand + $trt, + S: DataMut, + D: Dimension, + { + fn $method(&mut self, rhs: A) { + self.unordered_foreach_mut(move |elt| { + elt.$method(rhs.clone()); + }); + } + } }; } - impl_assign_op!(AddAssign, add_assign, - "Perform `self += rhs` as elementwise addition (in place).\n"); - impl_assign_op!(SubAssign, sub_assign, - "Perform `self -= rhs` as elementwise subtraction (in place).\n"); - impl_assign_op!(MulAssign, mul_assign, - "Perform `self *= rhs` as elementwise multiplication (in place).\n"); - impl_assign_op!(DivAssign, div_assign, - "Perform `self /= rhs` as elementwise division (in place).\n"); - impl_assign_op!(RemAssign, rem_assign, - "Perform `self %= rhs` as elementwise remainder (in place).\n"); - impl_assign_op!(BitAndAssign, bitand_assign, - "Perform `self &= rhs` as elementwise bit and (in place).\n"); - impl_assign_op!(BitOrAssign, bitor_assign, - "Perform `self |= rhs` as elementwise bit or (in place).\n"); - impl_assign_op!(BitXorAssign, bitxor_assign, - "Perform `self ^= rhs` as elementwise bit xor (in place).\n"); - impl_assign_op!(ShlAssign, shl_assign, - "Perform `self <<= rhs` as elementwise left shift (in place).\n"); - impl_assign_op!(ShrAssign, shr_assign, - "Perform `self >>= rhs` as elementwise right shift (in place).\n"); + impl_assign_op!( + AddAssign, + add_assign, + "Perform `self += rhs` as elementwise addition (in place).\n" + ); + impl_assign_op!( + SubAssign, + sub_assign, + "Perform `self -= rhs` as elementwise subtraction (in place).\n" + ); + impl_assign_op!( + MulAssign, + mul_assign, + "Perform `self *= rhs` as elementwise multiplication (in place).\n" + ); + impl_assign_op!( + DivAssign, + div_assign, + "Perform `self /= rhs` as elementwise division (in place).\n" + ); + impl_assign_op!( + RemAssign, + rem_assign, + "Perform `self %= rhs` as elementwise remainder (in place).\n" + ); + impl_assign_op!( + BitAndAssign, + bitand_assign, + "Perform `self &= rhs` as elementwise bit and (in place).\n" + ); + impl_assign_op!( + BitOrAssign, + bitor_assign, + "Perform `self |= rhs` as elementwise bit or (in place).\n" + ); + impl_assign_op!( + BitXorAssign, + bitxor_assign, + "Perform `self ^= rhs` as elementwise bit xor (in place).\n" + ); + impl_assign_op!( + ShlAssign, + shl_assign, + "Perform `self <<= rhs` as elementwise left shift (in place).\n" + ); + 
impl_assign_op!( + ShrAssign, + shr_assign, + "Perform `self >>= rhs` as elementwise right shift (in place).\n" + ); } diff --git a/src/impl_owned_array.rs b/src/impl_owned_array.rs index 7cb64e1ad..35086ffcf 100644 --- a/src/impl_owned_array.rs +++ b/src/impl_owned_array.rs @@ -45,7 +45,8 @@ impl Array { /// /// [`ArrayBase`]: struct.ArrayBase.html impl Array - where D: Dimension +where + D: Dimension, { /// Return a vector of the elements in the array, in the way they are /// stored internally. diff --git a/src/impl_views.rs b/src/impl_views.rs index e4ac909db..98e324418 100644 --- a/src/impl_views.rs +++ b/src/impl_views.rs @@ -8,25 +8,20 @@ use std::slice; -use crate::imp_prelude::*; +use crate::arraytraits::array_out_of_bounds; use crate::dimension; use crate::error::ShapeError; -use crate::arraytraits::array_out_of_bounds; +use crate::imp_prelude::*; use crate::{is_aligned, NdIndex, StrideShape}; -use crate::{ - ElementsBase, - ElementsBaseMut, - Iter, - IterMut, - Baseiter, -}; +use crate::{Baseiter, ElementsBase, ElementsBaseMut, Iter, IterMut}; use crate::iter::{self, AxisIter, AxisIterMut}; /// Methods for read-only array views. impl<'a, A, D> ArrayView<'a, A, D> - where D: Dimension, +where + D: Dimension, { /// Create a read-only array view borrowing its data from a slice. /// @@ -52,9 +47,9 @@ impl<'a, A, D> ArrayView<'a, A, D> /// ); /// assert!(a.strides() == &[1, 4, 2]); /// ``` - pub fn from_shape(shape: Sh, xs: &'a [A]) - -> Result - where Sh: Into>, + pub fn from_shape(shape: Sh, xs: &'a [A]) -> Result + where + Sh: Into>, { // eliminate the type parameter Sh as soon as possible Self::from_shape_impl(shape.into(), xs) @@ -109,7 +104,8 @@ impl<'a, A, D> ArrayView<'a, A, D> /// /// [`.offset()`]: https://doc.rust-lang.org/stable/std/primitive.pointer.html#method.offset pub unsafe fn from_shape_ptr(shape: Sh, ptr: *const A) -> Self - where Sh: Into> + where + Sh: Into>, { RawArrayView::from_shape_ptr(shape, ptr).deref_into_view() } @@ -117,11 +113,10 @@ impl<'a, A, D> ArrayView<'a, A, D> /// Convert the view into an `ArrayView<'b, A, D>` where `'b` is a lifetime /// outlived by `'a'`. pub fn reborrow<'b>(self) -> ArrayView<'b, A, D> - where 'a: 'b + where + 'a: 'b, { - unsafe { - ArrayView::new_(self.as_ptr(), self.dim, self.strides) - } + unsafe { ArrayView::new_(self.as_ptr(), self.dim, self.strides) } } /// Split the array view along `axis` and return one view strictly before the @@ -144,9 +139,7 @@ impl<'a, A, D> ArrayView<'a, A, D> /// Return `None` otherwise. pub fn into_slice(&self) -> Option<&'a [A]> { if self.is_standard_layout() { - unsafe { - Some(slice::from_raw_parts(self.ptr, self.len())) - } + unsafe { Some(slice::from_raw_parts(self.ptr, self.len())) } } else { None } @@ -158,7 +151,6 @@ impl<'a, A, D> ArrayView<'a, A, D> } } - /// Extra indexing methods for array views /// /// These methods are very similar to regular indexing or calling of the @@ -243,8 +235,9 @@ pub trait IndexLonger { } impl<'a, 'b, I, A, D> IndexLonger for &'b ArrayView<'a, A, D> - where I: NdIndex, - D: Dimension, +where + I: NdIndex, + D: Dimension, { type Output = &'a A; @@ -260,19 +253,13 @@ impl<'a, 'b, I, A, D> IndexLonger for &'b ArrayView<'a, A, D> /// [1]: struct.ArrayBase.html#method.get /// /// **Panics** if index is out of bounds. 
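Note (illustrative, not part of the patch): the `impl_assign_op!` invocations above are only re-wrapped by rustfmt; the generated impls still provide in-place array–array arithmetic (with broadcasting) and array–scalar arithmetic for `ScalarOperand` element types, e.g.:

```rust
use ndarray::{arr2, Array2};

fn main() {
    let mut a = Array2::<f64>::ones((2, 2));
    let b = arr2(&[[1., 2.], [3., 4.]]);

    // Array += &Array: elementwise, `b` is broadcast to `a`'s shape if needed.
    a += &b;

    // Array *= scalar: `f64` is a `ScalarOperand`, so it applies to every element.
    a *= 2.;

    assert_eq!(a, arr2(&[[4., 6.], [8., 10.]]));
}
```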
- fn index(self, index: I) -> &'a A - { + fn index(self, index: I) -> &'a A { debug_bounds_check!(self, index); - unsafe { - &*self.get_ptr(index).unwrap_or_else(|| array_out_of_bounds()) - } + unsafe { &*self.get_ptr(index).unwrap_or_else(|| array_out_of_bounds()) } } - fn get(self, index: I) -> Option<&'a A> - { - unsafe { - self.get_ptr(index).map(|ptr| &*ptr) - } + fn get(self, index: I) -> Option<&'a A> { + unsafe { self.get_ptr(index).map(|ptr| &*ptr) } } /// Get a reference of a element through the view without boundary check @@ -286,8 +273,7 @@ impl<'a, 'b, I, A, D> IndexLonger for &'b ArrayView<'a, A, D> /// [1]: struct.ArrayBase.html#method.uget /// /// **Note:** only unchecked for non-debug builds of ndarray. - unsafe fn uget(self, index: I) -> &'a A - { + unsafe fn uget(self, index: I) -> &'a A { debug_bounds_check!(self, index); &*self.as_ptr().offset(index.index_unchecked(&self.strides)) } @@ -295,7 +281,8 @@ impl<'a, 'b, I, A, D> IndexLonger for &'b ArrayView<'a, A, D> /// Methods for read-write array views. impl<'a, A, D> ArrayViewMut<'a, A, D> - where D: Dimension, +where + D: Dimension, { /// Create a read-write array view borrowing its data from a slice. /// @@ -322,9 +309,9 @@ impl<'a, A, D> ArrayViewMut<'a, A, D> /// ); /// assert!(a.strides() == &[1, 4, 2]); /// ``` - pub fn from_shape(shape: Sh, xs: &'a mut [A]) - -> Result - where Sh: Into>, + pub fn from_shape(shape: Sh, xs: &'a mut [A]) -> Result + where + Sh: Into>, { // eliminate the type parameter Sh as soon as possible Self::from_shape_impl(shape.into(), xs) @@ -379,7 +366,8 @@ impl<'a, A, D> ArrayViewMut<'a, A, D> /// /// [`.offset()`]: https://doc.rust-lang.org/stable/std/primitive.pointer.html#method.offset pub unsafe fn from_shape_ptr(shape: Sh, ptr: *mut A) -> Self - where Sh: Into> + where + Sh: Into>, { RawArrayViewMut::from_shape_ptr(shape, ptr).deref_into_view_mut() } @@ -387,11 +375,10 @@ impl<'a, A, D> ArrayViewMut<'a, A, D> /// Convert the view into an `ArrayViewMut<'b, A, D>` where `'b` is a lifetime /// outlived by `'a'`. pub fn reborrow<'b>(mut self) -> ArrayViewMut<'b, A, D> - where 'a: 'b + where + 'a: 'b, { - unsafe { - ArrayViewMut::new_(self.as_mut_ptr(), self.dim, self.strides) - } + unsafe { ArrayViewMut::new_(self.as_mut_ptr(), self.dim, self.strides) } } /// Split the array view along `axis` and return one mutable view strictly @@ -410,12 +397,12 @@ impl<'a, A, D> ArrayViewMut<'a, A, D> pub fn into_slice(self) -> Option<&'a mut [A]> { self.into_slice_().ok() } - } impl<'a, I, A, D> IndexLonger for ArrayViewMut<'a, A, D> - where I: NdIndex, - D: Dimension, +where + I: NdIndex, + D: Dimension, { type Output = &'a mut A; @@ -470,13 +457,16 @@ impl<'a, I, A, D> IndexLonger for ArrayViewMut<'a, A, D> /// **Note:** only unchecked for non-debug builds of ndarray. unsafe fn uget(mut self, index: I) -> &'a mut A { debug_bounds_check!(self, index); - &mut *self.as_mut_ptr().offset(index.index_unchecked(&self.strides)) + &mut *self + .as_mut_ptr() + .offset(index.index_unchecked(&self.strides)) } } /// Private array view methods impl<'a, A, D> ArrayView<'a, A, D> - where D: Dimension, +where + D: Dimension, { /// Create a new `ArrayView` /// @@ -493,9 +483,7 @@ impl<'a, A, D> ArrayView<'a, A, D> #[inline] pub(crate) fn into_base_iter(self) -> Baseiter { - unsafe { - Baseiter::new(self.ptr, self.dim, self.strides) - } + unsafe { Baseiter::new(self.ptr, self.dim, self.strides) } } #[inline] @@ -509,17 +497,18 @@ impl<'a, A, D> ArrayView<'a, A, D> /// Return an outer iterator for this view. 
#[doc(hidden)] // not official - #[deprecated(note="This method will be replaced.")] + #[deprecated(note = "This method will be replaced.")] pub fn into_outer_iter(self) -> iter::AxisIter<'a, A, D::Smaller> - where D: RemoveAxis, + where + D: RemoveAxis, { AxisIter::new(self, Axis(0)) } - } impl<'a, A, D> ArrayViewMut<'a, A, D> - where D: Dimension, +where + D: Dimension, { /// Create a new `ArrayView` /// @@ -541,9 +530,7 @@ impl<'a, A, D> ArrayViewMut<'a, A, D> // Convert into a read-only view pub(crate) fn into_view(self) -> ArrayView<'a, A, D> { - unsafe { - ArrayView::new_(self.ptr, self.dim, self.strides) - } + unsafe { ArrayView::new_(self.ptr, self.dim, self.strides) } } /// Converts to a mutable raw array view. @@ -553,9 +540,7 @@ impl<'a, A, D> ArrayViewMut<'a, A, D> #[inline] pub(crate) fn into_base_iter(self) -> Baseiter { - unsafe { - Baseiter::new(self.ptr, self.dim, self.strides) - } + unsafe { Baseiter::new(self.ptr, self.dim, self.strides) } } #[inline] @@ -565,9 +550,7 @@ impl<'a, A, D> ArrayViewMut<'a, A, D> pub(crate) fn into_slice_(self) -> Result<&'a mut [A], Self> { if self.is_standard_layout() { - unsafe { - Ok(slice::from_raw_parts_mut(self.ptr, self.len())) - } + unsafe { Ok(slice::from_raw_parts_mut(self.ptr, self.len())) } } else { Err(self) } @@ -579,11 +562,11 @@ impl<'a, A, D> ArrayViewMut<'a, A, D> /// Return an outer iterator for this view. #[doc(hidden)] // not official - #[deprecated(note="This method will be replaced.")] + #[deprecated(note = "This method will be replaced.")] pub fn into_outer_iter(self) -> iter::AxisIterMut<'a, A, D::Smaller> - where D: RemoveAxis, + where + D: RemoveAxis, { AxisIterMut::new(self, Axis(0)) } } - diff --git a/src/indexes.rs b/src/indexes.rs index 3c83ad510..ee5e5e9e5 100644 --- a/src/indexes.rs +++ b/src/indexes.rs @@ -5,13 +5,13 @@ // , at your // option. This file may not be copied, modified, or distributed // except according to those terms. -use crate::{ArrayBase, Data}; use super::Dimension; use crate::dimension::IntoDimension; +use crate::zip::{Offset, Splittable}; use crate::Axis; use crate::Layout; use crate::NdProducer; -use crate::zip::{Offset, Splittable}; +use crate::{ArrayBase, Data}; /// An iterator over the indexes of an array shape. /// @@ -27,7 +27,8 @@ pub struct IndicesIter { /// *Note:* prefer higher order methods, arithmetic operations and /// non-indexed iteration before using indices. pub fn indices(shape: E) -> Indices - where E: IntoDimension, +where + E: IntoDimension, { let dim = shape.into_dimension(); Indices { @@ -41,13 +42,16 @@ pub fn indices(shape: E) -> Indices /// *Note:* prefer higher order methods, arithmetic operations and /// non-indexed iteration before using indices. 
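Note (illustrative, not part of the patch): `indices`/`indices_of`, touched in the hunks around here, produce index patterns in logical (row-major) order; as the doc comment says, prefer non-indexed iteration where possible. A quick sketch:

```rust
use ndarray::indices;

fn main() {
    // `indices` yields index tuples in row-major (logical) order.
    let mut order = Vec::new();
    for (i, j) in indices((2, 3)) {
        order.push((i, j));
    }
    assert_eq!(order.len(), 6);
    assert_eq!(order[0], (0, 0));
    assert_eq!(order[5], (1, 2));
}
```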
pub fn indices_of(array: &ArrayBase) -> Indices - where S: Data, D: Dimension, +where + S: Data, + D: Dimension, { indices(array.dim()) } impl Iterator for IndicesIter - where D: Dimension, +where + D: Dimension, { type Item = D::Pattern; #[inline] @@ -64,12 +68,13 @@ impl Iterator for IndicesIter let l = match self.index { None => 0, Some(ref ix) => { - let gone = self.dim - .default_strides() - .slice() - .iter() - .zip(ix.slice().iter()) - .fold(0, |s, (&a, &b)| s + a as usize * b as usize); + let gone = self + .dim + .default_strides() + .slice() + .iter() + .zip(ix.slice().iter()) + .fold(0, |s, (&a, &b)| s + a as usize * b as usize); self.dim.size() - gone } }; @@ -77,12 +82,11 @@ impl Iterator for IndicesIter } } -impl ExactSizeIterator for IndicesIter - where D: Dimension -{} +impl ExactSizeIterator for IndicesIter where D: Dimension {} impl IntoIterator for Indices - where D: Dimension +where + D: Dimension, { type Item = D::Pattern; type IntoIter = IndicesIter; @@ -101,7 +105,8 @@ impl IntoIterator for Indices /// `Indices` is an `NdProducer` that produces the indices of an array shape. #[derive(Copy, Clone, Debug)] pub struct Indices - where D: Dimension +where + D: Dimension, { start: D, dim: D, @@ -113,7 +118,8 @@ pub struct IndexPtr { } impl Offset for IndexPtr - where D: Dimension + Copy, +where + D: Dimension + Copy, { // stride: The axis to increment type Stride = usize; @@ -122,7 +128,7 @@ impl Offset for IndexPtr self.index[stride] += index; self } - private_impl!{} + private_impl! {} } impl NdProducer for Indices { @@ -131,7 +137,7 @@ impl NdProducer for Indices { type Ptr = IndexPtr; type Stride = usize; - private_impl!{} + private_impl! {} #[doc(hidden)] fn raw_dim(&self) -> Self::Dim { @@ -145,9 +151,7 @@ impl NdProducer for Indices { #[doc(hidden)] fn as_ptr(&self) -> Self::Ptr { - IndexPtr { - index: self.start, - } + IndexPtr { index: self.start } } #[doc(hidden)] @@ -177,7 +181,9 @@ impl NdProducer for Indices { } #[inline(always)] - fn contiguous_stride(&self) -> Self::Stride { 0 } + fn contiguous_stride(&self) -> Self::Stride { + 0 + } #[doc(hidden)] fn split_at(self, axis: Axis, index: usize) -> (Self, Self) { @@ -185,14 +191,16 @@ impl NdProducer for Indices { let mut start_b = start_a; let (a, b) = self.dim.split_at(axis, index); start_b[axis.index()] += index; - (Indices { - start: start_a, - dim: a, - }, - Indices { - start: start_b, - dim: b, - }) + ( + Indices { + start: start_a, + dim: a, + }, + Indices { + start: start_b, + dim: b, + }, + ) } } @@ -207,7 +215,8 @@ pub struct IndicesIterF { } pub fn indices_iter_f(shape: E) -> IndicesIterF - where E: IntoDimension, +where + E: IntoDimension, { let dim = shape.into_dimension(); let zero = E::Dim::zeros(dim.ndim()); @@ -219,7 +228,8 @@ pub fn indices_iter_f(shape: E) -> IndicesIterF } impl Iterator for IndicesIterF - where D: Dimension, +where + D: Dimension, { type Item = D::Pattern; #[inline] @@ -239,12 +249,13 @@ impl Iterator for IndicesIterF } let l = match self.index { ref ix => { - let gone = self.dim - .fortran_strides() - .slice() - .iter() - .zip(ix.slice().iter()) - .fold(0, |s, (&a, &b)| s + a as usize * b as usize); + let gone = self + .dim + .fortran_strides() + .slice() + .iter() + .zip(ix.slice().iter()) + .fold(0, |s, (&a, &b)| s + a as usize * b as usize); self.dim.size() - gone } }; @@ -252,10 +263,7 @@ impl Iterator for IndicesIterF } } -impl ExactSizeIterator for IndicesIterF - where D: Dimension -{} - +impl ExactSizeIterator for IndicesIterF where D: Dimension {} #[cfg(test)] mod 
tests { diff --git a/src/iterators/chunks.rs b/src/iterators/chunks.rs index a56914e3d..19c7529be 100644 --- a/src/iterators/chunks.rs +++ b/src/iterators/chunks.rs @@ -1,9 +1,8 @@ - use crate::imp_prelude::*; -use crate::IntoDimension; -use crate::{NdProducer, Layout}; use crate::ElementsBase; use crate::ElementsBaseMut; +use crate::IntoDimension; +use crate::{Layout, NdProducer}; impl_ndproducer! { ['a, A, D: Dimension] @@ -47,10 +46,16 @@ impl<'a, A, D: Dimension> ExactChunks<'a, A, D> { E: IntoDimension, { let chunk = chunk.into_dimension(); - ndassert!(a.ndim() == chunk.ndim(), - concat!("Chunk dimension {} does not match array dimension {} ", - "(with array of shape {:?})"), - chunk.ndim(), a.ndim(), a.shape()); + ndassert!( + a.ndim() == chunk.ndim(), + concat!( + "Chunk dimension {} does not match array dimension {} ", + "(with array of shape {:?})" + ), + chunk.ndim(), + a.ndim(), + a.shape() + ); for i in 0..a.ndim() { a.dim[i] /= chunk[i]; } @@ -66,8 +71,9 @@ impl<'a, A, D: Dimension> ExactChunks<'a, A, D> { } impl<'a, A, D> IntoIterator for ExactChunks<'a, A, D> - where D: Dimension, - A: 'a, +where + D: Dimension, + A: 'a, { type Item = ::Item; type IntoIter = ExactChunksIter<'a, A, D>; @@ -130,10 +136,16 @@ impl<'a, A, D: Dimension> ExactChunksMut<'a, A, D> { E: IntoDimension, { let chunk = chunk.into_dimension(); - ndassert!(a.ndim() == chunk.ndim(), - concat!("Chunk dimension {} does not match array dimension {} ", - "(with array of shape {:?})"), - chunk.ndim(), a.ndim(), a.shape()); + ndassert!( + a.ndim() == chunk.ndim(), + concat!( + "Chunk dimension {} does not match array dimension {} ", + "(with array of shape {:?})" + ), + chunk.ndim(), + a.ndim(), + a.shape() + ); for i in 0..a.ndim() { a.dim[i] /= chunk[i]; } @@ -149,8 +161,9 @@ impl<'a, A, D: Dimension> ExactChunksMut<'a, A, D> { } impl<'a, A, D> IntoIterator for ExactChunksMut<'a, A, D> - where D: Dimension, - A: 'a, +where + D: Dimension, + A: 'a, { type Item = ::Item; type IntoIter = ExactChunksIterMut<'a, A, D>; @@ -163,8 +176,7 @@ impl<'a, A, D> IntoIterator for ExactChunksMut<'a, A, D> } } - -impl_iterator!{ +impl_iterator! { ['a, A, D: Dimension] [Clone => 'a, A, D: Clone] ExactChunksIter { @@ -186,7 +198,7 @@ impl_iterator!{ } } -impl_iterator!{ +impl_iterator! { ['a, A, D: Dimension] [Clone => ] ExactChunksIterMut { diff --git a/src/iterators/iter.rs b/src/iterators/iter.rs index 6a4563bd9..7352e5e18 100644 --- a/src/iterators/iter.rs +++ b/src/iterators/iter.rs @@ -1,4 +1,3 @@ - //! Producers, iterables and iterators. //! //! This module collects all concrete producer, iterable and iterator @@ -7,30 +6,10 @@ //! //! See also [`NdProducer`](../trait.NdProducer.html). 
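Note (illustrative, not part of the patch): the reformatted `ndassert!` in the `ExactChunks`/`ExactChunksMut` constructors fires when the chunk shape's dimensionality does not match the array's. Correct usage through the public `exact_chunks` method (method name taken from the crate's API, shown here as an assumption) looks like:

```rust
use ndarray::Array2;

fn main() {
    let a = Array2::from_shape_fn((4, 4), |(i, j)| (i * 4 + j) as i32);

    // Non-overlapping 2×2 chunks; the chunk shape must have the same ndim as `a`.
    let mut n = 0;
    for chunk in a.exact_chunks((2, 2)) {
        assert!(chunk.shape() == &[2, 2]);
        n += 1;
    }
    assert_eq!(n, 4);
}
```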
- -pub use crate::dimension::{ - Axes, -}; -pub use crate::indexes::{ - Indices, - IndicesIter, -}; +pub use crate::dimension::Axes; +pub use crate::indexes::{Indices, IndicesIter}; pub use crate::iterators::{ - Iter, - IterMut, - IndexedIter, - IndexedIterMut, - Lanes, - LanesMut, - LanesIter, - LanesIterMut, - AxisIter, - AxisIterMut, - AxisChunksIter, - AxisChunksIterMut, - ExactChunks, - ExactChunksIter, - ExactChunksMut, - ExactChunksIterMut, - Windows + AxisChunksIter, AxisChunksIterMut, AxisIter, AxisIterMut, ExactChunks, ExactChunksIter, + ExactChunksIterMut, ExactChunksMut, IndexedIter, IndexedIterMut, Iter, IterMut, Lanes, + LanesIter, LanesIterMut, LanesMut, Windows, }; diff --git a/src/iterators/lanes.rs b/src/iterators/lanes.rs index 34d25a323..b847b43db 100644 --- a/src/iterators/lanes.rs +++ b/src/iterators/lanes.rs @@ -1,9 +1,9 @@ use std::marker::PhantomData; -use crate::imp_prelude::*; -use crate::{NdProducer, Layout}; use super::LanesIter; use super::LanesIterMut; +use crate::imp_prelude::*; +use crate::{Layout, NdProducer}; impl_ndproducer! { ['a, A, D: Dimension] @@ -77,7 +77,8 @@ impl_ndproducer! { } impl<'a, A, D> IntoIterator for Lanes<'a, A, D> - where D: Dimension, +where + D: Dimension, { type Item = ::Item; type IntoIter = LanesIter<'a, A, D>; @@ -127,7 +128,8 @@ impl<'a, A, D: Dimension> LanesMut<'a, A, D> { } impl<'a, A, D> IntoIterator for LanesMut<'a, A, D> - where D: Dimension, +where + D: Dimension, { type Item = ::Item; type IntoIter = LanesIterMut<'a, A, D>; diff --git a/src/iterators/macros.rs b/src/iterators/macros.rs index bf54f3567..d3a54453e 100644 --- a/src/iterators/macros.rs +++ b/src/iterators/macros.rs @@ -1,21 +1,40 @@ - // Send and Sync // All the iterators are thread safe the same way the slice's iterator are // read-only iterators use Sync => Send rules, same as `std::slice::Iter`. macro_rules! send_sync_read_only { ($name:ident) => { - unsafe impl<'a, A, D> Send for $name<'a, A, D> where A: Sync, D: Send { } - unsafe impl<'a, A, D> Sync for $name<'a, A, D> where A: Sync, D: Sync { } - } + unsafe impl<'a, A, D> Send for $name<'a, A, D> + where + A: Sync, + D: Send, + { + } + unsafe impl<'a, A, D> Sync for $name<'a, A, D> + where + A: Sync, + D: Sync, + { + } + }; } // read-write iterators use Send => Send rules, same as `std::slice::IterMut`. macro_rules! send_sync_read_write { ($name:ident) => { - unsafe impl<'a, A, D> Send for $name<'a, A, D> where A: Send, D: Send { } - unsafe impl<'a, A, D> Sync for $name<'a, A, D> where A: Sync, D: Sync { } - } + unsafe impl<'a, A, D> Send for $name<'a, A, D> + where + A: Send, + D: Send, + { + } + unsafe impl<'a, A, D> Sync for $name<'a, A, D> + where + A: Sync, + D: Sync, + { + } + }; } macro_rules! impl_ndproducer { diff --git a/src/iterators/mod.rs b/src/iterators/mod.rs index b9aab1aa9..e1eae220e 100644 --- a/src/iterators/mod.rs +++ b/src/iterators/mod.rs @@ -6,40 +6,24 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
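Note (illustrative, not part of the patch): `Lanes`/`LanesMut`, reformatted above, back the public `lanes`/`lanes_mut` methods; each lane is a 1-D view along the chosen axis:

```rust
use ndarray::{arr2, Axis};

fn main() {
    let a = arr2(&[[0., 1., 2.], [3., 4., 5.]]);

    // Each lane along axis 1 is a row view of length 3.
    let mut sums = Vec::new();
    for lane in a.lanes(Axis(1)) {
        sums.push(lane.fold(0., |s, &x| s + x));
    }
    assert_eq!(sums, vec![3., 12.]);
}
```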
- -#[macro_use] mod macros; +#[macro_use] +mod macros; mod chunks; -mod windows; -mod lanes; pub mod iter; +mod lanes; +mod windows; use std::marker::PhantomData; use std::ptr; use crate::Ix1; +use super::{ArrayBase, ArrayView, ArrayViewMut, Axis, Data, NdProducer, RemoveAxis}; use super::{Dimension, Ix, Ixs}; -use super::{ - ArrayBase, - Data, - ArrayView, - ArrayViewMut, - RemoveAxis, - Axis, - NdProducer, -}; +pub use self::chunks::{ExactChunks, ExactChunksIter, ExactChunksIterMut, ExactChunksMut}; +pub use self::lanes::{Lanes, LanesMut}; pub use self::windows::Windows; -pub use self::chunks::{ - ExactChunks, - ExactChunksIter, - ExactChunksMut, - ExactChunksIterMut, -}; -pub use self::lanes::{ - Lanes, - LanesMut, -}; use std::slice::{self, Iter as SliceIter, IterMut as SliceIterMut}; @@ -53,7 +37,6 @@ pub struct Baseiter { index: Option, } - impl Baseiter { /// Creating a Baseiter is unsafe because shape and stride parameters need /// to be correct to avoid performing an unsafe pointer offset while @@ -89,7 +72,8 @@ impl Iterator for Baseiter { } fn fold(mut self, init: Acc, mut g: G) -> Acc - where G: FnMut(Acc, *mut A) -> Acc, + where + G: FnMut(Acc, *mut A) -> Acc, { let ndim = self.dim.ndim(); debug_assert_ne!(ndim, 0); @@ -121,12 +105,13 @@ impl<'a, A, D: Dimension> ExactSizeIterator for Baseiter { match self.index { None => 0, Some(ref ix) => { - let gone = self.dim - .default_strides() - .slice() - .iter() - .zip(ix.slice().iter()) - .fold(0, |s, (&a, &b)| s + a as usize * b as usize); + let gone = self + .dim + .default_strides() + .slice() + .iter() + .zip(ix.slice().iter()) + .fold(0, |s, (&a, &b)| s + a as usize * b as usize); self.dim.size() - gone } } @@ -150,7 +135,8 @@ impl DoubleEndedIterator for Baseiter { } fn rfold(mut self, init: Acc, mut g: G) -> Acc - where G: FnMut(Acc, *mut A) -> Acc, + where + G: FnMut(Acc, *mut A) -> Acc, { let mut accum = init; if let Some(index) = self.index { @@ -159,7 +145,11 @@ impl DoubleEndedIterator for Baseiter { // self.dim[0] is the current length while self.dim[0] > elem_index { self.dim[0] -= 1; - accum = g(accum, self.ptr.offset(Ix1::stride_offset(&self.dim, &self.strides))); + accum = g( + accum, + self.ptr + .offset(Ix1::stride_offset(&self.dim, &self.strides)), + ); } } } @@ -210,11 +200,10 @@ impl<'a, A, D: Dimension> Iterator for ElementsBase<'a, A, D> { } fn fold(self, init: Acc, mut g: G) -> Acc - where G: FnMut(Acc, Self::Item) -> Acc, + where + G: FnMut(Acc, Self::Item) -> Acc, { - unsafe { - self.inner.fold(init, move |acc, ptr| g(acc, &*ptr)) - } + unsafe { self.inner.fold(init, move |acc, ptr| g(acc, &*ptr)) } } } @@ -225,16 +214,16 @@ impl<'a, A> DoubleEndedIterator for ElementsBase<'a, A, Ix1> { } fn rfold(self, init: Acc, mut g: G) -> Acc - where G: FnMut(Acc, Self::Item) -> Acc, + where + G: FnMut(Acc, Self::Item) -> Acc, { - unsafe { - self.inner.rfold(init, move |acc, ptr| g(acc, &*ptr)) - } + unsafe { self.inner.rfold(init, move |acc, ptr| g(acc, &*ptr)) } } } impl<'a, A, D> ExactSizeIterator for ElementsBase<'a, A, D> - where D: Dimension +where + D: Dimension, { fn len(&self) -> usize { self.inner.len() @@ -242,21 +231,21 @@ impl<'a, A, D> ExactSizeIterator for ElementsBase<'a, A, D> } macro_rules! either { - ($value:expr, $inner:pat => $result:expr) => ( + ($value:expr, $inner:pat => $result:expr) => { match $value { ElementsRepr::Slice($inner) => $result, ElementsRepr::Counted($inner) => $result, } - ) + }; } macro_rules! 
either_mut { - ($value:expr, $inner:ident => $result:expr) => ( + ($value:expr, $inner:ident => $result:expr) => { match $value { ElementsRepr::Slice(ref mut $inner) => $result, ElementsRepr::Counted(ref mut $inner) => $result, } - ) + }; } clone_bounds!( @@ -269,7 +258,8 @@ clone_bounds!( ); impl<'a, A, D> Iter<'a, A, D> - where D: Dimension +where + D: Dimension, { pub(crate) fn new(self_: ArrayView<'a, A, D>) -> Self { Iter { @@ -282,18 +272,16 @@ impl<'a, A, D> Iter<'a, A, D> } } - - impl<'a, A, D> IterMut<'a, A, D> - where D: Dimension +where + D: Dimension, { pub(crate) fn new(self_: ArrayViewMut<'a, A, D>) -> Self { IterMut { - inner: - match self_.into_slice_() { + inner: match self_.into_slice_() { Ok(x) => ElementsRepr::Slice(x.into_iter()), Err(self_) => ElementsRepr::Counted(self_.into_elements_base()), - } + }, } } } @@ -356,7 +344,8 @@ pub struct IndexedIter<'a, A: 'a, D>(ElementsBase<'a, A, D>); pub struct IndexedIterMut<'a, A: 'a, D>(ElementsBaseMut<'a, A, D>); impl<'a, A, D> IndexedIter<'a, A, D> - where D: Dimension +where + D: Dimension, { pub(crate) fn new(x: ElementsBase<'a, A, D>) -> Self { IndexedIter(x) @@ -364,14 +353,14 @@ impl<'a, A, D> IndexedIter<'a, A, D> } impl<'a, A, D> IndexedIterMut<'a, A, D> - where D: Dimension +where + D: Dimension, { pub(crate) fn new(x: ElementsBaseMut<'a, A, D>) -> Self { IndexedIterMut(x) } } - impl<'a, A, D: Dimension> Iterator for Iter<'a, A, D> { type Item = &'a A; #[inline] @@ -384,7 +373,8 @@ impl<'a, A, D: Dimension> Iterator for Iter<'a, A, D> { } fn fold(self, init: Acc, g: G) -> Acc - where G: FnMut(Acc, Self::Item) -> Acc + where + G: FnMut(Acc, Self::Item) -> Acc, { either!(self.inner, iter => iter.fold(init, g)) } @@ -397,21 +387,22 @@ impl<'a, A> DoubleEndedIterator for Iter<'a, A, Ix1> { } fn rfold(self, init: Acc, g: G) -> Acc - where G: FnMut(Acc, Self::Item) -> Acc + where + G: FnMut(Acc, Self::Item) -> Acc, { either!(self.inner, iter => iter.rfold(init, g)) } } impl<'a, A, D> ExactSizeIterator for Iter<'a, A, D> - where D: Dimension +where + D: Dimension, { fn len(&self) -> usize { either!(self.inner, ref iter => iter.len()) } } - impl<'a, A, D: Dimension> Iterator for IndexedIter<'a, A, D> { type Item = (D::Pattern, &'a A); #[inline] @@ -432,7 +423,8 @@ impl<'a, A, D: Dimension> Iterator for IndexedIter<'a, A, D> { } impl<'a, A, D> ExactSizeIterator for IndexedIter<'a, A, D> - where D: Dimension +where + D: Dimension, { fn len(&self) -> usize { self.0.inner.len() @@ -451,7 +443,8 @@ impl<'a, A, D: Dimension> Iterator for IterMut<'a, A, D> { } fn fold(self, init: Acc, g: G) -> Acc - where G: FnMut(Acc, Self::Item) -> Acc + where + G: FnMut(Acc, Self::Item) -> Acc, { either!(self.inner, iter => iter.fold(init, g)) } @@ -464,14 +457,16 @@ impl<'a, A> DoubleEndedIterator for IterMut<'a, A, Ix1> { } fn rfold(self, init: Acc, g: G) -> Acc - where G: FnMut(Acc, Self::Item) -> Acc + where + G: FnMut(Acc, Self::Item) -> Acc, { either!(self.inner, iter => iter.rfold(init, g)) } } impl<'a, A, D> ExactSizeIterator for IterMut<'a, A, D> - where D: Dimension +where + D: Dimension, { fn len(&self) -> usize { either!(self.inner, ref iter => iter.len()) @@ -490,11 +485,10 @@ impl<'a, A, D: Dimension> Iterator for ElementsBaseMut<'a, A, D> { } fn fold(self, init: Acc, mut g: G) -> Acc - where G: FnMut(Acc, Self::Item) -> Acc + where + G: FnMut(Acc, Self::Item) -> Acc, { - unsafe { - self.inner.fold(init, move |acc, ptr| g(acc, &mut *ptr)) - } + unsafe { self.inner.fold(init, move |acc, ptr| g(acc, &mut *ptr)) } } } @@ -505,23 
+499,22 @@ impl<'a, A> DoubleEndedIterator for ElementsBaseMut<'a, A, Ix1> { } fn rfold(self, init: Acc, mut g: G) -> Acc - where G: FnMut(Acc, Self::Item) -> Acc + where + G: FnMut(Acc, Self::Item) -> Acc, { - unsafe { - self.inner.rfold(init, move |acc, ptr| g(acc, &mut *ptr)) - } + unsafe { self.inner.rfold(init, move |acc, ptr| g(acc, &mut *ptr)) } } } impl<'a, A, D> ExactSizeIterator for ElementsBaseMut<'a, A, D> - where D: Dimension +where + D: Dimension, { fn len(&self) -> usize { self.inner.len() } } - impl<'a, A, D: Dimension> Iterator for IndexedIterMut<'a, A, D> { type Item = (D::Pattern, &'a mut A); #[inline] @@ -542,7 +535,8 @@ impl<'a, A, D: Dimension> Iterator for IndexedIterMut<'a, A, D> { } impl<'a, A, D> ExactSizeIterator for IndexedIterMut<'a, A, D> - where D: Dimension +where + D: Dimension, { fn len(&self) -> usize { self.0.inner.len() @@ -573,12 +567,13 @@ clone_bounds!( ); impl<'a, A, D> Iterator for LanesIter<'a, A, D> - where D: Dimension +where + D: Dimension, { type Item = ArrayView<'a, A, Ix1>; fn next(&mut self) -> Option { - self.iter.next().map(|ptr| { - unsafe { ArrayView::new_(ptr, Ix1(self.inner_len), Ix1(self.inner_stride as Ix)) } + self.iter.next().map(|ptr| unsafe { + ArrayView::new_(ptr, Ix1(self.inner_len), Ix1(self.inner_stride as Ix)) }) } @@ -588,7 +583,8 @@ impl<'a, A, D> Iterator for LanesIter<'a, A, D> } impl<'a, A, D> ExactSizeIterator for LanesIter<'a, A, D> - where D: Dimension +where + D: Dimension, { fn len(&self) -> usize { self.iter.len() @@ -611,14 +607,13 @@ pub struct LanesIterMut<'a, A: 'a, D> { } impl<'a, A, D> Iterator for LanesIterMut<'a, A, D> - where D: Dimension, +where + D: Dimension, { type Item = ArrayViewMut<'a, A, Ix1>; fn next(&mut self) -> Option { - self.iter.next().map(|ptr| { - unsafe { - ArrayViewMut::new_(ptr, Ix1(self.inner_len), Ix1(self.inner_stride as Ix)) - } + self.iter.next().map(|ptr| unsafe { + ArrayViewMut::new_(ptr, Ix1(self.inner_len), Ix1(self.inner_stride as Ix)) }) } @@ -628,7 +623,8 @@ impl<'a, A, D> Iterator for LanesIterMut<'a, A, D> } impl<'a, A, D> ExactSizeIterator for LanesIterMut<'a, A, D> - where D: Dimension, +where + D: Dimension, { fn len(&self) -> usize { self.iter.len() @@ -679,8 +675,13 @@ impl AxisIterCore { } unsafe fn offset(&self, index: usize) -> *mut A { - debug_assert!(index <= self.len, - "index={}, len={}, stride={}", index, self.len, self.stride); + debug_assert!( + index <= self.len, + "index={}, len={}, stride={}", + index, + self.len, + self.stride + ); self.ptr.offset(index as isize * self.stride) } @@ -715,7 +716,8 @@ impl AxisIterCore { } impl Iterator for AxisIterCore - where D: Dimension, +where + D: Dimension, { type Item = *mut A; @@ -736,7 +738,8 @@ impl Iterator for AxisIterCore } impl DoubleEndedIterator for AxisIterCore - where D: Dimension, +where + D: Dimension, { fn next_back(&mut self) -> Option { if self.index >= self.len { @@ -810,16 +813,13 @@ impl<'a, A, D: Dimension> AxisIter<'a, A, D> { } impl<'a, A, D> Iterator for AxisIter<'a, A, D> - where D: Dimension +where + D: Dimension, { type Item = ArrayView<'a, A, D>; fn next(&mut self) -> Option { - self.iter.next().map(|ptr| { - unsafe { - self.as_ref(ptr) - } - }) + self.iter.next().map(|ptr| unsafe { self.as_ref(ptr) }) } fn size_hint(&self) -> (usize, Option) { @@ -828,19 +828,17 @@ impl<'a, A, D> Iterator for AxisIter<'a, A, D> } impl<'a, A, D> DoubleEndedIterator for AxisIter<'a, A, D> - where D: Dimension +where + D: Dimension, { fn next_back(&mut self) -> Option { - self.iter.next_back().map(|ptr| { 
- unsafe { - self.as_ref(ptr) - } - }) + self.iter.next_back().map(|ptr| unsafe { self.as_ref(ptr) }) } } impl<'a, A, D> ExactSizeIterator for AxisIter<'a, A, D> - where D: Dimension +where + D: Dimension, { fn len(&self) -> usize { self.size_hint().0 @@ -897,16 +895,13 @@ impl<'a, A, D: Dimension> AxisIterMut<'a, A, D> { } impl<'a, A, D> Iterator for AxisIterMut<'a, A, D> - where D: Dimension +where + D: Dimension, { type Item = ArrayViewMut<'a, A, D>; fn next(&mut self) -> Option { - self.iter.next().map(|ptr| { - unsafe { - self.as_ref(ptr) - } - }) + self.iter.next().map(|ptr| unsafe { self.as_ref(ptr) }) } fn size_hint(&self) -> (usize, Option) { @@ -915,27 +910,24 @@ impl<'a, A, D> Iterator for AxisIterMut<'a, A, D> } impl<'a, A, D> DoubleEndedIterator for AxisIterMut<'a, A, D> - where D: Dimension +where + D: Dimension, { fn next_back(&mut self) -> Option { - self.iter.next_back().map(|ptr| { - unsafe { - self.as_ref(ptr) - } - }) + self.iter.next_back().map(|ptr| unsafe { self.as_ref(ptr) }) } } impl<'a, A, D> ExactSizeIterator for AxisIterMut<'a, A, D> - where D: Dimension +where + D: Dimension, { fn len(&self) -> usize { self.size_hint().0 } } -impl<'a, A, D: Dimension> NdProducer for AxisIter<'a, A, D> -{ +impl<'a, A, D: Dimension> NdProducer for AxisIter<'a, A, D> { type Item = ::Item; type Dim = Ix1; type Ptr = *mut A; @@ -960,9 +952,11 @@ impl<'a, A, D: Dimension> NdProducer for AxisIter<'a, A, D> #[doc(hidden)] unsafe fn as_ref(&self, ptr: Self::Ptr) -> Self::Item { - ArrayView::new_(ptr, - self.iter.inner_dim.clone(), - self.iter.inner_strides.clone()) + ArrayView::new_( + ptr, + self.iter.inner_dim.clone(), + self.iter.inner_strides.clone(), + ) } #[doc(hidden)] unsafe fn uget_ptr(&self, i: &Self::Dim) -> Self::Ptr { @@ -978,11 +972,10 @@ impl<'a, A, D: Dimension> NdProducer for AxisIter<'a, A, D> fn split_at(self, _axis: Axis, index: usize) -> (Self, Self) { self.split_at(index) } - private_impl!{} + private_impl! {} } -impl<'a, A, D: Dimension> NdProducer for AxisIterMut<'a, A, D> -{ +impl<'a, A, D: Dimension> NdProducer for AxisIterMut<'a, A, D> { type Item = ::Item; type Dim = Ix1; type Ptr = *mut A; @@ -1007,9 +1000,11 @@ impl<'a, A, D: Dimension> NdProducer for AxisIterMut<'a, A, D> #[doc(hidden)] unsafe fn as_ref(&self, ptr: Self::Ptr) -> Self::Item { - ArrayViewMut::new_(ptr, - self.iter.inner_dim.clone(), - self.iter.inner_strides.clone()) + ArrayViewMut::new_( + ptr, + self.iter.inner_dim.clone(), + self.iter.inner_strides.clone(), + ) } #[doc(hidden)] unsafe fn uget_ptr(&self, i: &Self::Dim) -> Self::Ptr { @@ -1025,7 +1020,7 @@ impl<'a, A, D: Dimension> NdProducer for AxisIterMut<'a, A, D> fn split_at(self, _axis: Axis, index: usize) -> (Self, Self) { self.split_at(index) } - private_impl!{} + private_impl! {} } /// An iterator that traverses over the specified axis @@ -1064,14 +1059,20 @@ clone_bounds!( /// /// Returns an axis iterator with the correct stride to move between chunks, /// the number of chunks, and the shape of the last chunk. 
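Note (illustrative, not part of the patch): `chunk_iter_parts`, whose reformatting follows, computes the stride, chunk count, and last-chunk shape that drive `axis_chunks_iter`; the observable behavior for an uneven split is:

```rust
use ndarray::{arr1, Axis};

fn main() {
    let a = arr1(&[0., 1., 2., 3., 4., 5., 6.]);

    // Chunks of 3 along axis 0: two full chunks and one shorter last chunk.
    let lens: Vec<usize> = a.axis_chunks_iter(Axis(0), 3).map(|c| c.len()).collect();
    assert_eq!(lens, vec![3, 3, 1]);
}
```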
-fn chunk_iter_parts(v: ArrayView, axis: Axis, size: usize) - -> (AxisIterCore, usize, D) -{ +fn chunk_iter_parts( + v: ArrayView, + axis: Axis, + size: usize, +) -> (AxisIterCore, usize, D) { let axis_len = v.len_of(axis); let size = if size > axis_len { axis_len } else { size }; let n_whole_chunks = axis_len / size; let chunk_remainder = axis_len % size; - let iter_len = if chunk_remainder == 0 { n_whole_chunks } else { n_whole_chunks + 1 }; + let iter_len = if chunk_remainder == 0 { + n_whole_chunks + } else { + n_whole_chunks + 1 + }; let stride = v.stride_of(axis) * size as isize; let axis = axis.index(); @@ -1079,7 +1080,11 @@ fn chunk_iter_parts(v: ArrayView, axis: Axis, size: usize inner_dim[axis] = size; let mut last_dim = v.dim; - last_dim[axis] = if chunk_remainder == 0 { size } else { chunk_remainder }; + last_dim[axis] = if chunk_remainder == 0 { + size + } else { + chunk_remainder + }; let iter = AxisIterCore { index: 0, @@ -1106,26 +1111,32 @@ impl<'a, A, D: Dimension> AxisChunksIter<'a, A, D> { } macro_rules! chunk_iter_impl { - ($iter:ident, $array:ident) => ( + ($iter:ident, $array:ident) => { impl<'a, A, D> $iter<'a, A, D> - where D: Dimension + where + D: Dimension, { - fn get_subview(&self, iter_item: Option<*mut A>, is_uneven: bool) - -> Option<$array<'a, A, D>> - { + fn get_subview( + &self, + iter_item: Option<*mut A>, + is_uneven: bool, + ) -> Option<$array<'a, A, D>> { iter_item.map(|ptr| { if !is_uneven { unsafe { - $array::new_(ptr, - self.iter.inner_dim.clone(), - self.iter.inner_strides.clone()) + $array::new_( + ptr, + self.iter.inner_dim.clone(), + self.iter.inner_strides.clone(), + ) } - } - else { + } else { unsafe { - $array::new_(ptr, - self.last_dim.clone(), - self.iter.inner_strides.clone()) + $array::new_( + ptr, + self.last_dim.clone(), + self.iter.inner_strides.clone(), + ) } } }) @@ -1133,7 +1144,8 @@ macro_rules! chunk_iter_impl { } impl<'a, A, D> Iterator for $iter<'a, A, D> - where D: Dimension, + where + D: Dimension, { type Item = $array<'a, A, D>; @@ -1149,7 +1161,8 @@ macro_rules! chunk_iter_impl { } impl<'a, A, D> DoubleEndedIterator for $iter<'a, A, D> - where D: Dimension, + where + D: Dimension, { fn next_back(&mut self) -> Option { let is_uneven = self.iter.len > self.n_whole_chunks; @@ -1158,10 +1171,8 @@ macro_rules! chunk_iter_impl { } } - impl<'a, A, D> ExactSizeIterator for $iter<'a, A, D> - where D: Dimension, - { } - ) + impl<'a, A, D> ExactSizeIterator for $iter<'a, A, D> where D: Dimension {} + }; } /// An iterator that traverses over the specified axis @@ -1197,7 +1208,6 @@ impl<'a, A, D: Dimension> AxisChunksIterMut<'a, A, D> { chunk_iter_impl!(AxisChunksIter, ArrayView); chunk_iter_impl!(AxisChunksIterMut, ArrayViewMut); - send_sync_read_only!(Iter); send_sync_read_only!(IndexedIter); send_sync_read_only!(LanesIter); @@ -1214,37 +1224,37 @@ send_sync_read_write!(ElementsBaseMut); /// (Trait used internally) An iterator that we trust /// to deliver exactly as many items as it said it would. 
-pub unsafe trait TrustedIterator { } +pub unsafe trait TrustedIterator {} -use std; -use crate::linspace::Linspace; -use crate::iter::IndicesIter; use crate::indexes::IndicesIterF; +use crate::iter::IndicesIter; +use crate::linspace::Linspace; +use std; -unsafe impl TrustedIterator for Linspace { } -unsafe impl<'a, A, D> TrustedIterator for Iter<'a, A, D> { } -unsafe impl<'a, A, D> TrustedIterator for IterMut<'a, A, D> { } -unsafe impl TrustedIterator for std::iter::Map - where I: TrustedIterator { } -unsafe impl<'a, A> TrustedIterator for slice::Iter<'a, A> { } -unsafe impl<'a, A> TrustedIterator for slice::IterMut<'a, A> { } -unsafe impl TrustedIterator for ::std::ops::Range { } +unsafe impl TrustedIterator for Linspace {} +unsafe impl<'a, A, D> TrustedIterator for Iter<'a, A, D> {} +unsafe impl<'a, A, D> TrustedIterator for IterMut<'a, A, D> {} +unsafe impl TrustedIterator for std::iter::Map where I: TrustedIterator {} +unsafe impl<'a, A> TrustedIterator for slice::Iter<'a, A> {} +unsafe impl<'a, A> TrustedIterator for slice::IterMut<'a, A> {} +unsafe impl TrustedIterator for ::std::ops::Range {} // FIXME: These indices iter are dubious -- size needs to be checked up front. -unsafe impl TrustedIterator for IndicesIter where D: Dimension { } -unsafe impl TrustedIterator for IndicesIterF where D: Dimension { } - +unsafe impl TrustedIterator for IndicesIter where D: Dimension {} +unsafe impl TrustedIterator for IndicesIterF where D: Dimension {} /// Like Iterator::collect, but only for trusted length iterators pub fn to_vec(iter: I) -> Vec - where I: TrustedIterator + ExactSizeIterator +where + I: TrustedIterator + ExactSizeIterator, { to_vec_mapped(iter, |x| x) } /// Like Iterator::collect, but only for trusted length iterators pub fn to_vec_mapped(iter: I, mut f: F) -> Vec - where I: TrustedIterator + ExactSizeIterator, - F: FnMut(I::Item) -> B, +where + I: TrustedIterator + ExactSizeIterator, + F: FnMut(I::Item) -> B, { // Use an `unsafe` block to do this efficiently. 
// We know that iter will produce exactly .size() elements, @@ -1253,13 +1263,11 @@ pub fn to_vec_mapped(iter: I, mut f: F) -> Vec let mut result = Vec::with_capacity(size); let mut out_ptr = result.as_mut_ptr(); let mut len = 0; - iter.fold((), |(), elt| { - unsafe { - ptr::write(out_ptr, f(elt)); - len += 1; - result.set_len(len); - out_ptr = out_ptr.offset(1); - } + iter.fold((), |(), elt| unsafe { + ptr::write(out_ptr, f(elt)); + len += 1; + result.set_len(len); + out_ptr = out_ptr.offset(1); }); debug_assert_eq!(size, result.len()); result diff --git a/src/iterators/windows.rs b/src/iterators/windows.rs index 5846832de..964e05c4d 100644 --- a/src/iterators/windows.rs +++ b/src/iterators/windows.rs @@ -1,6 +1,5 @@ - -use crate::imp_prelude::*; use super::ElementsBase; +use crate::imp_prelude::*; use crate::IntoDimension; use crate::Layout; use crate::NdProducer; @@ -21,10 +20,16 @@ impl<'a, A, D: Dimension> Windows<'a, A, D> { E: IntoDimension, { let window = window_size.into_dimension(); - ndassert!(a.ndim() == window.ndim(), - concat!("Window dimension {} does not match array dimension {} ", - "(with array of shape {:?})"), - window.ndim(), a.ndim(), a.shape()); + ndassert!( + a.ndim() == window.ndim(), + concat!( + "Window dimension {} does not match array dimension {} ", + "(with array of shape {:?})" + ), + window.ndim(), + a.ndim(), + a.shape() + ); let mut size = a.dim; for (sz, &ws) in size.slice_mut().iter_mut().zip(window.slice()) { assert_ne!(ws, 0, "window-size must not be zero!"); @@ -64,8 +69,9 @@ impl_ndproducer! { } impl<'a, A, D> IntoIterator for Windows<'a, A, D> - where D: Dimension, - A: 'a, +where + D: Dimension, + A: 'a, { type Item = ::Item; type IntoIter = WindowsIter<'a, A, D>; @@ -88,7 +94,7 @@ pub struct WindowsIter<'a, A: 'a, D> { strides: D, } -impl_iterator!{ +impl_iterator! { ['a, A, D: Dimension] [Clone => 'a, A, D: Clone] WindowsIter { diff --git a/src/layout/layoutfmt.rs b/src/layout/layoutfmt.rs index 6312a3417..3ad8aa3c7 100644 --- a/src/layout/layoutfmt.rs +++ b/src/layout/layoutfmt.rs @@ -1,4 +1,3 @@ - // Copyright 2017 bluss and ndarray developers. // // Licensed under the Apache License, Version 2.0 Self; fn and(self, flag: Self) -> Self; fn is(self, flag: u32) -> bool; @@ -16,7 +15,9 @@ pub trait LayoutPriv : Sized { impl LayoutPriv for Layout { #[inline(always)] - fn new(x: u32) -> Self { Layout(x) } + fn new(x: u32) -> Self { + Layout(x) + } #[inline(always)] fn is(self, flag: u32) -> bool { diff --git a/src/lib.rs b/src/lib.rs index 7be810f9b..13a7fd4b1 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -5,7 +5,7 @@ // , at your // option. This file may not be copied, modified, or distributed // except according to those terms. -#![crate_name="ndarray"] +#![crate_name = "ndarray"] #![doc(html_root_url = "https://docs.rs/ndarray/0.12/")] //! 
The `ndarray` crate provides an *n*-dimensional container for general elements @@ -87,20 +87,20 @@ #[cfg(feature = "serde-1")] extern crate serde; -#[cfg(feature="rayon")] +#[cfg(feature = "rayon")] extern crate rayon; -#[cfg(feature="blas")] -extern crate cblas_sys; -#[cfg(feature="blas")] +#[cfg(feature = "blas")] extern crate blas_src; +#[cfg(feature = "blas")] +extern crate cblas_sys; extern crate matrixmultiply; extern crate itertools; -extern crate num_traits; extern crate num_complex; extern crate num_integer; +extern crate num_traits; #[cfg(test)] extern crate quickcheck; @@ -111,23 +111,18 @@ pub mod doc; use std::marker::PhantomData; use std::sync::Arc; +pub use crate::dimension::dim::*; pub use crate::dimension::{ - Dimension, - IntoDimension, - RemoveAxis, - Axis, - AxisDescription, - slices_intersect, + slices_intersect, Axis, AxisDescription, Dimension, IntoDimension, RemoveAxis, }; -pub use crate::dimension::dim::*; -pub use crate::dimension::NdIndex; pub use crate::dimension::IxDynImpl; +pub use crate::dimension::NdIndex; +pub use crate::error::{ErrorKind, ShapeError}; pub use crate::indexes::{indices, indices_of}; -pub use crate::error::{ShapeError, ErrorKind}; pub use crate::slice::{ - deref_raw_view_mut_into_view_with_life, deref_raw_view_mut_into_view_mut_with_life, - life_of_view_mut, Slice, SliceInfo, SliceNextDim, SliceOrIndex + deref_raw_view_mut_into_view_mut_with_life, deref_raw_view_mut_into_view_with_life, + life_of_view_mut, Slice, SliceInfo, SliceNextDim, SliceOrIndex, }; use crate::iterators::Baseiter; @@ -137,44 +132,40 @@ pub use crate::arraytraits::AsArray; pub use crate::linalg_traits::{LinalgScalar, NdFloat}; pub use crate::stacking::stack; -pub use crate::shape_builder::{ ShapeBuilder}; pub use crate::impl_views::IndexLonger; +pub use crate::shape_builder::ShapeBuilder; -#[macro_use] mod macro_utils; -#[macro_use] mod private; +#[macro_use] +mod macro_utils; +#[macro_use] +mod private; mod aliases; -mod arraytraits; #[cfg(feature = "serde-1")] mod array_serde; mod arrayformat; +mod arraytraits; mod data_traits; pub use crate::aliases::*; #[allow(deprecated)] pub use crate::data_traits::{ - RawData, - RawDataMut, - RawDataClone, - Data, - DataMut, - DataOwned, - DataShared, - DataClone, + Data, DataClone, DataMut, DataOwned, DataShared, RawData, RawDataClone, RawDataMut, }; mod free_functions; pub use crate::free_functions::*; pub use crate::iterators::iter; -#[macro_use] mod slice; -mod layout; +#[macro_use] +mod slice; +mod error; mod indexes; mod iterators; +mod layout; mod linalg_traits; mod linspace; mod numeric_util; -mod error; mod shape_builder; mod stacking; #[macro_use] @@ -182,32 +173,19 @@ mod zip; mod dimension; -pub use crate::zip::{ - Zip, - NdProducer, - IntoNdProducer, - FoldWhile, -}; +pub use crate::zip::{FoldWhile, IntoNdProducer, NdProducer, Zip}; pub use crate::layout::Layout; /// Implementation's prelude. Common types used everywhere. 
mod imp_prelude { + pub use crate::dimension::DimensionExt; pub use crate::prelude::*; pub use crate::ArcArray; pub use crate::{ - RemoveAxis, - RawData, - RawDataMut, - Data, - DataMut, - DataOwned, - DataShared, - RawViewRepr, - ViewRepr, - Ix, Ixs, + Data, DataMut, DataOwned, DataShared, Ix, Ixs, RawData, RawDataMut, RawViewRepr, + RemoveAxis, ViewRepr, }; - pub use crate::dimension::DimensionExt; } pub mod prelude; @@ -1065,7 +1043,8 @@ pub type Ixs = isize; // // [`.offset()`]: https://doc.rust-lang.org/stable/std/primitive.pointer.html#method.offset-1 pub struct ArrayBase - where S: RawData +where + S: RawData, { /// Data buffer / ownership information. (If owned, contains the data /// buffer; if borrowed, contains the lifetime and mutability.) @@ -1084,7 +1063,7 @@ pub struct ArrayBase /// It can act as both an owner as the data as well as a shared reference (view like). /// /// **Note: this type alias is obsolete.** See the equivalent [`ArcArray`] instead. -#[deprecated(note="`RcArray` has been renamed to `ArcArray`")] +#[deprecated(note = "`RcArray` has been renamed to `ArcArray`")] pub type RcArray = ArrayBase, D>; /// An array where the data has shared ownership and is copy on write. @@ -1226,7 +1205,7 @@ pub struct OwnedRepr(Vec); /// /// *Don’t use this type directly—use the type alias /// [`RcArray`](type.RcArray.html) for the array type!* -#[deprecated(note="RcArray is replaced by ArcArray")] +#[deprecated(note = "RcArray is replaced by ArcArray")] pub use self::OwnedArcRepr as OwnedRcRepr; /// ArcArray's representation. @@ -1287,20 +1266,27 @@ mod impl_owned_array; /// Private Methods impl ArrayBase - where S: Data, D: Dimension +where + S: Data, + D: Dimension, { #[inline] fn broadcast_unwrap(&self, dim: E) -> ArrayView - where E: Dimension, + where + E: Dimension, { #[cold] #[inline(never)] fn broadcast_panic(from: &D, to: &E) -> ! - where D: Dimension, - E: Dimension, + where + D: Dimension, + E: Dimension, { - panic!("ndarray: could not broadcast array from shape: {:?} to: {:?}", - from.slice(), to.slice()) + panic!( + "ndarray: could not broadcast array from shape: {:?} to: {:?}", + from.slice(), + to.slice() + ) } match self.broadcast(dim.clone()) { @@ -1313,16 +1299,15 @@ impl ArrayBase // (Checked in debug assertions). #[inline] fn broadcast_assume(&self, dim: E) -> ArrayView - where E: Dimension, + where + E: Dimension, { let dim = dim.into_dimension(); debug_assert_eq!(self.shape(), dim.slice()); let ptr = self.ptr; let mut strides = dim.clone(); strides.slice_mut().copy_from_slice(self.strides.slice()); - unsafe { - ArrayView::new_(ptr, dim, strides) - } + unsafe { ArrayView::new_(ptr, dim, strides) } } fn raw_strides(&self) -> D { @@ -1332,8 +1317,9 @@ impl ArrayBase /// Apply closure `f` to each element in the array, in whatever /// order is the fastest to visit. fn unordered_foreach_mut(&mut self, mut f: F) - where S: DataMut, - F: FnMut(&mut A) + where + S: DataMut, + F: FnMut(&mut A), { if let Some(slc) = self.as_slice_memory_order_mut() { // FIXME: Use for loop when slice iterator is perf is restored @@ -1348,8 +1334,7 @@ impl ArrayBase } /// Remove array axis `axis` and return the result. 
- fn try_remove_axis(self, axis: Axis) -> ArrayBase - { + fn try_remove_axis(self, axis: Axis) -> ArrayBase { let d = self.dim.try_remove_axis(axis); let s = self.strides.try_remove_axis(axis); ArrayBase { @@ -1361,24 +1346,23 @@ impl ArrayBase } /// n-d generalization of rows, just like inner iter - fn inner_rows(&self) -> iterators::Lanes - { + fn inner_rows(&self) -> iterators::Lanes { let n = self.ndim(); Lanes::new(self.view(), Axis(n.saturating_sub(1))) } /// n-d generalization of rows, just like inner iter fn inner_rows_mut(&mut self) -> iterators::LanesMut - where S: DataMut + where + S: DataMut, { let n = self.ndim(); LanesMut::new(self.view_mut(), Axis(n.saturating_sub(1))) } } - // parallel methods -#[cfg(feature="rayon")] +#[cfg(feature = "rayon")] pub mod parallel; mod impl_1d; diff --git a/src/linalg/impl_linalg.rs b/src/linalg/impl_linalg.rs index 6e8bbca44..a40f38616 100644 --- a/src/linalg/impl_linalg.rs +++ b/src/linalg/impl_linalg.rs @@ -9,38 +9,35 @@ use crate::imp_prelude::*; use crate::numeric_util; -use crate::{ - LinalgScalar, - Zip, -}; +use crate::{LinalgScalar, Zip}; use std::any::TypeId; -#[cfg(feature="blas")] +#[cfg(feature = "blas")] use std::cmp; -#[cfg(feature="blas")] +#[cfg(feature = "blas")] use std::mem::swap; -#[cfg(feature="blas")] +#[cfg(feature = "blas")] use std::os::raw::c_int; -#[cfg(feature="blas")] +#[cfg(feature = "blas")] use cblas_sys as blas_sys; -#[cfg(feature="blas")] -use cblas_sys::{CblasNoTrans, CblasTrans, CblasRowMajor, CBLAS_LAYOUT}; +#[cfg(feature = "blas")] +use cblas_sys::{CblasNoTrans, CblasRowMajor, CblasTrans, CBLAS_LAYOUT}; /// len of vector before we use blas -#[cfg(feature="blas")] +#[cfg(feature = "blas")] const DOT_BLAS_CUTOFF: usize = 32; /// side of matrix before we use blas -#[cfg(feature="blas")] +#[cfg(feature = "blas")] const GEMM_BLAS_CUTOFF: usize = 7; -#[cfg(feature="blas")] +#[cfg(feature = "blas")] #[allow(non_camel_case_types)] type blas_index = c_int; // blas index type - impl ArrayBase - where S: Data, +where + S: Data, { /// Perform dot product or matrix multiplication of arrays `self` and `rhs`. /// @@ -60,14 +57,16 @@ impl ArrayBase /// *Note:* If enabled, uses blas `dot` for elements of `f32, f64` when memory /// layout allows. pub fn dot(&self, rhs: &Rhs) -> >::Output - where Self: Dot + where + Self: Dot, { Dot::dot(self, rhs) } fn dot_generic(&self, rhs: &ArrayBase) -> A - where S2: Data, - A: LinalgScalar, + where + S2: Data, + A: LinalgScalar, { debug_assert_eq!(self.len(), rhs.len()); assert!(self.len() == rhs.len()); @@ -85,18 +84,20 @@ impl ArrayBase sum } - #[cfg(not(feature="blas"))] + #[cfg(not(feature = "blas"))] fn dot_impl(&self, rhs: &ArrayBase) -> A - where S2: Data, - A: LinalgScalar, + where + S2: Data, + A: LinalgScalar, { self.dot_generic(rhs) } - #[cfg(feature="blas")] + #[cfg(feature = "blas")] fn dot_impl(&self, rhs: &ArrayBase) -> A - where S2: Data, - A: LinalgScalar, + where + S2: Data, + A: LinalgScalar, { // Use only if the vector is large enough to be worth it if self.len() >= DOT_BLAS_CUTOFF { @@ -104,28 +105,27 @@ impl ArrayBase assert!(self.len() == rhs.len()); macro_rules! 
dot { ($ty:ty, $func:ident) => {{ - if blas_compat_1d::<$ty, _>(self) && blas_compat_1d::<$ty, _>(rhs) { - unsafe { - let (lhs_ptr, n, incx) = blas_1d_params(self.ptr, - self.len(), - self.strides()[0]); - let (rhs_ptr, _, incy) = blas_1d_params(rhs.ptr, - rhs.len(), - rhs.strides()[0]); - let ret = blas_sys::$func( - n, - lhs_ptr as *const $ty, - incx, - rhs_ptr as *const $ty, - incy); - return cast_as::<$ty, A>(&ret); - } - } - }} + if blas_compat_1d::<$ty, _>(self) && blas_compat_1d::<$ty, _>(rhs) { + unsafe { + let (lhs_ptr, n, incx) = + blas_1d_params(self.ptr, self.len(), self.strides()[0]); + let (rhs_ptr, _, incy) = + blas_1d_params(rhs.ptr, rhs.len(), rhs.strides()[0]); + let ret = blas_sys::$func( + n, + lhs_ptr as *const $ty, + incx, + rhs_ptr as *const $ty, + incy, + ); + return cast_as::<$ty, A>(&ret); + } + } + }}; } - dot!{f32, cblas_sdot}; - dot!{f64, cblas_ddot}; + dot! {f32, cblas_sdot}; + dot! {f64, cblas_ddot}; } self.dot_generic(rhs) } @@ -136,10 +136,12 @@ impl ArrayBase /// BLAS wants a pointer to the element with lowest address, /// which agrees with our pointer for non-negative strides, but /// is at the opposite end for negative strides. -#[cfg(feature="blas")] -unsafe fn blas_1d_params(ptr: *const A, len: usize, stride: isize) - -> (*const A, blas_index, blas_index) -{ +#[cfg(feature = "blas")] +unsafe fn blas_1d_params( + ptr: *const A, + len: usize, + stride: isize, +) -> (*const A, blas_index, blas_index) { // [x x x x] // ^--ptr // stride = -1 @@ -165,9 +167,10 @@ pub trait Dot { } impl Dot> for ArrayBase - where S: Data, - S2: Data, - A: LinalgScalar, +where + S: Data, + S2: Data, + A: LinalgScalar, { type Output = A; @@ -179,16 +182,16 @@ impl Dot> for ArrayBase /// **Panics** if the arrays are not of the same length.
/// *Note:* If enabled, uses blas `dot` for elements of `f32, f64` when memory /// layout allows. - fn dot(&self, rhs: &ArrayBase) -> A - { + fn dot(&self, rhs: &ArrayBase) -> A { self.dot_impl(rhs) } } impl Dot> for ArrayBase - where S: Data, - S2: Data, - A: LinalgScalar, +where + S: Data, + S2: Data, + A: LinalgScalar, { type Output = Array; @@ -201,14 +204,14 @@ impl Dot> for ArrayBase /// Return a result array with shape *N*. /// /// **Panics** if shapes are incompatible. - fn dot(&self, rhs: &ArrayBase) -> Array - { + fn dot(&self, rhs: &ArrayBase) -> Array { rhs.t().dot(self) } } impl ArrayBase - where S: Data, +where + S: Data, { /// Perform matrix multiplication of rectangular arrays `self` and `rhs`. /// @@ -240,20 +243,21 @@ impl ArrayBase /// ); /// ``` pub fn dot(&self, rhs: &Rhs) -> >::Output - where Self: Dot + where + Self: Dot, { Dot::dot(self, rhs) } } impl Dot> for ArrayBase - where S: Data, - S2: Data, - A: LinalgScalar, +where + S: Data, + S2: Data, + A: LinalgScalar, { type Output = Array2
; - fn dot(&self, b: &ArrayBase) -> Array2 - { + fn dot(&self, b: &ArrayBase) -> Array2 { let a = self.view(); let b = b.view(); let ((m, k), (k2, n)) = (a.dim(), b.dim()); @@ -281,11 +285,13 @@ impl Dot> for ArrayBase #[inline(never)] fn dot_shape_error(m: usize, k: usize, k2: usize, n: usize) -> ! { match m.checked_mul(n) { - Some(len) if len <= ::std::isize::MAX as usize => {}, + Some(len) if len <= ::std::isize::MAX as usize => {} _ => panic!("ndarray: shape {} × {} overflows isize", m, n), } - panic!("ndarray: inputs {} × {} and {} × {} are not compatible for matrix multiplication", - m, k, k2, n); + panic!( + "ndarray: inputs {} × {} and {} × {} are not compatible for matrix multiplication", + m, k, k2, n + ); } #[cold] @@ -305,13 +311,13 @@ fn general_dot_shape_error(m: usize, k: usize, k2: usize, n: usize, c1: usize, c /// /// **Panics** if shapes are incompatible. impl Dot> for ArrayBase - where S: Data, - S2: Data, - A: LinalgScalar, +where + S: Data, + S2: Data, + A: LinalgScalar, { type Output = Array; - fn dot(&self, rhs: &ArrayBase) -> Array - { + fn dot(&self, rhs: &ArrayBase) -> Array { let ((m, a), n) = (self.dim(), rhs.dim()); if a != n { dot_shape_error(m, a, n, 1); @@ -327,8 +333,9 @@ impl Dot> for ArrayBase } impl ArrayBase - where S: Data, - D: Dimension, +where + S: Data, + D: Dimension, { /// Perform the operation `self += alpha * rhs` efficiently, where /// `alpha` is a scalar and `rhs` is another array. This operation is @@ -338,10 +345,11 @@ impl ArrayBase /// /// **Panics** if broadcasting isn’t possible. pub fn scaled_add(&mut self, alpha: A, rhs: &ArrayBase) - where S: DataMut, - S2: Data, - A: LinalgScalar, - E: Dimension, + where + S: DataMut, + S2: Data, + A: LinalgScalar, + E: Dimension, { self.zip_mut_with(rhs, move |y, &x| *y = *y + (alpha * x)); } @@ -349,22 +357,23 @@ impl ArrayBase // mat_mul_impl uses ArrayView arguments to send all array kinds into // the same instantiated implementation. 
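// Illustrative sketch (not part of the upstream change): how the `Dot` impls and
// `scaled_add` reformatted above are typically called. The helper name and array
// values are made up; whether the BLAS path or the matrixmultiply/generic fallback
// runs depends on element type, memory layout and the cutoff constants defined
// earlier in this file.
fn _dot_usage_sketch() {
    use ndarray::linalg::general_mat_mul;
    use ndarray::prelude::*;

    let a = array![[1., 2.], [3., 4.]];
    let b = array![[5., 6.], [7., 8.]];
    let v = array![1., 1.];

    let m = a.dot(&b); // 2-D × 2-D -> Array2, panics on shape mismatch
    let w = a.dot(&v); // 2-D × 1-D -> Array1
    let s = v.dot(&v); // 1-D × 1-D -> scalar

    // C ← α A B + β C, writing into an existing output array.
    let mut c = Array2::<f64>::zeros((2, 2));
    general_mat_mul(1., &a, &b, 0., &mut c);
    assert_eq!(c, m);

    // self += alpha * rhs.
    let mut y = v.clone();
    y.scaled_add(2., &v);
    assert_eq!(y, array![3., 3.]);
    let _ = (w, s);
}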
-#[cfg(not(feature="blas"))] +#[cfg(not(feature = "blas"))] use self::mat_mul_general as mat_mul_impl; -#[cfg(feature="blas")] -fn mat_mul_impl(alpha: A, - lhs: &ArrayView2, - rhs: &ArrayView2, - beta: A, - c: &mut ArrayViewMut2) - where A: LinalgScalar, +#[cfg(feature = "blas")] +fn mat_mul_impl( + alpha: A, + lhs: &ArrayView2, + rhs: &ArrayView2, + beta: A, + c: &mut ArrayViewMut2, +) where + A: LinalgScalar, { // size cutoff for using BLAS let cut = GEMM_BLAS_CUTOFF; let ((mut m, a), (_, mut n)) = (lhs.dim(), rhs.dim()); - if !(m > cut || n > cut || a > cut) || - !(same_type::() || same_type::()) { + if !(m > cut || n > cut || a > cut) || !(same_type::() || same_type::()) { return mat_mul_general(alpha, lhs, rhs, beta, c); } { @@ -420,25 +429,25 @@ fn mat_mul_impl(alpha: A, // Where Op is notrans/trans/conjtrans unsafe { blas_sys::$gemm( - CblasRowMajor, - lhs_trans, - rhs_trans, - m as blas_index, // m, rows of Op(a) - n as blas_index, // n, cols of Op(b) - k as blas_index, // k, cols of Op(a) - cast_as(&alpha), // alpha - lhs_.ptr as *const _, // a - lhs_stride, // lda - rhs_.ptr as *const _, // b - rhs_stride, // ldb - cast_as(&beta), // beta - c_.ptr as *mut _, // c - c_stride, // ldc - ); + CblasRowMajor, + lhs_trans, + rhs_trans, + m as blas_index, // m, rows of Op(a) + n as blas_index, // n, cols of Op(b) + k as blas_index, // k, cols of Op(a) + cast_as(&alpha), // alpha + lhs_.ptr as *const _, // a + lhs_stride, // lda + rhs_.ptr as *const _, // b + rhs_stride, // ldb + cast_as(&beta), // beta + c_.ptr as *mut _, // c + c_stride, // ldc + ); } - return; + return; } - } + }; } gemm!(f32, cblas_sgemm); gemm!(f64, cblas_dgemm); @@ -447,12 +456,14 @@ fn mat_mul_impl(alpha: A, } /// C ← α A B + β C -fn mat_mul_general(alpha: A, - lhs: &ArrayView2, - rhs: &ArrayView2, - beta: A, - c: &mut ArrayViewMut2) - where A: LinalgScalar, +fn mat_mul_general( + alpha: A, + lhs: &ArrayView2, + rhs: &ArrayView2, + beta: A, + c: &mut ArrayViewMut2, +) where + A: LinalgScalar, { let ((m, k), (_, n)) = (lhs.dim(), rhs.dim()); @@ -464,7 +475,9 @@ fn mat_mul_general(alpha: A, if same_type::() { unsafe { ::matrixmultiply::sgemm( - m, k, n, + m, + k, + n, cast_as(&alpha), ap as *const _, lhs.strides()[0], @@ -474,13 +487,16 @@ fn mat_mul_general(alpha: A, rhs.strides()[1], cast_as(&beta), cp as *mut _, - rsc, csc + rsc, + csc, ); } } else if same_type::() { unsafe { ::matrixmultiply::dgemm( - m, k, n, + m, + k, + n, cast_as(&alpha), ap as *const _, lhs.strides()[0], @@ -490,7 +506,8 @@ fn mat_mul_general(alpha: A, rhs.strides()[1], cast_as(&beta), cp as *mut _, - rsc, csc + rsc, + csc, ); } } else { @@ -509,8 +526,11 @@ fn mat_mul_general(alpha: A, loop { unsafe { let elt = c.uget_mut((i, j)); - *elt = *elt * beta + alpha * (0..k).fold(A::zero(), - move |s, x| s + *lhs.uget((i, x)) * *rhs.uget((x, j))); + *elt = *elt * beta + + alpha + * (0..k).fold(A::zero(), move |s, x| { + s + *lhs.uget((i, x)) * *rhs.uget((x, j)) + }); } j += 1; if j == n { @@ -535,15 +555,17 @@ fn mat_mul_general(alpha: A, /// *Note:* If enabled, uses blas `gemm` for elements of `f32, f64` when memory /// layout allows. The default matrixmultiply backend is otherwise used for /// `f32, f64` for all memory layouts. 
-pub fn general_mat_mul(alpha: A, - a: &ArrayBase, - b: &ArrayBase, - beta: A, - c: &mut ArrayBase) - where S1: Data, - S2: Data, - S3: DataMut, - A: LinalgScalar, +pub fn general_mat_mul( + alpha: A, + a: &ArrayBase, + b: &ArrayBase, + beta: A, + c: &mut ArrayBase, +) where + S1: Data, + S2: Data, + S3: DataMut, + A: LinalgScalar, { let ((m, k), (k2, n)) = (a.dim(), b.dim()); let (m2, n2) = c.dim(); @@ -564,15 +586,17 @@ pub fn general_mat_mul(alpha: A, /// ***Panics*** if array shapes are not compatible
/// *Note:* If enabled, uses blas `gemv` for elements of `f32, f64` when memory /// layout allows. -pub fn general_mat_vec_mul(alpha: A, - a: &ArrayBase, - x: &ArrayBase, - beta: A, - y: &mut ArrayBase) - where S1: Data, - S2: Data, - S3: DataMut, - A: LinalgScalar, +pub fn general_mat_vec_mul( + alpha: A, + a: &ArrayBase, + x: &ArrayBase, + beta: A, + y: &mut ArrayBase, +) where + S1: Data, + S2: Data, + S3: DataMut, + A: LinalgScalar, { let ((m, k), k2) = (a.dim(), x.dim()); let m2 = y.dim(); @@ -583,9 +607,7 @@ pub fn general_mat_vec_mul(alpha: A, macro_rules! gemv { ($ty:ty, $gemv:ident) => { if let Some(layout) = blas_layout::<$ty, _>(&a) { - if blas_compat_1d::<$ty, _>(&x) - && blas_compat_1d::<$ty, _>(&y) - { + if blas_compat_1d::<$ty, _>(&x) && blas_compat_1d::<$ty, _>(&y) { let a_trans = CblasNoTrans; let a_stride = match layout { CBLAS_LAYOUT::CblasRowMajor => a.strides()[0] as blas_index, @@ -599,22 +621,22 @@ pub fn general_mat_vec_mul(alpha: A, blas_sys::$gemv( layout, a_trans, - m as blas_index, // m, rows of Op(a) - k as blas_index, // n, cols of Op(a) - cast_as(&alpha), // alpha - a.ptr as *const _, // a - a_stride, // lda - x.ptr as *const _, // x + m as blas_index, // m, rows of Op(a) + k as blas_index, // n, cols of Op(a) + cast_as(&alpha), // alpha + a.ptr as *const _, // a + a_stride, // lda + x.ptr as *const _, // x x_stride, - cast_as(&beta), // beta - y.ptr as *mut _, // x + cast_as(&beta), // beta + y.ptr as *mut _, // x y_stride, ); } return; } } - } + }; } #[cfg(feature = "blas")] gemv!(f32, cblas_sgemv); @@ -624,17 +646,13 @@ pub fn general_mat_vec_mul(alpha: A, /* general */ if beta.is_zero() { - Zip::from(a.outer_iter()) - .and(y) - .apply(|row, elt| { - *elt = row.dot(x) * alpha; - }); + Zip::from(a.outer_iter()).and(y).apply(|row, elt| { + *elt = row.dot(x) * alpha; + }); } else { - Zip::from(a.outer_iter()) - .and(y) - .apply(|row, elt| { - *elt = *elt * beta + row.dot(x) * alpha; - }); + Zip::from(a.outer_iter()).and(y).apply(|row, elt| { + *elt = *elt * beta + row.dot(x) * alpha; + }); } } } @@ -650,16 +668,15 @@ fn same_type() -> bool { // **Panics** if `A` and `B` are not the same type fn cast_as(a: &A) -> B { assert!(same_type::()); - unsafe { - ::std::ptr::read(a as *const _ as *const B) - } + unsafe { ::std::ptr::read(a as *const _ as *const B) } } -#[cfg(feature="blas")] +#[cfg(feature = "blas")] fn blas_compat_1d(a: &ArrayBase) -> bool - where S: Data, - A: 'static, - S::Elem: 'static, +where + S: Data, + A: 'static, + S::Elem: 'static, { if !same_type::() { return false; @@ -668,24 +685,24 @@ fn blas_compat_1d(a: &ArrayBase) -> bool return false; } let stride = a.strides()[0]; - if stride > blas_index::max_value() as isize || - stride < blas_index::min_value() as isize { + if stride > blas_index::max_value() as isize || stride < blas_index::min_value() as isize { return false; } true } -#[cfg(feature="blas")] +#[cfg(feature = "blas")] enum MemoryOrder { C, F, } -#[cfg(feature="blas")] +#[cfg(feature = "blas")] fn blas_row_major_2d(a: &ArrayBase) -> bool - where S: Data, - A: 'static, - S::Elem: 'static, +where + S: Data, + A: 'static, + S::Elem: 'static, { if !same_type::() { return false; @@ -693,11 +710,12 @@ fn blas_row_major_2d(a: &ArrayBase) -> bool is_blas_2d(&a.dim, &a.strides, MemoryOrder::C) } -#[cfg(feature="blas")] +#[cfg(feature = "blas")] fn blas_column_major_2d(a: &ArrayBase) -> bool - where S: Data, - A: 'static, - S::Elem: 'static, +where + S: Data, + A: 'static, + S::Elem: 'static, { if !same_type::() { return false; @@ -705,7 
+723,7 @@ fn blas_column_major_2d(a: &ArrayBase) -> bool is_blas_2d(&a.dim, &a.strides, MemoryOrder::F) } -#[cfg(feature="blas")] +#[cfg(feature = "blas")] fn is_blas_2d(dim: &Ix2, stride: &Ix2, order: MemoryOrder) -> bool { let (m, n) = dim.into_pattern(); let s0 = stride[0] as isize; @@ -720,28 +738,27 @@ fn is_blas_2d(dim: &Ix2, stride: &Ix2, order: MemoryOrder) -> bool { if s0 < 1 || s1 < 1 { return false; } - if (s0 > blas_index::max_value() as isize || s0 < blas_index::min_value() as isize) || - (s1 > blas_index::max_value() as isize || s1 < blas_index::min_value() as isize) + if (s0 > blas_index::max_value() as isize || s0 < blas_index::min_value() as isize) + || (s1 > blas_index::max_value() as isize || s1 < blas_index::min_value() as isize) { return false; } - if m > blas_index::max_value() as usize || - n > blas_index::max_value() as usize - { + if m > blas_index::max_value() as usize || n > blas_index::max_value() as usize { return false; } true } -#[cfg(feature="blas")] +#[cfg(feature = "blas")] fn blas_layout(a: &ArrayBase) -> Option - where S: Data, - A: 'static, - S::Elem: 'static, +where + S: Data, + A: 'static, + S::Elem: 'static, { if blas_row_major_2d::(a) { Some(CBLAS_LAYOUT::CblasRowMajor) - } else if blas_column_major_2d::(a) { + } else if blas_column_major_2d::(a) { Some(CBLAS_LAYOUT::CblasColMajor) } else { None @@ -749,7 +766,7 @@ fn blas_layout(a: &ArrayBase) -> Option } #[cfg(test)] -#[cfg(feature="blas")] +#[cfg(feature = "blas")] mod blas_tests { use super::*; diff --git a/src/linalg/mod.rs b/src/linalg/mod.rs index 1dbc63e34..8575905cd 100644 --- a/src/linalg/mod.rs +++ b/src/linalg/mod.rs @@ -6,11 +6,10 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. - //! Linear algebra. -pub use self::impl_linalg::Dot; pub use self::impl_linalg::general_mat_mul; pub use self::impl_linalg::general_mat_vec_mul; +pub use self::impl_linalg::Dot; mod impl_linalg; diff --git a/src/linalg_traits.rs b/src/linalg_traits.rs index 78edfbb82..a7f5a1a3e 100644 --- a/src/linalg_traits.rs +++ b/src/linalg_traits.rs @@ -5,42 +5,39 @@ // , at your // option. This file may not be copied, modified, or distributed // except according to those terms. -use num_traits::{Zero, One, Float}; -use std::fmt; -use std::ops::{Add, Sub, Mul, Div}; -use std::ops::{ - AddAssign, - SubAssign, - MulAssign, - DivAssign, - RemAssign, -}; use crate::ScalarOperand; +use num_traits::{Float, One, Zero}; +use std::fmt; +use std::ops::{Add, Div, Mul, Sub}; +use std::ops::{AddAssign, DivAssign, MulAssign, RemAssign, SubAssign}; /// Elements that support linear algebra operations. /// /// `'static` for type-based specialization, `Copy` so that they don't need move /// semantics or destructors, and the rest are numerical traits. -pub trait LinalgScalar : - 'static + - Copy + - Zero + One + - Add + - Sub + - Mul + - Div -{ } +pub trait LinalgScalar: + 'static + + Copy + + Zero + + One + + Add + + Sub + + Mul + + Div +{ +} -impl LinalgScalar for T - where T: - 'static + - Copy + - Zero + One + - Add + - Sub + - Mul + - Div -{ } +impl LinalgScalar for T where + T: 'static + + Copy + + Zero + + One + + Add + + Sub + + Mul + + Div +{ +} /// Floating-point element types `f32` and `f64`. /// @@ -50,12 +47,23 @@ impl LinalgScalar for T /// operations (`ScalarOperand`). /// /// This trait can only be implemented by `f32` and `f64`. 
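// Illustrative sketch (not part of the upstream change): what the `LinalgScalar`
// and `NdFloat` bounds reformatted in this file buy you when writing generic
// helpers. Both function names are made up.
fn _trace_sketch<A: ndarray::LinalgScalar>(m: &ndarray::Array2<A>) -> A {
    // LinalgScalar = 'static + Copy + Zero + One + Add/Sub/Mul/Div,
    // enough for element-type-agnostic helpers such as a trace.
    m.diag().iter().fold(A::zero(), |acc, &x| acc + x)
}

fn _normalize_sketch<A: ndarray::NdFloat>(v: &mut ndarray::Array1<A>) {
    // NdFloat adds Float, the *Assign ops and ScalarOperand on top of
    // LinalgScalar, so only f32 and f64 qualify.
    let norm = v.iter().map(|&x| x * x).fold(A::zero(), |s, x| s + x).sqrt();
    if norm > A::zero() {
        v.mapv_inplace(|x| x / norm);
    }
}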
-pub trait NdFloat : - Float + - AddAssign + SubAssign + MulAssign + DivAssign + RemAssign + - fmt::Display + fmt::Debug + fmt::LowerExp + fmt::UpperExp + - ScalarOperand + LinalgScalar + Send + Sync -{ } +pub trait NdFloat: + Float + + AddAssign + + SubAssign + + MulAssign + + DivAssign + + RemAssign + + fmt::Display + + fmt::Debug + + fmt::LowerExp + + fmt::UpperExp + + ScalarOperand + + LinalgScalar + + Send + + Sync +{ +} -impl NdFloat for f32 { } -impl NdFloat for f64 { } +impl NdFloat for f32 {} +impl NdFloat for f64 {} diff --git a/src/linspace.rs b/src/linspace.rs index 51ede80a7..11cc1fba0 100644 --- a/src/linspace.rs +++ b/src/linspace.rs @@ -18,7 +18,8 @@ pub struct Linspace { } impl Iterator for Linspace - where F: Float, +where + F: Float, { type Item = F; @@ -42,7 +43,8 @@ impl Iterator for Linspace } impl DoubleEndedIterator for Linspace - where F: Float, +where + F: Float, { #[inline] fn next_back(&mut self) -> Option { @@ -57,9 +59,7 @@ impl DoubleEndedIterator for Linspace } } -impl ExactSizeIterator for Linspace - where Linspace: Iterator -{} +impl ExactSizeIterator for Linspace where Linspace: Iterator {} /// Return an iterator of evenly spaced floats. /// @@ -70,7 +70,8 @@ impl ExactSizeIterator for Linspace /// either `f32` or `f64`. #[inline] pub fn linspace(a: F, b: F, n: usize) -> Linspace - where F: Float +where + F: Float, { let step = if n > 1 { let nf: F = F::from(n).unwrap(); @@ -95,7 +96,8 @@ pub fn linspace(a: F, b: F, n: usize) -> Linspace /// either `f32` or `f64`. #[inline] pub fn range(a: F, b: F, step: F) -> Linspace - where F: Float +where + F: Float, { let len = b - a; let steps = F::ceil(len / step); diff --git a/src/macro_utils.rs b/src/macro_utils.rs index 2806afba3..85b5c260e 100644 --- a/src/macro_utils.rs +++ b/src/macro_utils.rs @@ -1,4 +1,3 @@ - /// Derive Copy and Clone using the parameters (and bounds) as specified in [] macro_rules! copy_and_clone { ([$($parm:tt)*] $type_:ty) => { @@ -44,7 +43,9 @@ macro_rules! ndassert { #[cfg(not(debug_assertions))] macro_rules! ndassert { - ($e:expr, $($_ignore:tt)*) => { assert!($e) } + ($e:expr, $($_ignore:tt)*) => { + assert!($e) + }; } macro_rules! expand_if { @@ -61,14 +62,16 @@ macro_rules! expand_if { macro_rules! debug_bounds_check { ($self_:ident, $index:expr) => { if let None = $index.index_checked(&$self_.dim, &$self_.strides) { - panic!("ndarray: index {:?} is out of bounds for array of shape {:?}", - $index, $self_.shape()); + panic!( + "ndarray: index {:?} is out of bounds for array of shape {:?}", + $index, + $self_.shape() + ); } }; } #[cfg(not(debug_assertions))] macro_rules! debug_bounds_check { - ($self_:ident, $index:expr) => { }; + ($self_:ident, $index:expr) => {}; } - diff --git a/src/numeric/impl_numeric.rs b/src/numeric/impl_numeric.rs index f8c1f04a3..26816a827 100644 --- a/src/numeric/impl_numeric.rs +++ b/src/numeric/impl_numeric.rs @@ -6,9 +6,9 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use std::ops::{Add, Div, Mul}; -use num_traits::{self, Zero, Float, FromPrimitive}; use itertools::free::enumerate; +use num_traits::{self, Float, FromPrimitive, Zero}; +use std::ops::{Add, Div, Mul}; use crate::imp_prelude::*; use crate::numeric_util; @@ -17,8 +17,9 @@ use crate::{FoldWhile, Zip}; /// # Numerical Methods for Arrays impl ArrayBase - where S: Data, - D: Dimension, +where + S: Data, + D: Dimension, { /// Return the sum of all elements in the array. 
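// Illustrative sketch (not part of the upstream change): the `linspace`/`range`
// iterators reformatted above are usually reached through the `Array`
// constructors, and pair naturally with the numeric reductions in this file.
// The function name is made up.
fn _linspace_sketch() {
    use ndarray::prelude::*;

    // Five evenly spaced values from 0.0 to 1.0, end inclusive.
    let a = Array::linspace(0., 1., 5);
    assert_eq!(a.len(), 5);
    assert!((a.sum() - 2.5).abs() < 1e-12);

    // Step-based counterpart, end exclusive: 0.0, 0.5, 1.0, 1.5.
    let b = Array::range(0., 2., 0.5);
    assert_eq!(b.len(), 4);
}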
/// @@ -30,7 +31,8 @@ impl ArrayBase /// assert_eq!(a.sum(), 10.); /// ``` pub fn sum(&self) -> A - where A: Clone + Add + num_traits::Zero, + where + A: Clone + Add + num_traits::Zero, { if let Some(slc) = self.as_slice_memory_order() { return numeric_util::unrolled_fold(slc, A::zero, A::add); @@ -52,7 +54,8 @@ impl ArrayBase /// next version.* // #[deprecated(note="renamed to `sum`", since="0.13")] pub fn scalar_sum(&self) -> A - where A: Clone + Add + num_traits::Zero, + where + A: Clone + Add + num_traits::Zero, { self.sum() } @@ -67,7 +70,8 @@ impl ArrayBase /// assert_eq!(a.product(), 24.); /// ``` pub fn product(&self) -> A - where A: Clone + Mul + num_traits::One, + where + A: Clone + Mul + num_traits::One, { if let Some(slc) = self.as_slice_memory_order() { return numeric_util::unrolled_fold(slc, A::one, A::mul); @@ -100,8 +104,9 @@ impl ArrayBase /// /// **Panics** if `axis` is out of bounds. pub fn sum_axis(&self, axis: Axis) -> Array - where A: Clone + Zero + Add, - D: RemoveAxis, + where + A: Clone + Zero + Add, + D: RemoveAxis, { let n = self.len_of(axis); let mut res = Array::zeros(self.raw_dim().remove_axis(axis)); @@ -140,10 +145,12 @@ impl ArrayBase /// ); /// ``` pub fn mean_axis(&self, axis: Axis) -> Array - where A: Clone + Zero + FromPrimitive + Add + Div, - D: RemoveAxis, + where + A: Clone + Zero + FromPrimitive + Add + Div, + D: RemoveAxis, { - let n = A::from_usize(self.len_of(axis)).expect("Converting axis length to `A` must not fail."); + let n = + A::from_usize(self.len_of(axis)).expect("Converting axis length to `A` must not fail."); let sum = self.sum_axis(axis); sum / &aview0(&n) } @@ -273,9 +280,10 @@ impl ArrayBase /// /// **Panics** if broadcasting to the same shape isn’t possible. pub fn all_close(&self, rhs: &ArrayBase, tol: A) -> bool - where A: Float, - S2: Data, - E: Dimension, + where + A: Float, + S2: Data, + E: Dimension, { !Zip::from(self) .and(rhs.broadcast_unwrap(self.raw_dim())) @@ -285,7 +293,7 @@ impl ArrayBase } else { FoldWhile::Done(()) } - }).is_done() + }) + .is_done() } } - diff --git a/src/numeric/mod.rs b/src/numeric/mod.rs index 60c5e39f6..b3da06746 100644 --- a/src/numeric/mod.rs +++ b/src/numeric/mod.rs @@ -1,3 +1 @@ - mod impl_numeric; - diff --git a/src/numeric_util.rs b/src/numeric_util.rs index b36d11605..fc066da13 100644 --- a/src/numeric_util.rs +++ b/src/numeric_util.rs @@ -11,17 +11,24 @@ use crate::LinalgScalar; /// Fold over the manually unrolled `xs` with `f` pub fn unrolled_fold(mut xs: &[A], init: I, f: F) -> A - where A: Clone, +where + A: Clone, I: Fn() -> A, F: Fn(A, A) -> A, { // eightfold unrolled so that floating point can be vectorized // (even with strict floating point accuracy semantics) let mut acc = init(); - let (mut p0, mut p1, mut p2, mut p3, - mut p4, mut p5, mut p6, mut p7) = - (init(), init(), init(), init(), - init(), init(), init(), init()); + let (mut p0, mut p1, mut p2, mut p3, mut p4, mut p5, mut p6, mut p7) = ( + init(), + init(), + init(), + init(), + init(), + init(), + init(), + init(), + ); while xs.len() >= 8 { p0 = f(p0, xs[0].clone()); p1 = f(p1, xs[1].clone()); @@ -42,7 +49,9 @@ pub fn unrolled_fold(mut xs: &[A], init: I, f: F) -> A // make it clear to the optimizer that this loop is short // and can not be autovectorized. for i in 0..xs.len() { - if i >= 7 { break; } + if i >= 7 { + break; + } acc = f(acc.clone(), xs[i].clone()) } acc @@ -52,7 +61,8 @@ pub fn unrolled_fold(mut xs: &[A], init: I, f: F) -> A /// /// `xs` and `ys` must be the same length pub fn unrolled_dot
(xs: &[A], ys: &[A]) -> A - where A: LinalgScalar, +where + A: LinalgScalar, { debug_assert_eq!(xs.len(), ys.len()); // eightfold unrolled so that floating point can be vectorized @@ -61,10 +71,16 @@ pub fn unrolled_dot(xs: &[A], ys: &[A]) -> A let mut xs = &xs[..len]; let mut ys = &ys[..len]; let mut sum = A::zero(); - let (mut p0, mut p1, mut p2, mut p3, - mut p4, mut p5, mut p6, mut p7) = - (A::zero(), A::zero(), A::zero(), A::zero(), - A::zero(), A::zero(), A::zero(), A::zero()); + let (mut p0, mut p1, mut p2, mut p3, mut p4, mut p5, mut p6, mut p7) = ( + A::zero(), + A::zero(), + A::zero(), + A::zero(), + A::zero(), + A::zero(), + A::zero(), + A::zero(), + ); while xs.len() >= 8 { p0 = p0 + xs[0] * ys[0]; p1 = p1 + xs[1] * ys[1]; @@ -84,7 +100,9 @@ pub fn unrolled_dot(xs: &[A], ys: &[A]) -> A sum = sum + (p3 + p7); for i in 0..xs.len() { - if i >= 7 { break; } + if i >= 7 { + break; + } unsafe { // get_unchecked is needed to avoid the bounds check sum = sum + xs[i] * *ys.get_unchecked(i); @@ -97,7 +115,8 @@ pub fn unrolled_dot(xs: &[A], ys: &[A]) -> A /// /// `xs` and `ys` must be the same length pub fn unrolled_eq(xs: &[A], ys: &[A]) -> bool - where A: PartialEq +where + A: PartialEq, { debug_assert_eq!(xs.len(), ys.len()); // eightfold unrolled for performance (this is not done by llvm automatically) @@ -107,13 +126,16 @@ pub fn unrolled_eq(xs: &[A], ys: &[A]) -> bool while xs.len() >= 8 { if (xs[0] != ys[0]) - | (xs[1] != ys[1]) - | (xs[2] != ys[2]) - | (xs[3] != ys[3]) - | (xs[4] != ys[4]) - | (xs[5] != ys[5]) - | (xs[6] != ys[6]) - | (xs[7] != ys[7]) { return false; } + | (xs[1] != ys[1]) + | (xs[2] != ys[2]) + | (xs[3] != ys[3]) + | (xs[4] != ys[4]) + | (xs[5] != ys[5]) + | (xs[6] != ys[6]) + | (xs[7] != ys[7]) + { + return false; + } xs = &xs[8..]; ys = &ys[8..]; } diff --git a/src/parallel/impl_par_methods.rs b/src/parallel/impl_par_methods.rs index b0d4f823d..88fe769bf 100644 --- a/src/parallel/impl_par_methods.rs +++ b/src/parallel/impl_par_methods.rs @@ -1,22 +1,15 @@ - -use crate::{ - Dimension, - NdProducer, - Zip, - ArrayBase, - DataMut, -}; +use crate::{ArrayBase, DataMut, Dimension, NdProducer, Zip}; use crate::parallel::prelude::*; - /// # Parallel methods /// /// These methods require crate feature `rayon`. impl ArrayBase - where S: DataMut, - D: Dimension, - A: Send + Sync, +where + S: DataMut, + D: Dimension, + A: Send + Sync, { /// Parallel version of `map_inplace`. /// @@ -24,7 +17,8 @@ impl ArrayBase /// /// Elements are visited in arbitrary order. pub fn par_map_inplace(&mut self, f: F) - where F: Fn(&mut A) + Sync + Send + where + F: Fn(&mut A) + Sync + Send, { self.view_mut().into_par_iter().for_each(f) } @@ -36,17 +30,16 @@ impl ArrayBase /// /// Elements are visited in arbitrary order. pub fn par_mapv_inplace(&mut self, f: F) - where F: Fn(A) -> A + Sync + Send, - A: Clone, + where + F: Fn(A) -> A + Sync + Send, + A: Clone, { - self.view_mut().into_par_iter() + self.view_mut() + .into_par_iter() .for_each(move |x| *x = f(x.clone())) } } - - - // Zip macro_rules! zip_impl { @@ -75,7 +68,7 @@ macro_rules! zip_impl { } } -zip_impl!{ +zip_impl! 
{ [P1], [P1 P2], [P1 P2 P3], diff --git a/src/parallel/into_impls.rs b/src/parallel/into_impls.rs index 3bb5d69aa..c1a5388fd 100644 --- a/src/parallel/into_impls.rs +++ b/src/parallel/into_impls.rs @@ -1,12 +1,13 @@ -use crate::{Array, ArcArray, Dimension, ArrayView, ArrayViewMut}; +use crate::{ArcArray, Array, ArrayView, ArrayViewMut, Dimension}; use super::prelude::IntoParallelIterator; use super::Parallel; /// Requires crate feature `rayon`. impl<'a, A, D> IntoParallelIterator for &'a Array - where D: Dimension, - A: Sync +where + D: Dimension, + A: Sync, { type Item = &'a A; type Iter = Parallel>; @@ -18,8 +19,9 @@ impl<'a, A, D> IntoParallelIterator for &'a Array // This is allowed: goes through `.view()` /// Requires crate feature `rayon`. impl<'a, A, D> IntoParallelIterator for &'a ArcArray - where D: Dimension, - A: Sync +where + D: Dimension, + A: Sync, { type Item = &'a A; type Iter = Parallel>; @@ -30,8 +32,9 @@ impl<'a, A, D> IntoParallelIterator for &'a ArcArray /// Requires crate feature `rayon`. impl<'a, A, D> IntoParallelIterator for &'a mut Array - where D: Dimension, - A: Sync + Send +where + D: Dimension, + A: Sync + Send, { type Item = &'a mut A; type Iter = Parallel>; @@ -43,8 +46,9 @@ impl<'a, A, D> IntoParallelIterator for &'a mut Array // This is allowed: goes through `.view_mut()`, which is unique access /// Requires crate feature `rayon`. impl<'a, A, D> IntoParallelIterator for &'a mut ArcArray - where D: Dimension, - A: Sync + Send + Clone, +where + D: Dimension, + A: Sync + Send + Clone, { type Item = &'a mut A; type Iter = Parallel>; diff --git a/src/parallel/mod.rs b/src/parallel/mod.rs index 6bb93901d..fb9738817 100644 --- a/src/parallel/mod.rs +++ b/src/parallel/mod.rs @@ -103,12 +103,13 @@ //! } //! ``` - /// Into- traits for creating parallelized iterators and/or using [`par_azip!`] pub mod prelude { #[doc(no_inline)] - pub use rayon::prelude::{ParallelIterator, IndexedParallelIterator, - IntoParallelIterator, IntoParallelRefIterator, IntoParallelRefMutIterator}; + pub use rayon::prelude::{ + IndexedParallelIterator, IntoParallelIterator, IntoParallelRefIterator, + IntoParallelRefMutIterator, ParallelIterator, + }; pub use super::par_azip; } @@ -116,7 +117,7 @@ pub mod prelude { pub use self::par::Parallel; pub use crate::par_azip; -mod par; mod impl_par_methods; mod into_impls; +mod par; mod zipmacro; diff --git a/src/parallel/par.rs b/src/parallel/par.rs index 78714935d..bfa7522ad 100644 --- a/src/parallel/par.rs +++ b/src/parallel/par.rs @@ -1,18 +1,17 @@ - -use rayon::iter::ParallelIterator; -use rayon::prelude::IntoParallelIterator; -use rayon::iter::IndexedParallelIterator; -use rayon::iter::plumbing::{Consumer, UnindexedConsumer}; use rayon::iter::plumbing::bridge; -use rayon::iter::plumbing::ProducerCallback; -use rayon::iter::plumbing::Producer; -use rayon::iter::plumbing::UnindexedProducer; use rayon::iter::plumbing::bridge_unindexed; use rayon::iter::plumbing::Folder; +use rayon::iter::plumbing::Producer; +use rayon::iter::plumbing::ProducerCallback; +use rayon::iter::plumbing::UnindexedProducer; +use rayon::iter::plumbing::{Consumer, UnindexedConsumer}; +use rayon::iter::IndexedParallelIterator; +use rayon::iter::ParallelIterator; +use rayon::prelude::IntoParallelIterator; use crate::iter::AxisIter; use crate::iter::AxisIterMut; -use crate::{Dimension}; +use crate::Dimension; use crate::{ArrayView, ArrayViewMut}; /// Parallel iterator wrapper. @@ -111,12 +110,9 @@ macro_rules! 
par_iter_wrapper { } } - par_iter_wrapper!(AxisIter, [Sync]); par_iter_wrapper!(AxisIterMut, [Send + Sync]); - - macro_rules! par_iter_view_wrapper { // thread_bounds are either Sync or Send + Sync ($view_name:ident, [$($thread_bounds:tt)*]) => { @@ -191,8 +187,7 @@ macro_rules! par_iter_view_wrapper { par_iter_view_wrapper!(ArrayView, [Sync]); par_iter_view_wrapper!(ArrayViewMut, [Sync + Send]); - -use crate::{Zip, NdProducer, FoldWhile}; +use crate::{FoldWhile, NdProducer, Zip}; macro_rules! zip_impl { ($([$($p:ident)*],)+) => { @@ -268,7 +263,7 @@ macro_rules! zip_impl { } } -zip_impl!{ +zip_impl! { [P1], [P1 P2], [P1 P2 P3], diff --git a/src/prelude.rs b/src/prelude.rs index 80c98e34c..8527f77c1 100644 --- a/src/prelude.rs +++ b/src/prelude.rs @@ -21,53 +21,36 @@ #[doc(no_inline)] #[allow(deprecated)] pub use crate::{ - ArrayBase, - Array, - ArcArray, - RcArray, - ArrayView, - ArrayViewMut, - RawArrayView, - RawArrayViewMut, + ArcArray, Array, ArrayBase, ArrayView, ArrayViewMut, RawArrayView, RawArrayViewMut, RcArray, }; #[doc(no_inline)] -pub use crate::{ - Axis, - Dim, - Dimension, -}; +pub use crate::{Axis, Dim, Dimension}; #[doc(no_inline)] pub use crate::{Array0, Array1, Array2, Array3, Array4, Array5, Array6, ArrayD}; #[doc(no_inline)] -pub use crate::{ArrayView0, ArrayView1, ArrayView2, ArrayView3, ArrayView4, ArrayView5, -ArrayView6, ArrayViewD}; +pub use crate::{ + ArrayView0, ArrayView1, ArrayView2, ArrayView3, ArrayView4, ArrayView5, ArrayView6, ArrayViewD, +}; #[doc(no_inline)] -pub use crate::{ArrayViewMut0, ArrayViewMut1, ArrayViewMut2, ArrayViewMut3, -ArrayViewMut4, ArrayViewMut5, ArrayViewMut6, ArrayViewMutD}; +pub use crate::{ + ArrayViewMut0, ArrayViewMut1, ArrayViewMut2, ArrayViewMut3, ArrayViewMut4, ArrayViewMut5, + ArrayViewMut6, ArrayViewMutD, +}; #[doc(no_inline)] pub use crate::{Ix0, Ix1, Ix2, Ix3, Ix4, Ix5, Ix6, IxDyn}; #[doc(no_inline)] -pub use crate::{ - arr0, arr1, arr2, - aview0, aview1, aview2, - aview_mut1, -}; +pub use crate::{arr0, arr1, arr2, aview0, aview1, aview2, aview_mut1}; pub use crate::{array, azip, s}; #[doc(no_inline)] -pub use crate::{ - ShapeBuilder, -}; +pub use crate::ShapeBuilder; #[doc(no_inline)] -pub use crate::{ - NdFloat, - AsArray, -}; +pub use crate::{AsArray, NdFloat}; diff --git a/src/private.rs b/src/private.rs index bbbe070bc..ea13164e4 100644 --- a/src/private.rs +++ b/src/private.rs @@ -3,7 +3,6 @@ //! can feel free to extend those traits without worrying about it //! being a breaking change for other implementations. - /// If this type is pub but not publicly reachable, third parties /// can't name it and can't implement traits using it. pub struct PrivateMarker; diff --git a/src/shape_builder.rs b/src/shape_builder.rs index 6748be164..bb5a949ab 100644 --- a/src/shape_builder.rs +++ b/src/shape_builder.rs @@ -1,7 +1,6 @@ - +use crate::dimension::IntoDimension; use crate::Dimension; use crate::{Shape, StrideShape}; -use crate::dimension::IntoDimension; /// A trait for `Shape` and `D where D: Dimension` that allows /// customizing the memory layout (strides) of an array shape. @@ -19,7 +18,8 @@ pub trait ShapeBuilder { } impl From for Shape - where D: Dimension, +where + D: Dimension, { /// Create a `Shape` from `dimension`, using the default memory layout. 
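// Illustrative sketch (not part of the upstream change): what the `ShapeBuilder`
// trait reformatted here is for — letting a plain shape tuple choose a memory
// layout. The function name is made up.
fn _shape_builder_sketch() {
    use ndarray::prelude::*;
    use ndarray::ShapeBuilder;

    // Default construction is C (row-major) order.
    let c = Array2::<f32>::zeros((3, 4));
    assert_eq!(c.strides(), &[4, 1]);

    // `.f()` on the shape requests Fortran (column-major) order instead.
    let f = Array2::<f32>::zeros((3, 4).f());
    assert_eq!(f.strides(), &[1, 3]);

    // `.strides(..)` supplies fully custom strides via `StrideShape`.
    let s = Array::from_shape_vec((2, 2).strides((1, 2)), vec![1., 2., 3., 4.]).unwrap();
    assert_eq!(s[[1, 0]], 2.);
}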
fn from(dimension: D) -> Shape { @@ -28,13 +28,18 @@ impl From for Shape } impl From for StrideShape - where D: Dimension, - T: ShapeBuilder, +where + D: Dimension, + T: ShapeBuilder, { fn from(value: T) -> Self { let shape = value.into_shape(); let d = shape.dim; - let st = if shape.is_c { d.default_strides() } else { d.fortran_strides() }; + let st = if shape.is_c { + d.default_strides() + } else { + d.fortran_strides() + }; StrideShape { strides: st, dim: d, @@ -60,7 +65,8 @@ impl From> for StrideShape */ impl ShapeBuilder for T - where T: IntoDimension +where + T: IntoDimension, { type Dim = T::Dim; type Strides = T; @@ -70,7 +76,9 @@ impl ShapeBuilder for T is_c: true, } } - fn f(self) -> Shape { self.set_f(true) } + fn f(self) -> Shape { + self.set_f(true) + } fn set_f(self, is_f: bool) -> Shape { self.into_shape().set_f(is_f) } @@ -80,12 +88,17 @@ impl ShapeBuilder for T } impl ShapeBuilder for Shape - where D: Dimension +where + D: Dimension, { type Dim = D; type Strides = D; - fn into_shape(self) -> Shape { self } - fn f(self) -> Self { self.set_f(true) } + fn into_shape(self) -> Shape { + self + } + fn f(self) -> Self { + self.set_f(true) + } fn set_f(mut self, is_f: bool) -> Self { self.is_c = !is_f; self @@ -99,12 +112,14 @@ impl ShapeBuilder for Shape } } - impl Shape - where D: Dimension, +where + D: Dimension, { // Return a reference to the dimension //pub fn dimension(&self) -> &D { &self.dim } /// Return the size of the shape in number of elements - pub fn size(&self) -> usize { self.dim.size() } + pub fn size(&self) -> usize { + self.dim.size() + } } diff --git a/src/slice.rs b/src/slice.rs index cb86900ba..d88378b60 100644 --- a/src/slice.rs +++ b/src/slice.rs @@ -5,11 +5,11 @@ // , at your // option. This file may not be copied, modified, or distributed // except according to those terms. -use crate::error::{ShapeError, ErrorKind}; -use std::ops::{Deref, Range, RangeFrom, RangeFull, RangeInclusive, RangeTo, RangeToInclusive}; +use crate::error::{ErrorKind, ShapeError}; +use crate::{ArrayView, ArrayViewMut, Dimension, RawArrayViewMut}; use std::fmt; use std::marker::PhantomData; -use crate::{ArrayView, ArrayViewMut, Dimension, RawArrayViewMut}; +use std::ops::{Deref, Range, RangeFrom, RangeFull, RangeInclusive, RangeTo, RangeToInclusive}; /// A slice (range with step size). /// @@ -48,11 +48,7 @@ impl Slice { /// (This method checks with a debug assertion that `step` is not zero.) pub fn new(start: isize, end: Option, step: isize) -> Slice { debug_assert_ne!(step, 0, "Slice::new: step must be nonzero"); - Slice { - start, - end, - step, - } + Slice { start, end, step } } /// Create a new `Slice` with the given step size (multiplied with the @@ -63,7 +59,10 @@ impl Slice { #[inline] pub fn step_by(self, step: isize) -> Self { debug_assert_ne!(step, 0, "Slice::step_by: step must be nonzero"); - Slice { step: self.step * step, ..self } + Slice { + step: self.step * step, + ..self + } } } @@ -105,7 +104,7 @@ pub enum SliceOrIndex { Index(isize), } -copy_and_clone!{SliceOrIndex} +copy_and_clone! {SliceOrIndex} impl SliceOrIndex { /// Returns `true` if `self` is a `Slice` value. @@ -414,7 +413,6 @@ where } } - #[doc(hidden)] pub trait SliceNextDim { fn next_dim(&self, _: PhantomData) -> PhantomData; @@ -427,7 +425,7 @@ macro_rules! impl_slicenextdim_equal { PhantomData } } - } + }; } impl_slicenextdim_equal!(isize); impl_slicenextdim_equal!(usize); @@ -627,7 +625,7 @@ macro_rules! s( /// Returns a ZST representing the lifetime of the mutable view. 
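// Illustrative sketch (not part of the upstream change): `Slice` and the `s![]`
// macro reformatted above, showing the start/end/step semantics (negative
// indices count from the end). The function name is made up.
fn _slice_sketch() {
    use ndarray::prelude::*;
    use ndarray::Slice;

    let a: Array2<i32> = Array::from_shape_vec((2, 5), (0..10).collect()).unwrap();

    // All rows, columns 1..4 with step 2.
    let v = a.slice(s![.., 1..4;2]);
    assert_eq!(v, array![[1, 3], [6, 8]]);

    // The same columns selected with `Slice` on one axis; `step_by`
    // multiplies the existing step.
    let w = a.slice_axis(Axis(1), Slice::new(1, Some(-1), 1).step_by(2));
    assert_eq!(v, w);
}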
#[doc(hidden)] pub fn life_of_view_mut<'a, A, D: Dimension>( - _view: &ArrayViewMut<'a, A, D> + _view: &ArrayViewMut<'a, A, D>, ) -> PhantomData<&'a mut A> { PhantomData } diff --git a/src/stacking.rs b/src/stacking.rs index a2dc55852..c8bdb67d0 100644 --- a/src/stacking.rs +++ b/src/stacking.rs @@ -6,8 +6,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use crate::error::{from_kind, ErrorKind, ShapeError}; use crate::imp_prelude::*; -use crate::error::{ShapeError, ErrorKind, from_kind}; /// Stack arrays along the given axis. /// @@ -29,10 +29,13 @@ use crate::error::{ShapeError, ErrorKind, from_kind}; /// [3., 3.]])) /// ); /// ``` -pub fn stack<'a, A, D>(axis: Axis, arrays: &[ArrayView<'a, A, D>]) - -> Result, ShapeError> - where A: Copy, - D: RemoveAxis +pub fn stack<'a, A, D>( + axis: Axis, + arrays: &[ArrayView<'a, A, D>], +) -> Result, ShapeError> +where + A: Copy, + D: RemoveAxis, { if arrays.len() == 0 { return Err(from_kind(ErrorKind::Unsupported)); @@ -42,7 +45,10 @@ pub fn stack<'a, A, D>(axis: Axis, arrays: &[ArrayView<'a, A, D>]) return Err(from_kind(ErrorKind::OutOfBounds)); } let common_dim = res_dim.remove_axis(axis); - if arrays.iter().any(|a| a.raw_dim().remove_axis(axis) != common_dim) { + if arrays + .iter() + .any(|a| a.raw_dim().remove_axis(axis) != common_dim) + { return Err(from_kind(ErrorKind::IncompatibleShape)); } diff --git a/src/zip/mod.rs b/src/zip/mod.rs index b26e2aebb..f40f18031 100644 --- a/src/zip/mod.rs +++ b/src/zip/mod.rs @@ -11,12 +11,12 @@ mod zipmacro; use crate::imp_prelude::*; use crate::IntoDimension; -use crate::NdIndex; use crate::Layout; +use crate::NdIndex; -use crate::layout::{CORDER, FORDER}; +use crate::indexes::{indices, Indices}; use crate::layout::LayoutPriv; -use crate::indexes::{Indices, indices}; +use crate::layout::{CORDER, FORDER}; /// Return if the expression is a break value. macro_rules! fold_while { @@ -25,29 +25,30 @@ macro_rules! fold_while { FoldWhile::Continue(x) => x, x => return x, } - } + }; } - /// Broadcast an array so that it acts like a larger size and/or shape array. /// /// See [broadcasting][1] for more information. /// /// [1]: struct.ArrayBase.html#broadcasting trait Broadcast - where E: IntoDimension, +where + E: IntoDimension, { - type Output: NdProducer; + type Output: NdProducer; /// Broadcast the array to the new dimensions `shape`. /// /// ***Panics*** if broadcasting isn’t possible. fn broadcast_unwrap(self, shape: E) -> Self::Output; - private_decl!{} + private_decl! {} } impl ArrayBase - where S: Data, - D: Dimension, +where + S: Data, + D: Dimension, { pub(crate) fn layout_impl(&self) -> Layout { Layout::new(if self.is_standard_layout() { @@ -65,25 +66,25 @@ impl ArrayBase } impl<'a, A, D, E> Broadcast for ArrayView<'a, A, D> - where E: IntoDimension, - D: Dimension, +where + E: IntoDimension, + D: Dimension, { type Output = ArrayView<'a, A, E::Dim>; fn broadcast_unwrap(self, shape: E) -> Self::Output { let res: ArrayView = (&self).broadcast_unwrap(shape.into_dimension()); - unsafe { - ArrayView::new_(res.ptr, res.dim, res.strides) - } + unsafe { ArrayView::new_(res.ptr, res.dim, res.strides) } } - private_impl!{} + private_impl! 
{} } -pub trait Splittable : Sized { +pub trait Splittable: Sized { fn split_at(self, axis: Axis, index: Ix) -> (Self, Self); } impl Splittable for D - where D: Dimension, +where + D: Dimension, { fn split_at(self, axis: Axis, index: Ix) -> (Self, Self) { let mut d1 = self; @@ -106,16 +107,21 @@ pub trait IntoNdProducer { type Item; /// Dimension type of the producer type Dim: Dimension; - type Output: NdProducer; + type Output: NdProducer; /// Convert the value into an `NdProducer`. fn into_producer(self) -> Self::Output; } -impl
<P>
IntoNdProducer for P where P: NdProducer { +impl
<P>
IntoNdProducer for P +where + P: NdProducer, +{ type Item = P::Item; type Dim = P::Dim; type Output = Self; - fn into_producer(self) -> Self::Output { self } + fn into_producer(self) -> Self::Output { + self + } } /// A producer of an n-dimensional set of elements; @@ -150,7 +156,7 @@ pub trait NdProducer { // stride (= along a particular axis) #[doc(hidden)] /// Pointer or stand-in for pointer - type Ptr: Offset; + type Ptr: Offset; #[doc(hidden)] /// Pointer stride type Stride: Copy; @@ -175,14 +181,16 @@ pub trait NdProducer { #[inline(always)] fn contiguous_stride(&self) -> Self::Stride; #[doc(hidden)] - fn split_at(self, axis: Axis, index: usize) -> (Self, Self) where Self: Sized; - private_decl!{} + fn split_at(self, axis: Axis, index: usize) -> (Self, Self) + where + Self: Sized; + private_decl! {} } -pub trait Offset : Copy { +pub trait Offset: Copy { type Stride: Copy; unsafe fn stride_offset(self, s: Self::Stride, index: usize) -> Self; - private_decl!{} + private_decl! {} } impl Offset for *mut T { @@ -190,12 +198,12 @@ impl Offset for *mut T { unsafe fn stride_offset(self, s: Self::Stride, index: usize) -> Self { self.offset(s * (index as isize)) } - private_impl!{} + private_impl! {} } -trait ZippableTuple : Sized { +trait ZippableTuple: Sized { type Item; - type Ptr: OffsetTuple + Copy; + type Ptr: OffsetTuple + Copy; type Dim: Dimension; type Stride: Copy; fn as_ptr(&self) -> Self::Ptr; @@ -209,8 +217,9 @@ trait ZippableTuple : Sized { /// An array reference is an n-dimensional producer of element references /// (like ArrayView). impl<'a, A: 'a, S, D> IntoNdProducer for &'a ArrayBase - where D: Dimension, - S: Data, +where + D: Dimension, + S: Data, { type Item = &'a A; type Dim = D; @@ -223,8 +232,9 @@ impl<'a, A: 'a, S, D> IntoNdProducer for &'a ArrayBase /// A mutable array reference is an n-dimensional producer of mutable element /// references (like ArrayViewMut). impl<'a, A: 'a, S, D> IntoNdProducer for &'a mut ArrayBase - where D: Dimension, - S: DataMut, +where + D: Dimension, + S: DataMut, { type Item = &'a mut A; type Dim = D; @@ -274,14 +284,13 @@ impl<'a, A: 'a> IntoNdProducer for &'a mut Vec { } } -impl<'a, A, D: Dimension> NdProducer for ArrayView<'a, A, D> -{ +impl<'a, A, D: Dimension> NdProducer for ArrayView<'a, A, D> { type Item = &'a A; type Dim = D; type Ptr = *mut A; type Stride = isize; - private_impl!{} + private_impl! {} #[doc(hidden)] fn raw_dim(&self) -> Self::Dim { self.raw_dim() @@ -318,7 +327,9 @@ impl<'a, A, D: Dimension> NdProducer for ArrayView<'a, A, D> } #[inline(always)] - fn contiguous_stride(&self) -> Self::Stride { 1 } + fn contiguous_stride(&self) -> Self::Stride { + 1 + } #[doc(hidden)] fn split_at(self, axis: Axis, index: usize) -> (Self, Self) { @@ -332,7 +343,7 @@ impl<'a, A, D: Dimension> NdProducer for ArrayViewMut<'a, A, D> { type Ptr = *mut A; type Stride = isize; - private_impl!{} + private_impl! {} #[doc(hidden)] fn raw_dim(&self) -> Self::Dim { self.raw_dim() @@ -369,7 +380,9 @@ impl<'a, A, D: Dimension> NdProducer for ArrayViewMut<'a, A, D> { } #[inline(always)] - fn contiguous_stride(&self) -> Self::Stride { 1 } + fn contiguous_stride(&self) -> Self::Stride { + 1 + } #[doc(hidden)] fn split_at(self, axis: Axis, index: usize) -> (Self, Self) { @@ -377,7 +390,6 @@ impl<'a, A, D: Dimension> NdProducer for ArrayViewMut<'a, A, D> { } } - /// Lock step function application across several arrays or other producers. 
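// Illustrative sketch (not part of the upstream change): typical use of the
// producer/Zip machinery this file reformats — lock-step traversal of several
// arrays of the same shape. The function name is made up.
fn _zip_sketch() {
    use ndarray::prelude::*;
    use ndarray::Zip;

    let a = array![[1., 2.], [3., 4.]];
    let b = array![[10., 20.], [30., 40.]];
    let mut c = Array2::<f64>::zeros((2, 2));

    // Each producer contributes one argument per element: `&mut c` yields
    // `&mut f64`, while the borrows of `a` and `b` yield `&f64`.
    Zip::from(&mut c).and(&a).and(&b).apply(|c, &a, &b| *c = a + b);
    assert_eq!(c, array![[11., 22.], [33., 44.]]);
}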
/// /// Zip allows matching several producers to each other elementwise and applying @@ -453,29 +465,32 @@ pub struct Zip { layout: Layout, } -impl Zip<(P, ), D> - where D: Dimension, - P: NdProducer +impl Zip<(P,), D> +where + D: Dimension, + P: NdProducer, { /// Create a new `Zip` from the input array or other producer `p`. /// /// The Zip will take the exact dimension of `p` and all inputs /// must have the same dimensions (or be broadcast to them). pub fn from(p: IP) -> Self - where IP: IntoNdProducer + where + IP: IntoNdProducer, { let array = p.into_producer(); let dim = array.raw_dim(); Zip { dimension: dim, layout: array.layout(), - parts: (array, ), + parts: (array,), } } } impl Zip<(Indices, P), D> - where D: Dimension + Copy, - P: NdProducer, +where + D: Dimension + Copy, + P: NdProducer, { /// Create a new `Zip` with an index producer and the producer `p`. /// @@ -484,7 +499,8 @@ impl Zip<(Indices, P), D> /// /// *Note:* Indexed zip has overhead. pub fn indexed(p: IP) -> Self - where IP: IntoNdProducer + where + IP: IntoNdProducer, { let array = p.into_producer(); let dim = array.raw_dim(); @@ -493,15 +509,19 @@ impl Zip<(Indices, P), D> } impl Zip - where D: Dimension, +where + D: Dimension, { - fn check
<P>
(&self, part: &P) - where P: NdProducer + where + P: NdProducer, { - ndassert!(part.equal_dim(&self.dimension), + ndassert!( + part.equal_dim(&self.dimension), "Zip: Producer dimension mismatch, expected: {:?}, got: {:?}", - self.dimension, part.raw_dim()); + self.dimension, + part.raw_dim() + ); } /// Return a the number of element tuples in the Zip @@ -521,23 +541,32 @@ impl Zip /// others. fn max_stride_axis(&self) -> Axis { let i = match self.layout.flag() { - FORDER => self.dimension.slice().iter() - .rposition(|&len| len > 1).unwrap_or(self.dimension.ndim() - 1), + FORDER => self + .dimension + .slice() + .iter() + .rposition(|&len| len > 1) + .unwrap_or(self.dimension.ndim() - 1), /* corder or default */ - _ => self.dimension.slice().iter() - .position(|&len| len > 1).unwrap_or(0), + _ => self + .dimension + .slice() + .iter() + .position(|&len| len > 1) + .unwrap_or(0), }; Axis(i) } - } impl Zip - where D: Dimension, +where + D: Dimension, { fn apply_core(&mut self, acc: Acc, function: F) -> FoldWhile - where F: FnMut(Acc, P::Item) -> FoldWhile, - P: ZippableTuple, + where + F: FnMut(Acc, P::Item) -> FoldWhile, + P: ZippableTuple, { if self.layout.is(CORDER | FORDER) { self.apply_core_contiguous(acc, function) @@ -546,8 +575,9 @@ impl Zip } } fn apply_core_contiguous(&mut self, mut acc: Acc, mut function: F) -> FoldWhile - where F: FnMut(Acc, P::Item) -> FoldWhile, - P: ZippableTuple, + where + F: FnMut(Acc, P::Item) -> FoldWhile, + P: ZippableTuple, { debug_assert!(self.layout.is(CORDER | FORDER)); let size = self.dimension.size(); @@ -563,8 +593,9 @@ impl Zip } fn apply_core_strided(&mut self, mut acc: Acc, mut function: F) -> FoldWhile - where F: FnMut(Acc, P::Item) -> FoldWhile, - P: ZippableTuple, + where + F: FnMut(Acc, P::Item) -> FoldWhile, + P: ZippableTuple, { let n = self.dimension.ndim(); if n == 0 { @@ -607,7 +638,6 @@ impl Offset for *mut T { } */ - trait OffsetTuple { type Args; unsafe fn stride_offset(self, stride: Self::Args, index: usize) -> Self; @@ -636,7 +666,7 @@ macro_rules! offset_impl { } } -offset_impl!{ +offset_impl! { [A ][ a], [A B][ a b], [A B C][ a b c], @@ -695,7 +725,7 @@ macro_rules! zipt_impl { } } -zipt_impl!{ +zipt_impl! { [A ][ a], [A B][ a b], [A B C][ a b c], @@ -805,7 +835,7 @@ macro_rules! map_impl { } } -map_impl!{ +map_impl! { [true P1], [true P1 P2], [true P1 P2 P3], @@ -827,7 +857,7 @@ impl FoldWhile { /// Return the inner value pub fn into_inner(self) -> T { match self { - FoldWhile::Continue(x) | FoldWhile::Done(x) => x + FoldWhile::Continue(x) | FoldWhile::Done(x) => x, } } diff --git a/src/zip/zipmacro.rs b/src/zip/zipmacro.rs index c836ed52f..55e83027f 100644 --- a/src/zip/zipmacro.rs +++ b/src/zip/zipmacro.rs @@ -1,4 +1,3 @@ - #[macro_export] /// Array zip macro: lock step function application across several arrays and /// producers. @@ -165,4 +164,3 @@ macro_rules! 
azip { $crate::azip!(@parse [] [] $($t)*); } } - diff --git a/tests/array-construct.rs b/tests/array-construct.rs index 7e320c314..ae7c711d9 100644 --- a/tests/array-construct.rs +++ b/tests/array-construct.rs @@ -7,8 +7,9 @@ use ndarray::prelude::*; #[test] fn test_from_shape_fn() { let step = 3.1; - let h = Array::from_shape_fn((5, 5), - |(i, j)| f64::sin(i as f64 / step) * f64::cos(j as f64 / step)); + let h = Array::from_shape_fn((5, 5), |(i, j)| { + f64::sin(i as f64 / step) * f64::cos(j as f64 / step) + }); assert_eq!(h.shape(), &[5, 5]); } @@ -33,8 +34,8 @@ fn test_arc_into_owned() { #[test] fn test_arcarray_thread_safe() { - fn is_send(_t: &T) { } - fn is_sync(_t: &T) { } + fn is_send(_t: &T) {} + fn is_sync(_t: &T) {} let a = Array2::from_elem((5, 5), 1.).into_shared(); is_send(&a); @@ -47,7 +48,9 @@ fn test_uninit() { let mut a = Array::::uninitialized((3, 4).f()); assert_eq!(a.dim(), (3, 4)); assert_eq!(a.strides(), &[1, 3]); - let b = Array::::linspace(0., 25., a.len()).into_shape(a.dim()).unwrap(); + let b = Array::::linspace(0., 25., a.len()) + .into_shape(a.dim()) + .unwrap(); a.assign(&b); assert_eq!(&a, &b); assert_eq!(a.t(), b.t()); diff --git a/tests/array.rs b/tests/array.rs index 28e2e7fbc..0aac35fbf 100644 --- a/tests/array.rs +++ b/tests/array.rs @@ -1,19 +1,15 @@ #![allow(non_snake_case)] -extern crate ndarray; extern crate defmac; extern crate itertools; +extern crate ndarray; -use ndarray::{Slice, SliceInfo, SliceOrIndex}; -use ndarray::prelude::*; -use ndarray::{ - rcarr2, - arr3, - multislice, -}; -use ndarray::indices; use defmac::defmac; use itertools::{enumerate, zip}; +use ndarray::indices; +use ndarray::prelude::*; +use ndarray::{arr3, multislice, rcarr2}; +use ndarray::{Slice, SliceInfo, SliceOrIndex}; macro_rules! assert_panics { ($body:expr) => { @@ -30,8 +26,7 @@ macro_rules! 
assert_panics { } #[test] -fn test_matmul_arcarray() -{ +fn test_matmul_arcarray() { let mut A = ArcArray::::zeros((2, 3)); for (i, elt) in A.iter_mut().enumerate() { *elt = i; @@ -47,24 +42,23 @@ fn test_matmul_arcarray() println!("B = \n{:?}", B); println!("A x B = \n{:?}", c); unsafe { - let result = ArcArray::from_shape_vec_unchecked((2, 4), vec![20, 23, 26, 29, 56, 68, 80, 92]); + let result = + ArcArray::from_shape_vec_unchecked((2, 4), vec![20, 23, 26, 29, 56, 68, 80, 92]); assert_eq!(c.shape(), result.shape()); - assert!(c.iter().zip(result.iter()).all(|(a,b)| a == b)); + assert!(c.iter().zip(result.iter()).all(|(a, b)| a == b)); assert!(c == result); } } #[allow(unused)] -fn arrayview_shrink_lifetime<'a, 'b: 'a>(view: ArrayView1<'b, f64>) - -> ArrayView1<'a, f64> -{ +fn arrayview_shrink_lifetime<'a, 'b: 'a>(view: ArrayView1<'b, f64>) -> ArrayView1<'a, f64> { view.reborrow() } #[allow(unused)] -fn arrayviewmut_shrink_lifetime<'a, 'b: 'a>(view: ArrayViewMut1<'b, f64>) - -> ArrayViewMut1<'a, f64> -{ +fn arrayviewmut_shrink_lifetime<'a, 'b: 'a>( + view: ArrayViewMut1<'b, f64>, +) -> ArrayViewMut1<'a, f64> { view.reborrow() } @@ -72,18 +66,16 @@ fn arrayviewmut_shrink_lifetime<'a, 'b: 'a>(view: ArrayViewMut1<'b, f64>) fn test_mat_mul() { // smoke test, a big matrix multiplication of uneven size let (n, m) = (45, 33); - let a = ArcArray::linspace(0., ((n * m) - 1) as f32, n as usize * m as usize ).reshape((n, m)); + let a = ArcArray::linspace(0., ((n * m) - 1) as f32, n as usize * m as usize).reshape((n, m)); let b = ArcArray::eye(m); assert_eq!(a.dot(&b), a); let c = ArcArray::eye(n); assert_eq!(c.dot(&a), a); } - #[deny(unsafe_code)] #[test] -fn test_slice() -{ +fn test_slice() { let mut A = ArcArray::::zeros((3, 4, 5)); for (i, elt) in A.iter_mut().enumerate() { *elt = i; @@ -111,8 +103,7 @@ fn test_slice_inclusive_range() { /// `ArrayView1` and `ArrayView2`, so the compiler needs to determine which /// type is the correct result for the `.slice()` call. 
#[test] -fn test_slice_infer() -{ +fn test_slice_infer() { let a = array![1., 2.]; let b = array![[3., 4.], [5., 6.]]; b.slice(s![..-1, ..]).dot(&a); @@ -132,8 +123,9 @@ fn test_slice_with_many_dim() { let correct = array![ [A[&[0, 0, 0, 0, 0, 1, 0][..]], A[&[0, 0, 2, 0, 0, 1, 0][..]]], [A[&[1, 0, 0, 0, 0, 1, 0][..]], A[&[1, 0, 2, 0, 0, 1, 0][..]]] - ].into_shape(new_shape) - .unwrap(); + ] + .into_shape(new_shape) + .unwrap(); assert_eq!(vi, correct); let vi = A.slice(s![..2, 0, ..;2, 0, 0, 1, 0]); @@ -207,7 +199,8 @@ fn test_slice_array_dyn() { SliceOrIndex::from(1..), SliceOrIndex::from(1), SliceOrIndex::from(..).step_by(2), - ]).unwrap(); + ]) + .unwrap(); arr.slice(info); arr.slice_mut(info); arr.view().slice_move(info); @@ -221,7 +214,8 @@ fn test_slice_dyninput_array_dyn() { SliceOrIndex::from(1..), SliceOrIndex::from(1), SliceOrIndex::from(..).step_by(2), - ]).unwrap(); + ]) + .unwrap(); arr.slice(info); arr.slice_mut(info); arr.view().slice_move(info); @@ -235,7 +229,8 @@ fn test_slice_dyninput_vec_fixed() { SliceOrIndex::from(1..), SliceOrIndex::from(1), SliceOrIndex::from(..).step_by(2), - ]).unwrap(); + ]) + .unwrap(); arr.slice(info.as_ref()); arr.slice_mut(info.as_ref()); arr.view().slice_move(info.as_ref()); @@ -249,7 +244,8 @@ fn test_slice_dyninput_vec_dyn() { SliceOrIndex::from(1..), SliceOrIndex::from(1), SliceOrIndex::from(..).step_by(2), - ]).unwrap(); + ]) + .unwrap(); arr.slice(info.as_ref()); arr.slice_mut(info.as_ref()); arr.view().slice_move(info.as_ref()); @@ -265,24 +261,22 @@ fn test_slice_with_subview() { let vi = arr.slice(s![1.., 2, ..;2]); assert_eq!(vi.shape(), &[2, 2]); - assert!( - vi.iter() - .zip(arr.index_axis(Axis(1), 2).slice(s![1.., ..;2]).iter()) - .all(|(a, b)| a == b) - ); + assert!(vi + .iter() + .zip(arr.index_axis(Axis(1), 2).slice(s![1.., ..;2]).iter()) + .all(|(a, b)| a == b)); let vi = arr.slice(s![1, 2, ..;2]); assert_eq!(vi.shape(), &[2]); - assert!( - vi.iter() - .zip( - arr.index_axis(Axis(0), 1) - .index_axis(Axis(0), 2) - .slice(s![..;2]) - .iter() - ) - .all(|(a, b)| a == b) - ); + assert!(vi + .iter() + .zip( + arr.index_axis(Axis(0), 1) + .index_axis(Axis(0), 2) + .slice(s![..;2]) + .iter() + ) + .all(|(a, b)| a == b)); let vi = arr.slice(s![1, 2, 3]); assert_eq!(vi.shape(), &[]); @@ -300,20 +294,18 @@ fn test_slice_collapse_with_indices() { let mut vi = arr.view(); vi.slice_collapse(s![1.., 2, ..;2]); assert_eq!(vi.shape(), &[2, 1, 2]); - assert!( - vi.iter() - .zip(arr.slice(s![1.., 2..3, ..;2]).iter()) - .all(|(a, b)| a == b) - ); + assert!(vi + .iter() + .zip(arr.slice(s![1.., 2..3, ..;2]).iter()) + .all(|(a, b)| a == b)); let mut vi = arr.view(); vi.slice_collapse(s![1, 2, ..;2]); assert_eq!(vi.shape(), &[1, 1, 2]); - assert!( - vi.iter() - .zip(arr.slice(s![1..2, 2..3, ..;2]).iter()) - .all(|(a, b)| a == b) - ); + assert!(vi + .iter() + .zip(arr.slice(s![1..2, 2..3, ..;2]).iter()) + .all(|(a, b)| a == b)); let mut vi = arr.view(); vi.slice_collapse(s![1, 2, 3]); @@ -470,8 +462,7 @@ fn index_out_of_bounds() { #[should_panic] #[test] -fn slice_oob() -{ +fn slice_oob() { let a = ArcArray::::zeros((3, 4)); let _vi = a.slice(s![..10, ..]); } @@ -485,15 +476,13 @@ fn slice_axis_oob() { #[should_panic] #[test] -fn slice_wrong_dim() -{ +fn slice_wrong_dim() { let a = ArcArray::::zeros(vec![3, 4, 5]); let _vi = a.slice(s![.., ..]); } #[test] -fn test_index() -{ +fn test_index() { let mut A = ArcArray::::zeros((2, 3)); for (i, elt) in A.iter_mut().enumerate() { *elt = i; @@ -522,8 +511,7 @@ fn test_index_arrays() { } #[test] -fn 
test_add() -{ +fn test_add() { let mut A = ArcArray::::zeros((2, 2)); for (i, elt) in A.iter_mut().enumerate() { *elt = i; @@ -538,19 +526,17 @@ fn test_add() } #[test] -fn test_multidim() -{ - let mut mat = ArcArray::zeros(2*3*4*5*6).reshape((2,3,4,5,6)); - mat[(0,0,0,0,0)] = 22u8; +fn test_multidim() { + let mut mat = ArcArray::zeros(2 * 3 * 4 * 5 * 6).reshape((2, 3, 4, 5, 6)); + mat[(0, 0, 0, 0, 0)] = 22u8; { for (i, elt) in mat.iter_mut().enumerate() { *elt = i as u8; } } - assert_eq!(mat.shape(), &[2,3,4,5,6]); + assert_eq!(mat.shape(), &[2, 3, 4, 5, 6]); } - /* array([[[ 7, 6], [ 5, 4], @@ -563,8 +549,7 @@ array([[[ 7, 6], [ 9, 8]]]) */ #[test] -fn test_negative_stride_arcarray() -{ +fn test_negative_stride_arcarray() { let mut mat = ArcArray::zeros((2, 4, 2)); mat[[0, 0, 0]] = 1.0f32; for (i, elt) in mat.iter_mut().enumerate() { @@ -575,7 +560,9 @@ fn test_negative_stride_arcarray() let vi = mat.slice(s![.., ..;-1, ..;-1]); assert_eq!(vi.shape(), &[2, 4, 2]); // Test against sequential iterator - let seq = [7f32,6., 5.,4.,3.,2.,1.,0.,15.,14.,13., 12.,11., 10., 9., 8.]; + let seq = [ + 7f32, 6., 5., 4., 3., 2., 1., 0., 15., 14., 13., 12., 11., 10., 9., 8., + ]; for (a, b) in vi.clone().iter().zip(seq.iter()) { assert_eq!(*a, *b); } @@ -590,9 +577,8 @@ fn test_negative_stride_arcarray() } #[test] -fn test_cow() -{ - let mut mat = ArcArray::zeros((2,2)); +fn test_cow() { + let mut mat = ArcArray::zeros((2, 2)); mat[[0, 0]] = 1; let n = mat.clone(); mat[[0, 1]] = 2; @@ -623,8 +609,7 @@ fn test_cow() } #[test] -fn test_cow_shrink() -{ +fn test_cow_shrink() { // A test for clone-on-write in the case that // mutation shrinks the array and gives it different strides // @@ -658,14 +643,13 @@ fn test_cow_shrink() } #[test] -fn test_sub() -{ +fn test_sub() { let mat = ArcArray::linspace(0., 15., 16).reshape((2, 4, 2)); let s1 = mat.index_axis(Axis(0), 0); let s2 = mat.index_axis(Axis(0), 1); assert_eq!(s1.shape(), &[4, 2]); assert_eq!(s2.shape(), &[4, 2]); - let n = ArcArray::linspace(8., 15., 8).reshape((4,2)); + let n = ArcArray::linspace(8., 15., 8).reshape((4, 2)); assert_eq!(n, s2); let m = ArcArray::from_vec(vec![2., 3., 10., 11.]).reshape((2, 2)); assert_eq!(m, mat.index_axis(Axis(1), 1)); @@ -678,36 +662,40 @@ fn test_sub_oob_1() { mat.index_axis(Axis(0), 2); } - #[test] -fn test_select(){ +fn test_select() { // test for 2-d array - let x = arr2(&[[0., 1.], [1.,0.],[1.,0.],[1.,0.],[1.,0.],[0., 1.],[0., 1.]]); - let r = x.select(Axis(0),&[1,3,5]); - let c = x.select(Axis(1),&[1]); - let r_target = arr2(&[[1.,0.],[1.,0.],[0., 1.]]); - let c_target = arr2(&[[1.,0.,0.,0.,0., 1., 1.]]); - assert!(r.all_close(&r_target,1e-8)); - assert!(c.all_close(&c_target.t(),1e-8)); + let x = arr2(&[ + [0., 1.], + [1., 0.], + [1., 0.], + [1., 0.], + [1., 0.], + [0., 1.], + [0., 1.], + ]); + let r = x.select(Axis(0), &[1, 3, 5]); + let c = x.select(Axis(1), &[1]); + let r_target = arr2(&[[1., 0.], [1., 0.], [0., 1.]]); + let c_target = arr2(&[[1., 0., 0., 0., 0., 1., 1.]]); + assert!(r.all_close(&r_target, 1e-8)); + assert!(c.all_close(&c_target.t(), 1e-8)); // test for 3-d array - let y = arr3(&[[[1., 2., 3.], - [1.5, 1.5, 3.]], - [[1., 2., 8.], - [1., 2.5, 3.]]]); - let r = y.select(Axis(1),&[1]); - let c = y.select(Axis(2),&[1]); + let y = arr3(&[ + [[1., 2., 3.], [1.5, 1.5, 3.]], + [[1., 2., 8.], [1., 2.5, 3.]], + ]); + let r = y.select(Axis(1), &[1]); + let c = y.select(Axis(2), &[1]); let r_target = arr3(&[[[1.5, 1.5, 3.]], [[1., 2.5, 3.]]]); - let c_target = arr3(&[[[2.],[1.5]],[[2.],[2.5]]]); - 
assert!(r.all_close(&r_target,1e-8)); - assert!(c.all_close(&c_target,1e-8)); - + let c_target = arr3(&[[[2.], [1.5]], [[2.], [2.5]]]); + assert!(r.all_close(&r_target, 1e-8)); + assert!(c.all_close(&c_target, 1e-8)); } - #[test] -fn diag() -{ +fn diag() { let d = arr2(&[[1., 2., 3.0f32]]).into_diag(); assert_eq!(d.dim(), 1); let a = arr2(&[[1., 2., 3.0f32], [0., 0., 0.]]); @@ -731,7 +719,7 @@ fn merge_axes() { assert!(v.merge_axes(Axis($take), Axis($into))); assert_eq!(v.len_of(Axis($take)), if merged_len == 0 { 0 } else { 1 }); assert_eq!(v.len_of(Axis($into)), merged_len); - } + }; } macro_rules! assert_not_merged { ($arr:expr, $slice:expr, $take:expr, $into:expr) => { @@ -741,7 +729,7 @@ fn merge_axes() { assert!(!v.merge_axes(Axis($take), Axis($into))); assert_eq!(v.raw_dim(), old_dim); assert_eq!(v.strides(), &old_strides[..]); - } + }; } let a = Array4::::zeros((3, 4, 5, 4)); @@ -811,10 +799,9 @@ fn merge_axes() { } #[test] -fn swapaxes() -{ +fn swapaxes() { let mut a = arr2(&[[1., 2.], [3., 4.0f32]]); - let b = arr2(&[[1., 3.], [2., 4.0f32]]); + let b = arr2(&[[1., 3.], [2., 4.0f32]]); assert!(a != b); a.swap_axes(0, 1); assert_eq!(a, b); @@ -825,8 +812,7 @@ fn swapaxes() } #[test] -fn permuted_axes() -{ +fn permuted_axes() { let a = array![1].index_axis_move(Axis(0), 0); let permuted = a.view().permuted_axes([]); assert_eq!(a, permuted); @@ -858,31 +844,30 @@ fn permuted_axes() #[should_panic] #[test] -fn permuted_axes_repeated_axis() -{ +fn permuted_axes_repeated_axis() { let a = Array::from_iter(0..24).into_shape((2, 3, 4)).unwrap(); a.view().permuted_axes([1, 0, 1]); } #[should_panic] #[test] -fn permuted_axes_missing_axis() -{ - let a = Array::from_iter(0..24).into_shape((2, 3, 4)).unwrap().into_dyn(); +fn permuted_axes_missing_axis() { + let a = Array::from_iter(0..24) + .into_shape((2, 3, 4)) + .unwrap() + .into_dyn(); a.view().permuted_axes(&[2, 0][..]); } #[should_panic] #[test] -fn permuted_axes_oob() -{ +fn permuted_axes_oob() { let a = Array::from_iter(0..24).into_shape((2, 3, 4)).unwrap(); a.view().permuted_axes([1, 0, 3]); } #[test] -fn standard_layout() -{ +fn standard_layout() { let mut a = arr2(&[[1., 2.], [3., 4.0]]); assert!(a.is_standard_layout()); a.swap_axes(0, 1); @@ -900,10 +885,9 @@ fn standard_layout() } #[test] -fn assign() -{ +fn assign() { let mut a = arr2(&[[1., 2.], [3., 4.]]); - let b = arr2(&[[1., 3.], [2., 4.]]); + let b = arr2(&[[1., 3.], [2., 4.]]); a.assign(&b); assert_eq!(a, b); @@ -926,8 +910,7 @@ fn assign() } #[test] -fn sum_mean() -{ +fn sum_mean() { let a = arr2(&[[1., 2.], [3., 4.]]); assert_eq!(a.sum_axis(Axis(0)), arr1(&[4., 6.])); assert_eq!(a.sum_axis(Axis(1)), arr1(&[3., 7.])); @@ -963,7 +946,7 @@ fn var_axis() { [-9.54, 5.09, 3.21, 6.56], ], [ - [ 8.23, -9.63, 3.76, -3.48], + [8.23, -9.63, 3.76, -3.48], [-5.46, 5.86, -2.81, 1.35], [-1.08, 4.66, 8.34, -0.73], ], @@ -985,14 +968,16 @@ fn var_axis() { )); assert!(a.var_axis(Axis(2), 2.3).all_close( &aview2(&[ - [ 79.64552941, 129.09663235, 95.98929412], + [79.64552941, 129.09663235, 95.98929412], [109.64952941, 43.28758824, 36.27439706], ]), 1e-8, )); let b = array![[1.1, 2.3, 4.7]]; - assert!(b.var_axis(Axis(0), 0.).all_close(&aview1(&[0., 0., 0.]), 1e-12)); + assert!(b + .var_axis(Axis(0), 0.) 
+ .all_close(&aview1(&[0., 0., 0.]), 1e-12)); assert!(b.var_axis(Axis(1), 0.).all_close(&aview1(&[2.24]), 1e-12)); let c = array![[], []]; @@ -1005,45 +990,47 @@ fn var_axis() { #[test] fn std_axis() { let a = array![ - [ - [ 0.22935481, 0.08030619, 0.60827517, 0.73684379], - [ 0.90339851, 0.82859436, 0.64020362, 0.2774583 ], - [ 0.44485313, 0.63316367, 0.11005111, 0.08656246] - ], - [ - [ 0.28924665, 0.44082454, 0.59837736, 0.41014531], - [ 0.08382316, 0.43259439, 0.1428889 , 0.44830176], - [ 0.51529756, 0.70111616, 0.20799415, 0.91851457] - ], + [ + [0.22935481, 0.08030619, 0.60827517, 0.73684379], + [0.90339851, 0.82859436, 0.64020362, 0.2774583], + [0.44485313, 0.63316367, 0.11005111, 0.08656246] + ], + [ + [0.28924665, 0.44082454, 0.59837736, 0.41014531], + [0.08382316, 0.43259439, 0.1428889, 0.44830176], + [0.51529756, 0.70111616, 0.20799415, 0.91851457] + ], ]; assert!(a.std_axis(Axis(0), 1.5).all_close( &aview2(&[ - [ 0.05989184, 0.36051836, 0.00989781, 0.32669847], - [ 0.81957535, 0.39599997, 0.49731472, 0.17084346], - [ 0.07044443, 0.06795249, 0.09794304, 0.83195211], + [0.05989184, 0.36051836, 0.00989781, 0.32669847], + [0.81957535, 0.39599997, 0.49731472, 0.17084346], + [0.07044443, 0.06795249, 0.09794304, 0.83195211], ]), 1e-4, )); assert!(a.std_axis(Axis(1), 1.7).all_close( &aview2(&[ - [ 0.42698655, 0.48139215, 0.36874991, 0.41458724], - [ 0.26769097, 0.18941435, 0.30555015, 0.35118674], + [0.42698655, 0.48139215, 0.36874991, 0.41458724], + [0.26769097, 0.18941435, 0.30555015, 0.35118674], ]), 1e-8, )); assert!(a.std_axis(Axis(2), 2.3).all_close( &aview2(&[ - [ 0.41117907, 0.37130425, 0.35332388], - [ 0.16905862, 0.25304841, 0.39978276], + [0.41117907, 0.37130425, 0.35332388], + [0.16905862, 0.25304841, 0.39978276], ]), 1e-8, )); let b = array![[100000., 1., 0.01]]; - assert!(b.std_axis(Axis(0), 0.).all_close(&aview1(&[0., 0., 0.]), 1e-12)); - assert!( - b.std_axis(Axis(1), 0.).all_close(&aview1(&[47140.214021552769]), 1e-6), - ); + assert!(b + .std_axis(Axis(0), 0.) + .all_close(&aview1(&[0., 0., 0.]), 1e-12)); + assert!(b + .std_axis(Axis(1), 0.) + .all_close(&aview1(&[47140.214021552769]), 1e-6),); let c = array![[], []]; assert_eq!(c.std_axis(Axis(0), 0.), aview1(&[])); @@ -1095,8 +1082,7 @@ fn std_axis_empty_axis() { } #[test] -fn iter_size_hint() -{ +fn iter_size_hint() { let mut a = arr2(&[[1., 2.], [3., 4.]]); { let mut it = a.iter(); @@ -1131,8 +1117,7 @@ fn iter_size_hint() } #[test] -fn zero_axes() -{ +fn zero_axes() { let mut a = arr1::(&[]); for _ in a.iter() { assert!(false); @@ -1150,8 +1135,7 @@ fn zero_axes() } #[test] -fn equality() -{ +fn equality() { let a = arr2(&[[1., 2.], [3., 4.]]); let mut b = arr2(&[[1., 2.], [2., 4.]]); assert!(a != b); @@ -1164,8 +1148,7 @@ fn equality() } #[test] -fn map1() -{ +fn map1() { let a = arr2(&[[1., 2.], [3., 4.]]); let b = a.map(|&x| (x / 3.) 
as isize); assert_eq!(b, arr2(&[[0, 0], [1, 1]])); @@ -1175,8 +1158,7 @@ fn map1() } #[test] -fn as_slice_memory_order() -{ +fn as_slice_memory_order() { // test that mutation breaks sharing let a = rcarr2(&[[1., 2.], [3., 4.0f32]]); let mut b = a.clone(); @@ -1251,9 +1233,9 @@ fn owned_array_discontiguous() { #[test] fn owned_array_discontiguous_drop() { - use ::std::rc::Rc; use ::std::cell::RefCell; use ::std::collections::BTreeSet; + use ::std::rc::Rc; struct InsertOnDrop(Rc>>, Option); impl Drop for InsertOnDrop { @@ -1265,7 +1247,9 @@ fn owned_array_discontiguous_drop() { let set = Rc::new(RefCell::new(BTreeSet::new())); { - let v: Vec<_> = (0..12).map(|x| InsertOnDrop(set.clone(), Some(x))).collect(); + let v: Vec<_> = (0..12) + .map(|x| InsertOnDrop(set.clone(), Some(x))) + .collect(); let mut a = Array::from_shape_vec((2, 6), v).unwrap(); // discontiguous and non-zero offset a.slice_collapse(s![.., 1..]); @@ -1278,17 +1262,20 @@ macro_rules! assert_matches { ($value:expr, $pat:pat) => { match $value { $pat => {} - ref err => panic!("assertion failed: `{}` matches `{}` found: {:?}", - stringify!($value), stringify!($pat), err), + ref err => panic!( + "assertion failed: `{}` matches `{}` found: {:?}", + stringify!($value), + stringify!($pat), + err + ), } - } + }; } #[test] fn from_vec_dim_stride_empty_1d() { let empty: [f32; 0] = []; - assert_matches!(Array::from_shape_vec(0.strides(1), empty.to_vec()), - Ok(_)); + assert_matches!(Array::from_shape_vec(0.strides(1), empty.to_vec()), Ok(_)); } #[test] @@ -1297,7 +1284,10 @@ fn from_vec_dim_stride_0d() { let one = [1.]; let two = [1., 2.]; // too few elements - assert_matches!(Array::from_shape_vec(().strides(()), empty.to_vec()), Err(_)); + assert_matches!( + Array::from_shape_vec(().strides(()), empty.to_vec()), + Err(_) + ); // exact number of elements assert_matches!(Array::from_shape_vec(().strides(()), one.to_vec()), Ok(_)); // too many are ok @@ -1322,22 +1312,24 @@ fn from_vec_dim_stride_2d_2() { #[test] fn from_vec_dim_stride_2d_3() { - let a = arr3(&[[[1]], - [[2]], - [[3]]]); + let a = arr3(&[[[1]], [[2]], [[3]]]); let d = a.raw_dim(); let s = d.default_strides(); - assert_matches!(Array::from_shape_vec(d.strides(s), a.as_slice().unwrap().to_vec()), Ok(_)); + assert_matches!( + Array::from_shape_vec(d.strides(s), a.as_slice().unwrap().to_vec()), + Ok(_) + ); } #[test] fn from_vec_dim_stride_2d_4() { - let a = arr3(&[[[1]], - [[2]], - [[3]]]); + let a = arr3(&[[[1]], [[2]], [[3]]]); let d = a.raw_dim(); let s = d.fortran_strides(); - assert_matches!(Array::from_shape_vec(d.strides(s), a.as_slice().unwrap().to_vec()), Ok(_)); + assert_matches!( + Array::from_shape_vec(d.strides(s), a.as_slice().unwrap().to_vec()), + Ok(_) + ); } #[test] @@ -1345,7 +1337,10 @@ fn from_vec_dim_stride_2d_5() { let a = arr3(&[[[1, 2, 3]]]); let d = a.raw_dim(); let s = d.fortran_strides(); - assert_matches!(Array::from_shape_vec(d.strides(s), a.as_slice().unwrap().to_vec()), Ok(_)); + assert_matches!( + Array::from_shape_vec(d.strides(s), a.as_slice().unwrap().to_vec()), + Ok(_) + ); } #[test] @@ -1402,8 +1397,10 @@ fn views() { a.clone()[(0, 0)] = 99; assert_eq!(b[(0, 0)], 1); - assert_eq!(a.view().into_iter().cloned().collect::>(), - vec![1, 2, 3, 4]); + assert_eq!( + a.view().into_iter().cloned().collect::>(), + vec![1, 2, 3, 4] + ); } #[test] @@ -1433,28 +1430,24 @@ fn slice_mut() { } assert_eq!(a, aview2(&[[0, 0], [0, 0]])); - let mut b = arr2(&[[1, 2, 3], - [4, 5, 6]]); + let mut b = arr2(&[[1, 2, 3], [4, 5, 6]]); let c = b.clone(); // make 
sure we can mutate b even if it has to be unshared first for elt in b.slice_mut(s![.., ..1]) { *elt = 0; } - assert_eq!(b, aview2(&[[0, 2, 3], - [0, 5, 6]])); + assert_eq!(b, aview2(&[[0, 2, 3], [0, 5, 6]])); assert!(c != b); for elt in b.slice_mut(s![.., ..;2]) { *elt = 99; } - assert_eq!(b, aview2(&[[99, 2, 99], - [99, 5, 99]])); + assert_eq!(b, aview2(&[[99, 2, 99], [99, 5, 99]])); } #[test] -fn assign_ops() -{ +fn assign_ops() { let mut a = arr2(&[[1., 2.], [3., 4.]]); - let b = arr2(&[[1., 3.], [2., 4.]]); + let b = arr2(&[[1., 3.], [2., 4.]]); (*&mut a.view_mut()) += &b; assert_eq!(a, arr2(&[[2., 5.], [5., 8.]])); @@ -1488,32 +1481,28 @@ fn aview_mut() { slc += 1; } } - assert_eq!(data, [1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]); + assert_eq!(data, [1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]); } #[test] fn transpose_view() { - let a = arr2(&[[1, 2], - [3, 4]]); + let a = arr2(&[[1, 2], [3, 4]]); let at = a.view().reversed_axes(); assert_eq!(at, arr2(&[[1, 3], [2, 4]])); - let a = arr2(&[[1, 2, 3], - [4, 5, 6]]); + let a = arr2(&[[1, 2, 3], [4, 5, 6]]); let at = a.view().reversed_axes(); assert_eq!(at, arr2(&[[1, 4], [2, 5], [3, 6]])); } #[test] fn transpose_view_mut() { - let mut a = arr2(&[[1, 2], - [3, 4]]); + let mut a = arr2(&[[1, 2], [3, 4]]); let mut at = a.view_mut().reversed_axes(); at[[0, 1]] = 5; assert_eq!(at, arr2(&[[1, 5], [2, 4]])); - let mut a = arr2(&[[1, 2, 3], - [4, 5, 6]]); + let mut a = arr2(&[[1, 2, 3], [4, 5, 6]]); let mut at = a.view_mut().reversed_axes(); at[[2, 1]] = 7; assert_eq!(at, arr2(&[[1, 4], [2, 5], [3, 7]])); @@ -1531,10 +1520,7 @@ fn reshape() { assert_eq!(u.shape(), &[2, 2, 2]); let s = u.into_shape((4, 2)).unwrap(); assert_eq!(s.shape(), &[4, 2]); - assert_eq!(s, aview2(&[[1, 2], - [3, 4], - [5, 6], - [7, 8]])); + assert_eq!(s, aview2(&[[1, 2], [3, 4], [5, 6], [7, 8]])); } #[test] @@ -1577,10 +1563,7 @@ fn reshape_f() { let s = u.into_shape((4, 3)).unwrap(); println!("{:?}", s); assert_eq!(s.shape(), &[4, 3]); - assert_eq!(s, aview2(&[[0, 4, 8], - [1, 5, 9], - [2, 6,10], - [3, 7,11]])); + assert_eq!(s, aview2(&[[0, 4, 8], [1, 5, 9], [2, 6, 10], [3, 7, 11]])); } #[test] @@ -1599,32 +1582,83 @@ fn insert_axis() { test_insert!(arr1(&[1, 2, 3]), 1, arr2(&[[1], [2], [3]])); assert!(::std::panic::catch_unwind(|| arr1(&[1, 2, 3]).insert_axis(Axis(2))).is_err()); - test_insert!(arr2(&[[1, 2, 3], [4, 5, 6]]), 0, arr3(&[[[1, 2, 3], [4, 5, 6]]])); - test_insert!(arr2(&[[1, 2, 3], [4, 5, 6]]), 1, arr3(&[[[1, 2, 3]], [[4, 5, 6]]])); - test_insert!(arr2(&[[1, 2, 3], [4, 5, 6]]), 2, arr3(&[[[1], [2], [3]], [[4], [5], [6]]])); - assert!(::std::panic::catch_unwind( - || arr2(&[[1, 2, 3], [4, 5, 6]]).insert_axis(Axis(3))).is_err()); + test_insert!( + arr2(&[[1, 2, 3], [4, 5, 6]]), + 0, + arr3(&[[[1, 2, 3], [4, 5, 6]]]) + ); + test_insert!( + arr2(&[[1, 2, 3], [4, 5, 6]]), + 1, + arr3(&[[[1, 2, 3]], [[4, 5, 6]]]) + ); + test_insert!( + arr2(&[[1, 2, 3], [4, 5, 6]]), + 2, + arr3(&[[[1], [2], [3]], [[4], [5], [6]]]) + ); + assert!( + ::std::panic::catch_unwind(|| arr2(&[[1, 2, 3], [4, 5, 6]]).insert_axis(Axis(3))).is_err() + ); - test_insert!(Array3::::zeros((3, 4, 5)), 0, Array4::::zeros((1, 3, 4, 5))); - test_insert!(Array3::::zeros((3, 4, 5)), 1, Array4::::zeros((3, 1, 4, 5))); - test_insert!(Array3::::zeros((3, 4, 5)), 3, Array4::::zeros((3, 4, 5, 1))); - assert!(::std::panic::catch_unwind( - || Array3::::zeros((3, 4, 5)).insert_axis(Axis(4))).is_err()); - - test_insert!(Array6::::zeros((2, 3, 4, 3, 2, 3)), 0, - ArrayD::::zeros(vec![1, 2, 3, 4, 3, 2, 
3])); - test_insert!(Array6::::zeros((2, 3, 4, 3, 2, 3)), 3, - ArrayD::::zeros(vec![2, 3, 4, 1, 3, 2, 3])); - test_insert!(Array6::::zeros((2, 3, 4, 3, 2, 3)), 6, - ArrayD::::zeros(vec![2, 3, 4, 3, 2, 3, 1])); - assert!(::std::panic::catch_unwind( - || Array6::::zeros((2, 3, 4, 3, 2, 3)).insert_axis(Axis(7))).is_err()); + test_insert!( + Array3::::zeros((3, 4, 5)), + 0, + Array4::::zeros((1, 3, 4, 5)) + ); + test_insert!( + Array3::::zeros((3, 4, 5)), + 1, + Array4::::zeros((3, 1, 4, 5)) + ); + test_insert!( + Array3::::zeros((3, 4, 5)), + 3, + Array4::::zeros((3, 4, 5, 1)) + ); + assert!( + ::std::panic::catch_unwind(|| Array3::::zeros((3, 4, 5)).insert_axis(Axis(4))).is_err() + ); - test_insert!(ArrayD::::zeros(vec![3, 4, 5]), 0, ArrayD::::zeros(vec![1, 3, 4, 5])); - test_insert!(ArrayD::::zeros(vec![3, 4, 5]), 1, ArrayD::::zeros(vec![3, 1, 4, 5])); - test_insert!(ArrayD::::zeros(vec![3, 4, 5]), 3, ArrayD::::zeros(vec![3, 4, 5, 1])); + test_insert!( + Array6::::zeros((2, 3, 4, 3, 2, 3)), + 0, + ArrayD::::zeros(vec![1, 2, 3, 4, 3, 2, 3]) + ); + test_insert!( + Array6::::zeros((2, 3, 4, 3, 2, 3)), + 3, + ArrayD::::zeros(vec![2, 3, 4, 1, 3, 2, 3]) + ); + test_insert!( + Array6::::zeros((2, 3, 4, 3, 2, 3)), + 6, + ArrayD::::zeros(vec![2, 3, 4, 3, 2, 3, 1]) + ); assert!(::std::panic::catch_unwind( - || ArrayD::::zeros(vec![3, 4, 5]).insert_axis(Axis(4))).is_err()); + || Array6::::zeros((2, 3, 4, 3, 2, 3)).insert_axis(Axis(7)) + ) + .is_err()); + + test_insert!( + ArrayD::::zeros(vec![3, 4, 5]), + 0, + ArrayD::::zeros(vec![1, 3, 4, 5]) + ); + test_insert!( + ArrayD::::zeros(vec![3, 4, 5]), + 1, + ArrayD::::zeros(vec![3, 1, 4, 5]) + ); + test_insert!( + ArrayD::::zeros(vec![3, 4, 5]), + 3, + ArrayD::::zeros(vec![3, 4, 5, 1]) + ); + assert!( + ::std::panic::catch_unwind(|| ArrayD::::zeros(vec![3, 4, 5]).insert_axis(Axis(4))) + .is_err() + ); } #[test] @@ -1635,32 +1669,61 @@ fn insert_axis_f() { assert!(res.t().is_standard_layout()); }); - test_insert_f!(Array0::from_shape_vec(().f(), vec![1]).unwrap(), 0, arr1(&[1])); - assert!(::std::panic::catch_unwind( - || Array0::from_shape_vec(().f(), vec![1]).unwrap().insert_axis(Axis(1))).is_err()); + test_insert_f!( + Array0::from_shape_vec(().f(), vec![1]).unwrap(), + 0, + arr1(&[1]) + ); + assert!( + ::std::panic::catch_unwind(|| Array0::from_shape_vec(().f(), vec![1]) + .unwrap() + .insert_axis(Axis(1))) + .is_err() + ); test_insert_f!(Array1::::zeros((3).f()), 0, Array2::::zeros((1, 3))); test_insert_f!(Array1::::zeros((3).f()), 1, Array2::::zeros((3, 1))); - assert!(::std::panic::catch_unwind( - || Array1::::zeros((3).f()).insert_axis(Axis(2))).is_err()); + assert!( + ::std::panic::catch_unwind(|| Array1::::zeros((3).f()).insert_axis(Axis(2))).is_err() + ); - test_insert_f!(Array3::::zeros((3, 4, 5).f()), 1, Array4::::zeros((3, 1, 4, 5))); - assert!(::std::panic::catch_unwind( - || Array3::::zeros((3, 4, 5).f()).insert_axis(Axis(4))).is_err()); + test_insert_f!( + Array3::::zeros((3, 4, 5).f()), + 1, + Array4::::zeros((3, 1, 4, 5)) + ); + assert!( + ::std::panic::catch_unwind(|| Array3::::zeros((3, 4, 5).f()).insert_axis(Axis(4))) + .is_err() + ); - test_insert_f!(ArrayD::::zeros(vec![3, 4, 5].f()), 1, - ArrayD::::zeros(vec![3, 1, 4, 5])); + test_insert_f!( + ArrayD::::zeros(vec![3, 4, 5].f()), + 1, + ArrayD::::zeros(vec![3, 1, 4, 5]) + ); assert!(::std::panic::catch_unwind( - || ArrayD::::zeros(vec![3, 4, 5].f()).insert_axis(Axis(4))).is_err()); + || ArrayD::::zeros(vec![3, 4, 5].f()).insert_axis(Axis(4)) + ) + .is_err()); } #[test] fn 
insert_axis_view() { let a = array![[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]]; - assert_eq!(a.index_axis(Axis(1), 0).insert_axis(Axis(0)), array![[[1, 2], [5, 6], [9, 10]]]); - assert_eq!(a.index_axis(Axis(1), 0).insert_axis(Axis(1)), array![[[1, 2]], [[5, 6]], [[9, 10]]]); - assert_eq!(a.index_axis(Axis(1), 0).insert_axis(Axis(2)), array![[[1], [2]], [[5], [6]], [[9], [10]]]); + assert_eq!( + a.index_axis(Axis(1), 0).insert_axis(Axis(0)), + array![[[1, 2], [5, 6], [9, 10]]] + ); + assert_eq!( + a.index_axis(Axis(1), 0).insert_axis(Axis(1)), + array![[[1, 2]], [[5, 6]], [[9, 10]]] + ); + assert_eq!( + a.index_axis(Axis(1), 0).insert_axis(Axis(2)), + array![[[1], [2]], [[5], [6]], [[9], [10]]] + ); } #[test] @@ -1674,8 +1737,7 @@ fn arithmetic_broadcast() { } #[test] -fn char_array() -{ +fn char_array() { // test compilation & basics of non-numerical array let cc = ArcArray::from_iter("alphabet".chars()).reshape((4, 2)); assert!(cc.index_axis(Axis(1), 0) == ArcArray::from_iter("apae".chars())); @@ -1712,10 +1774,10 @@ fn scalar_ops() { let zero = Array::::zeros((2, 2)); let one = &zero + 1; assert_eq!(one.clone() << 3, 8 * &one); - assert_eq!(3 << one.clone() , 6 * &one); + assert_eq!(3 << one.clone(), 6 * &one); assert_eq!(&one << 3, 8 * &one); - assert_eq!(3 << &one , 6 * &one); + assert_eq!(3 << &one, 6 * &one); } #[test] @@ -1736,15 +1798,19 @@ fn split_at() { } assert_eq!(a, arr2(&[[1., 5.], [8., 4.]])); - let b = ArcArray::linspace(0., 59., 60).reshape((3, 4, 5)); let (left, right) = b.view().split_at(Axis(2), 2); assert_eq!(left.shape(), [3, 4, 2]); assert_eq!(right.shape(), [3, 4, 3]); - assert_eq!(left, arr3(&[[[0., 1.], [5., 6.], [10., 11.], [15., 16.]], - [[20., 21.], [25., 26.], [30., 31.], [35., 36.]], - [[40., 41.], [45., 46.], [50., 51.], [55., 56.]]])); + assert_eq!( + left, + arr3(&[ + [[0., 1.], [5., 6.], [10., 11.], [15., 16.]], + [[20., 21.], [25., 26.], [30., 31.], [35., 36.]], + [[40., 41.], [45., 46.], [50., 51.], [55., 56.]] + ]) + ); // we allow for an empty right view when index == dim[axis] let (_, right) = b.view().split_at(Axis(1), 4); @@ -1769,13 +1835,13 @@ fn deny_split_at_index_out_of_bounds() { fn test_range() { let a = Array::range(0., 5., 1.); assert_eq!(a.len(), 5); - assert_eq!(a[0], 0.); - assert_eq!(a[4], 4.); + assert_eq!(a[0], 0.); + assert_eq!(a[4], 4.); let b = Array::range(0., 2.2, 1.); assert_eq!(b.len(), 3); - assert_eq!(b[0], 0.); - assert_eq!(b[2], 2.); + assert_eq!(b[0], 0.); + assert_eq!(b[2], 2.); let c = Array::range(0., 5., 2.); assert_eq!(c.len(), 3); @@ -1798,8 +1864,7 @@ fn test_range() { fn test_f_order() { // Test that arrays are logically equal in every way, // even if the underlying memory order is different - let c = arr2(&[[1, 2, 3], - [4, 5, 6]]); + let c = arr2(&[[1, 2, 3], [4, 5, 6]]); let mut f = Array::zeros(c.dim().f()); f.assign(&c); assert_eq!(f, c); @@ -1821,8 +1886,7 @@ fn test_f_order() { fn to_owned_memory_order() { // check that .to_owned() makes f-contiguous arrays out of f-contiguous // input. 
- let c = arr2(&[[1, 2, 3], - [4, 5, 6]]); + let c = arr2(&[[1, 2, 3], [4, 5, 6]]); let mut f = c.view(); f.swap_axes(0, 1); let fo = f.to_owned(); @@ -1832,8 +1896,7 @@ fn to_owned_memory_order() { #[test] fn to_owned_neg_stride() { - let mut c = arr2(&[[1, 2, 3], - [4, 5, 6]]); + let mut c = arr2(&[[1, 2, 3], [4, 5, 6]]); c.slice_collapse(s![.., ..;-1]); let co = c.to_owned(); assert_eq!(c, co); @@ -1841,8 +1904,7 @@ fn to_owned_neg_stride() { #[test] fn discontiguous_owned_to_owned() { - let mut c = arr2(&[[1, 2, 3], - [4, 5, 6]]); + let mut c = arr2(&[[1, 2, 3], [4, 5, 6]]); c.slice_collapse(s![.., ..;2]); let co = c.to_owned(); @@ -1853,10 +1915,7 @@ fn discontiguous_owned_to_owned() { #[test] fn map_memory_order() { - let a = arr3(&[[[1, 2, 3], - [4, 5, 6]], - [[7, 8, 9], - [0, -1, -2]]]); + let a = arr3(&[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [0, -1, -2]]]); let mut v = a.view(); v.swap_axes(0, 1); let amap = v.map(|x| *x >= 3); @@ -1866,10 +1925,7 @@ fn map_memory_order() { #[test] fn test_contiguous() { - let c = arr3(&[[[1, 2, 3], - [4, 5, 6]], - [[4, 5, 6], - [7, 7, 7]]]); + let c = arr3(&[[[1, 2, 3], [4, 5, 6]], [[4, 5, 6], [7, 7, 7]]]); assert!(c.is_standard_layout()); assert!(c.as_slice_memory_order().is_some()); let v = c.slice(s![.., 0..1, ..]); @@ -1903,19 +1959,17 @@ fn test_contiguous() { #[test] fn test_all_close() { - let c = arr3(&[[[1., 2., 3.], - [1.5, 1.5, 3.]], - [[1., 2., 3.], - [1., 2.5, 3.]]]); + let c = arr3(&[ + [[1., 2., 3.], [1.5, 1.5, 3.]], + [[1., 2., 3.], [1., 2.5, 3.]], + ]); assert!(c.all_close(&aview1(&[1., 2., 3.]), 1.)); assert!(!c.all_close(&aview1(&[1., 2., 3.]), 0.1)); } #[test] fn test_swap() { - let mut a = arr2(&[[1, 2, 3], - [4, 5, 6], - [7, 8, 9]]); + let mut a = arr2(&[[1, 2, 3], [4, 5, 6], [7, 8, 9]]); let b = a.clone(); for i in 0..a.rows() { @@ -1928,9 +1982,7 @@ fn test_swap() { #[test] fn test_uswap() { - let mut a = arr2(&[[1, 2, 3], - [4, 5, 6], - [7, 8, 9]]); + let mut a = arr2(&[[1, 2, 3], [4, 5, 6], [7, 8, 9]]); let b = a.clone(); for i in 0..a.rows() { @@ -1974,7 +2026,6 @@ fn test_default() { let a = as Default>::default(); assert_eq!(a, aview2(&[[0.0; 0]; 0])); - #[derive(Default, Debug, PartialEq)] struct Foo(i32); let b = as Default>::default(); @@ -1988,13 +2039,9 @@ fn test_default_ixdyn() { assert_eq!(a, b); } - #[test] fn test_map_axis() { - let a = arr2(&[[1, 2, 3], - [4, 5, 6], - [7, 8, 9], - [10,11,12]]); + let a = arr2(&[[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]); let b = a.map_axis(Axis(0), |view| view.sum()); let answer1 = arr1(&[22, 26, 30]); @@ -2006,10 +2053,7 @@ fn test_map_axis() { #[test] fn test_to_vec() { - let mut a = arr2(&[[1, 2, 3], - [4, 5, 6], - [7, 8, 9], - [10,11,12]]); + let mut a = arr2(&[[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]); a.slice_collapse(s![..;-1, ..]); assert_eq!(a.row(3).to_vec(), vec![1, 2, 3]); @@ -2048,8 +2092,10 @@ fn array_macros() { assert_eq!(a4, arr3(&[[[1, 2], [3, 4]], [[5, 6], [7, 8]]])); let s = String::from("abc"); - let a2s = array![[String::from("w"), s], - [String::from("x"), String::from("y")]]; + let a2s = array![ + [String::from("w"), s], + [String::from("x"), String::from("y")] + ]; assert_eq!(a2s[[0, 0]], "w"); assert_eq!(a2s[[0, 1]], "abc"); assert_eq!(a2s[[1, 0]], "x"); diff --git a/tests/azip.rs b/tests/azip.rs index a5e53e0e0..6d3f661a3 100644 --- a/tests/azip.rs +++ b/tests/azip.rs @@ -1,5 +1,5 @@ -extern crate ndarray; extern crate itertools; +extern crate ndarray; use ndarray::prelude::*; use ndarray::Zip; @@ -8,7 +8,6 @@ use itertools::{assert_equal, 
cloned, enumerate}; use std::mem::swap; - #[test] fn test_azip1() { let mut a = Array::zeros(62); @@ -20,7 +19,7 @@ fn test_azip1() { #[test] fn test_azip2() { let mut a = Array::zeros((5, 7)); - let b = Array::from_shape_fn(a.dim(), |(i, j)| 1. / (i + 2*j) as f32); + let b = Array::from_shape_fn(a.dim(), |(i, j)| 1. / (i + 2 * j) as f32); azip!(mut a, b in { *a = b; }); assert_eq!(a, b); } @@ -28,7 +27,7 @@ fn test_azip2() { #[test] fn test_azip2_1() { let mut a = Array::zeros((5, 7)); - let b = Array::from_shape_fn((5, 10), |(i, j)| 1. / (i + 2*j) as f32); + let b = Array::from_shape_fn((5, 10), |(i, j)| 1. / (i + 2 * j) as f32); let b = b.slice(s![..;-1, 3..]); azip!(mut a, b in { *a = b; }); assert_eq!(a, b); @@ -36,7 +35,7 @@ fn test_azip2_1() { #[test] fn test_azip2_3() { - let mut b = Array::from_shape_fn((5, 10), |(i, j)| 1. / (i + 2*j) as f32); + let mut b = Array::from_shape_fn((5, 10), |(i, j)| 1. / (i + 2 * j) as f32); let mut c = Array::from_shape_fn((5, 10), |(i, j)| f32::exp((i + j) as f32)); let a = b.clone(); azip!(mut b, mut c in { swap(b, c) }); @@ -99,7 +98,7 @@ fn test_zip_dim_mismatch_1() { let mut a = Array::zeros((5, 7)); let mut d = a.raw_dim(); d[0] += 1; - let b = Array::from_shape_fn(d, |(i, j)| 1. / (i + 2*j) as f32); + let b = Array::from_shape_fn(d, |(i, j)| 1. / (i + 2 * j) as f32); azip!(mut a, b in { *a = b; }); } @@ -130,7 +129,6 @@ fn test_contiguous_but_not_c_or_f() { assert_eq!(ans, correct); } - #[test] fn test_clone() { let a = Array::from_iter(0..27).into_shape((3, 3, 3)).unwrap(); @@ -156,11 +154,10 @@ fn test_indices_1() { } let mut count = 0; - Zip::indexed(&a1) - .apply(|i, elt| { - count += 1; - assert_eq!(*elt, i); - }); + Zip::indexed(&a1).apply(|i, elt| { + count += 1; + assert_eq!(*elt, i); + }); assert_eq!(count, a1.len()); let mut count = 0; @@ -217,11 +214,10 @@ fn test_indices_3() { } let mut count = 0; - Zip::indexed(&a1) - .apply(|i, elt| { - count += 1; - assert_eq!(*elt, i); - }); + Zip::indexed(&a1).apply(|i, elt| { + count += 1; + assert_eq!(*elt, i); + }); assert_eq!(count, a1.len()); let mut count = 0; diff --git a/tests/broadcast.rs b/tests/broadcast.rs index c194da792..efa8284ce 100644 --- a/tests/broadcast.rs +++ b/tests/broadcast.rs @@ -1,11 +1,9 @@ - extern crate ndarray; use ndarray::prelude::*; #[test] -fn broadcast_1() -{ +fn broadcast_1() { let a_dim = Dim([2, 4, 2, 2]); let b_dim = Dim([2, 1, 2, 1]); let a = ArcArray::linspace(0., 1., a_dim.size()).reshape(a_dim); @@ -22,16 +20,15 @@ fn broadcast_1() assert!(c.broadcast((32, 1, 2)).is_none()); /* () can be broadcast to anything */ - let z = ArcArray::::zeros(()); + let z = ArcArray::::zeros(()); assert!(z.broadcast(()).is_some()); assert!(z.broadcast(1).is_some()); assert!(z.broadcast(3).is_some()); - assert!(z.broadcast((7,2,9)).is_some()); + assert!(z.broadcast((7, 2, 9)).is_some()); } #[test] -fn test_add() -{ +fn test_add() { let a_dim = Dim([2, 4, 2, 2]); let b_dim = Dim([2, 1, 2, 1]); let mut a = ArcArray::linspace(0.0, 1., a_dim.size()).reshape(a_dim); @@ -41,9 +38,9 @@ fn test_add() a += &t; } -#[test] #[should_panic] -fn test_add_incompat() -{ +#[test] +#[should_panic] +fn test_add_incompat() { let a_dim = Dim([2, 4, 2, 2]); let mut a = ArcArray::linspace(0.0, 1., a_dim.size()).reshape(a_dim); let incompat = ArcArray::from_elem(3, 1.0f32); diff --git a/tests/clone.rs b/tests/clone.rs index e6f66c3c6..0215f4e1f 100644 --- a/tests/clone.rs +++ b/tests/clone.rs @@ -1,13 +1,10 @@ - extern crate ndarray; use ndarray::arr2; #[test] fn test_clone_from() { - let a = 
arr2(&[[1, 2, 3], - [4, 5, 6], - [7, 8, 9]]); + let a = arr2(&[[1, 2, 3], [4, 5, 6], [7, 8, 9]]); let b = arr2(&[[7, 7, 7]]); let mut c = b.clone(); c.clone_from(&a); @@ -16,5 +13,4 @@ fn test_clone_from() { let mut bv = b.view(); bv.clone_from(&a.view()); assert_eq!(&a, &bv); - } diff --git a/tests/complex.rs b/tests/complex.rs index 8da721c4d..f5e3545ea 100644 --- a/tests/complex.rs +++ b/tests/complex.rs @@ -1,20 +1,18 @@ - -extern crate num_traits; -extern crate num_complex; extern crate ndarray; +extern crate num_complex; +extern crate num_traits; -use ndarray::{arr1, arr2, Axis}; use ndarray::Array; -use num_traits::Num; +use ndarray::{arr1, arr2, Axis}; use num_complex::Complex; +use num_traits::Num; fn c(re: T, im: T) -> Complex { Complex::new(re, im) } #[test] -fn complex_mat_mul() -{ +fn complex_mat_mul() { let a = arr2(&[[c(3., 4.), c(2., 0.)], [c(0., -2.), c(3., 0.)]]); let b = (&a * c(3., 0.)).map(|c| 5. * c / c.norm()); println!("{:>8.2}", b); diff --git a/tests/dimension.rs b/tests/dimension.rs index c76b8d7ad..6961a38cd 100644 --- a/tests/dimension.rs +++ b/tests/dimension.rs @@ -1,25 +1,14 @@ -extern crate ndarray; extern crate defmac; +extern crate ndarray; use defmac::defmac; -use ndarray::{ - ArcArray, - Array, - RemoveAxis, - arr2, - Axis, - Dimension, - Dim, - IxDyn, - IntoDimension, -}; +use ndarray::{arr2, ArcArray, Array, Axis, Dim, Dimension, IntoDimension, IxDyn, RemoveAxis}; use std::hash::{Hash, Hasher}; #[test] -fn insert_axis() -{ +fn insert_axis() { assert_eq!(Dim([]).insert_axis(Axis(0)), Dim([1])); assert_eq!(Dim([3]).insert_axis(Axis(0)), Dim([1, 3])); @@ -31,7 +20,10 @@ fn insert_axis() assert_eq!(Dim([2, 3, 4]).insert_axis(Axis(2)), Dim([2, 3, 1, 4])); - assert_eq!(Dim([2, 3, 4, 5, 6, 7]).insert_axis(Axis(2)), Dim(vec![2, 3, 1, 4, 5, 6, 7])); + assert_eq!( + Dim([2, 3, 4, 5, 6, 7]).insert_axis(Axis(2)), + Dim(vec![2, 3, 1, 4, 5, 6, 7]) + ); assert_eq!(Dim(vec![]).insert_axis(Axis(0)), Dim(vec![1])); @@ -39,13 +31,18 @@ fn insert_axis() assert_eq!(Dim(vec![2, 3]).insert_axis(Axis(1)), Dim(vec![2, 1, 3])); assert_eq!(Dim(vec![2, 3]).insert_axis(Axis(2)), Dim(vec![2, 3, 1])); - assert_eq!(Dim(vec![2, 3, 4, 5, 6]).insert_axis(Axis(2)), Dim(vec![2, 3, 1, 4, 5, 6])); - assert_eq!(Dim(vec![2, 3, 4, 5, 6, 7]).insert_axis(Axis(2)), Dim(vec![2, 3, 1, 4, 5, 6, 7])); + assert_eq!( + Dim(vec![2, 3, 4, 5, 6]).insert_axis(Axis(2)), + Dim(vec![2, 3, 1, 4, 5, 6]) + ); + assert_eq!( + Dim(vec![2, 3, 4, 5, 6, 7]).insert_axis(Axis(2)), + Dim(vec![2, 3, 1, 4, 5, 6, 7]) + ); } #[test] -fn remove_axis() -{ +fn remove_axis() { assert_eq!(Dim([3]).remove_axis(Axis(0)), Dim([])); assert_eq!(Dim([1, 2]).remove_axis(Axis(0)), Dim([2])); assert_eq!(Dim([4, 5, 6]).remove_axis(Axis(1)), Dim([4, 6])); @@ -53,16 +50,18 @@ fn remove_axis() assert_eq!(Dim(vec![1, 2]).remove_axis(Axis(0)), Dim(vec![2])); assert_eq!(Dim(vec![4, 5, 6]).remove_axis(Axis(1)), Dim(vec![4, 6])); - let a = ArcArray::::zeros((4,5)); + let a = ArcArray::::zeros((4, 5)); a.index_axis(Axis(1), 0); - let a = ArcArray::::zeros(vec![4,5,6]); - let _b = a.index_axis_move(Axis(1), 0).reshape((4, 6)).reshape(vec![2, 3, 4]); + let a = ArcArray::::zeros(vec![4, 5, 6]); + let _b = a + .index_axis_move(Axis(1), 0) + .reshape((4, 6)) + .reshape(vec![2, 3, 4]); } #[test] -fn dyn_dimension() -{ +fn dyn_dimension() { let a = arr2(&[[1., 2.], [3., 4.0]]).into_shape(vec![2, 2]).unwrap(); assert_eq!(&a - &a, Array::zeros(vec![2, 2])); assert_eq!(a[&[0, 0][..]], 1.); @@ -121,13 +120,19 @@ fn fastest_varying_order() { 
assert_eq!(Dim([1, 3])._fastest_varying_stride_order(), Dim([0, 1])); assert_eq!(Dim([7, 2])._fastest_varying_stride_order(), Dim([1, 0])); - assert_eq!(Dim([6, 1, 3])._fastest_varying_stride_order(), Dim([1, 2, 0])); + assert_eq!( + Dim([6, 1, 3])._fastest_varying_stride_order(), + Dim([1, 2, 0]) + ); // it's important that it produces distinct indices. Prefer the stable order // where 0 is before 1 when they are equal. assert_eq!(Dim([2, 2])._fastest_varying_stride_order(), [0, 1]); assert_eq!(Dim([2, 2, 1])._fastest_varying_stride_order(), [2, 0, 1]); - assert_eq!(Dim([2, 2, 3, 1, 2])._fastest_varying_stride_order(), [3, 0, 1, 4, 2]); + assert_eq!( + Dim([2, 2, 3, 1, 2])._fastest_varying_stride_order(), + [3, 0, 1, 4, 2] + ); } type ArrayF32 = Array; @@ -228,14 +233,14 @@ fn test_hash() { ($arr:expr) => { assert_eq!(calc_hash(&Dim($arr)), calc_hash(&Dim($arr))); assert_eq!(calc_hash(&Dim($arr)), calc_hash(&IxDyn(&$arr))); - } + }; } macro_rules! test_hash_ne { ($arr1:expr, $arr2:expr) => { assert_ne!(calc_hash(&Dim($arr1)), calc_hash(&Dim($arr2))); assert_ne!(calc_hash(&Dim($arr1)), calc_hash(&IxDyn(&$arr2))); assert_ne!(calc_hash(&IxDyn(&$arr1)), calc_hash(&Dim($arr2))); - } + }; } test_hash_eq!([]); test_hash_eq!([0]); @@ -282,7 +287,7 @@ fn test_array_view() { #[test] fn test_all_ndindex() { -macro_rules! ndindex { + macro_rules! ndindex { ($($i:expr),*) => { for &rev in &[false, true] { // rev is for C / F order diff --git a/tests/format.rs b/tests/format.rs index 4dbc939cc..7b36efd8c 100644 --- a/tests/format.rs +++ b/tests/format.rs @@ -4,33 +4,30 @@ use ndarray::prelude::*; use ndarray::rcarr1; #[test] -fn formatting() -{ +fn formatting() { let a = rcarr1::(&[1., 2., 3., 4.]); - assert_eq!(format!("{}", a), - //"[ 1, 2, 3, 4]"); - "[1, 2, 3, 4]"); - assert_eq!(format!("{:4}", a), - "[ 1, 2, 3, 4]"); + assert_eq!( + format!("{}", a), + //"[ 1, 2, 3, 4]"); + "[1, 2, 3, 4]" + ); + assert_eq!(format!("{:4}", a), "[ 1, 2, 3, 4]"); let a = a.reshape((4, 1, 1)); - assert_eq!(format!("{:4}", a), - "[[[ 1]],\n [[ 2]],\n [[ 3]],\n [[ 4]]]"); + assert_eq!( + format!("{:4}", a), + "[[[ 1]],\n [[ 2]],\n [[ 3]],\n [[ 4]]]" + ); let a = a.reshape((2, 2)); - assert_eq!(format!("{}", a), - "[[1, 2],\n [3, 4]]"); - assert_eq!(format!("{}", a), - "[[1, 2],\n [3, 4]]"); - assert_eq!(format!("{:4}", a), - "[[ 1, 2],\n [ 3, 4]]"); + assert_eq!(format!("{}", a), "[[1, 2],\n [3, 4]]"); + assert_eq!(format!("{}", a), "[[1, 2],\n [3, 4]]"); + assert_eq!(format!("{:4}", a), "[[ 1, 2],\n [ 3, 4]]"); let b = arr0::(3.5); - assert_eq!(format!("{}", b), - "3.5"); + assert_eq!(format!("{}", b), "3.5"); let s = format!("{:.3e}", aview1::(&[1.1, 2.2, 33., 440.])); - assert_eq!(s, - "[1.100e0, 2.200e0, 3.300e1, 4.400e2]"); + assert_eq!(s, "[1.100e0, 2.200e0, 3.300e1, 4.400e2]"); let s = format!("{:02x}", aview1::(&[1, 0xff, 0xfe])); assert_eq!(s, "[01, ff, fe]"); diff --git a/tests/higher_order_f.rs b/tests/higher_order_f.rs index c7355fa12..c0c3d4ef4 100644 --- a/tests/higher_order_f.rs +++ b/tests/higher_order_f.rs @@ -1,4 +1,3 @@ - extern crate ndarray; use ndarray::prelude::*; diff --git a/tests/indices.rs b/tests/indices.rs index 9e47075a4..a369661c3 100644 --- a/tests/indices.rs +++ b/tests/indices.rs @@ -1,7 +1,7 @@ extern crate ndarray; -use ndarray::prelude::*; use ndarray::indices_of; +use ndarray::prelude::*; #[test] fn test_ixdyn_index_iterate() { diff --git a/tests/iterator_chunks.rs b/tests/iterator_chunks.rs index dbbd44082..9453c0cea 100644 --- a/tests/iterator_chunks.rs +++ 
b/tests/iterator_chunks.rs @@ -5,7 +5,9 @@ use ndarray::NdProducer; #[test] fn chunks() { - let a = >::linspace(1., 100., 10 * 10).into_shape((10, 10)).unwrap(); + let a = >::linspace(1., 100., 10 * 10) + .into_shape((10, 10)) + .unwrap(); let (m, n) = a.dim(); for i in 1..m + 1 { @@ -18,7 +20,11 @@ fn chunks() { let cindex = (index / ly, index % ly); let cx = (cindex.0 * i) as isize; let cy = (cindex.1 * j) as isize; - assert_eq!(elt, a.slice(s![cx.., cy..]).slice(s![..i as isize, ..j as isize])); + assert_eq!( + elt, + a.slice(s![cx.., cy..]) + .slice(s![..i as isize, ..j as isize]) + ); } let c = a.exact_chunks((i, j)); assert_eq!(c.into_iter().count(), (m / i) * (n / j)); @@ -69,13 +75,14 @@ fn chunks_mut() { } println!("{:?}", a); let ans = array![ - [0, 0, 0, 1, 1, 1, 0, 0], - [0, 0, 0, 1, 1, 1, 0, 0], - [2, 2, 2, 3, 3, 3, 0, 0], - [2, 2, 2, 3, 3, 3, 0, 0], - [4, 4, 4, 5, 5, 5, 0, 0], - [4, 4, 4, 5, 5, 5, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0]]; + [0, 0, 0, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0], + [2, 2, 2, 3, 3, 3, 0, 0], + [2, 2, 2, 3, 3, 3, 0, 0], + [4, 4, 4, 5, 5, 5, 0, 0], + [4, 4, 4, 5, 5, 5, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0] + ]; assert_eq!(a, ans); } diff --git a/tests/iterators.rs b/tests/iterators.rs index 71767562f..eba410b4d 100644 --- a/tests/iterators.rs +++ b/tests/iterators.rs @@ -1,12 +1,12 @@ -extern crate ndarray; extern crate itertools; +extern crate ndarray; use ndarray::prelude::*; use ndarray::Ix; use ndarray::{arr2, arr3, aview1, indices, s, Axis, Data, Dimension, Slice}; use itertools::assert_equal; -use itertools::{rev, enumerate}; +use itertools::{enumerate, rev}; #[test] fn double_ended() { @@ -39,8 +39,7 @@ fn iter_size_hint() { } #[test] -fn indexed() -{ +fn indexed() { let a = ArcArray::linspace(0., 7., 8); for (i, elt) in a.indexed_iter() { assert_eq!(i, *elt as Ix); @@ -58,11 +57,11 @@ fn indexed() } } - fn assert_slice_correct(v: &ArrayBase) - where S: Data, - D: Dimension, - A: PartialEq + std::fmt::Debug, +where + S: Data, + D: Dimension, + A: PartialEq + std::fmt::Debug, { let slc = v.as_slice(); assert!(slc.is_some()); @@ -104,7 +103,12 @@ fn as_slice() { let a = a.reshape((8, 1)); assert_slice_correct(&a); let u = a.slice(s![..;2, ..]); - println!("u={:?}, shape={:?}, strides={:?}", u, u.shape(), u.strides()); + println!( + "u={:?}, shape={:?}, strides={:?}", + u, + u.shape(), + u.strides() + ); assert!(u.as_slice().is_none()); } @@ -118,15 +122,31 @@ fn inner_iter() { // [[6, 7], // [8, 9], // ... 
- assert_equal(a.genrows(), - vec![aview1(&[0, 1]), aview1(&[2, 3]), aview1(&[4, 5]), - aview1(&[6, 7]), aview1(&[8, 9]), aview1(&[10, 11])]); + assert_equal( + a.genrows(), + vec![ + aview1(&[0, 1]), + aview1(&[2, 3]), + aview1(&[4, 5]), + aview1(&[6, 7]), + aview1(&[8, 9]), + aview1(&[10, 11]), + ], + ); let mut b = ArcArray::zeros((2, 3, 2)); b.swap_axes(0, 2); b.assign(&a); - assert_equal(b.genrows(), - vec![aview1(&[0, 1]), aview1(&[2, 3]), aview1(&[4, 5]), - aview1(&[6, 7]), aview1(&[8, 9]), aview1(&[10, 11])]); + assert_equal( + b.genrows(), + vec![ + aview1(&[0, 1]), + aview1(&[2, 3]), + aview1(&[4, 5]), + aview1(&[6, 7]), + aview1(&[8, 9]), + aview1(&[10, 11]), + ], + ); } #[test] @@ -135,12 +155,10 @@ fn inner_iter_corner_cases() { assert_equal(a0.genrows(), vec![aview1(&[0])]); let a2 = ArcArray::::zeros((0, 3)); - assert_equal(a2.genrows(), - vec![aview1(&[]); 0]); + assert_equal(a2.genrows(), vec![aview1(&[]); 0]); let a2 = ArcArray::::zeros((3, 0)); - assert_equal(a2.genrows(), - vec![aview1(&[]); 3]); + assert_equal(a2.genrows(), vec![aview1(&[]); 3]); } #[test] @@ -168,13 +186,17 @@ fn outer_iter() { // [[6, 7], // [8, 9], // ... - assert_equal(a.outer_iter(), - vec![a.index_axis(Axis(0), 0), a.index_axis(Axis(0), 1)]); + assert_equal( + a.outer_iter(), + vec![a.index_axis(Axis(0), 0), a.index_axis(Axis(0), 1)], + ); let mut b = ArcArray::zeros((2, 3, 2)); b.swap_axes(0, 2); b.assign(&a); - assert_equal(b.outer_iter(), - vec![a.index_axis(Axis(0), 0), a.index_axis(Axis(0), 1)]); + assert_equal( + b.outer_iter(), + vec![a.index_axis(Axis(0), 0), a.index_axis(Axis(0), 1)], + ); let mut found_rows = Vec::new(); for sub in b.outer_iter() { @@ -198,8 +220,10 @@ fn outer_iter() { let mut cv = c.slice_mut(s![..;-1, ..;-1, ..;-1]); cv.assign(&a); assert_eq!(&a, &cv); - assert_equal(cv.outer_iter(), - vec![a.index_axis(Axis(0), 0), a.index_axis(Axis(0), 1)]); + assert_equal( + cv.outer_iter(), + vec![a.index_axis(Axis(0), 0), a.index_axis(Axis(0), 1)], + ); let mut found_rows = Vec::new(); for sub in cv.outer_iter() { @@ -221,21 +245,23 @@ fn axis_iter() { // [[6, 7], // [8, 9], // ... 
- assert_equal(a.axis_iter(Axis(1)), - vec![a.index_axis(Axis(1), 0), - a.index_axis(Axis(1), 1), - a.index_axis(Axis(1), 2)]); + assert_equal( + a.axis_iter(Axis(1)), + vec![ + a.index_axis(Axis(1), 0), + a.index_axis(Axis(1), 1), + a.index_axis(Axis(1), 2), + ], + ); } #[test] fn outer_iter_corner_cases() { let a2 = ArcArray::::zeros((0, 3)); - assert_equal(a2.outer_iter(), - vec![aview1(&[]); 0]); + assert_equal(a2.outer_iter(), vec![aview1(&[]); 0]); let a2 = ArcArray::::zeros((3, 0)); - assert_equal(a2.outer_iter(), - vec![aview1(&[]); 3]); + assert_equal(a2.outer_iter(), vec![aview1(&[]); 3]); } #[allow(deprecated)] @@ -252,8 +278,10 @@ fn outer_iter_mut() { let mut b = ArcArray::zeros((2, 3, 2)); b.swap_axes(0, 2); b.assign(&a); - assert_equal(b.outer_iter_mut(), - vec![a.index_axis(Axis(0), 0), a.index_axis(Axis(0), 1)]); + assert_equal( + b.outer_iter_mut(), + vec![a.index_axis(Axis(0), 0), a.index_axis(Axis(0), 1)], + ); let mut found_rows = Vec::new(); for sub in b.outer_iter_mut() { @@ -280,12 +308,7 @@ fn axis_iter_mut() { subview[[0, 0]] = 42; } - let b = arr3(&[[[42, 1], - [42, 3], - [42, 5]], - [[6, 7], - [8, 9], - [10, 11]]]); + let b = arr3(&[[[42, 1], [42, 3], [42, 5]], [[6, 7], [8, 9], [10, 11]]]); assert_eq!(a, b); } @@ -295,27 +318,39 @@ fn axis_chunks_iter() { let a = a.reshape((2, 6, 2)); let it = a.axis_chunks_iter(Axis(1), 2); - assert_equal(it, - vec![arr3(&[[[0, 1], [2, 3]], [[12, 13], [14, 15]]]), - arr3(&[[[4, 5], [6, 7]], [[16, 17], [18, 19]]]), - arr3(&[[[8, 9], [10, 11]], [[20, 21], [22, 23]]])]); + assert_equal( + it, + vec![ + arr3(&[[[0, 1], [2, 3]], [[12, 13], [14, 15]]]), + arr3(&[[[4, 5], [6, 7]], [[16, 17], [18, 19]]]), + arr3(&[[[8, 9], [10, 11]], [[20, 21], [22, 23]]]), + ], + ); let a = ArcArray::from_iter(0..28); let a = a.reshape((2, 7, 2)); let it = a.axis_chunks_iter(Axis(1), 2); - assert_equal(it, - vec![arr3(&[[[0, 1], [2, 3]], [[14, 15], [16, 17]]]), - arr3(&[[[4, 5], [6, 7]], [[18, 19], [20, 21]]]), - arr3(&[[[8, 9], [10, 11]], [[22, 23], [24, 25]]]), - arr3(&[[[12, 13]], [[26, 27]]])]); + assert_equal( + it, + vec![ + arr3(&[[[0, 1], [2, 3]], [[14, 15], [16, 17]]]), + arr3(&[[[4, 5], [6, 7]], [[18, 19], [20, 21]]]), + arr3(&[[[8, 9], [10, 11]], [[22, 23], [24, 25]]]), + arr3(&[[[12, 13]], [[26, 27]]]), + ], + ); let it = a.axis_chunks_iter(Axis(1), 2).rev(); - assert_equal(it, - vec![arr3(&[[[12, 13]], [[26, 27]]]), - arr3(&[[[8, 9], [10, 11]], [[22, 23], [24, 25]]]), - arr3(&[[[4, 5], [6, 7]], [[18, 19], [20, 21]]]), - arr3(&[[[0, 1], [2, 3]], [[14, 15], [16, 17]]])]); + assert_equal( + it, + vec![ + arr3(&[[[12, 13]], [[26, 27]]]), + arr3(&[[[8, 9], [10, 11]], [[22, 23], [24, 25]]]), + arr3(&[[[4, 5], [6, 7]], [[18, 19], [20, 21]]]), + arr3(&[[[0, 1], [2, 3]], [[14, 15], [16, 17]]]), + ], + ); let it = a.axis_chunks_iter(Axis(1), 7); assert_equal(it, vec![a.view()]); @@ -338,10 +373,14 @@ fn axis_chunks_iter_corner_cases() { let it = a.axis_chunks_iter(Axis(0), 8); assert_equal(it, vec![a.view()]); let it = a.axis_chunks_iter(Axis(0), 3); - assert_equal(it, - vec![arr2(&[[7.], [6.], [5.]]), - arr2(&[[4.], [3.], [2.]]), - arr2(&[[1.], [0.]])]); + assert_equal( + it, + vec![ + arr2(&[[7.], [6.], [5.]]), + arr2(&[[4.], [3.], [2.]]), + arr2(&[[1.], [0.]]), + ], + ); let b = ArcArray::::zeros((8, 2)); let a = b.slice(s![1..;2,..]); @@ -354,18 +393,28 @@ fn axis_chunks_iter_corner_cases() { #[test] fn axis_chunks_iter_zero_stride() { - { // stride 0 case - let b = Array::from_vec(vec![0f32; 0]).into_shape((5, 0, 3)).unwrap(); - let shapes: 
Vec<_> = b.axis_chunks_iter(Axis(0), 2).map(|v| v.raw_dim()).collect(); + let b = Array::from_vec(vec![0f32; 0]) + .into_shape((5, 0, 3)) + .unwrap(); + let shapes: Vec<_> = b + .axis_chunks_iter(Axis(0), 2) + .map(|v| v.raw_dim()) + .collect(); assert_eq!(shapes, vec![Ix3(2, 0, 3), Ix3(2, 0, 3), Ix3(1, 0, 3)]); } { // stride 0 case reverse - let b = Array::from_vec(vec![0f32; 0]).into_shape((5, 0, 3)).unwrap(); - let shapes: Vec<_> = b.axis_chunks_iter(Axis(0), 2).rev().map(|v| v.raw_dim()).collect(); + let b = Array::from_vec(vec![0f32; 0]) + .into_shape((5, 0, 3)) + .unwrap(); + let shapes: Vec<_> = b + .axis_chunks_iter(Axis(0), 2) + .rev() + .map(|v| v.raw_dim()) + .collect(); assert_eq!(shapes, vec![Ix3(1, 0, 3), Ix3(2, 0, 3), Ix3(2, 0, 3)]); } @@ -475,7 +524,7 @@ fn outer_iter_mut_split_at() { fn iterators_are_send_sync() { // When the element type is Send + Sync, then the iterators and views // are too. - fn _send_sync(_: &T) { } + fn _send_sync(_: &T) {} let mut a = ArcArray::from_iter(0..30).into_shape((5, 3, 2)).unwrap(); @@ -548,8 +597,13 @@ fn test_rfold() { a.slice_axis_inplace(Axis(0), Slice::new(0, None, 2)); let mut iter = a.iter(); iter.next(); - let output = iter.rfold(Vec::new(), - |mut acc, elt| { acc.push(*elt); acc }); - assert_eq!(Array1::from_vec(output), Array::from_iter((1..10).rev().map(|i| i * 2))); + let output = iter.rfold(Vec::new(), |mut acc, elt| { + acc.push(*elt); + acc + }); + assert_eq!( + Array1::from_vec(output), + Array::from_iter((1..10).rev().map(|i| i * 2)) + ); } } diff --git a/tests/ix0.rs b/tests/ix0.rs index 878095b89..edb9f10fc 100644 --- a/tests/ix0.rs +++ b/tests/ix0.rs @@ -1,4 +1,3 @@ - extern crate ndarray; use ndarray::Array; diff --git a/tests/ixdyn.rs b/tests/ixdyn.rs index 65fce6ecc..814eeaefb 100644 --- a/tests/ixdyn.rs +++ b/tests/ixdyn.rs @@ -1,10 +1,9 @@ - extern crate ndarray; use ndarray::Array; -use ndarray::{Ix0, Ix1, Ix2, Ix3, IxDyn}; use ndarray::IntoDimension; use ndarray::ShapeBuilder; +use ndarray::{Ix0, Ix1, Ix2, Ix3, IxDyn}; #[test] fn test_ixdyn() { diff --git a/tests/oper.rs b/tests/oper.rs index 004231c92..cbad7aee8 100644 --- a/tests/oper.rs +++ b/tests/oper.rs @@ -2,20 +2,19 @@ extern crate defmac; extern crate ndarray; extern crate num_traits; -use ndarray::prelude::*; -use ndarray::{rcarr1, rcarr2}; -use ndarray::{LinalgScalar, Data}; use ndarray::linalg::general_mat_mul; use ndarray::linalg::general_mat_vec_mul; +use ndarray::prelude::*; +use ndarray::{rcarr1, rcarr2}; +use ndarray::{Data, LinalgScalar}; use ndarray::{Ix, Ixs, SliceInfo, SliceOrIndex}; -use std::fmt; -use std::ops::Neg; use defmac::defmac; use num_traits::Float; +use std::fmt; +use std::ops::Neg; -fn test_oper(op: &str, a: &[f32], b: &[f32], c: &[f32]) -{ +fn test_oper(op: &str, a: &[f32], b: &[f32], c: &[f32]) { let aa = rcarr1(a); let bb = rcarr1(b); let cc = rcarr1(c); @@ -35,7 +34,7 @@ fn test_oper(op: &str, a: &[f32], b: &[f32], c: &[f32]) fn test_oper_arr(op: &str, mut aa: ArcArray, bb: ArcArray, cc: ArcArray) where A: NdFloat, - for<'a> &'a A: Neg, + for<'a> &'a A: Neg, D: Dimension, { match op { @@ -43,49 +42,77 @@ where assert_eq!(&aa + &bb, cc); aa += &bb; assert_eq!(aa, cc); - }, + } "-" => { assert_eq!(&aa - &bb, cc); aa -= &bb; assert_eq!(aa, cc); - }, + } "*" => { assert_eq!(&aa * &bb, cc); aa *= &bb; assert_eq!(aa, cc); - }, + } "/" => { assert_eq!(&aa / &bb, cc); aa /= &bb; assert_eq!(aa, cc); - }, + } "%" => { assert_eq!(&aa % &bb, cc); aa %= &bb; assert_eq!(aa, cc); - }, + } "neg" => { assert_eq!(-&aa, cc); 
assert_eq!(-aa.clone(), cc); - }, - _ => panic!() + } + _ => panic!(), } } #[test] -fn operations() -{ - test_oper("+", &[1.0,2.0,3.0,4.0], &[0.0, 1.0, 2.0, 3.0], &[1.0,3.0,5.0,7.0]); - test_oper("-", &[1.0,2.0,3.0,4.0], &[0.0, 1.0, 2.0, 3.0], &[1.0,1.0,1.0,1.0]); - test_oper("*", &[1.0,2.0,3.0,4.0], &[0.0, 1.0, 2.0, 3.0], &[0.0,2.0,6.0,12.0]); - test_oper("/", &[1.0,2.0,3.0,4.0], &[1.0, 1.0, 2.0, 3.0], &[1.0,2.0,3.0/2.0,4.0/3.0]); - test_oper("%", &[1.0,2.0,3.0,4.0], &[1.0, 1.0, 2.0, 3.0], &[0.0,0.0,1.0,1.0]); - test_oper("neg", &[1.0,2.0,3.0,4.0], &[1.0, 1.0, 2.0, 3.0], &[-1.0,-2.0,-3.0,-4.0]); +fn operations() { + test_oper( + "+", + &[1.0, 2.0, 3.0, 4.0], + &[0.0, 1.0, 2.0, 3.0], + &[1.0, 3.0, 5.0, 7.0], + ); + test_oper( + "-", + &[1.0, 2.0, 3.0, 4.0], + &[0.0, 1.0, 2.0, 3.0], + &[1.0, 1.0, 1.0, 1.0], + ); + test_oper( + "*", + &[1.0, 2.0, 3.0, 4.0], + &[0.0, 1.0, 2.0, 3.0], + &[0.0, 2.0, 6.0, 12.0], + ); + test_oper( + "/", + &[1.0, 2.0, 3.0, 4.0], + &[1.0, 1.0, 2.0, 3.0], + &[1.0, 2.0, 3.0 / 2.0, 4.0 / 3.0], + ); + test_oper( + "%", + &[1.0, 2.0, 3.0, 4.0], + &[1.0, 1.0, 2.0, 3.0], + &[0.0, 0.0, 1.0, 1.0], + ); + test_oper( + "neg", + &[1.0, 2.0, 3.0, 4.0], + &[1.0, 1.0, 2.0, 3.0], + &[-1.0, -2.0, -3.0, -4.0], + ); } #[test] -fn scalar_operations() -{ +fn scalar_operations() { let a = arr0::(1.); let b = rcarr1::(&[1., 1.]); let c = rcarr2(&[[1., 1.], [1., 1.]]); @@ -119,13 +146,19 @@ fn scalar_operations() } fn assert_approx_eq(f: F, g: F, tol: F) -> bool { - assert!((f - g).abs() <= tol, "{:?} approx== {:?} (tol={:?})", - f, g, tol); + assert!( + (f - g).abs() <= tol, + "{:?} approx== {:?} (tol={:?})", + f, + g, + tol + ); true } fn assert_close(a: ArrayView, b: ArrayView) - where D: Dimension, +where + D: Dimension, { let diff = (&a - &b).mapv_into(f64::abs); @@ -134,7 +167,7 @@ fn assert_close(a: ArrayView, b: ArrayView) let crtol = b.mapv(|x| x.abs() * rtol); let tol = crtol + atol; let tol_m_diff = &diff - &tol; - let maxdiff = tol_m_diff.fold(0./0., |x, y| f64::max(x, *y)); + let maxdiff = tol_m_diff.fold(0. / 0., |x, y| f64::max(x, *y)); println!("diff offset from tolerance level= {:.2e}", maxdiff); if maxdiff > 0. 
{ println!("{:.4?}", a); @@ -143,16 +176,17 @@ fn assert_close(a: ArrayView, b: ArrayView) } } -fn reference_dot<'a,A, V1, V2>(a: V1, b: V2) -> A - where A: NdFloat, - V1: AsArray<'a, A>, - V2: AsArray<'a, A>, +fn reference_dot<'a, A, V1, V2>(a: V1, b: V2) -> A +where + A: NdFloat, + V1: AsArray<'a, A>, + V2: AsArray<'a, A>, { let a = a.into(); let b = b.into(); - a.iter().zip(b.iter()).fold(A::zero(), |acc, (&x, &y)| { - acc + x * y - }) + a.iter() + .zip(b.iter()) + .fold(A::zero(), |acc, (&x, &y)| acc + x * y) } #[test] @@ -173,7 +207,6 @@ fn dot_product() { assert_approx_eq(a2.dot(&b2), reference_dot(&a2, &b2), 1e-5); } - let a = a.map(|f| *f as f32); let b = b.map(|f| *f as f32); assert_approx_eq(a.dot(&b), dot as f32, 1e-5); @@ -236,7 +269,7 @@ fn dot_product_neg_stride() { #[test] fn fold_and_sum() { let a = Array::linspace(0., 127., 128).into_shape((8, 16)).unwrap(); - assert_approx_eq(a.fold(0., |acc, &x| acc +x), a.sum(), 1e-5); + assert_approx_eq(a.fold(0., |acc, &x| acc + x), a.sum(), 1e-5); // test different strides let max = 8 as Ixs; @@ -247,7 +280,7 @@ fn fold_and_sum() { for elt in a1.iter() { sum += *elt; } - assert_approx_eq(a1.fold(0., |acc, &x| acc +x), sum, 1e-5); + assert_approx_eq(a1.fold(0., |acc, &x| acc + x), sum, 1e-5); assert_approx_eq(sum, a1.sum(), 1e-5); } } @@ -267,7 +300,7 @@ fn fold_and_sum() { for elt in iter1 { sum += *elt; } - assert_approx_eq(iter2.fold(0., |acc, &x| acc +x), sum, 1e-5); + assert_approx_eq(iter2.fold(0., |acc, &x| acc + x), sum, 1e-5); } } } @@ -293,11 +326,15 @@ fn product() { } fn range_mat(m: Ix, n: Ix) -> Array2 { - Array::linspace(0., (m * n) as f32 - 1., m * n).into_shape((m, n)).unwrap() + Array::linspace(0., (m * n) as f32 - 1., m * n) + .into_shape((m, n)) + .unwrap() } fn range_mat64(m: Ix, n: Ix) -> Array2 { - Array::linspace(0., (m * n) as f64 - 1., m * n).into_shape((m, n)).unwrap() + Array::linspace(0., (m * n) as f64 - 1., m * n) + .into_shape((m, n)) + .unwrap() } fn range1_mat64(m: Ix) -> Array1 { @@ -305,15 +342,17 @@ fn range1_mat64(m: Ix) -> Array1 { } fn range_i32(m: Ix, n: Ix) -> Array2 { - Array::from_iter(0..(m * n) as i32).into_shape((m, n)).unwrap() + Array::from_iter(0..(m * n) as i32) + .into_shape((m, n)) + .unwrap() } // simple, slow, correct (hopefully) mat mul -fn reference_mat_mul(lhs: &ArrayBase, rhs: &ArrayBase) - -> Array2 - where A: LinalgScalar, - S: Data, - S2: Data, +fn reference_mat_mul(lhs: &ArrayBase, rhs: &ArrayBase) -> Array2 +where + A: LinalgScalar, + S: Data, + S2: Data, { let ((m, k), (k2, n)) = (lhs.dim(), rhs.dim()); assert!(m.checked_mul(n).is_some()); @@ -327,8 +366,9 @@ fn reference_mat_mul(lhs: &ArrayBase, rhs: &ArrayBase let mut j = 0; for rr in &mut res_elems { unsafe { - *rr = (0..k).fold(A::zero(), - move |s, x| s + *lhs.uget((i, x)) * *rhs.uget((x, j))); + *rr = (0..k).fold(A::zero(), move |s, x| { + s + *lhs.uget((i, x)) * *rhs.uget((x, j)) + }); } j += 1; if j == n { @@ -336,33 +376,33 @@ fn reference_mat_mul(lhs: &ArrayBase, rhs: &ArrayBase i += 1; } } - unsafe { - ArrayBase::from_shape_vec_unchecked((m, n), res_elems) - } + unsafe { ArrayBase::from_shape_vec_unchecked((m, n), res_elems) } } // simple, slow, correct (hopefully) mat mul -fn reference_mat_vec_mul(lhs: &ArrayBase, rhs: &ArrayBase) - -> Array1 - where A: LinalgScalar, - S: Data, - S2: Data, +fn reference_mat_vec_mul(lhs: &ArrayBase, rhs: &ArrayBase) -> Array1 +where + A: LinalgScalar, + S: Data, + S2: Data, { let ((m, _), k) = (lhs.dim(), rhs.dim()); reference_mat_mul(lhs, &rhs.to_owned().into_shape((k, 
1)).unwrap()) - .into_shape(m).unwrap() + .into_shape(m) + .unwrap() } // simple, slow, correct (hopefully) mat mul -fn reference_vec_mat_mul(lhs: &ArrayBase, rhs: &ArrayBase) - -> Array1 - where A: LinalgScalar, - S: Data, - S2: Data, +fn reference_vec_mat_mul(lhs: &ArrayBase, rhs: &ArrayBase) -> Array1 +where + A: LinalgScalar, + S: Data, + S2: Data, { let (m, (_, n)) = (lhs.dim(), rhs.dim()); reference_mat_mul(&lhs.to_owned().into_shape((1, m)).unwrap(), rhs) - .into_shape(n).unwrap() + .into_shape(n) + .unwrap() } #[test] @@ -535,22 +575,22 @@ fn scaled_add() { let d = alpha * &b + &a; assert_eq!(c, d); - } #[test] fn scaled_add_2() { let beta = -2.3; - let sizes = vec![(4, 4, 1, 4), - (8, 8, 1, 8), - (17, 15, 17, 15), - (4, 17, 4, 17), - (17, 3, 1, 3), - (19, 18, 19, 18), - (16, 17, 16, 17), - (15, 16, 15, 16), - (67, 63, 1, 63), - ]; + let sizes = vec![ + (4, 4, 1, 4), + (8, 8, 1, 8), + (17, 15, 17, 15), + (4, 17, 4, 17), + (17, 3, 1, 3), + (19, 18, 19, 18), + (16, 17, 16, 17), + (15, 16, 15, 16), + (67, 63, 1, 63), + ]; // test different strides for &s1 in &[1, 2, -1, -2] { for &s2 in &[1, 2, -1, -2] { @@ -576,27 +616,24 @@ fn scaled_add_2() { #[test] fn scaled_add_3() { let beta = -2.3; - let sizes = vec![(4, 4, 1, 4), - (8, 8, 1, 8), - (17, 15, 17, 15), - (4, 17, 4, 17), - (17, 3, 1, 3), - (19, 18, 19, 18), - (16, 17, 16, 17), - (15, 16, 15, 16), - (67, 63, 1, 63), - ]; + let sizes = vec![ + (4, 4, 1, 4), + (8, 8, 1, 8), + (17, 15, 17, 15), + (4, 17, 4, 17), + (17, 3, 1, 3), + (19, 18, 19, 18), + (16, 17, 16, 17), + (15, 16, 15, 16), + (67, 63, 1, 63), + ]; // test different strides for &s1 in &[1, 2, -1, -2] { for &s2 in &[1, 2, -1, -2] { for &(m, k, n, q) in &sizes { let mut a = range_mat64(m, k); let mut answer = a.clone(); - let cdim = if n == 1 { - vec![q] - } else { - vec![n, q] - }; + let cdim = if n == 1 { vec![q] } else { vec![n, q] }; let cslice = if n == 1 { vec![SliceOrIndex::from(..).step_by(s2)] } else { @@ -622,20 +659,21 @@ fn scaled_add_3() { } } - #[test] fn gen_mat_mul() { let alpha = -2.3; let beta = 3.14; - let sizes = vec![(4, 4, 4), (8, 8, 8), - (17, 15, 16), - (4, 17, 3), - (17, 3, 22), - (19, 18, 2), - (16, 17, 15), - (15, 16, 17), - (67, 63, 62), - ]; + let sizes = vec![ + (4, 4, 4), + (8, 8, 8), + (17, 15, 16), + (4, 17, 3), + (17, 3, 22), + (19, 18, 2), + (16, 17, 15), + (15, 16, 17), + (67, 63, 62), + ]; // test different strides for &s1 in &[1, 2, -1, -2] { for &s2 in &[1, 2, -1, -2] { @@ -661,7 +699,6 @@ fn gen_mat_mul() { } } - // Test y = A x where A is f-order #[test] fn gemm_64_1_f() { @@ -679,15 +716,17 @@ fn gemm_64_1_f() { fn gen_mat_mul_i32() { let alpha = -1; let beta = 2; - let sizes = vec![(4, 4, 4), (8, 8, 8), - (17, 15, 16), - (4, 17, 3), - (17, 3, 22), - (19, 18, 2), - (16, 17, 15), - (15, 16, 17), - (67, 63, 62), - ]; + let sizes = vec![ + (4, 4, 4), + (8, 8, 8), + (17, 15, 16), + (4, 17, 3), + (17, 3, 22), + (19, 18, 2), + (16, 17, 15), + (15, 16, 17), + (67, 63, 62), + ]; for &(m, k, n) in &sizes { let a = range_i32(m, k); let b = range_i32(k, n); @@ -703,16 +742,17 @@ fn gen_mat_mul_i32() { fn gen_mat_vec_mul() { let alpha = -2.3; let beta = 3.14; - let sizes = vec![(4, 4), - (8, 8), - (17, 15), - (4, 17), - (17, 3), - (19, 18), - (16, 17), - (15, 16), - (67, 63), - ]; + let sizes = vec![ + (4, 4), + (8, 8), + (17, 15), + (4, 17), + (17, 3), + (19, 18), + (16, 17), + (15, 16), + (67, 63), + ]; // test different strides for &s1 in &[1, 2, -1, -2] { for &s2 in &[1, 2, -1, -2] { @@ -746,16 +786,17 @@ fn gen_mat_vec_mul() { #[test] fn 
vec_mat_mul() { - let sizes = vec![(4, 4), - (8, 8), - (17, 15), - (4, 17), - (17, 3), - (19, 18), - (16, 17), - (15, 16), - (67, 63), - ]; + let sizes = vec![ + (4, 4), + (8, 8), + (17, 15), + (4, 17), + (17, 3), + (19, 18), + (16, 17), + (15, 16), + (67, 63), + ]; // test different strides for &s1 in &[1, 2, -1, -2] { for &s2 in &[1, 2, -1, -2] { diff --git a/tests/par_azip.rs b/tests/par_azip.rs index 4ffe5b347..28bb81726 100644 --- a/tests/par_azip.rs +++ b/tests/par_azip.rs @@ -1,11 +1,11 @@ -#![cfg(feature="rayon")] +#![cfg(feature = "rayon")] -extern crate ndarray; extern crate itertools; +extern crate ndarray; -use ndarray::prelude::*; +use itertools::enumerate; use ndarray::parallel::prelude::*; -use itertools::{enumerate}; +use ndarray::prelude::*; use std::sync::atomic::{AtomicUsize, Ordering}; #[test] @@ -19,7 +19,7 @@ fn test_par_azip1() { #[test] fn test_par_azip2() { let mut a = Array::zeros((5, 7)); - let b = Array::from_shape_fn(a.dim(), |(i, j)| 1. / (i + 2*j) as f32); + let b = Array::from_shape_fn(a.dim(), |(i, j)| 1. / (i + 2 * j) as f32); par_azip!(mut a, b in { *a = b; }); assert_eq!(a, b); } @@ -47,7 +47,7 @@ fn test_zip_dim_mismatch_1() { let mut a = Array::zeros((5, 7)); let mut d = a.raw_dim(); d[0] += 1; - let b = Array::from_shape_fn(d, |(i, j)| 1. / (i + 2*j) as f32); + let b = Array::from_shape_fn(d, |(i, j)| 1. / (i + 2 * j) as f32); par_azip!(mut a, b in { *a = b; }); } diff --git a/tests/par_rayon.rs b/tests/par_rayon.rs index 70b69e0eb..e7c177348 100644 --- a/tests/par_rayon.rs +++ b/tests/par_rayon.rs @@ -1,12 +1,12 @@ -#![cfg(feature="rayon")] +#![cfg(feature = "rayon")] extern crate rayon; -extern crate ndarray; extern crate itertools; +extern crate ndarray; -use ndarray::prelude::*; use ndarray::parallel::prelude::*; +use ndarray::prelude::*; const M: usize = 1024 * 10; const N: usize = 100; @@ -25,9 +25,13 @@ fn test_axis_iter() { #[test] fn test_axis_iter_mut() { - let mut a = Array::linspace(0., 1.0f64, M * N).into_shape((M, N)).unwrap(); + let mut a = Array::linspace(0., 1.0f64, M * N) + .into_shape((M, N)) + .unwrap(); let b = a.mapv(|x| x.exp()); - a.axis_iter_mut(Axis(0)).into_par_iter().for_each(|mut v| v.mapv_inplace(|x| x.exp())); + a.axis_iter_mut(Axis(0)) + .into_par_iter() + .for_each(|mut v| v.mapv_inplace(|x| x.exp())); println!("{:?}", a.slice(s![..10, ..5])); assert!(a.all_close(&b, 0.001)); } diff --git a/tests/par_zip.rs b/tests/par_zip.rs index 9cba9888c..2e5a19d04 100644 --- a/tests/par_zip.rs +++ b/tests/par_zip.rs @@ -1,7 +1,7 @@ -#![cfg(feature="rayon")] +#![cfg(feature = "rayon")] -extern crate ndarray; extern crate itertools; +extern crate ndarray; use ndarray::prelude::*; @@ -14,20 +14,16 @@ const N: usize = 100; fn test_zip_1() { let mut a = Array2::::zeros((M, N)); - Zip::from(&mut a) - .par_apply(|x| { - *x = x.exp() - }); + Zip::from(&mut a).par_apply(|x| *x = x.exp()); } #[test] fn test_zip_index_1() { let mut a = Array2::default((10, 10)); - Zip::indexed(&mut a) - .par_apply(|i, x| { - *x = i; - }); + Zip::indexed(&mut a).par_apply(|i, x| { + *x = i; + }); for (i, elt) in a.indexed_iter() { assert_eq!(*elt, i); @@ -38,10 +34,9 @@ fn test_zip_index_1() { fn test_zip_index_2() { let mut a = Array2::default((M, N)); - Zip::indexed(&mut a) - .par_apply(|i, x| { - *x = i; - }); + Zip::indexed(&mut a).par_apply(|i, x| { + *x = i; + }); for (i, elt) in a.indexed_iter() { assert_eq!(*elt, i); @@ -52,10 +47,9 @@ fn test_zip_index_2() { fn test_zip_index_3() { let mut a = Array::default((1, 2, 1, 2, 3)); - Zip::indexed(&mut a) - 
.par_apply(|i, x| { - *x = i; - }); + Zip::indexed(&mut a).par_apply(|i, x| { + *x = i; + }); for (i, elt) in a.indexed_iter() { assert_eq!(*elt, i); @@ -67,12 +61,10 @@ fn test_zip_index_4() { let mut a = Array2::zeros((M, N)); let mut b = Array2::zeros((M, N)); - Zip::indexed(&mut a) - .and(&mut b) - .par_apply(|(i, j), x, y| { - *x = i; - *y = j; - }); + Zip::indexed(&mut a).and(&mut b).par_apply(|(i, j), x, y| { + *x = i; + *y = j; + }); for ((i, _), elt) in a.indexed_iter() { assert_eq!(*elt, i); diff --git a/tests/s.rs b/tests/s.rs index 2705b649e..7acdcd8ad 100644 --- a/tests/s.rs +++ b/tests/s.rs @@ -3,8 +3,7 @@ extern crate ndarray; use ndarray::{s, Array}; #[test] -fn test_s() -{ +fn test_s() { let a = Array::::zeros((3, 4)); let vi = a.slice(s![1.., ..;2]); assert_eq!(vi.shape(), &[2, 2]); diff --git a/tests/stacking.rs b/tests/stacking.rs index 267714f70..bc9a53a4d 100644 --- a/tests/stacking.rs +++ b/tests/stacking.rs @@ -1,32 +1,18 @@ extern crate ndarray; - -use ndarray::{ - aview1, - arr2, - stack, - Axis, - Array2, - ErrorKind, -}; +use ndarray::{arr2, aview1, stack, Array2, Axis, ErrorKind}; #[test] fn stacking() { - let a = arr2(&[[2., 2.], - [3., 3.]]); + let a = arr2(&[[2., 2.], [3., 3.]]); let b = ndarray::stack(Axis(0), &[a.view(), a.view()]).unwrap(); - assert_eq!(b, arr2(&[[2., 2.], - [3., 3.], - [2., 2.], - [3., 3.]])); + assert_eq!(b, arr2(&[[2., 2.], [3., 3.], [2., 2.], [3., 3.]])); let c = stack![Axis(0), a, b]; - assert_eq!(c, arr2(&[[2., 2.], - [3., 3.], - [2., 2.], - [3., 3.], - [2., 2.], - [3., 3.]])); + assert_eq!( + c, + arr2(&[[2., 2.], [3., 3.], [2., 2.], [3., 3.], [2., 2.], [3., 3.]]) + ); let d = stack![Axis(0), a.row(0), &[9., 9.]]; assert_eq!(d, aview1(&[2., 2., 9., 9.])); diff --git a/tests/windows.rs b/tests/windows.rs index 1155be4a0..fc32a70b5 100644 --- a/tests/windows.rs +++ b/tests/windows.rs @@ -1,5 +1,5 @@ -extern crate ndarray; extern crate itertools; +extern crate ndarray; use ndarray::prelude::*; use ndarray::Zip; @@ -22,18 +22,14 @@ use ndarray::Zip; #[test] #[should_panic] fn windows_iterator_zero_size() { - let a = Array::from_iter(10..37) - .into_shape((3, 3, 3)) - .unwrap(); + let a = Array::from_iter(10..37).into_shape((3, 3, 3)).unwrap(); a.windows(Dim((0, 0, 0))); } /// Test that verifites that no windows are yielded on oversized window sizes. #[test] fn windows_iterator_oversized() { - let a = Array::from_iter(10..37) - .into_shape((3, 3, 3)) - .unwrap(); + let a = Array::from_iter(10..37).into_shape((3, 3, 3)).unwrap(); let mut iter = a.windows((4, 3, 2)).into_iter(); // (4,3,2) doesn't fit into (3,3,3) => oversized! assert_eq!(iter.next(), None); } @@ -51,8 +47,9 @@ fn windows_iterator_1d() { arr1(&[13, 14, 15, 16]), arr1(&[14, 15, 16, 17]), arr1(&[15, 16, 17, 18]), - arr1(&[16, 17, 18, 19]) - ]); + arr1(&[16, 17, 18, 19]), + ], + ); } /// Simple test for iterating 2d-arrays via `Windows`. 
@@ -62,18 +59,17 @@ fn windows_iterator_2d() { itertools::assert_equal( a.windows(Dim((3, 2))), vec![ - arr2(&[ [10, 11], [14, 15], [18, 19] ]), - arr2(&[ [11, 12], [15, 16], [19, 20] ]), - arr2(&[ [12, 13], [16, 17], [20, 21] ]), - - arr2(&[ [14, 15], [18, 19], [22, 23] ]), - arr2(&[ [15, 16], [19, 20], [23, 24] ]), - arr2(&[ [16, 17], [20, 21], [24, 25] ]), - - arr2(&[ [18, 19], [22, 23], [26, 27] ]), - arr2(&[ [19, 20], [23, 24], [27, 28] ]), - arr2(&[ [20, 21], [24, 25], [28, 29] ]) - ]); + arr2(&[[10, 11], [14, 15], [18, 19]]), + arr2(&[[11, 12], [15, 16], [19, 20]]), + arr2(&[[12, 13], [16, 17], [20, 21]]), + arr2(&[[14, 15], [18, 19], [22, 23]]), + arr2(&[[15, 16], [19, 20], [23, 24]]), + arr2(&[[16, 17], [20, 21], [24, 25]]), + arr2(&[[18, 19], [22, 23], [26, 27]]), + arr2(&[[19, 20], [23, 24], [27, 28]]), + arr2(&[[20, 21], [24, 25], [28, 29]]), + ], + ); } /// Simple test for iterating 3d-arrays via `Windows`. @@ -84,18 +80,16 @@ fn windows_iterator_3d() { itertools::assert_equal( a.windows(Dim((2, 2, 2))), vec![ - arr3(&[ [[10, 11], [13, 14]], [[19, 20], [22, 23]] ]), - arr3(&[ [[11, 12], [14, 15]], [[20, 21], [23, 24]] ]), - - arr3(&[ [[13, 14], [16, 17]], [[22, 23], [25, 26]] ]), - arr3(&[ [[14, 15], [17, 18]], [[23, 24], [26, 27]] ]), - - arr3(&[ [[19, 20], [22, 23]], [[28, 29], [31, 32]] ]), - arr3(&[ [[20, 21], [23, 24]], [[29, 30], [32, 33]] ]), - - arr3(&[ [[22, 23], [25, 26]], [[31, 32], [34, 35]] ]), - arr3(&[ [[23, 24], [26, 27]], [[32, 33], [35, 36]] ]), - ]); + arr3(&[[[10, 11], [13, 14]], [[19, 20], [22, 23]]]), + arr3(&[[[11, 12], [14, 15]], [[20, 21], [23, 24]]]), + arr3(&[[[13, 14], [16, 17]], [[22, 23], [25, 26]]]), + arr3(&[[[14, 15], [17, 18]], [[23, 24], [26, 27]]]), + arr3(&[[[19, 20], [22, 23]], [[28, 29], [31, 32]]]), + arr3(&[[[20, 21], [23, 24]], [[29, 30], [32, 33]]]), + arr3(&[[[22, 23], [25, 26]], [[31, 32], [34, 35]]]), + arr3(&[[[23, 24], [26, 27]], [[32, 33], [35, 36]]]), + ], + ); } #[test] @@ -105,18 +99,15 @@ fn test_window_zip() { for x in 1..4 { for y in 1..4 { for z in 1..4 { - Zip::indexed(a.windows((x, y, z))) - .apply(|(i, j, k), window| { - let x = x as isize; - let y = y as isize; - let z = z as isize; - let i = i as isize; - let j = j as isize; - let k = k as isize; - assert_eq!(window, a.slice(s![i .. i + x, - j .. j + y, - k .. k + z])); - }) + Zip::indexed(a.windows((x, y, z))).apply(|(i, j, k), window| { + let x = x as isize; + let y = y as isize; + let z = z as isize; + let i = i as isize; + let j = j as isize; + let k = k as isize; + assert_eq!(window, a.slice(s![i..i + x, j..j + y, k..k + z])); + }) } } } diff --git a/tests/zst.rs b/tests/zst.rs index e8fababb3..e078c1198 100644 --- a/tests/zst.rs +++ b/tests/zst.rs @@ -1,4 +1,3 @@ - extern crate ndarray; use ndarray::arr2; @@ -6,7 +5,7 @@ use ndarray::ArcArray; #[test] fn test_swap() { - let mut a = arr2(&[[();3];3]); + let mut a = arr2(&[[(); 3]; 3]); let b = a.clone();