Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Update ndarray 0.14 #110

Merged
merged 9 commits into from
Apr 19, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 5 additions & 5 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -35,9 +35,9 @@ blas = ["ndarray/blas"]
[dependencies]
num-traits = "0.2"
thiserror = "1"
rand = { version = "0.7", features = ["small_rng"] }
ndarray = { version = "0.13", default-features = false, features = ["approx"] }
ndarray-linalg = { version = "0.12.1", optional = true }
rand = { version = "0.8", features = ["small_rng"] }
ndarray = { version = "0.14", default-features = false, features = ["approx"] }
ndarray-linalg = { version = "0.13", optional = true }

[dependencies.intel-mkl-src]
version = "0.6.0"
Expand All @@ -57,8 +57,8 @@ default-features = false
features = ["cblas"]

[dev-dependencies]
ndarray-rand = "0.11"
approx = { version = "0.3", default-features = false, features = ["std"] }
ndarray-rand = "0.13"
approx = { version = "0.4", default-features = false, features = ["std"] }

linfa-datasets = { path = "datasets", features = ["winequality", "iris", "diabetes"] }

Expand Down
6 changes: 3 additions & 3 deletions algorithms/linfa-bayes/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -11,12 +11,12 @@ keywords = ["factorization", "machine-learning", "linfa", "unsupervised"]
categories = ["algorithms", "mathematics", "science"]

[dependencies]
ndarray = { version = "0.13" , features = ["blas", "approx"]}
ndarray-stats = "0.3"
ndarray = { version = "0.14" , features = ["blas", "approx"]}
ndarray-stats = "0.4"
thiserror = "1"

linfa = { version = "0.3.1", path = "../.." }

[dev-dependencies]
approx = "0.3"
approx = "0.4"
linfa-datasets = { version = "0.3.1", path = "../../datasets", features = ["winequality"] }
18 changes: 9 additions & 9 deletions algorithms/linfa-clustering/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -28,22 +28,22 @@ default-features = false
features = ["std", "derive"]

[dependencies]
ndarray = { version = "0.13", features = ["rayon", "approx"]}
ndarray-linalg = "0.12"
ndarray-rand = "0.11"
ndarray-stats = "0.3"
sprs = "0.7"
num-traits = "0.1.32"
rand_isaac = "0.2.0"
ndarray = { version = "0.14", features = ["rayon", "approx"]}
ndarray-linalg = "0.13"
lax = "0.1"
ndarray-rand = "0.13"
ndarray-stats = "0.4"
num-traits = "0.2"
rand_isaac = "0.3"

linfa = { version = "0.3.1", path = "../.." }
partitions = "0.2.4"

[dev-dependencies]
ndarray-npy = { version = "0.5", default-features = false }
ndarray-npy = { version = "0.7", default-features = false }
criterion = "0.3"
serde_json = "1"
approx = "0.3"
approx = "0.4"

[[bench]]
name = "k_means"
Expand Down
4 changes: 2 additions & 2 deletions algorithms/linfa-clustering/examples/appx_dbscan.rs
Original file line number Diff line number Diff line change
Expand Up @@ -54,10 +54,10 @@ fn main() {

// Save to disk our dataset (and the cluster label assigned to each observation)
// We use the `npy` format for compatibility with NumPy
write_npy("clustered_dataset.npy", records).expect("Failed to write .npy file");
write_npy("clustered_dataset.npy", &records).expect("Failed to write .npy file");
write_npy(
"clustered_memberships.npy",
cluster_memberships.map(|&x| x.map(|c| c as i64).unwrap_or(-1)),
&cluster_memberships.map(|&x| x.map(|c| c as i64).unwrap_or(-1)),
)
.expect("Failed to write .npy file");
}
4 changes: 2 additions & 2 deletions algorithms/linfa-clustering/examples/dbscan.rs
Original file line number Diff line number Diff line change
Expand Up @@ -51,10 +51,10 @@ fn main() {

// Save to disk our dataset (and the cluster label assigned to each observation)
// We use the `npy` format for compatibility with NumPy
write_npy("clustered_dataset.npy", records).expect("Failed to write .npy file");
write_npy("clustered_dataset.npy", &records).expect("Failed to write .npy file");
write_npy(
"clustered_memberships.npy",
cluster_memberships.map(|&x| x.map(|c| c as i64).unwrap_or(-1)),
&cluster_memberships.map(|&x| x.map(|c| c as i64).unwrap_or(-1)),
)
.expect("Failed to write .npy file");
}
4 changes: 2 additions & 2 deletions algorithms/linfa-clustering/examples/kmeans.rs
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ fn main() {

// Save to disk our dataset (and the cluster label assigned to each observation)
// We use the `npy` format for compatibility with NumPy
write_npy("clustered_dataset.npy", records).expect("Failed to write .npy file");
write_npy("clustered_memberships.npy", targets.map(|&x| x as u64))
write_npy("clustered_dataset.npy", &records).expect("Failed to write .npy file");
write_npy("clustered_memberships.npy", &targets.map(|&x| x as u64))
.expect("Failed to write .npy file");
}
27 changes: 15 additions & 12 deletions algorithms/linfa-clustering/src/gaussian_mixture/algorithm.rs
Original file line number Diff line number Diff line change
Expand Up @@ -296,7 +296,7 @@ impl<F: Float + Lapack + Scalar> GaussianMixtureModel<F> {
let n_samples = observations.nrows();
let (weights, means, covariances) = Self::estimate_gaussian_parameters(
&observations,
&log_resp.mapv(|v| v.exp()),
&log_resp.mapv(|v| Scalar::exp(v)),
&self.covar_type,
reg_covar,
)?;
Expand Down Expand Up @@ -325,9 +325,9 @@ impl<F: Float + Lapack + Scalar> GaussianMixtureModel<F> {
) -> (Array1<F>, Array2<F>) {
let weighted_log_prob = self.estimate_weighted_log_prob(&observations);
let log_prob_norm = weighted_log_prob
.mapv(|v| v.exp())
.mapv(|v| Scalar::exp(v))
.sum_axis(Axis(1))
.mapv(|v| v.ln());
.mapv(|v| Scalar::ln(v));
let log_resp = weighted_log_prob - log_prob_norm.to_owned().insert_axis(Axis(1));
(log_prob_norm, log_resp)
}
Expand Down Expand Up @@ -384,12 +384,12 @@ impl<F: Float + Lapack + Scalar> GaussianMixtureModel<F> {
.unwrap()
.slice(s![.., ..; n_features+1])
.to_owned()
.mapv(|v| v.ln());
.mapv(|v| Scalar::ln(v));
log_diags.sum_axis(Axis(1))
}

fn estimate_log_weights(&self) -> Array1<F> {
self.weights().mapv(|v| v.ln())
self.weights().mapv(|v| Scalar::ln(v))
}
}

Expand Down Expand Up @@ -420,7 +420,7 @@ impl<'a, F: Float + Lapack + Scalar, R: Rng + SeedableRng + Clone, D: Data<Elem
lower_bound =
GaussianMixtureModel::<F>::compute_lower_bound(&log_resp, log_prob_norm);
let change = lower_bound - prev_lower_bound;
if change.abs() < self.tolerance() {
if ndarray_rand::rand_distr::num_traits::Float::abs(change) < self.tolerance() {
converged_iter = Some(n_iter);
break;
}
Expand Down Expand Up @@ -456,7 +456,7 @@ impl<F: Float + Lapack + Scalar, D: Data<Elem = F>> PredictRef<ArrayBase<D, Ix2>
fn predict_ref<'a>(&'a self, observations: &ArrayBase<D, Ix2>) -> Array1<usize> {
let (_, log_resp) = self.estimate_log_prob_resp(&observations);
log_resp
.mapv(|v| v.exp())
.mapv(|v| Scalar::exp(v))
.map_axis(Axis(1), |row| row.argmax().unwrap())
}
}
Expand All @@ -466,7 +466,8 @@ mod tests {
use super::*;
use crate::generate_blobs;
use approx::{abs_diff_eq, assert_abs_diff_eq};
use ndarray::{array, stack, ArrayView1, ArrayView2, Axis};
use lax::error::Error;
use ndarray::{array, concatenate, ArrayView1, ArrayView2, Axis};
use ndarray_linalg::error::LinalgError;
use ndarray_linalg::error::Result as LAResult;
use ndarray_rand::rand::SeedableRng;
Expand Down Expand Up @@ -560,7 +561,7 @@ mod tests {
let mut rng = Isaac64Rng::seed_from_u64(42);
let xt = Array2::random_using((50, 1), Uniform::new(0., 1.), &mut rng);
let yt = function_test_1d(&xt);
let data = stack(Axis(1), &[xt.view(), yt.view()]).unwrap();
let data = concatenate(Axis(1), &[xt.view(), yt.view()]).unwrap();
let dataset = DatasetBase::from(data);

// Test that cholesky decomposition fails when reg_covariance is zero
Expand All @@ -571,7 +572,8 @@ mod tests {
assert!(
match gmm.expect_err("should generate an error with reg_covar being nul") {
GmmError::LinalgError(e) => match e {
LinalgError::Lapack { return_code: 2 } => true,
LinalgError::Lapack(Error::LapackComputationalFailure { return_code: 2 }) =>
true,
_ => panic!("should be a lapack error 2"),
},
_ => panic!("should be a linear algebra error"),
Expand All @@ -588,7 +590,7 @@ mod tests {
fn test_zeroed_reg_covar_const_failure() {
// repeat values such that covariance is zero
let xt = Array2::ones((50, 1));
let data = stack(Axis(1), &[xt.view(), xt.view()]).unwrap();
let data = concatenate(Axis(1), &[xt.view(), xt.view()]).unwrap();
let dataset = DatasetBase::from(data);

// Test that cholesky decomposition fails when reg_covariance is zero
Expand All @@ -599,7 +601,8 @@ mod tests {
assert!(
match gmm.expect_err("should generate an error with reg_covar being nul") {
GmmError::LinalgError(e) => match e {
LinalgError::Lapack { return_code: 1 } => true,
LinalgError::Lapack(Error::LapackComputationalFailure { return_code: 1 }) =>
true,
_ => panic!("should be a lapack error 1"),
},
_ => panic!("should be a linear algebra error"),
Expand Down
11 changes: 6 additions & 5 deletions algorithms/linfa-clustering/src/k_means/algorithm.rs
Original file line number Diff line number Diff line change
Expand Up @@ -595,7 +595,7 @@ mod tests {
use super::super::KMeansInit;
use super::*;
use approx::assert_abs_diff_eq;
use ndarray::{array, stack, Array, Array1, Array2, Axis};
use ndarray::{array, concatenate, Array, Array1, Array2, Axis};
use ndarray_rand::rand::SeedableRng;
use ndarray_rand::rand_distr::Uniform;
use ndarray_rand::RandomExt;
Expand All @@ -619,7 +619,7 @@ mod tests {
let mut rng = Isaac64Rng::seed_from_u64(42);
let xt = Array::random_using(100, Uniform::new(0., 1.0), &mut rng).insert_axis(Axis(1));
let yt = function_test_1d(&xt);
let data = stack(Axis(1), &[xt.view(), yt.view()]).unwrap();
let data = concatenate(Axis(1), &[xt.view(), yt.view()]).unwrap();

for init in &[
KMeansInit::Random,
Expand Down Expand Up @@ -673,9 +673,10 @@ mod tests {
let memberships_2 = Array1::ones(cluster_size);
let expected_centroid_2 = cluster_2.sum_axis(Axis(0)) / (cluster_size + 1) as f64;

// `stack` combines arrays along a given axis: https://docs.rs/ndarray/0.13.0/ndarray/fn.stack.html
let observations = stack(Axis(0), &[cluster_1.view(), cluster_2.view()]).unwrap();
let memberships = stack(Axis(0), &[memberships_1.view(), memberships_2.view()]).unwrap();
// `concatenate` combines arrays along a given axis: https://docs.rs/ndarray/0.14.0/ndarray/fn.concatenate.html
let observations = concatenate(Axis(0), &[cluster_1.view(), cluster_2.view()]).unwrap();
let memberships =
concatenate(Axis(0), &[memberships_1.view(), memberships_2.view()]).unwrap();

// Does it work?
let old_centroids = Array2::zeros((2, n_features));
Expand Down
14 changes: 7 additions & 7 deletions algorithms/linfa-clustering/src/k_means/init.rs
Original file line number Diff line number Diff line change
Expand Up @@ -135,7 +135,7 @@ fn k_means_para<R: Rng + SeedableRng, F: Float>(
let mut candidates = Array2::zeros((n_clusters * n_rounds, n_features));

// Pick 1st centroid randomly
let first_idx = rng.gen_range(0, n_samples);
let first_idx = rng.gen_range(0..n_samples);
candidates.row_mut(0).assign(&observations.row(first_idx));
let mut n_candidates = 1;

Expand All @@ -149,7 +149,7 @@ fn k_means_para<R: Rng + SeedableRng, F: Float>(
let next_candidates_idx = sample_subsequent_candidates::<R, _>(
&dists,
F::from(candidates_per_round).unwrap(),
rng.gen_range(0, std::u64::MAX),
rng.gen_range(0..std::u64::MAX),
);

// Append the newly generated candidates to the current candidates, breaking out of the loop
Expand Down Expand Up @@ -199,7 +199,7 @@ fn sample_subsequent_candidates<R: Rng + SeedableRng, F: Float>(
|| R::seed_from_u64(seed.fetch_add(1, Relaxed)),
move |rng, (i, d)| {
let d = *d.into_scalar();
let rand = F::from(rng.gen_range(0.0, 1.0)).unwrap();
let rand = F::from(rng.gen_range(0.0..1.0)).unwrap();
let prob = multiplier * d / cost;
(i, rand, prob)
},
Expand Down Expand Up @@ -227,7 +227,7 @@ mod tests {
use super::super::algorithm::{compute_inertia, update_cluster_memberships};
use super::*;
use approx::{abs_diff_eq, assert_abs_diff_eq, assert_abs_diff_ne};
use ndarray::{array, stack, Array};
use ndarray::{array, concatenate, Array};
use ndarray_rand::rand::SeedableRng;
use ndarray_rand::rand_distr::Normal;
use ndarray_rand::RandomExt;
Expand Down Expand Up @@ -307,7 +307,7 @@ mod tests {
// Make sure we don't panic on degenerate data (n_clusters > n_samples)
let degenerate_data = array![[1.0, 2.0]];
let out = init.run(2, degenerate_data.view(), &mut rng);
assert_abs_diff_eq!(out, stack![Axis(0), degenerate_data, degenerate_data]);
assert_abs_diff_eq!(out, concatenate![Axis(0), degenerate_data, degenerate_data]);

// Build 3 separated clusters of points
let centroids = [20.0, -1000.0, 1000.0];
Expand All @@ -316,7 +316,7 @@ mod tests {
.map(|&c| Array::random_using((50, 2), Normal::new(c, 1.).unwrap(), &mut rng))
.collect();
let obs = clusters.iter().fold(Array2::default((0, 2)), |a, b| {
stack(Axis(0), &[a.view(), b.view()]).unwrap()
concatenate(Axis(0), &[a.view(), b.view()]).unwrap()
});

// Look for the right number of centroids
Expand Down Expand Up @@ -353,7 +353,7 @@ mod tests {
.map(|&c| Array::random_using((50, 2), Normal::new(c, 1.).unwrap(), &mut rng))
.collect();
let obs = clusters.iter().fold(Array2::default((0, 2)), |a, b| {
stack(Axis(0), &[a.view(), b.view()]).unwrap()
concatenate(Axis(0), &[a.view(), b.view()]).unwrap()
});

let out_rand = random_init(3, obs.view(), &mut rng.clone());
Expand Down
10 changes: 5 additions & 5 deletions algorithms/linfa-elasticnet/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -28,16 +28,16 @@ default-features = false
features = ["std", "derive"]

[dependencies]
ndarray = {version = "0.13", features = ["blas", "approx"]}
ndarray-linalg = "0.12"
ndarray = { version = "0.14", features = ["blas", "approx"]}
ndarray-linalg = "0.13"

num-traits = "0.2"
approx = "0.3.2"
approx = "0.4"
thiserror = "1"

linfa = { version = "0.3.1", path = "../.." }

[dev-dependencies]
linfa-datasets = { version = "0.3.1", path = "../../datasets", features = ["diabetes"] }
ndarray-rand = "0.11"
rand_isaac = "0.2"
ndarray-rand = "0.13"
rand_isaac = "0.3"
6 changes: 3 additions & 3 deletions algorithms/linfa-hierarchical/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -14,13 +14,13 @@ keywords = ["hierachical", "agglomerative", "clustering", "machine-learning", "l
categories = ["algorithms", "mathematics", "science"]

[dependencies]
ndarray = { version = "0.13", default-features = false }
ndarray = { version = "0.14", default-features = false }
kodama = "0.2"

linfa = { version = "0.3.1", path = "../.." }
linfa-kernel = { version = "0.3.1", path = "../linfa-kernel" }

[dev-dependencies]
rand = "0.7"
ndarray-rand = "0.11"
rand = "0.8"
ndarray-rand = "0.13"
linfa-datasets = { version = "0.3.1", path = "../../datasets", features = ["iris"] }
2 changes: 1 addition & 1 deletion algorithms/linfa-hierarchical/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -186,7 +186,7 @@ mod tests {
// we have 10 observations per cluster
let npoints = 10;
// generate data
let entries = ndarray::stack(
let entries = ndarray::concatenate(
Axis(0),
&[
Array::random((npoints, 2), Normal::new(-1., 0.1).unwrap()).view(),
Expand Down
12 changes: 6 additions & 6 deletions algorithms/linfa-ica/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -24,15 +24,15 @@ default-features = false
features = ["std", "derive"]

[dependencies]
ndarray = { version = "0.13", default-features = false }
ndarray-linalg = "0.12"
ndarray-rand = "0.11"
ndarray-stats = "0.3"
ndarray = { version = "0.14", default-features = false }
ndarray-linalg = "0.13"
ndarray-rand = "0.13"
ndarray-stats = "0.4"
num-traits = "0.2"
rand_isaac = "0.2.0"
rand_isaac = "0.3"

linfa = { version = "0.3.1", path = "../.." }

[dev-dependencies]
ndarray-npy = { version = "0.5", default-features = false }
ndarray-npy = { version = "0.7", default-features = false }
paste = "1.0"
Loading