Commit 60f83a0

Update ndarray 0.14

* approx = 0.4
* argmin = 0.4
* ndarray-csv = 0.5
* ndarray-linalg = 0.13
* ndarray-npy = 0.7
* ndarray-rand = 0.13
* ndarray-stats = 0.4
* num-traits = 0.2
* rand = 0.8
* rand-isaac = 0.3
* sprs = git master rev=761d4f0

1 parent 6065062 · commit 60f83a0

43 files changed (+199 -189 lines)


Cargo.toml (+5 -5)

@@ -35,9 +35,9 @@ blas = ["ndarray/blas"]
 [dependencies]
 num-traits = "0.2"
 thiserror = "1"
-rand = { version = "0.7", features = ["small_rng"] }
-ndarray = { version = "0.13", default-features = false, features = ["approx"] }
-ndarray-linalg = { version = "0.12.1", optional = true }
+rand = { version = "0.8", features = ["small_rng"] }
+ndarray = { version = "0.14", default-features = false, features = ["approx"] }
+ndarray-linalg = { version = "0.13", optional = true }
 
 [dependencies.intel-mkl-src]
 version = "0.6.0"
@@ -57,8 +57,8 @@ default-features = false
 features = ["cblas"]
 
 [dev-dependencies]
-ndarray-rand = "0.11"
-approx = { version = "0.3", default-features = false, features = ["std"] }
+ndarray-rand = "0.13"
+approx = { version = "0.4", default-features = false, features = ["std"] }
 
 linfa-datasets = { path = "datasets", features = ["winequality", "iris", "diabetes"] }

algorithms/linfa-bayes/Cargo.toml (+3 -3)

@@ -11,12 +11,12 @@ keywords = ["factorization", "machine-learning", "linfa", "unsupervised"]
 categories = ["algorithms", "mathematics", "science"]
 
 [dependencies]
-ndarray = { version = "0.13" , features = ["blas", "approx"]}
-ndarray-stats = "0.3"
+ndarray = { version = "0.14" , features = ["blas", "approx"]}
+ndarray-stats = "0.4"
 thiserror = "1"
 
 linfa = { version = "0.3.1", path = "../.." }
 
 [dev-dependencies]
-approx = "0.3"
+approx = "0.4"
 linfa-datasets = { version = "0.3.1", path = "../../datasets", features = ["winequality"] }

algorithms/linfa-clustering/Cargo.toml (+8 -9)

@@ -28,22 +28,21 @@ default-features = false
 features = ["std", "derive"]
 
 [dependencies]
-ndarray = { version = "0.13", features = ["rayon", "approx"]}
-ndarray-linalg = "0.12"
-ndarray-rand = "0.11"
-ndarray-stats = "0.3"
-sprs = "0.7"
-num-traits = "0.1.32"
-rand_isaac = "0.2.0"
+ndarray = { version = "0.14", features = ["rayon", "approx"]}
+ndarray-linalg = "0.13"
+ndarray-rand = "0.13"
+ndarray-stats = "0.4"
+num-traits = "0.2"
+rand_isaac = "0.3"
 
 linfa = { version = "0.3.1", path = "../.." }
 partitions = "0.2.4"
 
 [dev-dependencies]
-ndarray-npy = { version = "0.5", default-features = false }
+ndarray-npy = { version = "0.7", default-features = false }
 criterion = "0.3"
 serde_json = "1"
-approx = "0.3"
+approx = "0.4"
 
 [[bench]]
 name = "k_means"

algorithms/linfa-clustering/examples/appx_dbscan.rs (+2 -2)

@@ -54,10 +54,10 @@ fn main() {
 
     // Save to disk our dataset (and the cluster label assigned to each observation)
    // We use the `npy` format for compatibility with NumPy
-    write_npy("clustered_dataset.npy", records).expect("Failed to write .npy file");
+    write_npy("clustered_dataset.npy", &records).expect("Failed to write .npy file");
     write_npy(
         "clustered_memberships.npy",
-        cluster_memberships.map(|&x| x.map(|c| c as i64).unwrap_or(-1)),
+        &cluster_memberships.map(|&x| x.map(|c| c as i64).unwrap_or(-1)),
     )
     .expect("Failed to write .npy file");
 }
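
The added `&` here (and in the dbscan and kmeans examples below) tracks the ndarray-npy 0.5 → 0.7 bump, where `write_npy` takes the array by reference instead of by value. A minimal sketch of the updated call shape, assuming ndarray-npy 0.7; the `records.npy` path and the small array are illustrative only, not taken from the examples above:

```rust
// A minimal sketch, assuming ndarray-npy 0.7, where `write_npy` borrows the
// array instead of consuming it; `records.npy` is a hypothetical output path.
use ndarray::array;
use ndarray_npy::write_npy;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let records = array![[1.0_f64, 2.0], [3.0, 4.0]];

    // Pass a reference; `records` stays usable after the write.
    write_npy("records.npy", &records)?;
    println!("wrote {} rows", records.nrows());
    Ok(())
}
```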

algorithms/linfa-clustering/examples/dbscan.rs (+2 -2)

@@ -51,10 +51,10 @@ fn main() {
 
     // Save to disk our dataset (and the cluster label assigned to each observation)
     // We use the `npy` format for compatibility with NumPy
-    write_npy("clustered_dataset.npy", records).expect("Failed to write .npy file");
+    write_npy("clustered_dataset.npy", &records).expect("Failed to write .npy file");
     write_npy(
         "clustered_memberships.npy",
-        cluster_memberships.map(|&x| x.map(|c| c as i64).unwrap_or(-1)),
+        &cluster_memberships.map(|&x| x.map(|c| c as i64).unwrap_or(-1)),
     )
     .expect("Failed to write .npy file");
 }

algorithms/linfa-clustering/examples/kmeans.rs (+2 -2)

@@ -33,7 +33,7 @@ fn main() {
 
     // Save to disk our dataset (and the cluster label assigned to each observation)
     // We use the `npy` format for compatibility with NumPy
-    write_npy("clustered_dataset.npy", records).expect("Failed to write .npy file");
-    write_npy("clustered_memberships.npy", targets.map(|&x| x as u64))
+    write_npy("clustered_dataset.npy", &records).expect("Failed to write .npy file");
+    write_npy("clustered_memberships.npy", &targets.map(|&x| x as u64))
         .expect("Failed to write .npy file");
 }

algorithms/linfa-clustering/src/gaussian_mixture/algorithm.rs (+12 -12)

@@ -293,7 +293,7 @@ impl<F: Float + Lapack + Scalar> GaussianMixtureModel<F> {
         let n_samples = observations.nrows();
         let (weights, means, covariances) = Self::estimate_gaussian_parameters(
             &observations,
-            &log_resp.mapv(|v| v.exp()),
+            &log_resp.mapv(|v| Scalar::exp(v)),
             &self.covar_type,
             reg_covar,
         )?;
@@ -322,9 +322,9 @@ impl<F: Float + Lapack + Scalar> GaussianMixtureModel<F> {
     ) -> (Array1<F>, Array2<F>) {
         let weighted_log_prob = self.estimate_weighted_log_prob(&observations);
         let log_prob_norm = weighted_log_prob
-            .mapv(|v| v.exp())
+            .mapv(|v| Scalar::exp(v))
             .sum_axis(Axis(1))
-            .mapv(|v| v.ln());
+            .mapv(|v| Scalar::ln(v));
         let log_resp = weighted_log_prob - log_prob_norm.to_owned().insert_axis(Axis(1));
         (log_prob_norm, log_resp)
     }
@@ -381,12 +381,12 @@ impl<F: Float + Lapack + Scalar> GaussianMixtureModel<F> {
             .unwrap()
             .slice(s![.., ..; n_features+1])
             .to_owned()
-            .mapv(|v| v.ln());
+            .mapv(|v| Scalar::ln(v));
         log_diags.sum_axis(Axis(1))
     }
 
     fn estimate_log_weights(&self) -> Array1<F> {
-        self.weights().mapv(|v| v.ln())
+        self.weights().mapv(|v| Scalar::ln(v))
     }
 }
 
@@ -417,7 +417,7 @@ impl<'a, F: Float + Lapack + Scalar, R: Rng + Clone, D: Data<Elem = F>, T>
             lower_bound =
                 GaussianMixtureModel::<F>::compute_lower_bound(&log_resp, log_prob_norm);
             let change = lower_bound - prev_lower_bound;
-            if change.abs() < self.tolerance() {
+            if ndarray_rand::rand_distr::num_traits::Float::abs(change) < self.tolerance() {
                 converged_iter = Some(n_iter);
                 break;
             }
@@ -453,7 +453,7 @@ impl<F: Float + Lapack + Scalar, D: Data<Elem = F>> PredictRef<ArrayBase<D, Ix2>
     fn predict_ref<'a>(&'a self, observations: &ArrayBase<D, Ix2>) -> Array1<usize> {
         let (_, log_resp) = self.estimate_log_prob_resp(&observations);
         log_resp
-            .mapv(|v| v.exp())
+            .mapv(|v| Scalar::exp(v))
             .map_axis(Axis(1), |row| row.argmax().unwrap())
     }
 }
@@ -463,7 +463,7 @@ mod tests {
     use super::*;
     use crate::generate_blobs;
     use approx::{abs_diff_eq, assert_abs_diff_eq};
-    use ndarray::{array, stack, ArrayView1, ArrayView2, Axis};
+    use ndarray::{array, concatenate, ArrayView1, ArrayView2, Axis};
     use ndarray_linalg::error::LinalgError;
     use ndarray_linalg::error::Result as LAResult;
     use ndarray_rand::rand::SeedableRng;
@@ -557,7 +557,7 @@
         let mut rng = Isaac64Rng::seed_from_u64(42);
         let xt = Array2::random_using((50, 1), Uniform::new(0., 1.), &mut rng);
         let yt = function_test_1d(&xt);
-        let data = stack(Axis(1), &[xt.view(), yt.view()]).unwrap();
+        let data = concatenate(Axis(1), &[xt.view(), yt.view()]).unwrap();
         let dataset = DatasetBase::from(data);
 
         // Test that cholesky decomposition fails when reg_covariance is zero
@@ -568,7 +568,7 @@
         assert!(
             match gmm.expect_err("should generate an error with reg_covar being nul") {
                 GmmError::LinalgError(e) => match e {
-                    LinalgError::Lapack { return_code: 2 } => true,
+                    LinalgError::Lapack(_) => true,
                     _ => panic!("should be a lapack error 2"),
                 },
                 _ => panic!("should be a linear algebra error"),
@@ -585,7 +585,7 @@
     fn test_zeroed_reg_covar_const_failure() {
         // repeat values such that covariance is zero
        let xt = Array2::ones((50, 1));
-        let data = stack(Axis(1), &[xt.view(), xt.view()]).unwrap();
+        let data = concatenate(Axis(1), &[xt.view(), xt.view()]).unwrap();
         let dataset = DatasetBase::from(data);
 
         // Test that cholesky decomposition fails when reg_covariance is zero
@@ -596,7 +596,7 @@
         assert!(
             match gmm.expect_err("should generate an error with reg_covar being nul") {
                 GmmError::LinalgError(e) => match e {
-                    LinalgError::Lapack { return_code: 1 } => true,
+                    LinalgError::Lapack(_) => true,
                     _ => panic!("should be a lapack error 1"),
                 },
                 _ => panic!("should be a linear algebra error"),

algorithms/linfa-clustering/src/k_means/algorithm.rs (+6 -5)

@@ -365,7 +365,7 @@ fn get_random_centroids<F: Float, D: Data<Elem = F>>(
 mod tests {
     use super::*;
     use approx::assert_abs_diff_eq;
-    use ndarray::{array, stack, Array, Array1, Array2, Axis};
+    use ndarray::{array, concatenate, Array, Array1, Array2, Axis};
     use ndarray_rand::rand::SeedableRng;
     use ndarray_rand::rand_distr::Uniform;
     use ndarray_rand::RandomExt;
@@ -389,7 +389,7 @@
         let mut rng = Isaac64Rng::seed_from_u64(42);
         let xt = Array::random_using(50, Uniform::new(0., 1.), &mut rng).insert_axis(Axis(1));
         let yt = function_test_1d(&xt);
-        let data = stack(Axis(1), &[xt.view(), yt.view()]).unwrap();
+        let data = concatenate(Axis(1), &[xt.view(), yt.view()]).unwrap();
 
         // First clustering with one iteration
         let dataset = DatasetBase::from(data);
@@ -428,9 +428,10 @@
         let memberships_2 = Array1::ones(cluster_size);
         let expected_centroid_2 = cluster_2.sum_axis(Axis(0)) / (cluster_size + 1) as f64;
 
-        // `stack` combines arrays along a given axis: https://docs.rs/ndarray/0.13.0/ndarray/fn.stack.html
-        let observations = stack(Axis(0), &[cluster_1.view(), cluster_2.view()]).unwrap();
-        let memberships = stack(Axis(0), &[memberships_1.view(), memberships_2.view()]).unwrap();
+        // `concatenate` combines arrays along a given axis: https://docs.rs/ndarray/0.13.0/ndarray/fn.concatenate.html
+        let observations = concatenate(Axis(0), &[cluster_1.view(), cluster_2.view()]).unwrap();
+        let memberships =
+            concatenate(Axis(0), &[memberships_1.view(), memberships_2.view()]).unwrap();
 
         // Does it work?
         let old_centroids = Array2::zeros((2, n_features));
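
The `stack` → `concatenate` renames in these tests follow the ndarray 0.13 → 0.14 change this commit tracks: joining arrays along an existing axis is now spelled `concatenate`. A short sketch, assuming ndarray 0.14:

```rust
// A short sketch of `concatenate`, assuming ndarray 0.14: it joins arrays
// along an axis that already exists, which is what `stack` did in 0.13.
use ndarray::{array, concatenate, Axis};

fn main() {
    let a = array![[1.0, 2.0], [3.0, 4.0]];
    let b = array![[5.0, 6.0], [7.0, 8.0]];

    // Two (2, 2) arrays joined along Axis(0) give a (4, 2) array.
    let rows = concatenate(Axis(0), &[a.view(), b.view()]).unwrap();
    assert_eq!(rows.shape(), &[4, 2]);

    // Joined along Axis(1) they give a (2, 4) array instead.
    let cols = concatenate(Axis(1), &[a.view(), b.view()]).unwrap();
    assert_eq!(cols.shape(), &[2, 4]);
}
```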

algorithms/linfa-elasticnet/Cargo.toml (+5 -5)

@@ -28,16 +28,16 @@ default-features = false
 features = ["std", "derive"]
 
 [dependencies]
-ndarray = {version = "0.13", features = ["blas", "approx"]}
-ndarray-linalg = "0.12"
+ndarray = { version = "0.14", features = ["blas", "approx"]}
+ndarray-linalg = "0.13"
 
 num-traits = "0.2"
-approx = "0.3.2"
+approx = "0.4"
 thiserror = "1"
 
 linfa = { version = "0.3.1", path = "../.." }
 
 [dev-dependencies]
 linfa-datasets = { version = "0.3.1", path = "../../datasets", features = ["diabetes"] }
-ndarray-rand = "0.11"
-rand_isaac = "0.2"
+ndarray-rand = "0.13"
+rand_isaac = "0.3"

algorithms/linfa-hierarchical/Cargo.toml (+3 -3)

@@ -14,13 +14,13 @@ keywords = ["hierachical", "agglomerative", "clustering", "machine-learning", "l
 categories = ["algorithms", "mathematics", "science"]
 
 [dependencies]
-ndarray = { version = "0.13", default-features = false }
+ndarray = { version = "0.14", default-features = false }
 kodama = "0.2"
 
 linfa = { version = "0.3.1", path = "../.." }
 linfa-kernel = { version = "0.3.1", path = "../linfa-kernel" }
 
 [dev-dependencies]
-rand = "0.7"
-ndarray-rand = "0.11"
+rand = "0.8"
+ndarray-rand = "0.13"
 linfa-datasets = { version = "0.3.1", path = "../../datasets", features = ["iris"] }

algorithms/linfa-hierarchical/src/lib.rs (+1 -1)

@@ -186,7 +186,7 @@ mod tests {
         // we have 10 observations per cluster
         let npoints = 10;
         // generate data
-        let entries = ndarray::stack(
+        let entries = ndarray::concatenate(
             Axis(0),
             &[
                 Array::random((npoints, 2), Normal::new(-1., 0.1).unwrap()).view(),

algorithms/linfa-ica/Cargo.toml (+6 -6)

@@ -24,15 +24,15 @@ default-features = false
 features = ["std", "derive"]
 
 [dependencies]
-ndarray = { version = "0.13", default-features = false }
-ndarray-linalg = "0.12"
-ndarray-rand = "0.11"
-ndarray-stats = "0.3"
+ndarray = { version = "0.14", default-features = false }
+ndarray-linalg = "0.13"
+ndarray-rand = "0.13"
+ndarray-stats = "0.4"
 num-traits = "0.2"
-rand_isaac = "0.2.0"
+rand_isaac = "0.3"
 
 linfa = { version = "0.3.1", path = "../.." }
 
 [dev-dependencies]
-ndarray-npy = { version = "0.5", default-features = false }
+ndarray-npy = { version = "0.7", default-features = false }
 paste = "1.0"

algorithms/linfa-ica/examples/fast_ica.rs (+6 -6)

@@ -3,7 +3,7 @@ use linfa::{
     traits::{Fit, Predict},
 };
 use linfa_ica::fast_ica::{FastIca, GFunc};
-use ndarray::{array, stack};
+use ndarray::{array, concatenate};
 use ndarray::{Array, Array2, Axis};
 use ndarray_npy::write_npy;
 use ndarray_rand::{rand::SeedableRng, rand_distr::Uniform, RandomExt};
@@ -29,9 +29,9 @@ fn main() -> Result<(), Box<dyn Error>> {
     let sources_ica = ica.predict(&sources_mixed);
 
     // Saving to disk
-    write_npy("sources_original.npy", sources_original).expect("Failed to write .npy file");
-    write_npy("sources_mixed.npy", sources_mixed).expect("Failed to write .npy file");
-    write_npy("sources_ica.npy", sources_ica).expect("Failed to write .npy file");
+    write_npy("sources_original.npy", &sources_original).expect("Failed to write .npy file");
+    write_npy("sources_mixed.npy", &sources_mixed).expect("Failed to write .npy file");
+    write_npy("sources_ica.npy", &sources_ica).expect("Failed to write .npy file");
 
     Ok(())
 }
@@ -53,8 +53,8 @@ fn create_data() -> (Array2<f64>, Array2<f64>) {
         -1.
     });
 
-    // Column stacking both the signals
-    let mut sources_original = stack![
+    // Column concatenating both the signals
+    let mut sources_original = concatenate![
         Axis(1),
         source1.insert_axis(Axis(1)),
         source2.insert_axis(Axis(1))
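
The macro form is renamed the same way: `stack![...]` from ndarray 0.13 becomes `concatenate![...]`. A compact sketch of the column-concatenation idiom used above, assuming ndarray 0.14 and two hypothetical 1-D signals:

```rust
// Column-concatenating two 1-D signals into an (n, 2) matrix, assuming
// ndarray 0.14's `concatenate!` macro (the 0.13 spelling was `stack!`).
use ndarray::{array, concatenate, Axis};

fn main() {
    let source1 = array![1.0, 2.0, 3.0];
    let source2 = array![4.0, 5.0, 6.0];

    // `insert_axis` turns each (3,) signal into a (3, 1) column first.
    let sources = concatenate![
        Axis(1),
        source1.insert_axis(Axis(1)),
        source2.insert_axis(Axis(1))
    ];
    assert_eq!(sources.shape(), &[3, 2]);
}
```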

algorithms/linfa-ica/src/fast_ica.rs (+3 -3)

@@ -2,7 +2,7 @@
 
 use linfa::{dataset::DatasetBase, traits::*, Float};
 use ndarray::{Array, Array1, Array2, ArrayBase, Axis, Data, Ix2};
-use ndarray_linalg::{eigh::Eigh, lapack::UPLO, svd::SVD, Lapack};
+use ndarray_linalg::{eigh::Eigh, solveh::UPLO, svd::SVD, Lapack};
 use ndarray_rand::{rand::SeedableRng, rand_distr::Uniform, RandomExt};
 use ndarray_stats::QuantileExt;
 use rand_isaac::Isaac64Rng;
@@ -368,8 +368,8 @@
         let mut rng = Isaac64Rng::seed_from_u64(42);
         let source2 = Array::random_using((nsamples, 1), StudentT::new(1.0).unwrap(), &mut rng);
 
-        // Column stacking both the sources
-        let mut sources = stack![Axis(1), source1.insert_axis(Axis(1)), source2];
+        // Column concatenating both the sources
+        let mut sources = concatenate![Axis(1), source1.insert_axis(Axis(1)), source2];
         center_and_norm(&mut sources);
 
         // Mixing the two sources

algorithms/linfa-ica/src/lib.rs (+3 -3)

@@ -30,7 +30,7 @@
 //!     traits::{Fit, Predict},
 //! };
 //! use linfa_ica::fast_ica::{FastIca, GFunc};
-//! use ndarray::{array, stack};
+//! use ndarray::{array, concatenate};
 //! use ndarray::{Array, Array2, Axis};
 //! use ndarray_npy::write_npy;
 //! use ndarray_rand::{rand::SeedableRng, rand_distr::Uniform, RandomExt};
@@ -48,8 +48,8 @@
 //!     -1.
 //! });
 //!
-//! // Column stacking both the signals
-//! let mut sources_original = stack![
+//! // Column concatenating both the signals
+//! let mut sources_original = concatenate![
 //!     Axis(1),
 //!     source1.insert_axis(Axis(1)),
 //!     source2.insert_axis(Axis(1))

algorithms/linfa-kernel/Cargo.toml (+3 -2)

@@ -24,8 +24,9 @@ default-features = false
 features = ["std", "derive"]
 
 [dependencies]
-ndarray = "0.13"
-sprs = { version = "0.9.3", default-features = false }
+ndarray = "0.14"
+num-traits = "0.2"
+sprs = { git="https://github.com/vbarrielle/sprs.git", rev="761d4f0", default-features = false }
 hnsw = "0.6"
 space = "0.10"
