From 0960fcd6de3ede811b2f00dab73cdad1fbee3a0c Mon Sep 17 00:00:00 2001
From: inakleinbottle <41870650+inakleinbottle@users.noreply.github.com>
Date: Thu, 23 Nov 2023 10:54:20 +0000
Subject: [PATCH] Type deduction fix (#51)

* update changelog

* minor adjustment to stype algorithm

* Added extra tests

* Added type deduction tests for major constructors

* Do not gate scalar type based on integer size, just use double

* rework type deduction in dlpack parsing

* formatting?

* Update CHANGELOG
---
 CHANGELOG                                 |  5 +++++
 roughpy/src/scalars/scalars.cpp           | 16 +++++++++------
 scalars/src/scalar_type.cpp               | 10 ++++-----
 tests/algebra/test_free_tensor.py         | 26 +++++++++++++++++++++---
 tests/algebra/test_lie.py                 | 19 +++++++++++++++++
 tests/algebra/test_shuffle_tensor.py      | 24 ++++++++++++++++++++++
 tests/streams/test_lie_increment_path.py  | 18 ++++++++++++++++
 7 files changed, 104 insertions(+), 14 deletions(-)
 create mode 100644 tests/algebra/test_shuffle_tensor.py

diff --git a/CHANGELOG b/CHANGELOG
index a9c6da19..2feeb85c 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,3 +1,8 @@
+Version 0.1.0:
+ - Added framework for integrating device support and redesigned scalars module to accommodate the changes.
+ - Made changes to type deduction in constructors to avoid exceptions when providing lists of python ints/floats.
+
+
 Version 0.0.8:
  - Disabled linking to BLAS/LAPACK to reduce compile times whilst under development.
  - Greatly expanded the serialization support internally.
diff --git a/roughpy/src/scalars/scalars.cpp b/roughpy/src/scalars/scalars.cpp
index e574f57e..06d3f9a6 100644
--- a/roughpy/src/scalars/scalars.cpp
+++ b/roughpy/src/scalars/scalars.cpp
@@ -257,15 +257,19 @@ static bool try_fill_buffer_dlpack(
 
 
     // This function throws if no matching dtype is found
-    const auto* tensor_stype
-            = python::scalar_type_of_dl_info(dltensor.dtype, dltensor.device);
+    const auto tensor_stype_info = convert_from_dl_datatype(dltensor.dtype);
+    const auto tensor_stype = scalars::scalar_type_of(tensor_stype_info);
+
     if (options.type == nullptr) {
-        options.type = tensor_stype;
+        if (tensor_stype) { options.type = *tensor_stype; } else {
+            options.type = scalars::ScalarType::for_info(tensor_stype_info);
+        }
     }
+    RPY_DBG_ASSERT(options.type != nullptr);
+
     if (buffer.type() == nullptr) {
         buffer = scalars::KeyScalarArray(options.type);
     }
-    RPY_DBG_ASSERT(options.type != nullptr);
 
     if (data == nullptr) {
         // The array is empty, empty result.
@@ -282,7 +286,7 @@ static bool try_fill_buffer_dlpack(
         buffer.allocate_scalars(size);
         options.type->convert_copy(
                 buffer,
-                {tensor_stype, data, static_cast(size)}
+                {tensor_stype_info, data, static_cast(size)}
         );
     } else {
         buffer.allocate_scalars(size);
@@ -290,7 +294,7 @@ static bool try_fill_buffer_dlpack(
                 ndim,
                 shape,
                 strides,
-                {tensor_stype, data, static_cast(size)},
+                {tensor_stype_info, data, static_cast(size)},
                 buffer
         );
     }
diff --git a/scalars/src/scalar_type.cpp b/scalars/src/scalar_type.cpp
index 00c3ae98..3bb281fb 100644
--- a/scalars/src/scalar_type.cpp
+++ b/scalars/src/scalar_type.cpp
@@ -46,11 +46,11 @@ const ScalarType* ScalarType::for_info(const devices::TypeInfo& info)
     switch (info.code) {
         case devices::TypeCode::Int:
         case devices::TypeCode::UInt:
-            if (info.bytes > 3) {
-                return *scalar_type_of();
-            } else {
-                return *scalar_type_of();
-            }
+            // if (info.bytes <= 3) {
+            //     return *scalar_type_of();
+            // } else {
+            return *scalar_type_of();
+            // }
         case devices::TypeCode::Float:
             switch (info.bytes) {
                 case 4: return *scalar_type_of();
diff --git a/tests/algebra/test_free_tensor.py b/tests/algebra/test_free_tensor.py
index c877ed60..3745cc73 100644
--- a/tests/algebra/test_free_tensor.py
+++ b/tests/algebra/test_free_tensor.py
@@ -1,12 +1,11 @@
 import pickle
 
 import numpy as np
-
-
 import pytest
 from numpy.testing import assert_array_almost_equal, assert_array_equal
 
 import roughpy
+import roughpy as rp
 from roughpy import FreeTensor, TensorKey, DPReal
 
 DEPTH_LIMITS = {
@@ -446,4 +445,25 @@ def test_free_tensor_pickle_roundtrip():
 
     t2 = pickle.loads(pickle.dumps(t))
 
-    assert t2 == t
\ No newline at end of file
+    assert t2 == t
+
+
+TYPE_DEDUCTION_WIDTH = 3
+TYPE_DEDUCTION_DEPTH = 3
+
+TYPE_DEDUCTION_ARGS = [
+    (1, rp.DPReal),
+    (1.0, rp.DPReal),
+    ([1], rp.DPReal),
+    ([1.0], rp.DPReal),
+    (np.array([1], dtype="int32"), rp.DPReal),
+    (np.array([1.0], dtype="float32"), rp.SPReal),
+    (np.array([1.0], dtype="float64"), rp.DPReal),
+]
+
+
+@pytest.mark.parametrize("data,typ", TYPE_DEDUCTION_ARGS)
+def test_ft_ctor_type_deduction(data, typ):
+    f = FreeTensor(data, width=TYPE_DEDUCTION_WIDTH, depth=TYPE_DEDUCTION_DEPTH)
+
+    assert f.dtype == typ
diff --git a/tests/algebra/test_lie.py b/tests/algebra/test_lie.py
index ee915610..e51e4bd8 100644
--- a/tests/algebra/test_lie.py
+++ b/tests/algebra/test_lie.py
@@ -194,3 +194,22 @@ def test_Lie_pikle_roundtrip():
     l2 = pickle.loads(pickle.dumps(l))
 
     assert l == l2
+
+
+TYPE_DEDUCTION_WIDTH = 3
+TYPE_DEDUCTION_DEPTH = 3
+
+TYPE_DEDUCTION_ARGS = [
+    ([1] * TYPE_DEDUCTION_WIDTH, roughpy.DPReal),
+    ([1.0] * TYPE_DEDUCTION_WIDTH, roughpy.DPReal),
+    (np.array([1] * TYPE_DEDUCTION_WIDTH, dtype="int32"), roughpy.DPReal),
+    (np.array([1.0] * TYPE_DEDUCTION_WIDTH, dtype="float32"), roughpy.SPReal),
+    (np.array([1.0] * TYPE_DEDUCTION_WIDTH, dtype="float64"), roughpy.DPReal),
+]
+
+
+@pytest.mark.parametrize("data,typ", TYPE_DEDUCTION_ARGS)
+def test_ft_ctor_type_deduction(data, typ):
+    f = Lie(data, width=TYPE_DEDUCTION_WIDTH, depth=TYPE_DEDUCTION_DEPTH)
+
+    assert f.dtype == typ
diff --git a/tests/algebra/test_shuffle_tensor.py b/tests/algebra/test_shuffle_tensor.py
new file mode 100644
index 00000000..792fbb15
--- /dev/null
+++ b/tests/algebra/test_shuffle_tensor.py
@@ -0,0 +1,24 @@
+import numpy as np
+import pytest
+
+import roughpy as rp
+
+TYPE_DEDUCTION_WIDTH = 3
+TYPE_DEDUCTION_DEPTH = 3
+
+TYPE_DEDUCTION_ARGS = [
+    (1, rp.DPReal),
+    (1.0, rp.DPReal),
+    ([1], rp.DPReal),
+    ([1.0], rp.DPReal),
+    (np.array([1], dtype="int32"), rp.DPReal),
+    (np.array([1.0], dtype="float32"), rp.SPReal),
+    (np.array([1.0], dtype="float64"), rp.DPReal),
+]
+
+
+@pytest.mark.parametrize("data,typ", TYPE_DEDUCTION_ARGS)
+def test_ft_ctor_type_deduction(data, typ):
+    f = rp.ShuffleTensor(data, width=TYPE_DEDUCTION_WIDTH, depth=TYPE_DEDUCTION_DEPTH)
+
+    assert f.dtype == typ
diff --git a/tests/streams/test_lie_increment_path.py b/tests/streams/test_lie_increment_path.py
index e2b8e4b0..8119e230 100644
--- a/tests/streams/test_lie_increment_path.py
+++ b/tests/streams/test_lie_increment_path.py
@@ -291,3 +291,21 @@ def test_lie_incr_stream_from_randints_no_deduction_transposed(rng):
 
     assert_array_equal(np.array(sig)[:4],
                        np.hstack([[1.0], np.sum(array, axis=0)[:]]))
+
+
+TYPE_DEDUCTION_WIDTH = 3
+TYPE_DEDUCTION_DEPTH = 3
+TYPE_DEDUCTION_ARGS = [
+    ([[1.0, 2.0, 3.0], [1.0, 2.0, 3.0]], roughpy.DPReal),
+    ([[1, 2, 3], [1, 2, 3]], roughpy.DPReal),
+    (np.array([[1, 2, 3], [1, 2, 3]], dtype=np.int32), roughpy.DPReal),
+    (np.array([[1, 2, 3], [1, 2, 3]], dtype=np.float32), roughpy.SPReal),
+    (np.array([[1, 2, 3], [1, 2, 3]], dtype=np.float64), roughpy.DPReal),
+]
+
+
+@pytest.mark.parametrize('data,typ', TYPE_DEDUCTION_ARGS)
+def test_ctor_type_deduction(data, typ):
+    stream = LieIncrementStream.from_increments(data, width=TYPE_DEDUCTION_WIDTH, depth=TYPE_DEDUCTION_DEPTH)
+
+    assert stream.dtype == typ
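
The sketch below is not part of the commit; it is a minimal usage example, assuming a RoughPy build that includes this patch, showing the constructor behaviour the new tests exercise. The width/depth values are arbitrary, and the expected dtypes mirror the TYPE_DEDUCTION_ARGS tables above.

    import numpy as np
    import roughpy as rp

    # Lists of Python ints/floats are no longer gated on integer size:
    # they deduce double precision (rp.DPReal) instead of raising.
    t = rp.FreeTensor([1, 2, 3], width=3, depth=2)
    assert t.dtype == rp.DPReal

    # Explicitly typed NumPy input keeps its precision: float32 maps to
    # single precision (rp.SPReal), float64 to double precision (rp.DPReal).
    t32 = rp.FreeTensor(np.array([1.0, 2.0, 3.0], dtype="float32"), width=3, depth=2)
    assert t32.dtype == rp.SPReal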