Commit
Type deduction fix (#51)
* Update changelog

* Minor adjustment to the scalar type (stype) deduction algorithm

* Added extra tests

* Added type deduction tests for major constructors

* Do not gate scalar type based on integer size; just use double

* Rework type deduction in dlpack parsing

* Formatting

* Update CHANGELOG
inakleinbottle authored Nov 23, 2023
1 parent 16a2cba commit 0960fcd
Showing 7 changed files with 104 additions and 14 deletions.
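
In short: the algebra constructors should now accept plain Python ints and floats (and lists of them) without raising, deducing the double-precision scalar type instead. A minimal sketch of that behaviour, mirroring the tests added below (the width/depth values match those tests):

import roughpy as rp

# Lists of Python ints or floats previously failed type deduction with an
# exception; they should now deduce DPReal (double precision).
ft_from_ints = rp.FreeTensor([1, 1, 1], width=3, depth=3)
ft_from_floats = rp.FreeTensor([1.0, 1.0, 1.0], width=3, depth=3)

assert ft_from_ints.dtype == rp.DPReal
assert ft_from_floats.dtype == rp.DPReal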
5 changes: 5 additions & 0 deletions CHANGELOG
@@ -1,3 +1,8 @@
Version 0.1.0:
- Added framework for integrating device support and redesigned scalars module to accommodate the changes.
- Made changes to type deduction in constructors to avoid exceptions when providing lists of python ints/floats.


Version 0.0.8:
- Disabled linking to BLAS/LAPACK to reduce compile times whilst under development.
- Greatly expanded the serialization support internally.
16 changes: 10 additions & 6 deletions roughpy/src/scalars/scalars.cpp
@@ -257,15 +257,19 @@ static bool try_fill_buffer_dlpack(

// This function throws if no matching dtype is found

-    const auto* tensor_stype
-            = python::scalar_type_of_dl_info(dltensor.dtype, dltensor.device);
+    const auto tensor_stype_info = convert_from_dl_datatype(dltensor.dtype);
+    const auto tensor_stype = scalars::scalar_type_of(tensor_stype_info);

     if (options.type == nullptr) {
-        options.type = tensor_stype;
+        if (tensor_stype) { options.type = *tensor_stype; } else {
+            options.type = scalars::ScalarType::for_info(tensor_stype_info);
+        }
     }
+    RPY_DBG_ASSERT(options.type != nullptr);

     if (buffer.type() == nullptr) {
         buffer = scalars::KeyScalarArray(options.type);
     }
-    RPY_DBG_ASSERT(options.type != nullptr);

if (data == nullptr) {
// The array is empty, empty result.
@@ -282,15 +286,15 @@
         buffer.allocate_scalars(size);
         options.type->convert_copy(
                 buffer,
-                {tensor_stype, data, static_cast<dimn_t>(size)}
+                {tensor_stype_info, data, static_cast<dimn_t>(size)}
         );
     } else {
         buffer.allocate_scalars(size);
         dl_copy_strided(
                 ndim,
                 shape,
                 strides,
-                {tensor_stype, data, static_cast<dimn_t>(size)},
+                {tensor_stype_info, data, static_cast<dimn_t>(size)},
                 buffer
         );
     }
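The reworked DLPack parsing above converts the tensor's DLPack dtype to a TypeInfo, looks up a registered scalar type for it, and falls back to ScalarType::for_info only when no registered type matches. Assuming NumPy arrays are routed through this DLPack/buffer path (as the new tests suggest), a small sketch of the expected outcomes using only dtypes covered by those tests:

import numpy as np
import roughpy as rp

# Float dtypes map onto the matching registered scalar types...
assert rp.FreeTensor(np.array([1.0], dtype="float32"), width=3, depth=3).dtype == rp.SPReal
assert rp.FreeTensor(np.array([1.0], dtype="float64"), width=3, depth=3).dtype == rp.DPReal

# ...while integer dtypes fall back to double precision.
assert rp.FreeTensor(np.array([1], dtype="int32"), width=3, depth=3).dtype == rp.DPReal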
10 changes: 5 additions & 5 deletions scalars/src/scalar_type.cpp
@@ -46,11 +46,11 @@ const ScalarType* ScalarType::for_info(const devices::TypeInfo& info)
switch (info.code) {
case devices::TypeCode::Int:
case devices::TypeCode::UInt:
-        if (info.bytes > 3) {
-            return *scalar_type_of<float>();
-        } else {
-            return *scalar_type_of<double>();
-        }
+        // if (info.bytes <= 3) {
+        //     return *scalar_type_of<float>();
+        // } else {
+        return *scalar_type_of<double>();
+        // }
case devices::TypeCode::Float:
switch (info.bytes) {
case 4: return *scalar_type_of<float>();
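With the byte-width branch in ScalarType::for_info commented out, Int and UInt type codes of any width now resolve to the double-precision scalar type. A hedged sketch of what that implies for wider integer inputs; note that the committed tests only cover int32, so accepting int64/uint64 here is an assumption about which widths the conversion routines handle:

import numpy as np
import roughpy as rp

# Assumption: these wider integer dtypes are accepted by the conversion
# machinery; only int32 is exercised by the tests in this commit.
for dtype in ("int32", "int64", "uint64"):
    t = rp.FreeTensor(np.array([1, 2, 3], dtype=dtype), width=3, depth=3)
    assert t.dtype == rp.DPReal  # no longer gated on integer byte width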
26 changes: 23 additions & 3 deletions tests/algebra/test_free_tensor.py
@@ -1,12 +1,11 @@
import pickle

import numpy as np


import pytest
from numpy.testing import assert_array_almost_equal, assert_array_equal

import roughpy
import roughpy as rp
from roughpy import FreeTensor, TensorKey, DPReal

DEPTH_LIMITS = {
@@ -446,4 +445,25 @@ def test_free_tensor_pickle_roundtrip():

t2 = pickle.loads(pickle.dumps(t))

assert t2 == t


TYPE_DEDUCTION_WIDTH = 3
TYPE_DEDUCTION_DEPTH = 3

TYPE_DEDUCTION_ARGS = [
(1, rp.DPReal),
(1.0, rp.DPReal),
([1], rp.DPReal),
([1.0], rp.DPReal),
(np.array([1], dtype="int32"), rp.DPReal),
(np.array([1.0], dtype="float32"), rp.SPReal),
(np.array([1.0], dtype="float64"), rp.DPReal),
]


@pytest.mark.parametrize("data,typ", TYPE_DEDUCTION_ARGS)
def test_ft_ctor_type_deduction(data, typ):
f = FreeTensor(data, width=TYPE_DEDUCTION_WIDTH, depth=TYPE_DEDUCTION_DEPTH)

assert f.dtype == typ
19 changes: 19 additions & 0 deletions tests/algebra/test_lie.py
@@ -194,3 +194,22 @@ def test_Lie_pikle_roundtrip():
l2 = pickle.loads(pickle.dumps(l))

assert l == l2


TYPE_DEDUCTION_WIDTH = 3
TYPE_DEDUCTION_DEPTH = 3

TYPE_DEDUCTION_ARGS = [
([1] * TYPE_DEDUCTION_WIDTH, roughpy.DPReal),
([1.0] * TYPE_DEDUCTION_WIDTH, roughpy.DPReal),
(np.array([1] * TYPE_DEDUCTION_WIDTH, dtype="int32"), roughpy.DPReal),
(np.array([1.0] * TYPE_DEDUCTION_WIDTH, dtype="float32"), roughpy.SPReal),
(np.array([1.0] * TYPE_DEDUCTION_WIDTH, dtype="float64"), roughpy.DPReal),
]


@pytest.mark.parametrize("data,typ", TYPE_DEDUCTION_ARGS)
def test_lie_ctor_type_deduction(data, typ):
f = Lie(data, width=TYPE_DEDUCTION_WIDTH, depth=TYPE_DEDUCTION_DEPTH)

assert f.dtype == typ
24 changes: 24 additions & 0 deletions tests/algebra/test_shuffle_tensor.py
@@ -0,0 +1,24 @@
import numpy as np
import pytest

import roughpy as rp

TYPE_DEDUCTION_WIDTH = 3
TYPE_DEDUCTION_DEPTH = 3

TYPE_DEDUCTION_ARGS = [
(1, rp.DPReal),
(1.0, rp.DPReal),
([1], rp.DPReal),
([1.0], rp.DPReal),
(np.array([1], dtype="int32"), rp.DPReal),
(np.array([1.0], dtype="float32"), rp.SPReal),
(np.array([1.0], dtype="float64"), rp.DPReal),
]


@pytest.mark.parametrize("data,typ", TYPE_DEDUCTION_ARGS)
def test_shuffle_tensor_ctor_type_deduction(data, typ):
f = rp.ShuffleTensor(data, width=TYPE_DEDUCTION_WIDTH, depth=TYPE_DEDUCTION_DEPTH)

assert f.dtype == typ
18 changes: 18 additions & 0 deletions tests/streams/test_lie_increment_path.py
@@ -291,3 +291,21 @@ def test_lie_incr_stream_from_randints_no_deduction_transposed(rng):

assert_array_equal(np.array(sig)[:4],
np.hstack([[1.0], np.sum(array, axis=0)[:]]))


TYPE_DEDUCTION_WIDTH = 3
TYPE_DEDUCTION_DEPTH = 3
TYPE_DEDUCTION_ARGS = [
([[1.0, 2.0, 3.0], [1.0, 2.0, 3.0]], roughpy.DPReal),
([[1, 2, 3], [1, 2, 3]], roughpy.DPReal),
(np.array([[1, 2, 3], [1, 2, 3]], dtype=np.int32), roughpy.DPReal),
(np.array([[1, 2, 3], [1, 2, 3]], dtype=np.float32), roughpy.SPReal),
(np.array([[1, 2, 3], [1, 2, 3]], dtype=np.float64), roughpy.DPReal),
]


@pytest.mark.parametrize('data,typ', TYPE_DEDUCTION_ARGS)
def test_ctor_type_deduction(data, typ):
stream = LieIncrementStream.from_increments(data, width=TYPE_DEDUCTION_WIDTH, depth=TYPE_DEDUCTION_DEPTH)

assert stream.dtype == typ
