Fix dispatch of Elemwised ScalarLoop in Numba backend #1137

Merged · 1 commit · Jan 1, 2025
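
This PR fixes `numba_funcify` dispatch for an `Elemwise`-wrapped `ScalarLoop`. A minimal sketch of the kind of graph this enables, mirroring the new test added below (the values, the `mode="NUMBA"` compilation, and the object-mode fallback are my reading of that test, not claims beyond it):

```python
# Sketch mirroring the test added in this PR: an Elemwise-wrapped ScalarLoop
# compiled with the Numba backend.
import numpy as np

import pytensor
import pytensor.tensor as pt
from pytensor.scalar import float64
from pytensor.tensor.elemwise import Elemwise

a = float64("a")
scalar_loop = pytensor.scalar.ScalarLoop([a], [a + a])  # state a doubles each step

x = pt.tensor("x", shape=(3,))
out = Elemwise(scalar_loop)(3, x)  # first input is the step count

# Per the test, this currently falls back to object mode (emits a UserWarning).
fn = pytensor.function([x], out, mode="NUMBA")
print(fn(np.array([1.0, 2.0, 3.0])))  # three doublings -> [ 8. 16. 24.]
```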
12 changes: 3 additions & 9 deletions pytensor/link/numba/dispatch/elemwise.py
```diff
@@ -30,20 +30,19 @@
     OR,
     XOR,
     Add,
-    Composite,
     IntDiv,
     Mul,
     ScalarMaximum,
     ScalarMinimum,
     Sub,
     TrueDiv,
+    get_scalar_type,
     scalar_maximum,
 )
 from pytensor.scalar.basic import add as add_as
 from pytensor.tensor.elemwise import CAReduce, DimShuffle, Elemwise
 from pytensor.tensor.math import Argmax, MulWithoutZeros, Sum
 from pytensor.tensor.special import LogSoftmax, Softmax, SoftmaxGrad
-from pytensor.tensor.type import scalar


 @singledispatch
```
```diff
@@ -348,13 +347,8 @@ def axis_apply_fn(x):

 @numba_funcify.register(Elemwise)
 def numba_funcify_Elemwise(op, node, **kwargs):
-    # Creating a new scalar node is more involved and unnecessary
-    # if the scalar_op is composite, as the fgraph already contains
-    # all the necessary information.
-    scalar_node = None
-    if not isinstance(op.scalar_op, Composite):
-        scalar_inputs = [scalar(dtype=input.dtype) for input in node.inputs]
-        scalar_node = op.scalar_op.make_node(*scalar_inputs)
+    scalar_inputs = [get_scalar_type(dtype=input.dtype)() for input in node.inputs]
+    scalar_node = op.scalar_op.make_node(*scalar_inputs)

     scalar_op_fn = numba_funcify(
         op.scalar_op,
```

Review comment from the PR author on the removed comment lines: "This was not true (at least not anymore)"
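
The substance of the fix: scalar inputs for `make_node` are now built with `get_scalar_type(dtype)()`, which yields a genuinely scalar-typed variable, instead of with `pytensor.tensor.type.scalar`, which builds a 0-d tensor variable. A small sketch of that distinction, using only the two constructors visible in the diff:

```python
# Sketch of the distinction driving the fix, using the two constructors from the diff.
from pytensor.scalar.basic import get_scalar_type
from pytensor.tensor.type import scalar

s = get_scalar_type(dtype="float64")()  # variable with a ScalarType (what make_node expects)
t = scalar(dtype="float64")             # 0-d TensorVariable (what the old code built)

print(type(s.type).__name__)  # ScalarType
print(type(t.type).__name__)  # TensorType
```

With genuine scalar inputs, `make_node` works uniformly for every scalar op, including `Composite` (per the author's review note above), so the special case could be dropped.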
6 changes: 3 additions & 3 deletions tests/link/numba/test_basic.py
```diff
@@ -267,11 +267,11 @@ def assert_fn(x, y):
         x, y
     )

-    if isinstance(fgraph, tuple):
-        fn_inputs, fn_outputs = fgraph
-    else:
+    if isinstance(fgraph, FunctionGraph):
         fn_inputs = fgraph.inputs
         fn_outputs = fgraph.outputs
+    else:
+        fn_inputs, fn_outputs = fgraph

     fn_inputs = [i for i in fn_inputs if not isinstance(i, SharedVariable)]
```
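
With this reordering, the helper special-cases `FunctionGraph` and treats anything else as an `(inputs, outputs)` pair, which is what the new `ScalarLoop` test passes. A hypothetical usage sketch (the graph and values are illustrative, and the helper must be imported from the repo's test suite):

```python
# Hypothetical usage of the updated helper; run from the pytensor repo root
# so the tests package is importable.
import numpy as np

import pytensor.tensor as pt
from pytensor.graph.fg import FunctionGraph
from tests.link.numba.test_basic import compare_numba_and_py

x = pt.vector("x")
out = x * 2
x_val = np.arange(3, dtype=x.dtype)

compare_numba_and_py(FunctionGraph([x], [out]), (x_val,))  # a FunctionGraph ...
compare_numba_and_py(([x], [out]), (x_val,))               # ... or an (inputs, outputs) pair
```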
17 changes: 16 additions & 1 deletion tests/link/numba/test_elemwise.py
```diff
@@ -15,7 +15,8 @@
 from pytensor.gradient import grad
 from pytensor.graph.basic import Constant
 from pytensor.graph.fg import FunctionGraph
-from pytensor.tensor.elemwise import CAReduce, DimShuffle
+from pytensor.scalar import float64
+from pytensor.tensor.elemwise import CAReduce, DimShuffle, Elemwise
 from pytensor.tensor.math import All, Any, Max, Min, Prod, ProdWithoutZeros, Sum
 from pytensor.tensor.special import LogSoftmax, Softmax, SoftmaxGrad
 from tests.link.numba.test_basic import (
@@ -691,3 +692,17 @@ def test_numba_careduce_benchmark(axis, c_contiguous, benchmark):
     return careduce_benchmark_tester(
         axis, c_contiguous, mode="NUMBA", benchmark=benchmark
     )
+
+
+def test_scalar_loop():
+    a = float64("a")
+    scalar_loop = pytensor.scalar.ScalarLoop([a], [a + a])
+
+    x = pt.tensor("x", shape=(3,))
+    elemwise_loop = Elemwise(scalar_loop)(3, x)
+
+    with pytest.warns(UserWarning, match="object mode"):
+        compare_numba_and_py(
+            ([x], [elemwise_loop]),
+            (np.array([1, 2, 3], dtype="float64"),),
+        )
```
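
For reference, my reading of what the test's loop computes: `ScalarLoop([a], [a + a])` doubles its carried state once per step, and the leading `3` passed to the `Elemwise` node is the step count, so the output should equal `x * 2**3`. In plain NumPy:

```python
# Plain-NumPy equivalent of the loop in the test above (my reading of ScalarLoop):
import numpy as np

x = np.array([1.0, 2.0, 3.0])
state = x.copy()
for _ in range(3):         # n_steps = 3, the first input to the Elemwise node
    state = state + state  # the loop body: a -> a + a
assert np.allclose(state, x * 2**3)
```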