
Implement Dot and BatchedDot in PyTensor #878

Merged
merged 13 commits on Jul 18, 2024
1 change: 1 addition & 0 deletions pytensor/link/__init__.py
@@ -0,0 +1 @@
from pytensor.link.pytorch.linker import PytorchLinker
3 changes: 3 additions & 0 deletions pytensor/link/pytorch/dispatch/__init__.py
@@ -2,8 +2,11 @@
from pytensor.link.pytorch.dispatch.basic import pytorch_funcify, pytorch_typify

# # Load dispatch specializations
import pytensor.link.pytorch.dispatch.blas
import pytensor.link.pytorch.dispatch.scalar
import pytensor.link.pytorch.dispatch.elemwise
import pytensor.link.pytorch.dispatch.math
import pytensor.link.pytorch.dispatch.extra_ops
import pytensor.link.pytorch.dispatch.sort

# isort: on
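These bare imports are load-bearing: importing each submodule executes its @pytorch_funcify.register(...) decorators, which is what makes the new translations visible to the dispatcher. A minimal sketch of the mechanism, assuming pytorch_funcify is a standard functools.singledispatch-style registry (as the decorator usage in this PR suggests):

import pytensor.link.pytorch.dispatch.blas  # noqa: F401  (runs the register() decorators)

from pytensor.link.pytorch.dispatch import pytorch_funcify
from pytensor.tensor.blas import BatchedDot

# After the import above, dispatching on a BatchedDot instance resolves to
# the newly registered handler rather than a generic fallback.
torch_fn = pytorch_funcify(BatchedDot())
print(torch_fn.__name__)  # "batched_dot"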
14 changes: 14 additions & 0 deletions pytensor/link/pytorch/dispatch/blas.py
@@ -0,0 +1,14 @@
import torch

from pytensor.link.pytorch.dispatch import pytorch_funcify
from pytensor.tensor.blas import BatchedDot


@pytorch_funcify.register(BatchedDot)
def pytorch_funcify_BatchedDot(op, **kwargs):
    def batched_dot(a, b):
        if a.shape[0] != b.shape[0]:
            raise TypeError("Shapes must match in the 0-th dimension")
        return torch.bmm(a, b)

    return batched_dot
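For context, a minimal sketch of how this dispatch gets exercised end to end (the graph construction mirrors the tests below; Mode(PytorchLinker(), None) means the PyTorch linker with no rewrites):

import numpy as np

import pytensor
from pytensor.compile.mode import Mode
from pytensor.configdefaults import config
from pytensor.link.pytorch.linker import PytorchLinker
from pytensor.tensor import blas as pt_blas
from pytensor.tensor.type import tensor3

a = tensor3("a")
b = tensor3("b")
out = pt_blas.BatchedDot()(a, b)

# Compiling with the PyTorch linker routes BatchedDot through torch.bmm.
fn = pytensor.function([a, b], out, mode=Mode(PytorchLinker(), None))

a_val = np.random.randn(10, 5, 3).astype(config.floatX)
b_val = np.random.randn(10, 3, 2).astype(config.floatX)
print(np.asarray(fn(a_val, b_val)).shape)  # (10, 5, 2)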
12 changes: 12 additions & 0 deletions pytensor/link/pytorch/dispatch/math.py
@@ -0,0 +1,12 @@
import torch

from pytensor.link.pytorch.dispatch import pytorch_funcify
from pytensor.tensor.math import Dot


@pytorch_funcify.register(Dot)
def pytorch_funcify_Dot(op, **kwargs):
    def dot(x, y):
        return torch.matmul(x, y)

    return dot
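Note that torch.matmul (rather than torch.dot, which only accepts 1-D inputs) covers all the vector/matrix combinations pytensor's Dot supports. A quick pure-torch illustration:

import torch

x = torch.tensor([1.0, 2.0])
A = torch.tensor([[6.0, 3.0], [3.0, 0.0]])

print(torch.matmul(A, A))  # matrix @ matrix -> 2-D result
print(torch.matmul(x, A))  # vector @ matrix -> 1-D result
print(torch.matmul(A, x))  # matrix @ vector -> 1-D result
print(torch.matmul(x, x))  # vector @ vector -> 0-D scalar tensor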
35 changes: 35 additions & 0 deletions tests/link/pytorch/test_blas.py
@@ -0,0 +1,35 @@
import numpy as np
import pytest

from pytensor.compile.mode import Mode
from pytensor.configdefaults import config
from pytensor.graph.fg import FunctionGraph
from pytensor.graph.op import get_test_value
from pytensor.link.pytorch.linker import PytorchLinker
from pytensor.tensor import blas as pt_blas
from pytensor.tensor.type import tensor3
from tests.link.pytorch.test_basic import compare_pytorch_and_py


def test_pytorch_BatchedDot():
    # tensor3 . tensor3
    a = tensor3("a")
    a.tag.test_value = (
        np.linspace(-1, 1, 10 * 5 * 3).astype(config.floatX).reshape((10, 5, 3))
    )
Member comment: We are getting rid of the test_value machinery. Just pass these directly to the test function; there is no point in putting them in the tag only to retrieve them again.
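A minimal sketch of what that suggestion would look like here (same values, no tag.test_value / get_test_value round-trip):

a = tensor3("a")
b = tensor3("b")
a_val = np.linspace(-1, 1, 10 * 5 * 3).astype(config.floatX).reshape((10, 5, 3))
b_val = np.linspace(1, -1, 10 * 3 * 2).astype(config.floatX).reshape((10, 3, 2))
out = pt_blas.BatchedDot()(a, b)
fgraph = FunctionGraph([a, b], [out])
compare_pytorch_and_py(fgraph, [a_val, b_val])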

    b = tensor3("b")
    b.tag.test_value = (
        np.linspace(1, -1, 10 * 3 * 2).astype(config.floatX).reshape((10, 3, 2))
    )
    out = pt_blas.BatchedDot()(a, b)
    fgraph = FunctionGraph([a, b], [out])
    pytensor_pytorch_fn, _ = compare_pytorch_and_py(
        fgraph, [get_test_value(i) for i in fgraph.inputs]
    )

    # A dimension mismatch should raise a TypeError for compatibility
    inputs = [get_test_value(a)[:-1], get_test_value(b)]
    pytorch_mode_no_rewrites = Mode(PytorchLinker(), None)
    pytensor_pytorch_fn.mode = pytorch_mode_no_rewrites
Member comment: This is not a thing you can do (or rather, it has no effect). Once a function is compiled, that's it; the mode plays no role anymore.

Suggested change (remove these two lines):
    pytorch_mode_no_rewrites = Mode(PytorchLinker(), None)
    pytensor_pytorch_fn.mode = pytorch_mode_no_rewrites
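In other words, the linker and rewrites are fixed at compile time; a hypothetical sketch, assuming a rewrite-free PyTorch function were actually wanted, is to compile it that way from the start:

import pytensor

# Hypothetical: the mode must be passed when compiling; assigning to
# fn.mode after the fact has no effect on the already-compiled function.
fn_no_rewrites = pytensor.function([a, b], out, mode=Mode(PytorchLinker(), None))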

    with pytest.raises(TypeError):
        pytensor_pytorch_fn(*inputs)
30 changes: 30 additions & 0 deletions tests/link/pytorch/test_math.py
@@ -0,0 +1,30 @@
import numpy as np

from pytensor.configdefaults import config
from pytensor.graph.fg import FunctionGraph
from pytensor.graph.op import get_test_value
from pytensor.tensor.type import matrix, scalar, vector
from tests.link.pytorch.test_basic import compare_pytorch_and_py


def test_pytorch_dot():
    y = vector("y")
    y.tag.test_value = np.r_[1.0, 2.0].astype(config.floatX)
    x = vector("x")
    x.tag.test_value = np.r_[3.0, 4.0].astype(config.floatX)
    A = matrix("A")
    A.tag.test_value = np.array([[6, 3], [3, 0]], dtype=config.floatX)
    alpha = scalar("alpha")
    alpha.tag.test_value = np.array(3.0, dtype=config.floatX)
    beta = scalar("beta")
    beta.tag.test_value = np.array(5.0, dtype=config.floatX)

    # 2D * 2D
    out = A.dot(A * alpha) + beta * A
    fgraph = FunctionGraph([A, alpha, beta], [out])
    compare_pytorch_and_py(fgraph, [get_test_value(i) for i in fgraph.inputs])

    # 1D * 2D and 1D * 1D
    out = y.dot(alpha * A).dot(x) + beta * y
    fgraph = FunctionGraph([y, x, A, alpha, beta], [out])
    compare_pytorch_and_py(fgraph, [get_test_value(i) for i in fgraph.inputs])