add backward tests
taoleicn committed May 18, 2021
1 parent 26e2579 commit 1ca60b9
86 changes: 85 additions & 1 deletion test/sru/test_sru.py
@@ -19,7 +19,7 @@
@pytest.mark.parametrize("compat", [False, True])
@pytest.mark.parametrize("bidirectional", [False, True])
@pytest.mark.parametrize("layer_norm", [False, True])
-def test_cell(cuda, with_grad, compat, bidirectional, layer_norm):
+def test_sru(cuda, with_grad, compat, bidirectional, layer_norm):
torch.manual_seed(123)
if cuda:
torch.backends.cudnn.deterministic = True
@@ -76,3 +76,87 @@ def cell_to_emb(cell, batch_size):
else:
with torch.no_grad():
run()


@pytest.mark.parametrize(
"cuda",
[
False,
pytest.param(
True,
marks=pytest.mark.skipif(
not torch.cuda.is_available(), reason="no cuda available"
),
),
],
)
@pytest.mark.parametrize("bidirectional", [False, True])
@pytest.mark.parametrize("layer_norm", [False, True])
@pytest.mark.parametrize("normalize_after", [False, True])
@pytest.mark.parametrize("rescale", [False, True])
@pytest.mark.parametrize("has_skip_term", [False, True])
def test_sru_backward_simple(cuda, bidirectional, layer_norm, normalize_after, rescale, has_skip_term):
torch.manual_seed(123)
if cuda:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False

input_length = 3
batch_size = 5
input_size = 4
hidden_size = 2
encoder = sru.SRU(input_size, hidden_size,
bidirectional=bidirectional,
layer_norm=layer_norm,
normalize_after=normalize_after,
rescale=rescale,
has_skip_term=has_skip_term)
if cuda:
encoder = encoder.cuda()

def run(x):
if cuda:
x = x.cuda()
output, state = encoder(x)
output.mean().backward()

# test batch size > 1
input_data = torch.rand(input_length, batch_size, input_size)
run(input_data)


@pytest.mark.skipif(not torch.cuda.is_available(), reason='CUDA not available')
@pytest.mark.parametrize("bidirectional", [False, True])
@pytest.mark.parametrize("layer_norm", [False, True])
@pytest.mark.parametrize("normalize_after", [False, True])
def test_sru_backward(bidirectional, layer_norm, normalize_after):
eps = 1e-4
torch.manual_seed(123)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False

input_length = 3
batch_size = 5
input_size = 4
hidden_size = 2
encoder = sru.SRU(input_size, hidden_size,
bidirectional=bidirectional,
layer_norm=layer_norm,
normalize_after=normalize_after)
x = torch.randn(input_length, batch_size, input_size)

# backward in CPU mode
h, c = encoder(x)
h.sum().backward()
grads = [p.grad.clone() for p in encoder.parameters() if p.requires_grad]

# backward in GPU mode
encoder.zero_grad()
encoder, x = encoder.cuda(), x.cuda()
h_, c_ = encoder(x)
h_.sum().backward()
grads_ = [p.grad.cpu().clone() for p in encoder.parameters() if p.requires_grad]

assert len(grads) == len(grads_)
for g1, g2 in zip(grads, grads_):
assert (g1 - g2).abs().max() <= eps
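
Beyond comparing CPU and GPU gradients, the backward pass could also be checked numerically against finite differences. A rough sketch (not part of this commit) using torch.autograd.gradcheck, assuming the CPU path of sru.SRU accepts double-precision inputs:

import torch
import sru

torch.manual_seed(123)
# Double precision keeps the finite-difference comparison numerically stable.
encoder = sru.SRU(4, 2).double()
x = torch.randn(3, 5, 4, dtype=torch.float64, requires_grad=True)

# gradcheck compares the analytic gradient of the output w.r.t. x against
# a numerical estimate and returns True (or raises) on mismatch.
assert torch.autograd.gradcheck(lambda inp: encoder(inp)[0], (x,), eps=1e-6, atol=1e-4)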

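For reference, the new tests can be run in isolation with pytest's keyword filter; a minimal sketch, assuming pytest is installed and the repository root is the working directory:

import pytest

# "-k backward" selects test_sru_backward and test_sru_backward_simple
# across their full parameter grids; "-v" prints one line per case.
pytest.main(["test/sru/test_sru.py", "-k", "backward", "-v"])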