Commit f5a07fc

Author: Philippe Rémy

Merge pull request #40 from philipperemy/issue_39

dilation name to make it unique
2 parents: d6f5d8e + f0bfea6

File tree

1 file changed: +7 −6 lines

tcn/tcn.py

Lines changed: 7 additions & 6 deletions
@@ -44,14 +44,15 @@ def wave_net_activation(x):
     return keras.layers.multiply([tanh_out, sigm_out])
 
 
-def residual_block(x, s, i, activation, nb_filters, kernel_size, padding, dropout_rate=0, name=''):
-    # type: (Layer, int, int, str, int, int, str, float, str) -> Tuple[Layer, Layer]
+def residual_block(x, s, i, c, activation, nb_filters, kernel_size, padding, dropout_rate=0, name=''):
+    # type: (Layer, int, int, int, str, int, int, str, float, str) -> Tuple[Layer, Layer]
     """Defines the residual block for the WaveNet TCN
 
     Args:
         x: The previous layer in the model
         s: The stack index i.e. which stack in the overall TCN
         i: The dilation power of 2 we are using for this residual block
+        c: The dilation name to make it unique. In case we have same dilation twice: [1, 1, 2, 4].
         activation: The name of the type of activation to use
         nb_filters: The number of convolutional filters to use in this block
         kernel_size: The size of the convolutional kernel
@@ -67,7 +68,7 @@ def residual_block(x, s, i, activation, nb_filters, kernel_size, padding, dropou
     original_x = x
     conv = Conv1D(filters=nb_filters, kernel_size=kernel_size,
                   dilation_rate=i, padding=padding,
-                  name=name + '_d_%s_conv_%d_tanh_s%d' % (padding, i, s))(x)
+                  name=name + '_d_%s_conv_%d-%d_tanh_s%d' % (padding, i, c, s))(x)
     if activation == 'norm_relu':
         x = Activation('relu')(conv)
         x = Lambda(channel_normalization)(x)
@@ -76,7 +77,7 @@ def residual_block(x, s, i, activation, nb_filters, kernel_size, padding, dropou
     else:
         x = Activation(activation)(conv)
 
-    x = SpatialDropout1D(dropout_rate, name=name + '_spatial_dropout1d_%d_s%d_%f' % (i, s, dropout_rate))(x)
+    x = SpatialDropout1D(dropout_rate, name=name + '_spatial_dropout1d_%d-%d_s%d_%f' % (i, c, s, dropout_rate))(x)
 
     # 1x1 conv.
     x = Convolution1D(nb_filters, 1, padding='same')(x)
@@ -156,8 +157,8 @@ def __call__(self, inputs):
         x = Convolution1D(self.nb_filters, 1, padding=self.padding, name=self.name + '_initial_conv')(x)
         skip_connections = []
         for s in range(self.nb_stacks):
-            for i in self.dilations:
-                x, skip_out = residual_block(x, s, i, self.activation, self.nb_filters,
+            for i, d in enumerate(self.dilations):
+                x, skip_out = residual_block(x, s, d, i, self.activation, self.nb_filters,
                                              self.kernel_size, self.padding, self.dropout_rate, name=self.name)
                 skip_connections.append(skip_out)
         if self.use_skip_connections:
