Skip to content

Commit 4f7ca60

Browse files
author
Philippe Rémy
authored
Merge pull request #35 from philipperemy/issue_34
Small updates
2 parents bcc210a + a86b5b3 commit 4f7ca60

File tree

3 files changed

+9
-13
lines changed

3 files changed

+9
-13
lines changed

README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -92,7 +92,7 @@ model.fit(x, y) # Keras model.
9292

9393
### Arguments
9494

95-
`tcn.TCN(nb_filters=64, kernel_size=2, nb_stacks=1, dilations=None, activation='norm_relu', padding='causal', use_skip_connections=True, dropout_rate=0.0, return_sequences=True, name='tcn')`
95+
`tcn.TCN(nb_filters=64, kernel_size=2, nb_stacks=1, dilations=[1, 2, 4, 8, 16, 32], activation='norm_relu', padding='causal', use_skip_connections=True, dropout_rate=0.0, return_sequences=True, name='tcn')`
9696

9797
- `nb_filters`: Integer. The number of filters to use in the convolutional layers.
9898
- `kernel_size`: Integer. The size of the kernel to use in each convolutional layer.

setup.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22

33
setup(
44
name='keras-tcn',
5-
version='2.3.4',
5+
version='2.3.5',
66
description='Keras TCN',
77
author='Philippe Remy',
88
license='MIT',

tcn/tcn.py

Lines changed: 7 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -45,7 +45,7 @@ def wave_net_activation(x):
4545

4646

4747
def residual_block(x, s, i, activation, nb_filters, kernel_size, padding, dropout_rate=0, name=''):
48-
# type: (Layer, int, int, str, int, int, float, str) -> Tuple[Layer, Layer]
48+
# type: (Layer, int, int, str, int, int, str, float, str) -> Tuple[Layer, Layer]
4949
"""Defines the residual block for the WaveNet TCN
5050
5151
Args:
@@ -67,7 +67,7 @@ def residual_block(x, s, i, activation, nb_filters, kernel_size, padding, dropou
6767
original_x = x
6868
conv = Conv1D(filters=nb_filters, kernel_size=kernel_size,
6969
dilation_rate=i, padding=padding,
70-
name=name + '_dilated_conv_%d_tanh_s%d' % (i, s))(x)
70+
name=name + '_d_%s_conv_%d_tanh_s%d' % (padding, i, s))(x)
7171
if activation == 'norm_relu':
7272
x = Activation('relu')(conv)
7373
x = Lambda(channel_normalization)(x)
@@ -100,8 +100,10 @@ def is_power_of_two(num):
100100
class TCN:
101101
"""Creates a TCN layer.
102102
103+
Input shape:
104+
A tensor of shape (batch_size, timesteps, input_dim).
105+
103106
Args:
104-
input_layer: A tensor of shape (batch_size, timesteps, input_dim).
105107
nb_filters: The number of filters to use in the convolutional layers.
106108
kernel_size: The size of the kernel to use in each convolutional layer.
107109
dilations: The list of the dilations. Example is: [1, 2, 4, 8, 16, 32, 64].
@@ -121,7 +123,7 @@ def __init__(self,
121123
nb_filters=64,
122124
kernel_size=2,
123125
nb_stacks=1,
124-
dilations=None,
126+
dilations=[1, 2, 4, 8, 16, 32],
125127
activation='norm_relu',
126128
padding='causal',
127129
use_skip_connections=True,
@@ -139,12 +141,8 @@ def __init__(self,
139141
self.nb_filters = nb_filters
140142
self.padding = padding
141143

142-
# backwards incompatibility warning.
143-
# o = tcn.TCN(i, return_sequences=False) =>
144-
# o = tcn.TCN(return_sequences=False)(i)
145-
146144
if padding != 'causal' and padding != 'same':
147-
raise ValueError("Only 'causal' or 'same' paddings are compatible for this layer.")
145+
raise ValueError("Only 'causal' or 'same' padding are compatible for this layer.")
148146

149147
if not isinstance(nb_filters, int):
150148
print('An interface change occurred after the version 2.1.2.')
@@ -154,8 +152,6 @@ def __init__(self,
154152
raise Exception()
155153

156154
def __call__(self, inputs):
157-
if self.dilations is None:
158-
self.dilations = [1, 2, 4, 8, 16, 32]
159155
x = inputs
160156
x = Convolution1D(self.nb_filters, 1, padding=self.padding, name=self.name + '_initial_conv')(x)
161157
skip_connections = []

0 commit comments

Comments (0)