@@ -44,14 +44,15 @@ def wave_net_activation(x):
     return keras.layers.multiply([tanh_out, sigm_out])
 
 
-def residual_block(x, s, i, activation, nb_filters, kernel_size, padding, dropout_rate=0, name=''):
-    # type: (Layer, int, int, str, int, int, str, float, str) -> Tuple[Layer, Layer]
+def residual_block(x, s, i, c, activation, nb_filters, kernel_size, padding, dropout_rate=0, name=''):
+    # type: (Layer, int, int, int, str, int, int, str, float, str) -> Tuple[Layer, Layer]
     """Defines the residual block for the WaveNet TCN
 
     Args:
         x: The previous layer in the model
         s: The stack index i.e. which stack in the overall TCN
         i: The dilation power of 2 we are using for this residual block
+        c: The index of this dilation in the dilation list, used to keep layer names unique when the same dilation appears more than once, e.g. [1, 1, 2, 4]
         activation: The name of the type of activation to use
         nb_filters: The number of convolutional filters to use in this block
         kernel_size: The size of the convolutional kernel
@@ -67,7 +68,7 @@ def residual_block(x, s, i, activation, nb_filters, kernel_size, padding, dropou
     original_x = x
     conv = Conv1D(filters=nb_filters, kernel_size=kernel_size,
                   dilation_rate=i, padding=padding,
-                  name=name + '_d_%s_conv_%d_tanh_s%d' % (padding, i, s))(x)
+                  name=name + '_d_%s_conv_%d-%d_tanh_s%d' % (padding, i, c, s))(x)
     if activation == 'norm_relu':
         x = Activation('relu')(conv)
         x = Lambda(channel_normalization)(x)
@@ -76,7 +77,7 @@ def residual_block(x, s, i, activation, nb_filters, kernel_size, padding, dropou
     else:
         x = Activation(activation)(conv)
 
-    x = SpatialDropout1D(dropout_rate, name=name + '_spatial_dropout1d_%d_s%d_%f' % (i, s, dropout_rate))(x)
+    x = SpatialDropout1D(dropout_rate, name=name + '_spatial_dropout1d_%d-%d_s%d_%f' % (i, c, s, dropout_rate))(x)
 
     # 1x1 conv.
     x = Convolution1D(nb_filters, 1, padding='same')(x)
@@ -156,8 +157,8 @@ def __call__(self, inputs):
         x = Convolution1D(self.nb_filters, 1, padding=self.padding, name=self.name + '_initial_conv')(x)
         skip_connections = []
         for s in range(self.nb_stacks):
-            for i in self.dilations:
-                x, skip_out = residual_block(x, s, i, self.activation, self.nb_filters,
+            for i, d in enumerate(self.dilations):
+                x, skip_out = residual_block(x, s, d, i, self.activation, self.nb_filters,
                                              self.kernel_size, self.padding, self.dropout_rate, name=self.name)
                 skip_connections.append(skip_out)
         if self.use_skip_connections:
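
For context on why the extra index is needed: Keras requires layer names to be unique within a model, so a dilation list with repeats such as `[1, 1, 2, 4]` made the old naming scheme produce the same layer name twice and fail at build time. Below is a minimal illustrative sketch of the naming logic only (plain Python, no Keras; the sample values `dilations`, `padding`, `s`, and `name` are assumptions, not part of the patch):

```python
# Illustrative sketch, not part of the patch: how the old vs. new
# naming schemes behave when the same dilation appears twice.
dilations = [1, 1, 2, 4]               # assumed example with a repeated dilation
padding, s, name = 'causal', 0, 'tcn'  # assumed sample values

# Old scheme: name depends only on the dilation value d.
old_names = [name + '_d_%s_conv_%d_tanh_s%d' % (padding, d, s)
             for d in dilations]

# New scheme: the enumerate index i is appended, so repeats stay unique.
new_names = [name + '_d_%s_conv_%d-%d_tanh_s%d' % (padding, d, i, s)
             for i, d in enumerate(dilations)]

print(len(set(old_names)))  # 3 -> a duplicate name; Keras would raise an error
print(len(set(new_names)))  # 4 -> every residual block gets a unique name
```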