layers.py
import tensorflow as tf  # written against the TensorFlow 1.x API

# Activation function
# Supported types: None, 'relu', 'sqr', 'sqr_sigmoid', 'sigmoid', 'softmax'
def activate(input_layer, act='relu', name='activation'):
    if act is None:
        return input_layer
    if act == 'relu':
        return tf.nn.relu(input_layer, name)
    if act == 'sqr':
        return tf.square(input_layer, name)
    if act == 'sqr_sigmoid':
        return tf.nn.sigmoid(tf.square(input_layer, name))
    if act == 'sigmoid':
        return tf.nn.sigmoid(input_layer, name)
    if act == 'softmax':
        return tf.nn.softmax(input_layer)
    raise ValueError('Unsupported activation: %r' % act)
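
# Usage sketch (illustrative, not part of the original file): applying the
# squared-sigmoid activation to some existing logits tensor `logits`:
#   out = activate(logits, act='sqr_sigmoid', name='act_out')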
# Fully connected custom layer for PSO
# Supported activation function types: None, relu, sqr, sqr_sigmoid, sigmoid, softmax
def fc(input_tensor, n_output_units, scope,
       activation_fn='softmax', uniform=False):
    shape = [input_tensor.get_shape().as_list()[-1], n_output_units]
    # Note: the activation_fn argument is overridden below; 3-unit (output)
    # layers get softmax, every other layer gets sigmoid
    if n_output_units == 3:
        activation_fn = 'softmax'
    else:
        activation_fn = 'sigmoid'
    # Use the scope specified
    with tf.variable_scope(scope):
        # Init weights
        if uniform:
            weights = tf.Variable(tf.random_uniform(
                shape=shape,
                dtype=tf.float32,
                minval=-10,
                maxval=10),
                name='weights')
        else:
            weights = tf.Variable(tf.truncated_normal(
                shape=shape,
                mean=0.0,
                stddev=0.1,
                dtype=tf.float32),
                name='weights')
        # Init biases
        biases = tf.Variable(
            tf.zeros(shape=[n_output_units]), name='biases', dtype=tf.float32)
        # Particle best: per-layer copies of the best weights/biases seen so far
        pbest_w = tf.Variable(weights.initialized_value(), name='pbest_w')
        pbest_b = tf.Variable(biases.initialized_value(), name='pbest_b')
        # Velocities, initialized to small random values
        vel_weights = tf.Variable(tf.random_uniform(
            shape=shape,
            dtype=tf.float32,
            minval=-0.001,
            maxval=0.001),
            name='vel_weights')
        vel_biases = tf.Variable(tf.random_uniform(
            shape=[n_output_units],
            dtype=tf.float32,
            minval=-0.001,
            maxval=0.001),
            name='vel_biases')
        # Perform the actual feedforward; note the bias add is commented out
        act = tf.matmul(input_tensor, weights)  # + biases
        pso_tuple = [weights, biases,
                     pbest_w, pbest_b,
                     vel_weights, vel_biases]
        # Activate and return
        return activate(act, activation_fn), pso_tuple
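
# Usage sketch (illustrative, not part of the original file): stacking two fc
# layers. Because of the override above, the 8-unit hidden layer gets sigmoid
# and the 3-unit output layer gets softmax, whatever activation_fn is passed:
#   hidden, pso_h = fc(x, 8, scope='hidden')       # sigmoid
#   logits, pso_o = fc(hidden, 3, scope='output')  # softmax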
# Magnitude clipper: clamps tensor values to the range [-magmax, magmax]
# magmax can be either a Tensor or a float
def maxclip(tensor, magmax):
    # Assertion commented out so magmax can be either a Tensor or a number
    # assert magmax > 0, "magmax argument in maxclip must be positive"
    return tf.minimum(tf.maximum(tensor, -magmax), magmax)
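
# A minimal end-to-end sketch (an assumption, not part of the original file)
# that exercises fc() and maxclip() under TensorFlow 1.x. The input width (4),
# the scope name 'demo', and the clip bound 0.01 are illustrative choices.
if __name__ == '__main__':
    import numpy as np

    x = tf.placeholder(tf.float32, shape=[None, 4], name='x')
    out, pso = fc(x, 3, scope='demo')  # 3 output units -> softmax
    weights, biases, pbest_w, pbest_b, vel_w, vel_b = pso
    # Clamp the weight velocity to [-0.01, 0.01], as a PSO update step would
    clipped_vel = maxclip(vel_w, 0.01)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        y, v = sess.run([out, clipped_vel],
                        feed_dict={x: np.ones((2, 4), np.float32)})
        print(y.shape, v.shape)  # (2, 3) and (4, 3)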