import math

import tensorflow as tf

NUM_CLASSES = 1   # Single output unit: the network predicts one value
WINDOW_SIZE = 10  # Window size (number of input samples per example)

def inference_one_hidden_layer(inputs, hidden1_units):
    # Hidden 1
    with tf.name_scope('hidden1'):
        weights = tf.Variable(
            tf.truncated_normal([WINDOW_SIZE, hidden1_units],
                                stddev=1.0 / math.sqrt(float(WINDOW_SIZE))),
            name='weights')
        biases = tf.Variable(tf.zeros([hidden1_units]),
                             name='biases')
        hidden1 = tf.nn.softmax(tf.matmul(inputs, weights) + biases)
    # Linear
    with tf.name_scope('identity'):
        weights = tf.Variable(
            tf.truncated_normal([hidden1_units, NUM_CLASSES],
                                stddev=1.0 / math.sqrt(float(hidden1_units))),
            name='weights')
        biases = tf.Variable(tf.zeros([NUM_CLASSES]),
                             name='biases')
        logits = tf.matmul(hidden1, weights) + biases
    return logits
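
# A minimal usage sketch, assuming a placeholder of windowed inputs (the
# placeholder and the hidden size below are illustrative, not from the
# original pipeline):
#
#   inputs = tf.placeholder(tf.float32, shape=[None, WINDOW_SIZE])
#   logits = inference_one_hidden_layer(inputs, hidden1_units=20)
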
# Alternative two-hidden-layer version using softmax activations in both
# hidden layers; kept disabled in favor of the implementations below.
# def inference_two_hidden_layers(inputs, hidden1_units, hidden2_units):
#
#     with tf.name_scope('hidden1'):
#         weights = tf.Variable(tf.truncated_normal([WINDOW_SIZE, hidden1_units], stddev=1.0 / math.sqrt(float(WINDOW_SIZE))), name='weights')
#         biases = tf.Variable(tf.zeros([hidden1_units]), name='biases')
#         hidden1 = tf.nn.softmax(tf.matmul(inputs, weights) + biases)
#
#     with tf.name_scope('hidden2'):
#         weights = tf.Variable(tf.truncated_normal([hidden1_units, hidden2_units], stddev=1.0 / math.sqrt(float(hidden1_units))), name='weights')
#         biases = tf.Variable(tf.zeros([hidden2_units]), name='biases')
#         hidden2 = tf.nn.softmax(tf.matmul(hidden1, weights) + biases)
#
#     with tf.name_scope('identity'):
#         weights = tf.Variable(tf.truncated_normal([hidden2_units, NUM_CLASSES], stddev=1.0 / math.sqrt(float(hidden2_units))), name='weights')
#         biases = tf.Variable(tf.zeros([NUM_CLASSES]), name='biases')
#         logits = tf.matmul(hidden2, weights) + biases
#
#     return logits
def inference_two_hidden_layers_simplifcado(images, hidden1_units, hidden2_units):
    # Simplified two-hidden-layer network using ReLU activations.
    with tf.name_scope('hidden1'):
        weights = tf.Variable(
            tf.truncated_normal([WINDOW_SIZE, hidden1_units],
                                stddev=1.0 / math.sqrt(float(WINDOW_SIZE))),
            name='weights')
        biases = tf.Variable(tf.zeros([hidden1_units]), name='biases')
        hidden1 = tf.nn.relu(tf.matmul(images, weights) + biases)
    with tf.name_scope('hidden2'):
        weights = tf.Variable(
            tf.truncated_normal([hidden1_units, hidden2_units],
                                stddev=1.0 / math.sqrt(float(hidden1_units))),
            name='weights')
        biases = tf.Variable(tf.zeros([hidden2_units]), name='biases')
        hidden2 = tf.nn.relu(tf.matmul(hidden1, weights) + biases)
    with tf.name_scope('identity'):
        weights = tf.Variable(
            tf.truncated_normal([hidden2_units, NUM_CLASSES],
                                stddev=1.0 / math.sqrt(float(hidden2_units))),
            name='weights')
        biases = tf.Variable(tf.zeros([NUM_CLASSES]), name='biases')
        logits = tf.matmul(hidden2, weights) + biases
    return logits
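
# Any of the inference builders above and below can be swapped in when
# assembling the graph; a hypothetical call with illustrative layer sizes:
#
#   logits = inference_two_hidden_layers_simplifcado(inputs, 20, 10)
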
def inference_two_hidden_layers(images, hidden1_units, hidden2_units):
    """Build the model up to where it may be used for inference.

    Adapted from the TensorFlow MNIST tutorial; here the inputs are
    windows of the series rather than images, and the hidden layers
    use sigmoid activations.

    Args:
        images: Inputs placeholder of shape [batch_size, WINDOW_SIZE].
        hidden1_units: Size of the first hidden layer.
        hidden2_units: Size of the second hidden layer.

    Returns:
        logits: Output tensor with the computed logits.
    """
    # Hidden 1
    with tf.name_scope('hidden1'):
        weights = tf.Variable(
            tf.truncated_normal([WINDOW_SIZE, hidden1_units],
                                stddev=1.0 / math.sqrt(float(WINDOW_SIZE))),
            name='weights')
        biases = tf.Variable(tf.zeros([hidden1_units]),
                             name='biases')
        hidden1 = tf.nn.sigmoid(tf.matmul(images, weights) + biases)
    # Hidden 2
    with tf.name_scope('hidden2'):
        weights = tf.Variable(
            tf.truncated_normal([hidden1_units, hidden2_units],
                                stddev=1.0 / math.sqrt(float(hidden1_units))),
            name='weights')
        biases = tf.Variable(tf.zeros([hidden2_units]),
                             name='biases')
        hidden2 = tf.nn.sigmoid(tf.matmul(hidden1, weights) + biases)
    # Linear
    with tf.name_scope('identity'):
        weights = tf.Variable(
            tf.truncated_normal([hidden2_units, NUM_CLASSES],
                                stddev=1.0 / math.sqrt(float(hidden2_units))),
            name='weights')
        biases = tf.Variable(tf.zeros([NUM_CLASSES]),
                             name='biases')
        logits = tf.matmul(hidden2, weights) + biases
    return logits
def mse_cost(logits, outputs):
    # Mean squared error between predictions and targets.
    mse = tf.reduce_mean(tf.pow(tf.sub(logits, outputs), 2.0))
    return mse
def crossentropy_cost(logits, outputs):
    # Binary cross-entropy; the leading minus sign makes this a loss to be
    # minimized. `logits` must already lie in (0, 1) for the logs to be valid.
    # crossentropy = -tf.reduce_sum(outputs * tf.log(logits))
    crossentropy = -tf.reduce_mean(outputs * tf.log(logits)
                                   + (1 - outputs) * tf.log(1 - logits))
    return crossentropy
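
# A hedged variant, not in the original file: clipping the predictions away
# from 0 and 1 before taking logs avoids NaN losses. The function name and
# epsilon are assumptions for illustration.
def crossentropy_cost_clipped(logits, outputs, eps=1e-7):
    # Keep predictions inside (eps, 1 - eps) so tf.log never sees 0.
    clipped = tf.clip_by_value(logits, eps, 1.0 - eps)
    crossentropy = -tf.reduce_mean(outputs * tf.log(clipped)
                                   + (1 - outputs) * tf.log(1 - clipped))
    return crossentropy
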
def training_simplificado(loss, learning_rate):
    # Plain gradient descent without summaries or a global step counter.
    # tf.scalar_summary(loss.op.name, loss)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    train_op = optimizer.minimize(loss)
    return train_op
def training(loss, learning_rate):
    """Set up the training Ops.

    Creates a summarizer to track the loss over time in TensorBoard.
    Creates an optimizer and applies the gradients to all trainable variables.

    The Op returned by this function is what must be passed to the
    `sess.run()` call to cause the model to train.

    Args:
        loss: Loss tensor, from mse_cost() or crossentropy_cost().
        learning_rate: The learning rate to use for gradient descent.

    Returns:
        train_op: The Op for training.
    """
    # Add a scalar summary for the snapshot loss.
    tf.scalar_summary(loss.op.name, loss)
    # Create the gradient descent optimizer with the given learning rate.
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    # Create a variable to track the global step.
    global_step = tf.Variable(0, name='global_step', trainable=False)
    # Use the optimizer to apply the gradients that minimize the loss
    # (and also increment the global step counter) as a single training step.
    train_op = optimizer.minimize(loss, global_step=global_step)
    return train_op
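
# For the scalar summary above to reach TensorBoard, the caller also needs a
# merged summary op and a writer; a minimal TF 0.x sketch (the log directory
# is an assumption):
#
#   summary_op = tf.merge_all_summaries()
#   writer = tf.train.SummaryWriter('/tmp/nn_logs', sess.graph)
#   summary_str = sess.run(summary_op, feed_dict=feed_dict)
#   writer.add_summary(summary_str, step)
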
def evaluation(logits, outputs):
    # Evaluation metric: the same mean squared error used by mse_cost().
    mse = tf.reduce_mean(tf.pow(tf.sub(logits, outputs), 2.0))
    return mse
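
# End-to-end sketch: build the graph and fit synthetic data. This block is an
# illustration under stated assumptions (random data, arbitrary layer sizes
# and learning rate) and targets the same TF 0.x API as the rest of the file;
# it is not part of the original training pipeline.
if __name__ == '__main__':
    import numpy as np

    inputs = tf.placeholder(tf.float32, shape=[None, WINDOW_SIZE])
    outputs = tf.placeholder(tf.float32, shape=[None, NUM_CLASSES])

    logits = inference_two_hidden_layers_simplifcado(inputs, 20, 10)
    loss = mse_cost(logits, outputs)
    train_op = training_simplificado(loss, learning_rate=0.01)

    # Synthetic batch of 100 windows and targets, just to exercise the graph.
    x = np.random.rand(100, WINDOW_SIZE).astype(np.float32)
    y = np.random.rand(100, NUM_CLASSES).astype(np.float32)

    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())
        for step in range(100):
            _, loss_value = sess.run([train_op, loss],
                                     feed_dict={inputs: x, outputs: y})
            if step % 10 == 0:
                print('step %d, loss %.6f' % (step, loss_value))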