Autoencoder.py
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
# Load the data
mnist = input_data.read_data_sets('MNIST/', one_hot=True)
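# Note: tensorflow.examples.tutorials.mnist ships only with TensorFlow 1.x;
# this script also relies on tf.placeholder and tf.Session, so it assumes a
# TF 1.x environment (under TF 2.x it would need tf.compat.v1 plus another
# MNIST loader such as tf.keras.datasets.mnist).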
# Setting up Parameters
learning_rate = 0.001
training_epochs = 30
batch_size = 256
display_step = 5
# Network parameters
n_hidden_1 = 256 # 1st Layer num features
n_hidden_2 = 128 # 2nd Layer num features
n_hidden_3 = 64 # 3rd Layer num features
n_input = 784 # MNIST data input (img shape: 28*28)
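# Together these define a symmetric architecture: the encoder compresses each
# 784-pixel image through 256 -> 128 -> 64 units, and the decoder mirrors the
# same shapes (64 -> 128 -> 256 -> 784) to reconstruct the input.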
# TensorFlow Graph input
X = tf.placeholder(tf.float32, [None, n_input])
# Declare weights and biases for each hidden layer
weights = {
    'encoder_h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
    'encoder_h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    'encoder_h3': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_3])),
    'decoder_h1': tf.Variable(tf.random_normal([n_hidden_3, n_hidden_2])),
    'decoder_h2': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_1])),
    'decoder_h3': tf.Variable(tf.random_normal([n_hidden_1, n_input])),
}
biases = {
    'encoder_b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'encoder_b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'encoder_b3': tf.Variable(tf.random_normal([n_hidden_3])),
    'decoder_b1': tf.Variable(tf.random_normal([n_hidden_2])),
    'decoder_b2': tf.Variable(tf.random_normal([n_hidden_1])),
    'decoder_b3': tf.Variable(tf.random_normal([n_input])),
}
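# tf.random_normal draws initial values from a standard normal distribution
# (mean 0, stddev 1); for sigmoid layers a smaller-variance scheme such as
# Xavier/Glorot initialization would likely converge faster.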
# Building the encoder and decoder
def encoder(x):
    # Encoder first layer with sigmoid activation #1
    layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['encoder_h1']), biases['encoder_b1']))
    # Encoder second layer with sigmoid activation #2
    layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['encoder_h2']), biases['encoder_b2']))
    # Encoder third layer with sigmoid activation #3
    layer_3 = tf.nn.sigmoid(tf.add(tf.matmul(layer_2, weights['encoder_h3']), biases['encoder_b3']))
    return layer_3
def decoder(x):
    # Decoder first layer with sigmoid activation #1
    layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['decoder_h1']), biases['decoder_b1']))
    # Decoder second layer with sigmoid activation #2
    layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['decoder_h2']), biases['decoder_b2']))
    # Decoder third layer with sigmoid activation #3
    layer_3 = tf.nn.sigmoid(tf.add(tf.matmul(layer_2, weights['decoder_h3']), biases['decoder_b3']))
    return layer_3
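# Sigmoid keeps every activation in [0, 1], matching the range of MNIST pixel
# intensities, so the decoder output can be compared directly to the input
# image with a squared-error loss.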
# The cost node below is the reconstruction loss; the optimizer node applies the gradients used for backpropagation
# Construct model, encoder and decoder operation
encoder_op = encoder(X)
decoder_op = decoder(encoder_op)
# Prediction
y_pred = decoder_op
# Targets (Labels) are the input data.
y_true = X
# Loss and Optimizer, Minimize the squared error
cost = tf.reduce_mean(tf.pow(y_true - y_pred, 2))
optimizer = tf.train.RMSPropOptimizer(learning_rate).minimize(cost)
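# tf.reduce_mean over the full tensor averages the squared error across both
# the batch and the 784 pixels, i.e. cost = mean_{i,j} (x_ij - x_hat_ij)^2,
# where x is the input batch and x_hat its reconstruction.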
# Initialize the variables
init = tf.global_variables_initializer()
# Launching the graph
sess = tf.Session()
sess.run(init)
# Number of batches per epoch
total_batch = int(mnist.train.num_examples/batch_size)
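# next_batch keeps its own cursor and reshuffles once a full pass completes,
# so training simply continues across these epoch boundaries; total_batch only
# controls how many updates count as one epoch here.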
# Training cycle
for epoch in range(training_epochs):
    # Loop over all batches
    for i in range(total_batch):
        batch_xs, _ = mnist.train.next_batch(batch_size)
        # Run the optimization op (backprop) and the cost op (to get the loss value)
        _, c = sess.run([optimizer, cost], feed_dict={X: batch_xs})
    # Display logs every display_step epochs
    if epoch % display_step == 0:
        print("Epoch: ", '%02d' % (epoch + 1), "cost = ", "{:.9f}".format(c))
print("Optimization Finished!")
# Apply the encoder and decoder to the test set; adjust the slice to change how many test cases are used
encode_decode = sess.run(y_pred, feed_dict={X: mnist.test.images[:10]})
# Compare original images with their reconstructions
f, a = plt.subplots(2, 10, figsize=(10, 2))
# Change the range to compare any number of examples
for i in range(10):
    a[0][i].imshow(np.reshape(mnist.test.images[i], (28, 28)), cmap='gray')
    a[1][i].imshow(np.reshape(encode_decode[i], (28, 28)), cmap='gray')
plt.show()
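# Free the session's resources once evaluation and plotting are done
sess.close()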