 import matplotlib.image as mpimg
 import pylab

-num_steps = 3000
-learning_rate = 12e-5
+num_steps = 6000
+learning_rate = 15e-5
 batch_size = 128
 show_every = 1000
 num_evals = 100
@@ -44,7 +44,7 @@ def main():
     optim = AdamOptimizer(learning_rate, 0.95, 0.95)
     losses = []

-    stack = RBMStack([784, 512, 64])
+    stack = RBMStack([784, 256, 64])
     img, _ = SelectBatch(data['train_X'], 1)
     #rbm = RBM(784, 64)
     rbm = stack.Stack()[0]
@@ -85,21 +85,21 @@ def main():
     #recon = rbm.CycleContinuous(img)
     plot_dual(img, recon)
     """
-
-    fullimage = []
-    for col_step in range(10):
-        column = []
-        for row_step in range(10):
-            img, _ = SelectBatch(data['train_X'], 1)
-            recon, _ = autoencoder.EvaluateFull(img)
-            img1 = img.reshape(28, 28)
-            img2 = recon.reshape(28, 28)
-            #disp = np.concatenate((img1, img2), axis=1)
-            disp = np.hstack([img1, img2])
-            column.append(disp)
-        fullimage.append(np.vstack(column))
-    plt.imshow(np.hstack(fullimage), cmap='Greys')
-    pylab.show()
+    for step in range(num_evals):
+        fullimage = []
+        for col_step in range(10):
+            column = []
+            for row_step in range(10):
+                img, _ = SelectBatch(data['train_X'], 1)
+                recon, _ = autoencoder.EvaluateFull(img)
+                img1 = img.reshape(28, 28)
+                img2 = recon.reshape(28, 28)
+                #disp = np.concatenate((img1, img2), axis=1)
+                disp = np.hstack([img1, img2])
+                column.append(disp)
+            fullimage.append(np.vstack(column))
+        plt.imshow(np.hstack(fullimage), cmap='Greys')
+        pylab.show()


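For reference, a minimal self-contained sketch of the 10x10 comparison grid that the new loop assembles. Random arrays stand in for SelectBatch and autoencoder.EvaluateFull (both are helpers defined elsewhere in this repo), so only the np.hstack / np.vstack layout is exercised here.

# Sketch of the grid layout: each cell is an (original | reconstruction) pair,
# 10 pairs stacked per column, 10 columns placed side by side.
import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)

fullimage = []
for col_step in range(10):
    column = []
    for row_step in range(10):
        img = rng.random((28, 28))                  # stand-in for an MNIST digit
        recon = np.clip(img + 0.1 * rng.random((28, 28)), 0, 1)  # stand-in for its reconstruction
        column.append(np.hstack([img, recon]))      # pair: 28x56
    fullimage.append(np.vstack(column))             # column of 10 pairs: 280x56
plt.imshow(np.hstack(fullimage), cmap='Greys')      # full grid: 280x560
plt.show()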