# wgan.py
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import os,sys
sys.path.append('utils')
from nets import *
from datas import *
def sample_z(m, n):
    """Draw an (m, n) batch of latent vectors uniformly from [-1, 1)."""
    return np.random.uniform(low=-1.0, high=1.0, size=(m, n))
class WGAN():
    """Wasserstein GAN trained with the original weight-clipping scheme.

    Builds the TF1 static graph in __init__ (placeholders, generator and
    critic outputs, losses, clip ops, RMSProp solvers) and runs the
    alternating critic/generator loop in train().

    NOTE(review): `generator` and `discriminator` appear to be callable net
    objects from `nets` exposing a `.vars` list of trainable variables, and
    `data` a dataset object that is callable for minibatches — confirm
    against the `nets`/`datas` modules, which are not visible here.
    """
    def __init__(self, generator, discriminator, data):
        self.generator = generator
        self.discriminator = discriminator
        self.data = data
        # data — latent dimension, square image size, and channel count all
        # come from the dataset object
        self.z_dim = self.data.z_dim
        self.size = self.data.size
        self.channel = self.data.channel
        # Graph inputs: real-image batch and latent batch.
        self.X = tf.placeholder(tf.float32, shape=[None, self.size, self.size, self.channel])
        self.z = tf.placeholder(tf.float32, shape=[None, self.z_dim])
        # nets — the second discriminator call shares the first call's
        # variables via reuse=True, so there is one critic, scored on both
        # real and generated samples
        self.G_sample = self.generator(self.z)
        self.D_real = self.discriminator(self.X)
        self.D_fake = self.discriminator(self.G_sample, reuse = True)
        # loss — Wasserstein critic objective: maximize E[D(x)] - E[D(G(z))],
        # written here as a quantity to minimize; the generator minimizes
        # -E[D(G(z))]. No log/sigmoid as in the standard GAN loss.
        self.D_loss = - tf.reduce_mean(self.D_real) + tf.reduce_mean(self.D_fake)
        self.G_loss = - tf.reduce_mean(self.D_fake)
        # clip — enforce the Lipschitz constraint by clipping every critic
        # weight into [-0.01, 0.01] after each update (original WGAN recipe)
        self.clip_D = [var.assign(tf.clip_by_value(var, -0.01, 0.01)) for var in self.discriminator.vars]
        # solver — RMSProp for both nets (as recommended by the WGAN paper);
        # the learning rate is a placeholder so train() can decay it at runtime
        self.learning_rate = tf.placeholder(tf.float32, shape=[])
        self.D_solver = tf.train.RMSPropOptimizer(learning_rate=self.learning_rate).minimize(self.D_loss, var_list=self.discriminator.vars)
        self.G_solver = tf.train.RMSPropOptimizer(learning_rate=self.learning_rate).minimize(self.G_loss, var_list=self.generator.vars)
        # Let TF grab GPU memory on demand instead of reserving it all.
        gpu_options = tf.GPUOptions(allow_growth=True)
        self.sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
        self.saver = tf.train.Saver()
        self.model_name = 'Models/wgan.ckpt'

    def train(self, sample_dir, training_epoches = 500000, batch_size = 32):
        """Run the alternating WGAN training loop.

        Each outer iteration performs `n_d` critic updates (with weight
        clipping) followed by one generator update; periodically prints the
        losses, writes a 16-sample image grid to `sample_dir`, and
        checkpoints the model.

        sample_dir: directory that receives the numbered sample PNGs.
        training_epoches: number of outer iterations to run.
        batch_size: minibatch size fed to both networks.
        """
        fig_count = 0
        self.sess.run(tf.global_variables_initializer())
        #self.saver.restore(self.sess, self.model_name)
        learning_rate_initial = 1e-4
        for epoch in range(training_epoches):
            # Halve the learning rate every 50k iterations.
            learning_rate = learning_rate_initial * pow(0.5, epoch // 50000)
            # update D — train the critic extra hard at the very start and
            # once every 500 iterations (100 steps) so it stays near-optimal;
            # otherwise the usual 5 critic steps per generator step.
            n_d = 100 if epoch < 25 or (epoch+1) % 500 == 0 else 5
            for _ in range(n_d):
                X_b = self.data(batch_size)
                # Run the clip ops together with the optimizer step.
                self.sess.run(
                    [self.clip_D,self.D_solver],
                    feed_dict={self.X: X_b, self.z: sample_z(batch_size, self.z_dim), self.learning_rate: learning_rate}
                )
            # update G — a single generator step per outer iteration.
            for _ in range(1):
                self.sess.run(
                    self.G_solver,
                    feed_dict={self.z: sample_z(batch_size, self.z_dim), self.learning_rate: learning_rate}
                )
            # save img, model. print loss
            if epoch % 100 == 0 or epoch < 100:
                # X_b here reuses the last critic minibatch from the loop above.
                D_loss_curr, G_loss_curr = self.sess.run(
                    [self.D_loss, self.G_loss],
                    feed_dict={self.X: X_b, self.z: sample_z(batch_size, self.z_dim)})
                print('Iter: {}; D loss: {:.4}; G_loss: {:.4}'.format(epoch, D_loss_curr, G_loss_curr))
            if epoch % 1000 == 0:
                # Render a 16-sample grid and save it as a zero-padded PNG.
                samples = self.sess.run(self.G_sample, feed_dict={self.z: sample_z(16, self.z_dim)})
                fig = self.data.data2fig(samples)
                plt.savefig('{}/{}.png'.format(sample_dir, str(fig_count).zfill(3)), bbox_inches='tight')
                fig_count += 1
                plt.close(fig)
            if epoch % 5000 == 0:
                # Overwrites the same checkpoint path each time.
                self.saver.save(self.sess, self.model_name)
if __name__ == '__main__':
    # constraint GPU — make only device 1 visible to TensorFlow.
    os.environ['CUDA_VISIBLE_DEVICES'] = '1'

    # save generated images — ensure the output directory exists.
    sample_dir = 'Samples/wgan'
    if not os.path.exists(sample_dir):
        os.makedirs(sample_dir)

    # Build the nets and dataset, then run the training loop.
    wgan = WGAN(G_conv(), D_conv(), celebA())
    wgan.train(sample_dir)