GitHub Repository: hackassin/learnopencv
Path: blob/master/Deep-Convolutional-GAN/TensorFlow/dcgan_anime_tesnorflow.py
# Import the required packages
import os
import time

import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow import keras
from tensorflow.keras import layers

# Run on the first GPU only
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
img_height, img_width = 64, 64
batch_size = 128

# Load the anime face images from disk (no labels are needed for a GAN)
train_ds = tf.keras.preprocessing.image_dataset_from_directory(
    '../dcgan/anime',
    image_size=(img_height, img_width),
    batch_size=batch_size,
    label_mode=None)
# Display a 3x3 grid of sample images from the first batch
plt.figure(figsize=(10, 10))
for images in train_ds.take(1):
    for i in range(9):
        ax = plt.subplot(3, 3, i + 1)
        plt.imshow(images[i].numpy().astype("uint8"))
        plt.axis("off")

# Inspect the shape of one batch
for image_batch in train_ds:
    print(image_batch.shape)
    break
# Configure the dataset for performance: cache, shuffle, and prefetch
AUTOTUNE = tf.data.experimental.AUTOTUNE
train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)

# Scale pixel values from [0, 255] to [-1, 1] to match the generator's tanh output
normalization_layer = layers.experimental.preprocessing.Rescaling(scale=1./127.5, offset=-1)
normalized_ds = train_ds.map(lambda x: normalization_layer(x))

# Sanity check: the normalized pixel values should lie in [-1, 1]
image_batch = next(iter(normalized_ds))
first_image = image_batch[0]
print(np.min(first_image), np.max(first_image))

# Shape of the latent noise vector fed to the generator
noise_dim = (1, 1, 100)
def generator():
    # Input: a (1, 1, 100) latent noise vector
    inputs = keras.Input(shape=(1, 1, 100), name='input_layer')

    # Project and reshape: (1, 1, 100) -> (4, 4, 512)
    x = layers.Conv2DTranspose(64 * 8, kernel_size=4, strides=4, padding='same',
                               kernel_initializer=tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.02),
                               use_bias=False, name='conv_transpose_1')(inputs)
    x = layers.BatchNormalization(momentum=0.1, epsilon=0.8, center=1.0, scale=0.02, name='bn_1')(x)
    x = layers.ReLU(name='relu_1')(x)

    # Upsample: (4, 4, 512) -> (8, 8, 256)
    x = layers.Conv2DTranspose(64 * 4, kernel_size=4, strides=2, padding='same',
                               kernel_initializer=tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.02),
                               use_bias=False, name='conv_transpose_2')(x)
    x = layers.BatchNormalization(momentum=0.1, epsilon=0.8, center=1.0, scale=0.02, name='bn_2')(x)
    x = layers.ReLU(name='relu_2')(x)

    # Upsample: (8, 8, 256) -> (16, 16, 128)
    x = layers.Conv2DTranspose(64 * 2, kernel_size=4, strides=2, padding='same',
                               kernel_initializer=tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.02),
                               use_bias=False, name='conv_transpose_3')(x)
    x = layers.BatchNormalization(momentum=0.1, epsilon=0.8, center=1.0, scale=0.02, name='bn_3')(x)
    x = layers.ReLU(name='relu_3')(x)

    # Upsample: (16, 16, 128) -> (32, 32, 64)
    x = layers.Conv2DTranspose(64 * 1, kernel_size=4, strides=2, padding='same',
                               kernel_initializer=tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.02),
                               use_bias=False, name='conv_transpose_4')(x)
    x = layers.BatchNormalization(momentum=0.1, epsilon=0.8, center=1.0, scale=0.02, name='bn_4')(x)
    x = layers.ReLU(name='relu_4')(x)

    # Output layer: (32, 32, 64) -> (64, 64, 3); tanh keeps pixel values in [-1, 1]
    outputs = layers.Conv2DTranspose(3, kernel_size=4, strides=2, padding='same',
                                     kernel_initializer=tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.02),
                                     use_bias=False, activation='tanh', name='conv_transpose_5')(x)

    model = tf.keras.Model(inputs, outputs, name="Generator")
    return model

# Build, inspect, and save the generator
generator = generator()
generator.summary()
generator.save('dcgan_gen.h5')
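
# Optional sanity check (not part of the original script): a single random noise
# vector should map to one 64x64 RGB image, i.e. an output of shape (1, 64, 64, 3).
# `sample_noise` is a name introduced here for the check.
sample_noise = tf.random.normal([1, 1, 1, 100])
print(generator(sample_noise, training=False).shape)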
def discriminator():
    # Input: a (64, 64, 3) image
    inputs = keras.Input(shape=(64, 64, 3), name='input_layer')

    # Downsample: (64, 64, 3) -> (32, 32, 64)
    x = layers.Conv2D(64, kernel_size=4, strides=2, padding='same',
                      kernel_initializer=tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.02),
                      use_bias=False, name='conv_1')(inputs)
    x = layers.LeakyReLU(0.2, name='leaky_relu_1')(x)

    # Downsample: (32, 32, 64) -> (16, 16, 128)
    x = layers.Conv2D(64 * 2, kernel_size=4, strides=2, padding='same',
                      kernel_initializer=tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.02),
                      use_bias=False, name='conv_2')(x)
    x = layers.BatchNormalization(momentum=0.1, epsilon=0.8, center=1.0, scale=0.02, name='bn_1')(x)
    x = layers.LeakyReLU(0.2, name='leaky_relu_2')(x)

    # Downsample: (16, 16, 128) -> (8, 8, 256)
    x = layers.Conv2D(64 * 4, kernel_size=4, strides=2, padding='same',
                      kernel_initializer=tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.02),
                      use_bias=False, name='conv_3')(x)
    x = layers.BatchNormalization(momentum=0.1, epsilon=0.8, center=1.0, scale=0.02, name='bn_2')(x)
    x = layers.LeakyReLU(0.2, name='leaky_relu_3')(x)

    # Downsample: (8, 8, 256) -> (4, 4, 512)
    x = layers.Conv2D(64 * 8, kernel_size=4, strides=2, padding='same',
                      kernel_initializer=tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.02),
                      use_bias=False, name='conv_4')(x)
    x = layers.BatchNormalization(momentum=0.1, epsilon=0.8, center=1.0, scale=0.02, name='bn_3')(x)
    x = layers.LeakyReLU(0.2, name='leaky_relu_4')(x)

    # Classification layer: (4, 4, 512) -> (1, 1, 1); sigmoid outputs a real/fake probability
    outputs = layers.Conv2D(1, kernel_size=4, strides=4, padding='same',
                            kernel_initializer=tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.02),
                            use_bias=False, activation='sigmoid', name='conv_5')(x)
    outputs = layers.Flatten()(outputs)

    model = tf.keras.Model(inputs, outputs, name="Discriminator")
    return model

# Build, inspect, and save the discriminator
discriminator = discriminator()
discriminator.summary()
discriminator.save('dcgan_disc.h5')
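
# Optional sanity check (not part of the original script): the discriminator should
# map a batch of 64x64x3 images to one probability per image, i.e. shape (4, 1) here.
# `sample_images` is a name introduced here for the check.
sample_images = tf.random.normal([4, 64, 64, 3])
print(discriminator(sample_images, training=False).shape)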
119
120
BATCH_SIZE=128
121
latent_dim = 100
122
123
binary_cross_entropy = tf.keras.losses.BinaryCrossentropy()
124
125
def generator_loss(label, fake_output):
126
gen_loss = binary_cross_entropy(label, fake_output)
127
#print(gen_loss)
128
return gen_loss
129
130
def discriminator_loss(label, output):
131
disc_loss = binary_cross_entropy(label, output)
132
#print(total_loss)
133
return disc_loss
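
# Quick illustration (not part of the original script) of how the shared BCE loss
# behaves: predictions close to the all-ones targets give a small loss, while less
# confident predictions give a larger one. `example_targets`, `confident`, and
# `uncertain` are names introduced here only for this illustration.
example_targets = tf.ones((4, 1))
confident = tf.constant([[0.9], [0.95], [0.99], [0.8]])
uncertain = tf.constant([[0.5], [0.4], [0.6], [0.3]])
print(generator_loss(example_targets, confident).numpy())  # small loss
print(generator_loss(example_targets, uncertain).numpy())  # larger loss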

# Adam optimizers with beta_1 = 0.5, as recommended in the DCGAN paper
learning_rate = 0.0002
generator_optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate, beta_1=0.5, beta_2=0.999)
discriminator_optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate, beta_1=0.5, beta_2=0.999)

# A fixed batch of noise vectors, reused over time to visualize the generator's progress
num_examples_to_generate = 25
seed = tf.random.normal([num_examples_to_generate, 1, 1, latent_dim])

# Notice the use of `tf.function`: this annotation compiles train_step into a TensorFlow graph
@tf.function
def train_step(images):
    # Sample a batch of noise vectors from a normal distribution
    noise = tf.random.normal([BATCH_SIZE, 1, 1, latent_dim])

    # Train the discriminator on real images (targets = 1)
    with tf.GradientTape() as disc_tape1:
        generated_images = generator(noise, training=True)

        real_output = discriminator(images, training=True)
        real_targets = tf.ones_like(real_output)
        disc_loss1 = discriminator_loss(real_targets, real_output)

    # Gradient calculation and parameter update for the discriminator (real batch)
    gradients_of_disc1 = disc_tape1.gradient(disc_loss1, discriminator.trainable_variables)
    discriminator_optimizer.apply_gradients(zip(gradients_of_disc1,
                                                discriminator.trainable_variables))

    # Train the discriminator on generated images (targets = 0)
    with tf.GradientTape() as disc_tape2:
        fake_output = discriminator(generated_images, training=True)
        fake_targets = tf.zeros_like(fake_output)
        disc_loss2 = discriminator_loss(fake_targets, fake_output)

    # Gradient calculation and parameter update for the discriminator (fake batch)
    gradients_of_disc2 = disc_tape2.gradient(disc_loss2, discriminator.trainable_variables)
    discriminator_optimizer.apply_gradients(zip(gradients_of_disc2,
                                                discriminator.trainable_variables))

    # Train the generator: it tries to get its fakes classified as real (targets = 1)
    with tf.GradientTape() as gen_tape:
        generated_images = generator(noise, training=True)
        fake_output = discriminator(generated_images, training=True)
        real_targets = tf.ones_like(fake_output)
        gen_loss = generator_loss(real_targets, fake_output)

    # Gradient calculation and parameter update for the generator
    gradients_of_gen = gen_tape.gradient(gen_loss, generator.trainable_variables)
    generator_optimizer.apply_gradients(zip(gradients_of_gen,
                                            generator.trainable_variables))

def train(dataset, epochs):
    for epoch in range(epochs):
        start = time.time()
        for image_batch in dataset:
            train_step(image_batch)
        print(epoch)

        # Visualize the generator's progress on the fixed seed
        generate_and_save_images(generator,
                                 epoch + 1,
                                 seed)

        # Save the model weights every 15 epochs
        if (epoch + 1) % 15 == 0:
            generator.save_weights('dcgan/tf/training_weights/gen_' + str(epoch) + '.h5')
            discriminator.save_weights('dcgan/tf/training_weights/disc_' + str(epoch) + '.h5')

        print('Time for epoch {} is {} sec'.format(epoch + 1, time.time() - start))

    # Generate images after the final epoch
    generate_and_save_images(generator,
                             epochs,
                             seed)

def generate_and_save_images(model, epoch, test_input):
    # `training=False` so that all layers (e.g. batchnorm) run in inference mode
    predictions = model(test_input, training=False)
    print(predictions.shape)
    fig = plt.figure(figsize=(4, 4))

    for i in range(predictions.shape[0]):
        plt.subplot(5, 5, i + 1)
        # Rescale pixel values from [-1, 1] back to [0, 255]
        pred = (predictions[i, :, :, :] + 1) * 127.5
        pred = np.array(pred)
        plt.imshow(pred.astype(np.uint8))
        plt.axis('off')

    plt.savefig('dcgan/tf/images/image_at_epoch_{:d}.png'.format(epoch))
    plt.show()
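
# The weight and image paths used above ('dcgan/tf/training_weights', 'dcgan/tf/images')
# are assumed to exist; creating them up front (not part of the original script)
# avoids errors from save_weights/savefig on a fresh setup.
os.makedirs('dcgan/tf/training_weights', exist_ok=True)
os.makedirs('dcgan/tf/images', exist_ok=True)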

train(normalized_ds, 2)