Path: blob/master/Deep-Convolutional-GAN/TensorFlow/dcgan_anime_tesnorflow.py
3142 views
"""DCGAN on an anime-faces dataset (TensorFlow / Keras).

Trains a standard DCGAN (Radford et al.): a transposed-convolution generator
mapping a (1, 1, 100) latent vector to a 64x64x3 tanh image, and a strided
convolutional discriminator, both optimized with binary cross-entropy.
"""

import os
import time

import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

os.environ['CUDA_VISIBLE_DEVICES'] = '0'  # pin training to GPU 0

img_height, img_width = 64, 64
batch_size = 128

# Unlabelled image folder; label_mode=None yields bare image batches.
train_ds = tf.keras.preprocessing.image_dataset_from_directory(
    '../dcgan/anime',
    image_size=(img_height, img_width),
    batch_size=batch_size,
    label_mode=None)

# Preview a 3x3 grid of raw (un-normalized) training images.
plt.figure(figsize=(10, 10))
for images in train_ds.take(1):
    for i in range(9):
        ax = plt.subplot(3, 3, i + 1)
        plt.imshow(images[i].numpy().astype("uint8"))
        plt.axis("off")

for image_batch in train_ds:
    print(image_batch.shape)
    break

AUTOTUNE = tf.data.experimental.AUTOTUNE

train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)

# Map pixels from [0, 255] to [-1, 1] to match the generator's tanh output.
normalization_layer = layers.experimental.preprocessing.Rescaling(
    scale=1. / 127.5, offset=-1)

normalized_ds = train_ds.map(lambda x: normalization_layer(x))
image_batch = next(iter(normalized_ds))
first_image = image_batch[0]
print(np.min(first_image), np.max(first_image))  # expect approx -1.0, 1.0

noise_dim = (1, 1, 100)

# DCGAN paper: initialize all conv weights from N(0, 0.02).
WEIGHT_INIT = tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.02)


def build_generator():
    """Return the generator model: (1, 1, 100) noise -> (64, 64, 3) image in [-1, 1]."""
    inputs = keras.Input(shape=(1, 1, 100), name='input_layer')

    # 1x1 -> 4x4
    x = layers.Conv2DTranspose(64 * 8, kernel_size=4, strides=4, padding='same',
                               kernel_initializer=WEIGHT_INIT, use_bias=False,
                               name='conv_transpose_1')(inputs)
    # center/scale are boolean flags; the original passed floats (1.0 / 0.02),
    # which were merely truthy -- True/True preserves behavior.
    # NOTE(review): epsilon=0.8 is unusually large for BatchNorm (typical 1e-3
    # or smaller); kept as-is to preserve the trained behavior -- confirm intent.
    x = layers.BatchNormalization(momentum=0.1, epsilon=0.8, center=True,
                                  scale=True, name='bn_1')(x)
    x = layers.ReLU(name='relu_1')(x)

    # 4x4 -> 8x8
    x = layers.Conv2DTranspose(64 * 4, kernel_size=4, strides=2, padding='same',
                               kernel_initializer=WEIGHT_INIT, use_bias=False,
                               name='conv_transpose_2')(x)
    x = layers.BatchNormalization(momentum=0.1, epsilon=0.8, center=True,
                                  scale=True, name='bn_2')(x)
    x = layers.ReLU(name='relu_2')(x)

    # 8x8 -> 16x16
    x = layers.Conv2DTranspose(64 * 2, 4, 2, padding='same',
                               kernel_initializer=WEIGHT_INIT, use_bias=False,
                               name='conv_transpose_3')(x)
    x = layers.BatchNormalization(momentum=0.1, epsilon=0.8, center=True,
                                  scale=True, name='bn_3')(x)
    x = layers.ReLU(name='relu_3')(x)

    # 16x16 -> 32x32
    x = layers.Conv2DTranspose(64 * 1, 4, 2, padding='same',
                               kernel_initializer=WEIGHT_INIT, use_bias=False,
                               name='conv_transpose_4')(x)
    x = layers.BatchNormalization(momentum=0.1, epsilon=0.8, center=True,
                                  scale=True, name='bn_4')(x)
    x = layers.ReLU(name='relu_4')(x)

    # 32x32 -> 64x64, tanh squashes to [-1, 1] (matches the input rescaling).
    outputs = layers.Conv2DTranspose(3, 4, 2, padding='same',
                                     kernel_initializer=WEIGHT_INIT,
                                     use_bias=False, activation='tanh',
                                     name='conv_transpose_5')(x)

    return tf.keras.Model(inputs, outputs, name="Generator")


generator = build_generator()

generator.summary()

generator.save('dcgan_gen.h5')


def build_discriminator():
    """Return the discriminator model: (64, 64, 3) image -> real/fake probability."""
    inputs = keras.Input(shape=(64, 64, 3), name='input_layer')

    # 64x64 -> 32x32; DCGAN uses LeakyReLU(0.2) and no BatchNorm on the first layer.
    x = layers.Conv2D(64, kernel_size=4, strides=2, padding='same',
                      kernel_initializer=WEIGHT_INIT, use_bias=False,
                      name='conv_1')(inputs)
    x = layers.LeakyReLU(0.2, name='leaky_relu_1')(x)

    # 32x32 -> 16x16
    x = layers.Conv2D(64 * 2, kernel_size=4, strides=2, padding='same',
                      kernel_initializer=WEIGHT_INIT, use_bias=False,
                      name='conv_2')(x)
    x = layers.BatchNormalization(momentum=0.1, epsilon=0.8, center=True,
                                  scale=True, name='bn_1')(x)
    x = layers.LeakyReLU(0.2, name='leaky_relu_2')(x)

    # 16x16 -> 8x8
    x = layers.Conv2D(64 * 4, 4, 2, padding='same',
                      kernel_initializer=WEIGHT_INIT, use_bias=False,
                      name='conv_3')(x)
    x = layers.BatchNormalization(momentum=0.1, epsilon=0.8, center=True,
                                  scale=True, name='bn_2')(x)
    x = layers.LeakyReLU(0.2, name='leaky_relu_3')(x)

    # 8x8 -> 4x4
    x = layers.Conv2D(64 * 8, 4, 2, padding='same',
                      kernel_initializer=WEIGHT_INIT, use_bias=False,
                      name='conv_4')(x)
    x = layers.BatchNormalization(momentum=0.1, epsilon=0.8, center=True,
                                  scale=True, name='bn_3')(x)
    x = layers.LeakyReLU(0.2, name='leaky_relu_4')(x)

    # 4x4 -> 1x1 sigmoid score, flattened to shape (batch, 1).
    outputs = layers.Conv2D(1, 4, 4, padding='same',
                            kernel_initializer=WEIGHT_INIT, use_bias=False,
                            activation='sigmoid', name='conv_5')(x)
    outputs = layers.Flatten()(outputs)

    return tf.keras.Model(inputs, outputs, name="Discriminator")


discriminator = build_discriminator()

discriminator.summary()

discriminator.save('dcgan_disc.h5')

BATCH_SIZE = 128
latent_dim = 100

binary_cross_entropy = tf.keras.losses.BinaryCrossentropy()


def generator_loss(label, fake_output):
    """BCE between all-ones labels and the discriminator's score on fakes."""
    return binary_cross_entropy(label, fake_output)


def discriminator_loss(label, output):
    """BCE between the given labels (real=1 / fake=0) and discriminator output."""
    return binary_cross_entropy(label, output)


learning_rate = 0.0002
# `lr` is deprecated (removed in newer Keras); use `learning_rate`.
generator_optimizer = tf.keras.optimizers.Adam(
    learning_rate=learning_rate, beta_1=0.5, beta_2=0.999)
discriminator_optimizer = tf.keras.optimizers.Adam(
    learning_rate=learning_rate, beta_1=0.5, beta_2=0.999)

num_examples_to_generate = 25
# We will reuse this seed over time to visualize progress on fixed noise.
seed = tf.random.normal([num_examples_to_generate, 1, 1, latent_dim])


# `tf.function` compiles the step into a graph for speed.
@tf.function
def train_step(images):
    """Run one DCGAN update: discriminator on real, on fake, then generator.

    Args:
        images: one batch of normalized real images in [-1, 1].
    """
    # Noise vector sampled from a standard normal distribution.
    noise = tf.random.normal([BATCH_SIZE, 1, 1, latent_dim])

    # Discriminator update #1: real images against label 1.
    with tf.GradientTape() as disc_tape1:
        generated_images = generator(noise, training=True)

        real_output = discriminator(images, training=True)
        real_targets = tf.ones_like(real_output)
        disc_loss1 = discriminator_loss(real_targets, real_output)

    gradients_of_disc1 = disc_tape1.gradient(
        disc_loss1, discriminator.trainable_variables)
    discriminator_optimizer.apply_gradients(
        zip(gradients_of_disc1, discriminator.trainable_variables))

    # Discriminator update #2: generated images against label 0.
    with tf.GradientTape() as disc_tape2:
        fake_output = discriminator(generated_images, training=True)
        fake_targets = tf.zeros_like(fake_output)
        disc_loss2 = discriminator_loss(fake_targets, fake_output)

    gradients_of_disc2 = disc_tape2.gradient(
        disc_loss2, discriminator.trainable_variables)
    discriminator_optimizer.apply_gradients(
        zip(gradients_of_disc2, discriminator.trainable_variables))

    # Generator update: fool the (freshly updated) discriminator with label 1.
    with tf.GradientTape() as gen_tape:
        generated_images = generator(noise, training=True)
        fake_output = discriminator(generated_images, training=True)
        real_targets = tf.ones_like(fake_output)
        gen_loss = generator_loss(real_targets, fake_output)

    gradients_of_gen = gen_tape.gradient(
        gen_loss, generator.trainable_variables)
    generator_optimizer.apply_gradients(
        zip(gradients_of_gen, generator.trainable_variables))


def train(dataset, epochs):
    """Train the GAN for `epochs` passes over `dataset`, checkpointing every 15."""
    for epoch in range(epochs):
        start = time.time()
        for image_batch in dataset:
            train_step(image_batch)
        print(epoch)
        # display.clear_output(wait=True)
        generate_and_save_images(generator, epoch + 1, seed)

        # Save the model weights every 15 epochs.
        if (epoch + 1) % 15 == 0:
            # Ensure the checkpoint directory exists before writing.
            os.makedirs('dcgan/tf/training_weights', exist_ok=True)
            generator.save_weights(
                'dcgan/tf/training_weights/gen_' + str(epoch) + '.h5')
            discriminator.save_weights(
                'dcgan/tf/training_weights/disc_' + str(epoch) + '.h5')
        print('Time for epoch {} is {} sec'.format(epoch + 1, time.time() - start))

    # Generate after the final epoch.
    # display.clear_output(wait=True)
    generate_and_save_images(generator, epochs, seed)


def generate_and_save_images(model, epoch, test_input):
    """Render a 5x5 grid of generated images and save it as a PNG for `epoch`."""
    # `training=False` so all layers run in inference mode (batchnorm).
    predictions = model(test_input, training=False)
    print(predictions.shape)
    fig = plt.figure(figsize=(4, 4))

    for i in range(predictions.shape[0]):
        plt.subplot(5, 5, i + 1)
        # Undo the [-1, 1] normalization back to displayable [0, 255] pixels.
        pred = (predictions[i, :, :, :] + 1) * 127.5
        pred = np.array(pred)
        plt.imshow(pred.astype(np.uint8))
        plt.axis('off')

    # Ensure the image output directory exists before writing.
    os.makedirs('dcgan/tf/images', exist_ok=True)
    plt.savefig('dcgan/tf/images/image_at_epoch_{:d}.png'.format(epoch))
    plt.show()


train(normalized_ds, 2)