GitHub Repository: hackassin/learnopencv
Path: blob/master/Conditional-GAN-PyTorch-TensorFlow/TensorFlow/CGAN-RockPaperScissor-TensorFlow.ipynb
Kernel: Python 3 (ipykernel)
import cv2
import tensorflow as tf
from tensorflow.keras import layers
from IPython import display
import matplotlib.pyplot as plt
import numpy as np
import time
%matplotlib inline
import tensorflow_datasets as tfds
from matplotlib import gridspec
ds = tfds.load('RockPaperScissors', split='train', as_supervised=True, shuffle_files=True)
[TensorFlow startup logs, condensed: CUDA/cuDNN libraries loaded; XLA devices not created (tf_xla_enable_xla_devices not set); found GPU 0, GeForce GTX 1060 (compute capability 6.1, 5.94 GiB); created TensorFlow device /job:localhost/replica:0/task:0/device:GPU:0 with 5545 MB memory.]
ds = ds.shuffle(1000).batch(128)
ds
<BatchDataset shapes: ((None, 300, 300, 3), (None,)), types: (tf.uint8, tf.int64)>
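Optionally, the input pipeline can also prefetch batches so data preparation overlaps with training. A minimal sketch (not required for the rest of the notebook to work):

# Optional: prefetch batches so the GPU does not wait on data loading
ds = ds.prefetch(tf.data.experimental.AUTOTUNE)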
# Create dictionary of target classes
label_dict = {
    0: 'Rock',
    1: 'Paper',
    2: 'Scissors'
}
plt.figure(figsize=(10, 10))
for image, label in ds.take(1):
    for i in range(9):
        ax = plt.subplot(3, 3, i + 1)
        lab = np.array(label[i])
        plt.text(0.5, -0.1, s=label_dict[int(lab)],
                 horizontalalignment='center', verticalalignment='center',
                 transform=ax.transAxes, fontsize=20)
        plt.imshow(image[i])
        plt.axis("off")
Image in a Jupyter notebook
# Resize to 128x128 and scale pixel values from [0, 255] to [-1, 1]
@tf.function
def normalization(tensor):
    tensor = tf.image.resize(tensor, (128, 128))
    tensor = tf.subtract(tf.divide(tensor, 127.5), 1)
    return tensor
for img, label in ds.take(1):
    img = tf.cast(img, tf.float32)
    imgs = normalization(img)
    print(imgs.shape)
(128, 128, 128, 3)
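As a quick sanity check (a small sketch, not part of the original flow), the normalized batch should fall within the tanh-compatible range [-1, 1]:

# Min/max of the normalized batch; both values should lie within [-1, 1]
print(float(tf.reduce_min(imgs)), float(tf.reduce_max(imgs)))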
BATCH_SIZE = 128
latent_dim = 100
# label input
con_label = layers.Input(shape=(1,))
# latent vector input
latent_vector = layers.Input(shape=(latent_dim,))
def label_conditioned_generator(n_classes=3, embedding_dim=100):
    # embedding for categorical input
    label_embedding = layers.Embedding(n_classes, embedding_dim)(con_label)
    # linear multiplication
    nodes = 4 * 4
    label_dense = layers.Dense(nodes)(label_embedding)
    # reshape to additional channel
    label_reshape_layer = layers.Reshape((4, 4, 1))(label_dense)
    return label_reshape_layer

def latent_input(latent_dim=100):
    # image generator input
    nodes = 512 * 4 * 4
    latent_dense = layers.Dense(nodes)(latent_vector)
    latent_dense = layers.ReLU()(latent_dense)
    latent_reshape = layers.Reshape((4, 4, 512))(latent_dense)
    return latent_reshape
# define the final generator model
def define_generator():
    label_output = label_conditioned_generator()
    latent_vector_output = latent_input()
    # merge label_conditioned_generator and latent_input output
    merge = layers.Concatenate()([latent_vector_output, label_output])

    x = layers.Conv2DTranspose(64 * 8, kernel_size=4, strides=2, padding='same',
                               kernel_initializer=tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.02),
                               use_bias=False, name='conv_transpose_1')(merge)
    x = layers.BatchNormalization(momentum=0.1, epsilon=0.8, center=1.0, scale=0.02, name='bn_1')(x)
    x = layers.ReLU(name='relu_1')(x)

    x = layers.Conv2DTranspose(64 * 4, kernel_size=4, strides=2, padding='same',
                               kernel_initializer=tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.02),
                               use_bias=False, name='conv_transpose_2')(x)
    x = layers.BatchNormalization(momentum=0.1, epsilon=0.8, center=1.0, scale=0.02, name='bn_2')(x)
    x = layers.ReLU(name='relu_2')(x)

    x = layers.Conv2DTranspose(64 * 2, 4, 2, padding='same',
                               kernel_initializer=tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.02),
                               use_bias=False, name='conv_transpose_3')(x)
    x = layers.BatchNormalization(momentum=0.1, epsilon=0.8, center=1.0, scale=0.02, name='bn_3')(x)
    x = layers.ReLU(name='relu_3')(x)

    x = layers.Conv2DTranspose(64 * 1, 4, 2, padding='same',
                               kernel_initializer=tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.02),
                               use_bias=False, name='conv_transpose_4')(x)
    x = layers.BatchNormalization(momentum=0.1, epsilon=0.8, center=1.0, scale=0.02, name='bn_4')(x)
    x = layers.ReLU(name='relu_4')(x)

    out_layer = layers.Conv2DTranspose(3, 4, 2, padding='same',
                                       kernel_initializer=tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.02),
                                       use_bias=False, activation='tanh', name='conv_transpose_6')(x)
    # define model
    model = tf.keras.Model([latent_vector, con_label], out_layer)
    return model
conditional_gen = define_generator()
conditional_gen.summary()
Model: "model" __________________________________________________________________________________________________ Layer (type) Output Shape Param # Connected to ================================================================================================== input_2 (InputLayer) [(None, 100)] 0 __________________________________________________________________________________________________ input_1 (InputLayer) [(None, 1)] 0 __________________________________________________________________________________________________ dense_1 (Dense) (None, 8192) 827392 input_2[0][0] __________________________________________________________________________________________________ embedding (Embedding) (None, 1, 100) 300 input_1[0][0] __________________________________________________________________________________________________ re_lu (ReLU) (None, 8192) 0 dense_1[0][0] __________________________________________________________________________________________________ dense (Dense) (None, 1, 16) 1616 embedding[0][0] __________________________________________________________________________________________________ reshape_1 (Reshape) (None, 4, 4, 512) 0 re_lu[0][0] __________________________________________________________________________________________________ reshape (Reshape) (None, 4, 4, 1) 0 dense[0][0] __________________________________________________________________________________________________ concatenate (Concatenate) (None, 4, 4, 513) 0 reshape_1[0][0] reshape[0][0] __________________________________________________________________________________________________ conv_transpose_1 (Conv2DTranspo (None, 8, 8, 512) 4202496 concatenate[0][0] __________________________________________________________________________________________________ bn_1 (BatchNormalization) (None, 8, 8, 512) 2048 conv_transpose_1[0][0] __________________________________________________________________________________________________ relu_1 (ReLU) (None, 8, 8, 512) 0 bn_1[0][0] __________________________________________________________________________________________________ conv_transpose_2 (Conv2DTranspo (None, 16, 16, 256) 2097152 relu_1[0][0] __________________________________________________________________________________________________ bn_2 (BatchNormalization) (None, 16, 16, 256) 1024 conv_transpose_2[0][0] __________________________________________________________________________________________________ relu_2 (ReLU) (None, 16, 16, 256) 0 bn_2[0][0] __________________________________________________________________________________________________ conv_transpose_3 (Conv2DTranspo (None, 32, 32, 128) 524288 relu_2[0][0] __________________________________________________________________________________________________ bn_3 (BatchNormalization) (None, 32, 32, 128) 512 conv_transpose_3[0][0] __________________________________________________________________________________________________ relu_3 (ReLU) (None, 32, 32, 128) 0 bn_3[0][0] __________________________________________________________________________________________________ conv_transpose_4 (Conv2DTranspo (None, 64, 64, 64) 131072 relu_3[0][0] __________________________________________________________________________________________________ bn_4 (BatchNormalization) (None, 64, 64, 64) 256 conv_transpose_4[0][0] __________________________________________________________________________________________________ relu_4 (ReLU) (None, 64, 64, 64) 0 bn_4[0][0] __________________________________________________________________________________________________ 
conv_transpose_6 (Conv2DTranspo (None, 128, 128, 3) 3072 relu_4[0][0] ================================================================================================== Total params: 7,791,228 Trainable params: 7,789,308 Non-trainable params: 1,920 __________________________________________________________________________________________________
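A quick smoke test of the untrained generator (a sketch; the tensors here are purely illustrative): one noise vector plus one label should yield a single 128x128x3 image with tanh outputs in [-1, 1].

sample_noise = tf.random.normal([1, latent_dim])
sample_label = tf.constant([[1.0]])  # illustrative label: 1 -> 'Paper'
sample_image = conditional_gen([sample_noise, sample_label], training=False)
# Expect shape (1, 128, 128, 3) and values within [-1, 1]
print(sample_image.shape, float(tf.reduce_min(sample_image)), float(tf.reduce_max(sample_image)))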
def label_condition_disc(in_shape=(128, 128, 3), n_classes=3, embedding_dim=100):
    # label input
    con_label = layers.Input(shape=(1,))
    # embedding for categorical input
    label_embedding = layers.Embedding(n_classes, embedding_dim)(con_label)
    # scale up to image dimensions with linear activation
    nodes = in_shape[0] * in_shape[1] * in_shape[2]
    label_dense = layers.Dense(nodes)(label_embedding)
    # reshape to additional channel
    label_reshape_layer = layers.Reshape((in_shape[0], in_shape[1], 3))(label_dense)
    return con_label, label_reshape_layer

def image_disc(in_shape=(128, 128, 3)):
    # image input
    inp_image = layers.Input(shape=in_shape)
    return inp_image
def define_discriminator():
    con_label, label_condition_output = label_condition_disc()
    inp_image_output = image_disc()
    # concat label as a channel
    merge = layers.Concatenate()([inp_image_output, label_condition_output])

    x = layers.Conv2D(64, kernel_size=4, strides=2, padding='same',
                      kernel_initializer=tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.02),
                      use_bias=False, name='conv_1')(merge)
    x = layers.LeakyReLU(0.2, name='leaky_relu_1')(x)

    x = layers.Conv2D(64 * 2, kernel_size=4, strides=3, padding='same',
                      kernel_initializer=tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.02),
                      use_bias=False, name='conv_2')(x)
    x = layers.BatchNormalization(momentum=0.1, epsilon=0.8, center=1.0, scale=0.02, name='bn_1')(x)
    x = layers.LeakyReLU(0.2, name='leaky_relu_2')(x)

    x = layers.Conv2D(64 * 4, 4, 3, padding='same',
                      kernel_initializer=tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.02),
                      use_bias=False, name='conv_3')(x)
    x = layers.BatchNormalization(momentum=0.1, epsilon=0.8, center=1.0, scale=0.02, name='bn_2')(x)
    x = layers.LeakyReLU(0.2, name='leaky_relu_3')(x)

    x = layers.Conv2D(64 * 8, 4, 3, padding='same',
                      kernel_initializer=tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.02),
                      use_bias=False, name='conv_5')(x)
    x = layers.BatchNormalization(momentum=0.1, epsilon=0.8, center=1.0, scale=0.02, name='bn_4')(x)
    x = layers.LeakyReLU(0.2, name='leaky_relu_5')(x)

    flattened_out = layers.Flatten()(x)
    # dropout
    dropout = layers.Dropout(0.4)(flattened_out)
    # output
    dense_out = layers.Dense(1, activation='sigmoid')(dropout)
    # define model
    model = tf.keras.Model([inp_image_output, con_label], dense_out)
    return model
conditional_discriminator = define_discriminator()
conditional_discriminator.summary()
Model: "model_1" __________________________________________________________________________________________________ Layer (type) Output Shape Param # Connected to ================================================================================================== input_3 (InputLayer) [(None, 1)] 0 __________________________________________________________________________________________________ embedding_1 (Embedding) (None, 1, 100) 300 input_3[0][0] __________________________________________________________________________________________________ dense_2 (Dense) (None, 1, 49152) 4964352 embedding_1[0][0] __________________________________________________________________________________________________ input_4 (InputLayer) [(None, 128, 128, 3) 0 __________________________________________________________________________________________________ reshape_2 (Reshape) (None, 128, 128, 3) 0 dense_2[0][0] __________________________________________________________________________________________________ concatenate_1 (Concatenate) (None, 128, 128, 6) 0 input_4[0][0] reshape_2[0][0] __________________________________________________________________________________________________ conv_1 (Conv2D) (None, 64, 64, 64) 6144 concatenate_1[0][0] __________________________________________________________________________________________________ leaky_relu_1 (LeakyReLU) (None, 64, 64, 64) 0 conv_1[0][0] __________________________________________________________________________________________________ conv_2 (Conv2D) (None, 22, 22, 128) 131072 leaky_relu_1[0][0] __________________________________________________________________________________________________ bn_1 (BatchNormalization) (None, 22, 22, 128) 512 conv_2[0][0] __________________________________________________________________________________________________ leaky_relu_2 (LeakyReLU) (None, 22, 22, 128) 0 bn_1[0][0] __________________________________________________________________________________________________ conv_3 (Conv2D) (None, 8, 8, 256) 524288 leaky_relu_2[0][0] __________________________________________________________________________________________________ bn_2 (BatchNormalization) (None, 8, 8, 256) 1024 conv_3[0][0] __________________________________________________________________________________________________ leaky_relu_3 (LeakyReLU) (None, 8, 8, 256) 0 bn_2[0][0] __________________________________________________________________________________________________ conv_5 (Conv2D) (None, 3, 3, 512) 2097152 leaky_relu_3[0][0] __________________________________________________________________________________________________ bn_4 (BatchNormalization) (None, 3, 3, 512) 2048 conv_5[0][0] __________________________________________________________________________________________________ leaky_relu_5 (LeakyReLU) (None, 3, 3, 512) 0 bn_4[0][0] __________________________________________________________________________________________________ flatten (Flatten) (None, 4608) 0 leaky_relu_5[0][0] __________________________________________________________________________________________________ dropout (Dropout) (None, 4608) 0 flatten[0][0] __________________________________________________________________________________________________ dense_3 (Dense) (None, 1) 4609 dropout[0][0] ================================================================================================== Total params: 7,731,501 Trainable params: 7,729,709 Non-trainable params: 1,792 __________________________________________________________________________________________________
# conditional_discriminator.save('con-disc.h5')
embeddings = conditional_gen.layers[3]
weights = embeddings.get_weights()[0]
weights.shape
(3, 100)
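One optional way to inspect these label embeddings (a sketch, purely illustrative): compare the three class vectors with cosine similarity. Before training they are just random initializations, so the off-diagonal values should sit near zero.

# 3x3 cosine-similarity matrix between the 'Rock', 'Paper', 'Scissors' embeddings
normed = weights / np.linalg.norm(weights, axis=1, keepdims=True)
print(np.round(normed @ normed.T, 3))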
# conditional_gen.save('con-gen.h5')
# ?layers.Embedding
binary_cross_entropy = tf.keras.losses.BinaryCrossentropy()
def generator_loss(label, fake_output):
    gen_loss = binary_cross_entropy(label, fake_output)
    return gen_loss
def discriminator_loss(label, output):
    disc_loss = binary_cross_entropy(label, output)
    return disc_loss
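A tiny worked example of these losses (illustrative numbers, not part of training): with targets of 1 (or 0) and predictions of 0.5, binary cross-entropy evaluates to -log(0.5) ≈ 0.693.

dummy_output = tf.constant([[0.5], [0.5]])
print(float(generator_loss(tf.ones_like(dummy_output), dummy_output)))       # ~0.693
print(float(discriminator_loss(tf.zeros_like(dummy_output), dummy_output)))  # ~0.693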
learning_rate = 0.0002
generator_optimizer = tf.keras.optimizers.Adam(lr=learning_rate, beta_1=0.5, beta_2=0.999)
discriminator_optimizer = tf.keras.optimizers.Adam(lr=learning_rate, beta_1=0.5, beta_2=0.999)
num_examples_to_generate = 25

# We will reuse this seed over time to visualize progress
seed = tf.random.normal([num_examples_to_generate, latent_dim])
seed.dtype
tf.float32
conditional_discriminator.input
[<KerasTensor: shape=(None, 128, 128, 3) dtype=float32 (created by layer 'input_4')>, <KerasTensor: shape=(None, 1) dtype=float32 (created by layer 'input_3')>]
conditional_gen.input
[<KerasTensor: shape=(None, 100) dtype=float32 (created by layer 'input_2')>, <KerasTensor: shape=(None, 1) dtype=float32 (created by layer 'input_1')>]
# Notice the use of `tf.function`
# This annotation causes the function to be "compiled".
@tf.function
def train_step(images, target):
    # noise vector sampled from normal distribution
    noise = tf.random.normal([target.shape[0], latent_dim])

    # Train Discriminator with real labels
    with tf.GradientTape() as disc_tape1:
        generated_images = conditional_gen([noise, target], training=True)
        print(noise.shape)
        print(target.shape)
        real_output = conditional_discriminator([images, target], training=True)
        real_targets = tf.ones_like(real_output)
        disc_loss1 = discriminator_loss(real_targets, real_output)

    # gradient calculation for discriminator for real labels
    gradients_of_disc1 = disc_tape1.gradient(disc_loss1, conditional_discriminator.trainable_variables)
    # parameters optimization for discriminator for real labels
    discriminator_optimizer.apply_gradients(zip(gradients_of_disc1,
                                                conditional_discriminator.trainable_variables))

    # Train Discriminator with fake labels
    with tf.GradientTape() as disc_tape2:
        fake_output = conditional_discriminator([generated_images, target], training=True)
        fake_targets = tf.zeros_like(fake_output)
        disc_loss2 = discriminator_loss(fake_targets, fake_output)

    # gradient calculation for discriminator for fake labels
    gradients_of_disc2 = disc_tape2.gradient(disc_loss2, conditional_discriminator.trainable_variables)
    # parameters optimization for discriminator for fake labels
    discriminator_optimizer.apply_gradients(zip(gradients_of_disc2,
                                                conditional_discriminator.trainable_variables))

    # Train Generator with real labels
    with tf.GradientTape() as gen_tape:
        generated_images = conditional_gen([noise, target], training=True)
        fake_output = conditional_discriminator([generated_images, target], training=True)
        real_targets = tf.ones_like(fake_output)
        gen_loss = generator_loss(real_targets, fake_output)

    # gradient calculation for generator for real labels
    gradients_of_gen = gen_tape.gradient(gen_loss, conditional_gen.trainable_variables)
    # parameters optimization for generator for real labels
    generator_optimizer.apply_gradients(zip(gradients_of_gen,
                                            conditional_gen.trainable_variables))
def train(dataset, epochs):
    for epoch in range(epochs):
        start = time.time()
        i = 0
        D_loss_list, G_loss_list = [], []
        for image_batch, target in dataset:
            i += 1
            img = tf.cast(image_batch, tf.float32)
            imgs = normalization(img)
            train_step(imgs, target)
        print(epoch)
        display.clear_output(wait=True)
        generate_and_save_images(conditional_gen, epoch + 1, seed)

        # # Save the model every 15 epochs
        # if (epoch + 1) % 15 == 0:
        #     checkpoint.save(file_prefix = checkpoint_prefix)
        conditional_gen.save_weights('rock-paper-scissors/training_weights/gen_' + str(epoch) + '.h5')
        conditional_discriminator.save_weights('rock-paper-scissors/training_weights/disc_' + str(epoch) + '.h5')

        print('Time for epoch {} is {} sec'.format(epoch + 1, time.time() - start))

    # Generate after the final epoch
    display.clear_output(wait=True)
    generate_and_save_images(conditional_gen, epochs, seed)
def label_gen(n_classes):
    # sample a single random class id and repeat it 25 times (one per grid cell)
    lab = tf.random.uniform((1,), minval=0, maxval=n_classes, dtype=tf.dtypes.int32, seed=None, name=None)
    return tf.repeat(lab, [25], axis=None, name=None)
# Create dictionary of target classes
label_dict = {
    0: 'Rock',
    1: 'Paper',
    2: 'Scissors'
}
def generate_and_save_images(model, epoch, test_input):
    # Notice `training` is set to False.
    # This is so all layers run in inference mode (batchnorm).
    labels = label_gen(n_classes=3)
    predictions = model([test_input, labels], training=False)
    print(predictions.shape)
    fig = plt.figure(figsize=(8, 8))
    print("Generated Images are Conditioned on Label:", label_dict[np.array(labels)[0]])
    for i in range(predictions.shape[0]):
        plt.subplot(5, 5, i + 1)
        pred = (predictions[i, :, :, :] + 1) * 127.5
        pred = np.array(pred)
        plt.imshow(pred.astype(np.uint8))
        plt.axis('off')
    plt.savefig('rock-paper-scissors/images/image_at_epoch_{:d}.png'.format(epoch))
    plt.show()
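The training loop and the image helper above save files under rock-paper-scissors/training_weights and rock-paper-scissors/images. A small precaution (assuming a local working directory; adjust the paths if yours differ) is to create those folders before calling train so the save calls do not fail:

import os
# Create the output folders expected by the save_weights/savefig calls above
os.makedirs('rock-paper-scissors/training_weights', exist_ok=True)
os.makedirs('rock-paper-scissors/images', exist_ok=True)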
train(ds, 100)
(128, 100) (128,)
[Output truncated: a KeyboardInterrupt traceback shows training was stopped manually inside train_step; the per-epoch weights saved by train() are loaded below for inference.]
conditional_gen.load_weights('rock-paper-scissors/training_weights/gen_99.h5')
def generate_images(model, test_input):
    # Notice `training` is set to False.
    # This is so all layers run in inference mode (batchnorm).
    output = None
    for label in range(3):
        labels = tf.ones(10) * label
        predictions = model([test_input, labels], training=False)
        if output is None:
            output = predictions
        else:
            output = np.concatenate((output, predictions))

    nrow = 3
    ncol = 10
    fig = plt.figure(figsize=(25, 25))
    gs = gridspec.GridSpec(nrow, ncol,
                           width_ratios=[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                           wspace=0.0, hspace=0.0,
                           top=0.2, bottom=0.00, left=0.17, right=0.845)
    k = 0
    for i in range(nrow):
        for j in range(ncol):
            pred = (output[k, :, :, :] + 1) * 127.5
            pred = np.array(pred)
            ax = plt.subplot(gs[i, j])
            ax.imshow(pred.astype(np.uint8))
            ax.set_xticklabels([])
            ax.set_yticklabels([])
            ax.axis('off')
            k += 1
    plt.savefig('result.png', dpi=300)
    plt.show()
num_examples_to_generate = 10
latent_dim = 100
noise = tf.random.normal([num_examples_to_generate, latent_dim])
generate_images(conditional_gen, noise)
Image in a Jupyter notebook