Path: blob/master/deprecated/notebooks/vae_celeba_lightning.ipynb
Kernel: Python 3
VAE on the CelebA dataset with PyTorch Lightning
In [1]:
Install lightning
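A minimal sketch of the install and import cells; the exact package list and versions are assumptions:

```python
# Sketch of the setup cells (exact packages and versions are assumptions).
!pip install --quiet pytorch-lightning kaggle

import torch
import pytorch_lightning as pl
```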
In [2]:
In [3]:
In [4]:
Get CelebA data
Get API key from Kaggle
Follow these instructions to get a kaggle.json key file, then upload it to Colab.
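A minimal sketch of the upload step, assuming the standard Colab + Kaggle CLI workflow:

```python
# Sketch: upload kaggle.json in Colab and move it where the Kaggle CLI expects it.
import os
from google.colab import files

files.upload()  # prompts for kaggle.json; prints "Saving kaggle.json to kaggle.json"

os.makedirs(os.path.expanduser("~/.kaggle"), exist_ok=True)
os.replace("kaggle.json", os.path.expanduser("~/.kaggle/kaggle.json"))
os.chmod(os.path.expanduser("~/.kaggle/kaggle.json"), 0o600)  # the CLI warns if the key is world-readable
```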
In [5]:
Out[5]:
Saving kaggle.json to kaggle.json
In [6]:
PyTorch Dataset and Lightning DataModule
This replaces torchvision.datasets.CelebA by downloading from Kaggle instead of Google Drive.
The code is adapted from https://github.com/sayantanauddy/vae_lightning/blob/main/data.py
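A condensed sketch of the Dataset/DataModule pattern from that file; the Kaggle dataset slug, transforms, and directory layout are assumptions, so see the linked data.py for the real version:

```python
# Condensed sketch of a Kaggle-backed CelebA Dataset + LightningDataModule.
# Dataset slug, transforms, and paths are assumptions; see the linked data.py.
import os
from glob import glob

import pytorch_lightning as pl
from PIL import Image
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms


class CelebADataset(Dataset):
    """Reads the aligned CelebA jpgs from disk and applies a transform."""

    def __init__(self, img_dir, transform):
        self.files = sorted(glob(os.path.join(img_dir, "*.jpg")))
        self.transform = transform

    def __len__(self):
        return len(self.files)

    def __getitem__(self, idx):
        return self.transform(Image.open(self.files[idx]).convert("RGB"))


class CelebADataModule(pl.LightningDataModule):
    def __init__(self, data_dir="kaggle", batch_size=144, img_size=64):
        super().__init__()
        self.data_dir = data_dir
        self.batch_size = batch_size
        self.transform = transforms.Compose([
            transforms.CenterCrop(148),   # crop size is a common choice, not confirmed here
            transforms.Resize(img_size),
            transforms.ToTensor(),
        ])

    def prepare_data(self):
        # Download and unzip the Kaggle mirror of CelebA if it is not already present.
        if not os.path.isdir(os.path.join(self.data_dir, "img_align_celeba")):
            print("Downloading dataset. Please wait while the download and extraction processes complete")
            os.system(
                f"kaggle datasets download -d jessicali9530/celeba-dataset -p {self.data_dir} --unzip")

    def setup(self, stage=None):
        img_dir = os.path.join(self.data_dir, "img_align_celeba", "img_align_celeba")
        self.train_set = CelebADataset(img_dir, self.transform)

    def train_dataloader(self):
        return DataLoader(self.train_set, batch_size=self.batch_size,
                          shuffle=True, num_workers=2)
```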
In [7]:
In [ ]:
Downloading dataset. Please wait while the download and extraction processes complete
Downloading celeba-dataset.zip to kaggle
100%|██████████| 1.33G/1.33G [00:28<00:00, 49.6MB/s]
In [ ]:
In [ ]:
VAE
In [ ]:
VAE(
(encoder): Sequential(
(0): Sequential(
(0): Conv2d(3, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
(1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
(1): Sequential(
(0): Conv2d(32, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
(1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
(2): Sequential(
(0): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
(1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
(3): Sequential(
(0): Conv2d(128, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
(1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
(4): Sequential(
(0): Conv2d(256, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
(1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
)
(fc_mu): Linear(in_features=2048, out_features=256, bias=True)
(fc_var): Linear(in_features=2048, out_features=256, bias=True)
(decoder_input): Linear(in_features=256, out_features=2048, bias=True)
(decoder): Sequential(
(0): Sequential(
(0): ConvTranspose2d(512, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), output_padding=(1, 1))
(1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
(1): Sequential(
(0): ConvTranspose2d(256, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), output_padding=(1, 1))
(1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
(2): Sequential(
(0): ConvTranspose2d(128, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), output_padding=(1, 1))
(1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
(3): Sequential(
(0): ConvTranspose2d(64, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), output_padding=(1, 1))
(1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
)
(final_layer): Sequential(
(0): ConvTranspose2d(32, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), output_padding=(1, 1))
(1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
(3): Conv2d(32, 3, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(4): Sigmoid()
)
)
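The printed summary above pins down the architecture: 64x64 inputs, five stride-2 conv blocks up to 512 channels (512 x 2 x 2 = 2048 features), and a 256-dimensional latent space. Below is a sketch reconstructed from that summary; the KL weighting, reconstruction loss, and optimizer settings are assumptions.

```python
# VAE sketch reconstructed from the module summary printed above.
# kld_weight, the MSE reconstruction loss, and the Adam settings are assumptions.
import torch
import torch.nn as nn
import torch.nn.functional as F
import pytorch_lightning as pl


class VAE(pl.LightningModule):
    def __init__(self, in_channels=3, latent_dim=256,
                 hidden_dims=(32, 64, 128, 256, 512), kld_weight=1e-4):
        super().__init__()
        self.hidden_dims = list(hidden_dims)
        self.kld_weight = kld_weight

        # Encoder: five stride-2 conv blocks take 3x64x64 down to 512x2x2 (= 2048 features).
        modules, c_in = [], in_channels
        for h in self.hidden_dims:
            modules.append(nn.Sequential(
                nn.Conv2d(c_in, h, kernel_size=3, stride=2, padding=1),
                nn.BatchNorm2d(h), nn.LeakyReLU()))
            c_in = h
        self.encoder = nn.Sequential(*modules)
        self.fc_mu = nn.Linear(self.hidden_dims[-1] * 4, latent_dim)
        self.fc_var = nn.Linear(self.hidden_dims[-1] * 4, latent_dim)

        # Decoder mirrors the encoder with transposed convolutions.
        self.decoder_input = nn.Linear(latent_dim, self.hidden_dims[-1] * 4)
        rev = self.hidden_dims[::-1]
        modules = []
        for i in range(len(rev) - 1):
            modules.append(nn.Sequential(
                nn.ConvTranspose2d(rev[i], rev[i + 1], kernel_size=3, stride=2,
                                   padding=1, output_padding=1),
                nn.BatchNorm2d(rev[i + 1]), nn.LeakyReLU()))
        self.decoder = nn.Sequential(*modules)
        self.final_layer = nn.Sequential(
            nn.ConvTranspose2d(rev[-1], rev[-1], kernel_size=3, stride=2,
                               padding=1, output_padding=1),
            nn.BatchNorm2d(rev[-1]), nn.LeakyReLU(),
            nn.Conv2d(rev[-1], in_channels, kernel_size=3, padding=1),
            nn.Sigmoid())

    def encode(self, x):
        h = torch.flatten(self.encoder(x), start_dim=1)
        return self.fc_mu(h), self.fc_var(h)

    def reparameterize(self, mu, logvar):
        return mu + torch.randn_like(mu) * torch.exp(0.5 * logvar)

    def decode(self, z):
        h = self.decoder_input(z).view(-1, self.hidden_dims[-1], 2, 2)
        return self.final_layer(self.decoder(h))

    def forward(self, x):
        mu, logvar = self.encode(x)
        return self.decode(self.reparameterize(mu, logvar)), mu, logvar

    def training_step(self, batch, batch_idx):
        recon, mu, logvar = self(batch)
        recon_loss = F.mse_loss(recon, batch)
        kld = torch.mean(-0.5 * torch.sum(1 + logvar - mu ** 2 - logvar.exp(), dim=1))
        loss = recon_loss + self.kld_weight * kld
        self.log("train_loss", loss)
        return loss

    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=1e-3)
```

Training then presumably follows the usual Lightning pattern, e.g. `pl.Trainer(gpus=1, max_epochs=5).fit(VAE(), CelebADataModule())`; the epoch count and Trainer flags are guesses.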
Reconstruction
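A sketch of the reconstruction figure, assuming the trained `vae` and the `dm` datamodule from the cells above:

```python
# Sketch: show a few training images next to their reconstructions.
# Assumes a trained `vae` and the `dm` datamodule from above, on CPU.
import matplotlib.pyplot as plt
import torch
import torchvision

vae.eval()
x = next(iter(dm.train_dataloader()))[:8]
with torch.no_grad():
    recon, _, _ = vae(x)

grid = torchvision.utils.make_grid(torch.cat([x, recon]), nrow=8)
plt.figure(figsize=(12, 4))
plt.axis("off")
plt.imshow(grid.permute(1, 2, 0).numpy())
```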
In [ ]:
Random Sample From TN[0,1]
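A sketch of sampling latent codes from a truncated standard normal and decoding them; the use of scipy.stats.truncnorm and the truncation range are assumptions:

```python
# Sketch: decode latent codes drawn from a truncated N(0, 1).
# scipy.stats.truncnorm and the [-2, 2] truncation range are assumptions.
import matplotlib.pyplot as plt
import torch
import torchvision
from scipy.stats import truncnorm

z = torch.tensor(truncnorm.rvs(-2, 2, size=(64, 256)), dtype=torch.float32)
with torch.no_grad():
    samples = vae.decode(z)

grid = torchvision.utils.make_grid(samples, nrow=8)
plt.axis("off")
plt.imshow(grid.permute(1, 2, 0).numpy())
```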
In [ ]:
In [ ]:
<matplotlib.image.AxesImage at 0x7f7d2558df50>
Vector Arithmetic
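A sketch of attribute-direction arithmetic in latent space; the CSV path, the "Smiling" attribute, and the assumption that CSV rows line up with dataset indices are all illustrative:

```python
# Sketch: compute a "Smiling" direction in latent space and add it to a face.
# CSV path, attribute choice, and index alignment with the dataset are assumptions.
import matplotlib.pyplot as plt
import pandas as pd
import torch

attrs = pd.read_csv("kaggle/list_attr_celeba.csv")  # +1 / -1 attribute labels


def mean_latent(indices, n=128):
    """Encode up to n images selected by dataset index and average their latent means."""
    xs = torch.stack([dm.train_set[i] for i in indices[:n]])
    with torch.no_grad():
        mu, _ = vae.encode(xs)
    return mu.mean(dim=0)


smiling = attrs.index[attrs["Smiling"] == 1].tolist()
neutral = attrs.index[attrs["Smiling"] == -1].tolist()
smile_direction = mean_latent(smiling) - mean_latent(neutral)

# Move one encoded face along the smiling direction and decode the result.
x = dm.train_set[0].unsqueeze(0)
with torch.no_grad():
    mu, _ = vae.encode(x)
    edited = vae.decode(mu + 1.5 * smile_direction)

plt.axis("off")
plt.imshow(edited[0].permute(1, 2, 0).numpy())
```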
In [ ]:
In [ ]:
In [ ]:
Interpolation
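A sketch of interpolating between the latent codes of two faces; plain linear interpolation is assumed:

```python
# Sketch: decode 8 points on the line between the latent codes of two faces.
# Linear (rather than spherical) interpolation is an assumption.
import matplotlib.pyplot as plt
import torch
import torchvision

x1 = dm.train_set[0].unsqueeze(0)
x2 = dm.train_set[1].unsqueeze(0)
with torch.no_grad():
    z1, _ = vae.encode(x1)
    z2, _ = vae.encode(x2)
    steps = torch.linspace(0, 1, 8).view(-1, 1)
    frames = vae.decode((1 - steps) * z1 + steps * z2)

grid = torchvision.utils.make_grid(frames, nrow=8)
plt.axis("off")
plt.imshow(grid.permute(1, 2, 0).numpy())
```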
In [ ]:
<matplotlib.image.AxesImage at 0x7f7d24ad2810>
2D Color embedding of latent space
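The Numba warning below hints that umap-learn was used for the embedding; a sketch under that assumption, coloring points by one attribute:

```python
# Sketch: embed latent means into 2D with UMAP and color by a CelebA attribute.
# The use of UMAP (suggested by the Numba warning below), the subset size, and
# the "Male" coloring attribute are assumptions.
import matplotlib.pyplot as plt
import pandas as pd
import torch
import umap

n = 1000
mus = []
with torch.no_grad():
    for i in range(0, n, 100):                      # encode in small batches
        xs = torch.stack([dm.train_set[j] for j in range(i, i + 100)])
        mu, _ = vae.encode(xs)
        mus.append(mu)
mus = torch.cat(mus)

emb = umap.UMAP(n_components=2).fit_transform(mus.numpy())
attrs = pd.read_csv("kaggle/list_attr_celeba.csv")
plt.scatter(emb[:, 0], emb[:, 1], c=attrs["Male"].values[:n], cmap="coolwarm", s=3)
plt.title("2D UMAP embedding of CelebA latent means")
```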
In [ ]:
/usr/local/lib/python3.7/dist-packages/numba/np/ufunc/parallel.py:363: NumbaWarning: The TBB threading layer requires TBB version 2019.5 or later i.e., TBB_INTERFACE_VERSION >= 11005. Found TBB_INTERFACE_VERSION = 9107. The TBB threading layer is disabled.
warnings.warn(problem)
In [ ]: