@misc{gcvae_tall,
  author        = {Ezukwoke, Kenneth and Hoayek, Anis and Batton-Hubert, Mireille and Boucher, Xavier},
  title         = {Deep Generative Model. Highlight: Generalized-Controllable Variational AutoEncoder ({GCVAE})},
  howpublished  = {Seminar at GMI/DSI-LIMOS/MAAD-LIMOS, Mines Saint-{\'E}tienne},
  year          = {2022},
  doi           = {10.48550/arXiv.2206.04225},
  url           = {https://arxiv.org/abs/2206.04225},
  eprint        = {2206.04225},
  archiveprefix = {arXiv},
  keywords      = {regular},
}
Variational AutoEncoders (VAEs) have recently been used for unsupervised disentanglement learning of overlapping mixtures of distributions (e.g., Gaussian). Numerous variants exist to encourage disentanglement in hidden (latent) spaces while improving reconstruction quality. However, none have simultaneously managed the trade-off between attaining extremely low reconstruction error and a high disentanglement score. We present a generalized framework to handle this challenge under constrained optimization and demonstrate that it outperforms state-of-the-art existing models as regards disentanglement while balancing reconstruction. We begin by introducing the formalization of variational autoencoders and their closed-form solution, followed by a more stable version, the Generalized-Controllable Variational AutoEncoder (GCVAE).