@inproceedings{icml2020_3761,
  abstract  = {Typical architectures of Generative Adversarial Networks make use of a unimodal latent/input distribution transformed by a continuous generator. Consequently, the modeled distribution always has connected support which is cumbersome when learning a disconnected set of manifolds. We formalize this problem by establishing a ``no free lunch'' theorem for the disconnected manifold learning stating an upper-bound on the precision of the targeted distribution. This is done by building on the necessary existence of a low-quality region where the generator continuously samples data between two disconnected modes. Finally, we derive a rejection sampling method based on the norm of generator's Jacobian and show its efficiency on several generators including BigGAN.},
  author    = {Tanielian, Ugo and Issenhuth, Thibaut and Dohmatob, Elvis and Mary, Jeremie},
  booktitle = {Proceedings of Machine Learning and Systems 2020},
  pages     = {6767--6776},
  title     = {Learning Disconnected Manifolds: A No {GAN}'s Land},
  year      = {2020},
}