@inproceedings{icml2020_652,
  abstract  = {Convolutional neural networks are among the most successful architectures in deep learning. This success is at least partially attributable to the efficacy of spatial invariance as an inductive bias. Locally connected layers, which differ from convolutional layers only in their lack of spatial invariance, usually perform poorly in practice. However, these observations still leave open the possibility that some degree of relaxation of spatial invariance may yield a better inductive bias than either convolution or local connectivity. To test this hypothesis, we design a method to relax the spatial invariance of a network layer in a controlled manner. In particular, we create a \textit{low-rank} locally connected layer, where the kernel applied at each position is constructed as a linear combination of basis kernels with spatially varying combining weights. By varying the number of basis kernels, we can control the degree of relaxation of spatial invariance. In our experiments, we find that relaxing spatial invariance improves classification accuracy over both convolution and locally connected layers across {MNIST}, {CIFAR}-10, and {CelebA} datasets. These results suggest that spatial invariance may be an overly restrictive prior.},
  author    = {Elsayed, Gamaleldin and Ramachandran, Prajit and Shlens, Jon and Kornblith, Simon},
  booktitle = {Proceedings of Machine Learning and Systems 2020},
  pages     = {1011--1022},
  title     = {Revisiting Spatial Invariance with Low-Rank Local Connectivity},
  year      = {2020}
}