@inproceedings{icml2020_535,
  abstract  = {Sorting is an important procedure in computer science. However, the argsort operator - which takes as input a vector and returns its sorting permutation - has a discrete image and thus zero gradients almost everywhere. This prohibits end-to-end, gradient-based learning of models that rely on the argsort operator. A natural way to overcome this problem is to replace the argsort operator with a continuous relaxation. Recent work has shown a number of ways to do this. However, the relaxations proposed so far are computationally complex. In this work we propose a simple continuous relaxation for the argsort operator. Unlike previous works, our relaxation is straight-forward: it can be implemented in three lines of code, achieves state-of-the-art performance, is easy to reason about mathematically - substantially simplifying proofs - and is up to six times faster than competing approaches. We open-source the code to reproduce all of the experiments.},
  author    = {Prillo, Sebastian and Eisenschlos, Julian},
  booktitle = {Proceedings of Machine Learning and Systems 2020},
  pages     = {817--826},
  title     = {{SoftSort}: A Differentiable Continuous Relaxation of the {argsort} Operator},
  year      = {2020},
}