@inproceedings{icml2020_2264,
  author        = {Plumb, Gregory and Terhorst, Jonathan and Sankararaman, Sriram and Talwalkar, Ameet},
  title         = {Explaining Groups of Points in Low-Dimensional Representations},
  booktitle     = {Proceedings of Machine Learning and Systems 2020},
  pages         = {3818--3827},
  year          = {2020},
  abstract      = {A common workflow in data exploration is to learn a low-dimensional representation of the data, identify groups of points in that representation, and examine the differences between the groups to determine what they represent. We treat this as an interpretable machine learning problem by leveraging the model that learned the low-dimensional representation to help identify the key differences between the groups. To solve this problem, we introduce a new type of explanation, a Global Counterfactual Explanation (GCE), and our algorithm, Transitive Global Translations (TGT), for computing GCEs. TGT identifies the differences between each pair of groups using compressed sensing but constrains those pairwise differences to be consistent among all of the groups. Empirically, we demonstrate that TGT is able to identify explanations that accurately explain the model while being relatively sparse, and that these explanations match real patterns in the data.},
  internal-note = {NOTE(review): citation key says icml2020 but booktitle names MLSys ("Machine Learning and Systems") -- auto-exported entries from this era often carried the wrong venue template. Verify venue/pages against the published proceedings before relying on this entry.},
}