@inproceedings{icml2020_1,
  abstract      = {It has been widely assumed that a neural network cannot be recovered from its outputs, as the network depends on its parameters in a highly nonlinear way. Here, we prove that in fact it is often possible to identify the architecture, weights, and biases of an unknown deep ReLU network by observing only its output. Every ReLU network defines a piecewise linear function, where the boundaries between linear regions correspond to inputs for which some neuron in the network switches between inactive and active ReLU states. By dissecting the set of region boundaries into components associated with particular neurons, we show both theoretically and empirically that it is possible to recover the weights of neurons and their arrangement within the network, up to isomorphism.},
  author        = {Rolnick, David and Kording, Konrad},
  booktitle     = {Proceedings of Machine Learning and Systems 2020},
  pages         = {1--10},
  title         = {Reverse-Engineering Deep {ReLU} Networks},
  year          = {2020},
  internal-note = {NOTE(review): citation key says ICML 2020 but booktitle names MLSys 2020; pages 1--10 look like an auto-export placeholder -- verify venue, volume, and page range against the published version}
}