@inproceedings{ChaukairSchuetteSunkara,
  author    = {Chaukair, Mustafa and Sch{\"u}tte, Christof and Sunkara, Vikram},
  title     = {On the Activation Space of ReLU Equipped Deep Neural Networks},
  booktitle = {Procedia Computer Science},
  series    = {Procedia Computer Science},
  volume    = {222},
  pages     = {624--635},
  year      = {2023},
  doi       = {10.1016/j.procs.2023.08.200},
  abstract  = {Modern deep neural networks are getting wider and deeper in their architecture design. However, with an increasing number of parameters, the decision mechanisms become more opaque. There is therefore a need to understand the structures arising in the hidden layers of deep neural networks. In this work, we present a new mathematical framework for describing the canonical polyhedral decomposition of the input space, and in addition, we introduce the notions of collapsing and preserving patches, pertinent to understanding the forward map and the activation space they induce. The activation space can be seen as the output of a layer and, in the particular case of ReLU activations, we prove that this output has the structure of a polyhedral complex.},
  language  = {en}
}