@article{fernandez_determining_2021,
  title = {Determining Structural Properties of Artificial Neural Networks Using Algebraic Topology},
  author = {Fernández, David Pérez and Gutiérrez-Fandiño, Asier and Armengol-Estapé, Jordi and Villegas, Marta},
  date = {2021-01-19},
  journaltitle = {{arXiv}:2101.07752 [cs, math]},
  eprint = {2101.07752},
  eprinttype = {arxiv},
  url = {http://arxiv.org/abs/2101.07752},
  urldate = {2021-01-20},
  keywords = {1 - Machine learning, 1 - Neural network, 2 - Persistence diagrams, 2 - Persistence landscape, 2 - Persistent homology, 3 - Images, 3 - Networks, 3 - Textual document},
  abstract = {Artificial Neural Networks ({ANNs}) are widely used for approximating complex functions. The process usually followed to define the most appropriate architecture for an {ANN} given a specific function is mostly empirical. Once this architecture has been defined, weights are usually optimized according to the error function. On the other hand, we observe that {ANNs} can be represented as graphs, and their topological 'fingerprints' can be obtained using Persistent Homology ({PH}). In this paper, we describe a proposal focused on designing more principled architecture search procedures. To do this, different architectures for solving problems related to a heterogeneous set of datasets have been analyzed. The results of the evaluation corroborate that {PH} effectively characterizes the {ANN} invariants: when {ANN} density (layers and neurons) or sample feeding order is the only difference, {PH} topological invariants appear; conversely, across different sub-problems (i.e., different labels), {PH} varies. This approach based on topological analysis contributes to the goal of designing more principled architecture search procedures and gaining a better understanding of {ANNs}.}
}