@COMMENT This file was generated by bib2html.pl <http://www.cs.cmu.edu/~pfr/misc_software/index.html#bib2html> version 0.90
@COMMENT written by Patrick Riley <http://www.cs.cmu.edu/~pfr>
@COMMENT This file came from Peter Stone's publication pages at
@COMMENT http://www.cs.utexas.edu/~pstone/papers
@article{NatureComm18,
  AUTHOR = {Decebal Constantin Mocanu and Elena Mocanu and Peter Stone and Phuong H.\ Nguyen and Madeleine Gibescu and Antonio Liotta},
  TITLE = {Scalable Training of Artificial Neural Networks with Adaptive Sparse Connectivity Inspired by Network Science},
  JOURNAL={Nature Communications},
  YEAR={2018},
  volume={9},
  number={2383},
  month={June},
  DOI={10.1038/s41467-018-04316-3},
  abstract = {
     Owing to the success of deep learning in various domains, artificial
     neural networks are currently among the most used artificial
     intelligence methods. Taking inspiration from the network
     properties of biological neural networks (e.g. sparsity,
     scale-freeness), we argue that (contrary to general practice)
     artificial neural networks, too, should not have fully-connected
     layers. Here we propose sparse evolutionary training of artificial
     neural networks, an algorithm which evolves an initial sparse
     topology (Erdős-Rényi random graph) of two consecutive layers of
     neurons into a scale-free topology during learning. Our method
     replaces artificial neural networks' fully-connected layers with
     sparse ones before training, quadratically reducing the number of
     parameters with no decrease in accuracy. We demonstrate our claims
     on restricted Boltzmann machines, multi-layer perceptrons, and
     convolutional neural networks for unsupervised and supervised
     learning on 15 datasets. Our approach has the potential to enable
     artificial neural networks to scale up beyond what is currently
     possible.},
  wwwnote={Official version from <a href="https://www.nature.com/articles/s41467-018-04316-3.pdf">Publisher's Webpage</a>.},
}
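@COMMENT The abstract above describes the SET procedure: initialize each layer
@COMMENT with a sparse Erdős-Rényi topology, train for an epoch, prune the
@COMMENT weights closest to zero, and grow an equal number of new random
@COMMENT connections. The NumPy sketch below illustrates that loop; it is not
@COMMENT the authors' reference implementation. The function names, the
@COMMENT magnitude-based pruning rule (a simplification of the paper's rule of
@COMMENT dropping the smallest positive and largest negative weights), and the
@COMMENT train_one_epoch stand-in are all illustrative assumptions.

import numpy as np

def erdos_renyi_mask(n_in, n_out, epsilon=20.0, rng=None):
    # Connection probability epsilon * (n_in + n_out) / (n_in * n_out): the
    # layer keeps roughly epsilon * (n_in + n_out) weights instead of
    # n_in * n_out, the quadratic parameter reduction claimed in the abstract.
    if rng is None:
        rng = np.random.default_rng(0)
    p = epsilon * (n_in + n_out) / (n_in * n_out)
    return rng.random((n_in, n_out)) < p

def evolve_connectivity(weights, mask, zeta=0.3, rng=None):
    # One SET rewiring step: prune the zeta fraction of active weights
    # closest to zero, then grow the same number of new connections at
    # random inactive positions with small random initial values.
    if rng is None:
        rng = np.random.default_rng(0)
    active = np.flatnonzero(mask)
    k = int(zeta * active.size)
    # Prune: the k active weights of smallest magnitude.
    drop = active[np.argsort(np.abs(weights.flat[active]))[:k]]
    mask.flat[drop] = False
    weights.flat[drop] = 0.0
    # Grow: k new connections chosen uniformly among inactive positions.
    inactive = np.flatnonzero(~mask)
    grow = rng.choice(inactive, size=k, replace=False)
    mask.flat[grow] = True
    weights.flat[grow] = rng.normal(0.0, 0.01, size=k)
    return weights, mask

# Per-epoch schedule, with train_one_epoch standing in for any
# gradient-based update that only touches masked-in weights:
#   for epoch in range(num_epochs):
#       weights = train_one_epoch(weights * mask)
#       weights, mask = evolve_connectivity(weights, mask)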
