@COMMENT This file was generated by bib2html.pl <https://sourceforge.net/projects/bib2html/> version 0.94
@COMMENT written by Patrick Riley <http://sourceforge.net/users/patstg/>
@COMMENT This file came from UT Austin Villa's publication pages at
@COMMENT http://www.cs.utexas.edu/~sbarrett/publications/?p=papers
@inproceedings{ICLR16-hausknecht,
  author    = {Hausknecht, Matthew and Stone, Peter},
  title     = {Deep Reinforcement Learning in Parameterized Action Space},
  booktitle = {Proceedings of the International Conference on Learning Representations ({ICLR})},
  venue     = {San Juan, Puerto Rico},
  month     = may,
  year      = {2016},
  abstract  = {
Recent work has shown that deep neural networks are capable of
approximating both value functions and policies in reinforcement
learning domains featuring continuous state and action
spaces. However, to the best of our knowledge no previous work has
succeeded at using deep neural networks in structured (parameterized)
continuous action spaces. To fill this gap, this paper focuses on
learning within the domain of simulated RoboCup soccer, which features
a small set of discrete action types, each of which is parameterized
with continuous variables. The best learned agents can score goals
more reliably than the 2012 RoboCup champion agent. As such, this
paper represents a successful extension of deep reinforcement learning
to the class of parameterized action space MDPs.
  },
}
