@COMMENT This file was generated by bib2html.pl version 0.90
@COMMENT written by Patrick Riley
@COMMENT This file came from Peter Stone's publication pages at
@COMMENT http://www.cs.utexas.edu/~pstone/papers
@InProceedings{ICML08-jong,
  author    = {Jong, Nicholas K. and Stone, Peter},
  title     = {Hierarchical Model-Based Reinforcement Learning: {Rmax} + {MAXQ}},
  booktitle = {Proceedings of the Twenty-Fifth International Conference on Machine Learning},
  month     = jul,
  year      = {2008},
  abstract  = {Hierarchical decomposition promises to help scale
    reinforcement learning algorithms naturally to
    real-world problems by exploiting their underlying
    structure. Model-based algorithms, which provided the
    first finite-time convergence guarantees for
    reinforcement learning, may also play an important
    role in coping with the relative scarcity of data in
    large environments. In this paper, we introduce an
    algorithm that fully integrates modern hierarchical
    and model-learning methods in the standard
    reinforcement learning setting. Our algorithm,
    \textsc{R-maxq}, inherits the efficient model-based
    exploration of the \textsc{R-max} algorithm and the
    opportunities for abstraction provided by the MAXQ
    framework. We analyze the sample complexity of our
    algorithm, and our experiments in a standard
    simulation environment illustrate the advantages of
    combining hierarchies and models.},
  wwwnote   = {ICML 2008},
}