@COMMENT This file was generated by bib2html.pl <http://www.cs.cmu.edu/~pfr/misc_software/index.html#bib2html> version 0.90
@COMMENT written by Patrick Riley <http://www.cs.cmu.edu/~pfr>
@COMMENT This file came from Peter Stone's publication pages at
@COMMENT http://www.cs.utexas.edu/~pstone/papers
@InProceedings{ziping_xu_ICLR2024,
  author    = {Xu, Ziping and Xu, Zifan and Jiang, Runxuan and Stone, Peter and Tewari, Ambuj},
  title     = {Sample Efficient Myopic Exploration Through Multitask Reinforcement Learning with Diverse Tasks},
  booktitle = {International Conference on Learning Representations ({ICLR})},
  year      = {2024},
  month     = may,
  location  = {Vienna, Austria},
  abstract  = {Multitask Reinforcement Learning (MTRL) approaches have gained increasing
attention for its wide applications in many important Reinforcement Learning (RL)
tasks. However, while recent advancements in MTRL theory have focused on the
improved statistical efficiency by assuming a shared structure across tasks,
exploration--a crucial aspect of RL--has been largely overlooked. This paper
addresses this gap by showing that when an agent is trained on a sufficiently
diverse set of tasks, a generic policy-sharing algorithm with myopic exploration
design like epsilon-greedy that are inefficient in general can be
sample-efficient for MTRL. To the best of our knowledge, this is the first
theoretical demonstration of the ``exploration benefits'' of MTRL. It may also shed
light on the enigmatic success of the wide applications of myopic exploration in
practice. To validate the role of diversity, we conduct experiments on synthetic
robotic control environments, where the diverse task set aligns with the task
selection by automatic curriculum learning, which is empirically shown to improve
sample-efficiency.
  },
}
