@COMMENT This file was generated by bib2html.pl <http://www.cs.cmu.edu/~pfr/misc_software/index.html#bib2html> version 0.90
@COMMENT written by Patrick Riley <http://www.cs.cmu.edu/~pfr>
@COMMENT This file came from Peter Stone's publication pages at
@COMMENT http://www.cs.utexas.edu/~pstone/papers
@InProceedings{viraj_joshi_rlc2025,
  author    = {Joshi, Viraj and Xu, Zifan and Liu, Bo and Stone, Peter and Zhang, Amy},
  title     = {Benchmarking Massively Parallelized Multi-Task Reinforcement Learning for Robotics Tasks},
  booktitle = {Reinforcement Learning Conference (RLC)},
  year      = {2025},
  month     = aug,
  location  = {Edmonton, Canada},
  abstract  = {Multi-task Reinforcement Learning (MTRL) has emerged as a critical training
paradigm for applying reinforcement learning (RL) to a set of complex real-world
robotic tasks, which demands a generalizable and robust policy. At the same time,
massively parallelized training has gained popularity, not only for significantly
accelerating data collection through GPU-accelerated simulation but also for
enabling diverse data collection across multiple tasks by simulating
heterogeneous scenes in parallel. However, existing MTRL research has largely
been limited to off-policy methods like SAC in the low-parallelization regime.
MTRL could capitalize on the higher asymptotic performance of on-policy
algorithms, whose batches require data from the current policy, and as a result,
take advantage of massive parallelization offered by GPU-accelerated simulation.
To bridge this gap, we introduce a massively parallelized Multi-Task Benchmark
for robotics (MTBench), an open-sourced benchmark featuring a broad distribution
of 50 manipulation tasks and 20 locomotion tasks, implemented using the
GPU-accelerated simulator IsaacGym. MTBench also includes four base RL algorithms
combined with seven state-of-the-art MTRL algorithms and architectures, providing
a unified framework for evaluating their performance. Our extensive experiments
highlight the superior speed of evaluating MTRL approaches using MTBench, while
also uncovering unique challenges that arise from combining massive parallelism
with MTRL. Code is available at https://github.com/Viraj-Joshi/MTBench.
  },
}
