@COMMENT This file was generated by bib2html.pl <http://www.cs.cmu.edu/~pfr/misc_software/index.html#bib2html> version 0.90
@COMMENT written by Patrick Riley <http://www.cs.cmu.edu/~pfr>
@COMMENT This file came from Peter Stone's publication pages at
@COMMENT http://www.cs.utexas.edu/~pstone/papers
@InProceedings{liu_zhu_NeurIPS2023,
  author    = {Bo Liu and Yifeng Zhu and Chongkai Gao and Yihao Feng and Qiang Liu and Yuke Zhu and Peter Stone},
  title     = {{LIBERO}: Benchmarking Knowledge Transfer in Lifelong Robot Learning},
  booktitle = {37th Conference on Neural Information Processing Systems ({NeurIPS} 2023) Track on Datasets and Benchmarks},
  year      = {2023},
  month     = dec,
  location  = {New Orleans, United States},
  abstract  = {Lifelong learning offers a promising paradigm of building a generalist agent that
learns and adapts over its lifespan. Unlike traditional lifelong learning
problems in image and text domains, which primarily involve the transfer of
declarative knowledge of entities and concepts, lifelong learning in
decision-making (LLDM) also necessitates the transfer of procedural knowledge,
such as actions and behaviors. To advance research in LLDM, we introduce LIBERO,
a novel benchmark of lifelong learning for robot manipulation. Specifically,
LIBERO highlights five key research topics in LLDM: 1) how to efficiently
transfer declarative knowledge, procedural knowledge, or the mixture of both; 2)
how to design effective policy architectures and 3) effective algorithms for
LLDM; 4) the robustness of a lifelong learner with respect to task ordering; and
5) the effect of model pretraining for LLDM. We develop an extendible procedural
generation pipeline that can in principle generate infinitely many tasks. For
benchmarking purpose, we create four task suites (130 tasks in total) that we use
to investigate the above-mentioned research topics. To support sample-efficient
learning, we provide high-quality human-teleoperated demonstration data for all
tasks. Our extensive experiments present several insightful or even unexpected
discoveries: sequential fine-tuning outperforms existing lifelong learning
methods in forward transfer, no single visual encoder architecture excels at all
types of knowledge transfer, and naive supervised pretraining can hinder agents'
performance in the subsequent LLDM.},
}
