@COMMENT This file was generated by bib2html.pl version 0.90
@COMMENT written by Patrick Riley
@COMMENT This file came from Peter Stone's publication pages at
@COMMENT http://www.cs.utexas.edu/~pstone/papers
@InProceedings{AAAISSS17-Zhang,
  author    = {Zhang, Shiqi and Sinapov, Jivko and Wei, Suhua and Stone, Peter},
  title     = {Robot Behavioral Exploration and Multimodal Perception using {POMDPs}},
  booktitle = {Proceedings of 2017 {AAAI} Spring Symposium on Interactive Multi-Sensory Perception for Embodied Agents},
  location  = {Stanford, CA},
  month     = mar,
  year      = {2017},
  abstract  = {
Service robots are increasingly present in everyday environments, such as
homes, offices, airports and hospitals. A common task for such robots
involves retrieving an object for a user. Consider the request, ``Robot,
please fetch me the red empty bottle.'' A key problem for the robot
consists of deciding whether a particular candidate object matches the
properties in the query. For certain words (e.g., heavy, soft, etc.)
visual classification of the object is insufficient as the robot would
need to perform an action (e.g., lift the object) to determine whether it
is empty or not. Furthermore, the robot would need to decide which
actions (possibly out of many) to perform on an object, i.e., it would
need to generate a behavioral policy for a given request.
Although multimodal perception and POMDP-based object exploration have been
studied previously, to the best of our knowledge, there is no research that
integrates both in robotics. In this work, given queries about object
properties, we dynamically construct POMDPs using a data set collected from
a real robot. Experiments on exploring new objects show that our
POMDP-based object exploration strategy significantly reduces the overall
cost of exploration actions without hurting accuracy, compared to a
baseline strategy that uses a predefined sequence of actions.
},
  wwwnote   = {Accompanying videos at https://youtu.be/jLHzRXPCi_w},
}