@COMMENT This file was generated by bib2html.pl <http://www.cs.cmu.edu/~pfr/misc_software/index.html#bib2html> version 0.90
@COMMENT written by Patrick Riley <http://www.cs.cmu.edu/~pfr>
@COMMENT This file came from Peter Stone's publication pages at
@COMMENT http://www.cs.utexas.edu/~pstone/papers
@InProceedings{AAAI18-jesse,
  author    = {Thomason, Jesse and Sinapov, Jivko and Mooney, Raymond J. and Stone, Peter},
  title     = {Guiding Exploratory Behaviors for Multi-Modal Grounding of Linguistic Descriptions},
  booktitle = {Proceedings of the 32nd Conference on Artificial Intelligence ({AAAI})},
  location  = {New Orleans, LA},
  month     = feb,
  year      = {2018},
  abstract  = {A major goal of grounded language learning research is to enable
 robots to connect language predicates to a robot's physical interactive
 perception of the world. Coupling object exploratory behaviors such as
 grasping, lifting, and looking with multiple sensory modalities (e.g., audio,
 haptics, and vision) enables a robot to ground non-visual words like
 ``heavy'' as well as visual words like ``red''. A major limitation of
 existing approaches to multi-modal language grounding is that a robot has to
 exhaustively explore training objects with a variety of actions when learning
 a new such language predicate. This paper proposes a method for guiding a
 robot's behavioral exploration policy when learning a novel predicate based
 on known grounded predicates and the novel predicate's linguistic
 relationship to them. We demonstrate our approach on two datasets in which a
 robot explored large sets of objects and was tasked with learning to
 recognize whether novel words applied to those objects.},
}
