@COMMENT This file was generated by bib2html.pl <http://www.cs.cmu.edu/~pfr/misc_software/index.html#bib2html> version 0.90
@COMMENT written by Patrick Riley <http://www.cs.cmu.edu/~pfr>
@COMMENT This file came from Peter Stone's publication pages at
@COMMENT http://www.cs.utexas.edu/~pstone/papers
@InProceedings{ASIMOV2021-REUTH,
  author    = {Mirsky, Reuth and Stone, Peter},
  title     = {Intelligent Disobedience and {AI} Rebel Agents in Assistive Robotics},
  booktitle = {{ICSR} workshop on Adaptive Social Interaction and {MOVement} for assistive and rehabilitation robotics ({ASIMOV})},
  location  = {Virtual},
  month     = nov,
  year      = {2021},
  abstract  = {
With the increasing integration of service robots into assistive technologies,
 there is a need to reason about the boundaries and scope of these robots'
 autonomy, such as when they should merely react to their environment, when
 they should make proactive decisions, and when they should override commands.
 In most existing research, the definition of a ``good'' assistive robot is
 one that is compliant with respect to the commands it is given. Two recent
 papers challenge this perspective, and describe scenarios where a system
 might choose to rebel against a command or disobey its handler due to a deep
 understanding of the handler's intentions. This paper provides a comparative
 discussion about these two papers and how they together create a more
 comprehensive framework for assistive robots that can override commands.
  },
}
