@COMMENT This file was generated by bib2html.pl <http://www.cs.cmu.edu/~pfr/misc_software/index.html#bib2html> version 0.90
@COMMENT written by Patrick Riley <http://www.cs.cmu.edu/~pfr>
@COMMENT This file came from Peter Stone's publication pages at
@COMMENT http://www.cs.utexas.edu/~pstone/papers
@InProceedings{brad_knox_AAAI2024,
  author   = {W. Bradley Knox and Stephane Hatgis-Kessell and Sigurdur Orn Adalgeirsson and Serena Booth and Anca Dragan and Peter Stone and Scott Niekum},
  title    = {Learning Optimal Advantage from Preferences and Mistaking it for Reward},
  booktitle = {The 38th Annual AAAI Conference on Artificial Intelligence (AAAI)},
  year     = {2024},
  month    = {February},
  location = {Vancouver, Canada},
  abstract = {We consider algorithms for learning reward functions from human preferences over
pairs of trajectory segments, as used in reinforcement learning from human
feedback (RLHF). Most recent work assumes that human preferences are generated
based only upon the reward accrued within those segments, or their partial
return. Recent work casts doubt on the validity of this assumption, proposing an
alternative preference model based upon regret. We investigate the consequences
of assuming preferences are based upon partial return when they actually arise
from regret. We argue that the learned function is an approximation of the
optimal advantage function, not a reward function. We find that if a specific
pitfall is addressed, this incorrect assumption is not particularly harmful,
resulting in a highly shaped reward function. Nonetheless, this incorrect usage
of the approximation of the optimal advantage function is less desirable than the
appropriate and simpler approach of greedy maximization of it. From the
perspective of the regret preference model, we also provide a clearer
interpretation of fine-tuning contemporary large language models with RLHF. This
paper overall provides insight regarding why learning under the partial return
preference model tends to work so well in practice, despite it conforming poorly
to how humans give preferences.
  },
}
