@COMMENT This file was generated by bib2html.pl <http://www.cs.cmu.edu/~pfr/misc_software/index.html#bib2html> version 0.90
@COMMENT written by Patrick Riley <http://www.cs.cmu.edu/~pfr>
@COMMENT This file came from Peter Stone's publication pages at
@COMMENT http://www.cs.utexas.edu/~pstone/papers
@InProceedings{AAMAS2018-eladlieb,
  author    = {Liebman, Elad and Zavesky, Eric and Stone, Peter},
  title     = {{A} {Stitch} in {Time} -- {Autonomous} {Model} {Management} via {Reinforcement} {Learning}},
  booktitle = {Proceedings of the 17th International Conference on Autonomous Agents and Multiagent Systems ({AAMAS})},
  location  = {Stockholm, Sweden},
  month     = jul,
  year      = {2018},
  abstract  = {
              Concept drift - a change, either sudden or gradual, in
              the underlying properties of data - is one of the most
              prevalent challenges to maintaining high-performing
              learned models over time in autonomous systems.  In the
              face of concept drift, one can hope that the old model
              is sufficiently representative of the new data despite
              the concept drift, one can discard the old data and
              retrain a new model with (often limited) new data, or
              one can use transfer learning methods to combine the old
              data with the new to create an updated model.  Which of
              these three options is chosen affects not only near-term
              decisions, but also future needs to transfer or retrain.
              In this paper, we thus model response to concept drift
              as a sequential decision making problem and formally
              frame it as a Markov Decision Process.  Our
              reinforcement learning approach to the problem shows
              promising results on one synthetic and two real-world
              datasets.
  },
}
