@inproceedings{36a645ba90a447c78981dc44c028dd37,
title = "Space objects maneuvering prediction via maximum causal entropy inverse reinforcement learning",
abstract = "Inverse Reinforcement Learning (RL) can be used to determine the behavior of Space Objects (SOs) by estimating the reward function that an SO is using for control. The approach discussed in this work can be used to analyze maneuvering of SOs from observational data. The inverse RL problem is solved using maximum causal entropy. This approach determines the optimal reward function that a SO is using while maneuvering with random disturbances by assuming that the observed trajectories are optimal with respect to the SO{\textquoteright}s own reward function. Lastly, this paper develops results for scenarios involving Low Earth Orbit (LEO) station-keeping and Geostationary Orbit (GEO) station-keeping.",
author = "Bryce Doerr and Richard Linares and Roberto Furfaro",
note = "Funding Information: This research was supported by an appointment to the Intelligence Community Postdoctoral Research Fellowship Program at Massachusetts Institute of Technology, administered by Oak Ridge Institute for Science and Education through an interagency agreement between the U.S. Department of Energy and the Office of the Director of National Intelligence. Publisher Copyright: {\textcopyright} 2020, American Institute of Aeronautics and Astronautics Inc, AIAA. All rights reserved.; AIAA Scitech Forum, 2020 ; Conference date: 06-01-2020 Through 10-01-2020",
year = "2020",
doi = "10.2514/6.2020-0235",
language = "English (US)",
isbn = "9781624105951",
series = "AIAA Scitech 2020 Forum",
publisher = "American Institute of Aeronautics and Astronautics Inc, AIAA",
booktitle = "AIAA Scitech 2020 Forum",
}