@inproceedings{34442,
  keywords = {Control systems, Smart buildings, Distributed Energy Resources (DER), Reinforcement learning},
  author = {Anand Prakash and Samir Touzani and Mariam Kiran and Shreya Agarwal and Marco Pritoni and Jessica Granderson},
  title = {Deep Reinforcement Learning in Buildings: Implicit Assumptions and their Impact},
  abstract = {
As deep reinforcement learning (DRL) continues to gain interest in the smart building research community, there is a transition from simulation-based evaluations to deploying DRL control strategies in actual buildings. While the efficacy of a solution may depend on its particular implementation, there are common obstacles that developers must overcome to deliver an effective controller. Moreover, deployment in a physical building can invalidate some of the assumptions made during controller development: assumptions about sensor placement or equipment behavior can quickly come undone. This paper presents some of the significant assumptions made during the development of DRL-based controllers that could affect their operation in a physical building. Furthermore, a preliminary evaluation revealed that controllers developed under some of these assumptions can incur twice the expected costs when deployed in a building.
  },
  year = {2020},
  booktitle = {RLEM'20: Proceedings of the 1st International Workshop on Reinforcement Learning for Energy Management in Buildings \& Cities},
  month = {11},
  url = {https://dl.acm.org/doi/10.1145/3427773.3427868},
  doi = {10.1145/3427773.3427868},
  language = {eng},
}