@article{BachseitzSheryarSchmittetal.2024,
  author    = {Bachseitz, Michael and Sheryar, Muhammad and Schmitt, David and Summ, Thorsten and Trinkl, Christoph and Z{\"o}rner, Wilfried},
  title     = {PV-Optimized Heat Pump Control in Multi-Family Buildings Using a Reinforcement Learning Approach},
  journal   = {Energies},
  volume    = {17},
  number    = {8},
  pages     = {1908},
  publisher = {MDPI},
  address   = {Basel},
  issn      = {1996-1073},
  doi       = {10.3390/en17081908},
  year      = {2024},
  abstract  = {For the energy transition in the residential sector, heat pumps are a core technology for decarbonizing thermal energy production for space heating and domestic hot water. Electricity generation from on-site photovoltaic (PV) systems can also contribute to a carbon-neutral building stock. However, both will increase the stress on the electricity grid. This stress can be reduced by using appropriate control strategies to match electricity consumption and production. In recent years, artificial intelligence-based approaches such as reinforcement learning (RL) have become increasingly popular for energy-system management. However, the literature shows a lack of investigation of RL-based controllers for multi-family building energy systems comprising an air-source heat pump, thermal storage, and a PV system, although this is a common system configuration. Therefore, in this study, a model of such an energy system and RL-based controllers were developed, simulated with physical models, and compared with conventional rule-based approaches. Four RL algorithms were investigated for two objectives, and the soft actor-critic algorithm was ultimately selected for the annual simulations. The first objective, to maintain only the required temperatures in the thermal storage, was achieved by the developed RL agent. However, the second objective, to additionally improve PV self-consumption, was better achieved by the rule-based controller. Therefore, further research on the reward function, hyperparameters, and advanced methods, including long short-term memory layers, as well as training over periods longer than six days, is suggested.},
  language  = {en}
}

@thesis{Sheryar2024,
  author    = {Sheryar, Muhammad},
  title     = {Reinforcement learning for building energy system control in multi-family buildings},
  publisher = {Technische Hochschule Ingolstadt},
  address   = {Ingolstadt},
  url       = {http://nbn-resolving.de/urn:nbn:de:bvb:573-44608},
  pages     = {viii, 42},
  year      = {2024},
  abstract  = {The demand for heat energy is increasing worldwide, and achieving net-zero carbon emission targets requires further innovation in heat production. Heat pumps are considered a potential replacement for boilers and are currently in high demand. The next step is to optimize the operation of heat pumps together with household PV production to avoid grid overloading caused by the increased electricity demand of the heat pump. In this work, a reinforcement learning algorithm from the MATLAB RL Toolbox is used with a building energy model built in MATLAB Simulink using the CARNOT toolbox. The building energy model uses a heat pump to charge thermal storage, and the PV system is considered with a typical ON/OFF strategy. This work shows how the RL Toolbox can interact with this building energy model to optimize heat pump operation together with the PV system. All agents suggested by the MATLAB RL Toolbox are investigated with this building energy model (BEM), and an annual simulation is performed with a well-trained agent that converges during training.
Two different models have been developed for heat pump control. The first, the RL-based Heat Pump Controller, is designed to meet thermal targets only. The second, the PV-optimized RL-based Heat Pump Controller, not only meets thermal targets but also considers the operation of the PV system together with the heat pump. The simulation results show that the RL-based Heat Pump Controller performs excellently using the RL Toolbox. The PV-optimized RL-based Heat Pump Controller achieves an increase of approximately 4.37\% in PV self-consumption compared to the typical control strategy, resulting in annual electricity savings of approximately 3.52 MWh. Some challenges of using the RL Toolbox are also highlighted, along with recommendations for future work, mainly concerning the computational effort.},
  language  = {en}
}