@article{HelfmannRiberaBorrellSchuetteetal.2020,
  author   = {Helfmann, Luzie and Ribera Borrell, Enric and Sch{\"u}tte, Christof and Koltai, Peter},
  title    = {Extending Transition Path Theory: Periodically Driven and Finite-Time Dynamics},
  journal  = {Journal of Nonlinear Science},
  volume   = {30},
  pages    = {3321--3366},
  year     = {2020},
  doi      = {10.1007/s00332-020-09652-7},
  language = {en}
}

@misc{RiberaBorrell2019,
  type     = {Master's Thesis},
  author   = {Ribera Borrell, Enric},
  title    = {From ergodic infinite-time to finite-time transition path theory},
  pages    = {62},
  year     = {2019},
  language = {en}
}

@inproceedings{RiberaBorrellRichterSchuette2025,
  author    = {Ribera Borrell, Enric and Richter, Lorenz and Sch{\"u}tte, Christof},
  title     = {Reinforcement Learning with Random Time Horizons},
  booktitle = {Proceedings of the 42nd International Conference on Machine Learning},
  volume    = {267},
  pages     = {5101--5123},
  year      = {2025},
  arxiv     = {http://arxiv.org/abs/2506.00962},
  language  = {en}
}

@misc{RiberaBorrellQuerRichteretal.2021,
  author   = {Ribera Borrell, Enric and Quer, Jannes and Richter, Lorenz and Sch{\"u}tte, Christof},
  title    = {Improving control based importance sampling strategies for metastable diffusions via adapted metadynamics},
  issn     = {1438-0064},
  year     = {2021},
  abstract = {Sampling rare events in metastable dynamical systems is often a computationally expensive task, and one needs to resort to enhanced sampling methods such as importance sampling. Since the problem of finding optimal importance sampling controls can be formulated as a stochastic optimization problem, additional numerical challenges arise, and the convergence of the corresponding algorithms may itself suffer from metastability. In this article we address this issue by combining systematic control approaches with the heuristic adaptive metadynamics method. Crucially, we approximate the importance sampling control by a neural network, which makes the algorithm in principle feasible for high-dimensional applications. We demonstrate numerically on relevant metastable problems that our algorithm is more effective than previous attempts and that only the combination of the two approaches leads to satisfying convergence, and therefore to efficient sampling, in certain metastable settings.},
  language = {en}
}

@article{RiberaBorrellQuerRichteretal.2023,
  author   = {Ribera Borrell, Enric and Quer, Jannes and Richter, Lorenz and Sch{\"u}tte, Christof},
  title    = {Improving control based importance sampling strategies for metastable diffusions via adapted metadynamics},
  journal  = {SIAM Journal on Scientific Computing (SISC)},
  volume   = {89},
  number   = {1},
  year     = {2023},
  doi      = {10.1137/22M1503464},
  abstract = {Sampling rare events in metastable dynamical systems is often a computationally expensive task, and one needs to resort to enhanced sampling methods such as importance sampling. Since the problem of finding optimal importance sampling controls can be formulated as a stochastic optimization problem, additional numerical challenges arise, and the convergence of the corresponding algorithms may itself suffer from metastability. In this article we address this issue by combining systematic control approaches with the heuristic adaptive metadynamics method. Crucially, we approximate the importance sampling control by a neural network, which makes the algorithm in principle feasible for high-dimensional applications. We demonstrate numerically on relevant metastable problems that our algorithm is more effective than previous attempts and that only the combination of the two approaches leads to satisfying convergence, and therefore to efficient sampling, in certain metastable settings.},
  language = {en}
}

@article{SikorskiRiberaBorrellWeber2024,
  author   = {Sikorski, Alexander and Ribera Borrell, Enric and Weber, Marcus},
  title    = {Learning Koopman eigenfunctions of stochastic diffusions with optimal importance sampling and ISOKANN},
  journal  = {Journal of Mathematical Physics},
  volume   = {65},
  pages    = {013502},
  year     = {2024},
  doi      = {10.1063/5.0140764},
  arxiv    = {http://arxiv.org/abs/2301.00065},
  abstract = {The dominant eigenfunctions of the Koopman operator characterize the metastabilities and slow-timescale dynamics of stochastic diffusion processes. In the context of molecular dynamics and Markov state modeling, they allow for a description of the location and frequencies of rare transitions, which are hard to obtain by direct simulation alone. In this article, we reformulate the eigenproblem in terms of the ISOKANN framework, an iterative algorithm that learns the eigenfunctions by alternating between short burst simulations and a mixture of machine learning and classical numerics, which naturally leads to a proof of convergence. We furthermore show how the intermediate iterates can be used to reduce the sampling variance by importance sampling and optimal control (enhanced sampling), as well as to select locations for further training (adaptive sampling). We demonstrate the use of our proposed method in experiments, increasing the approximation accuracy by several orders of magnitude.},
  language = {en}
}

@article{QuerRiberaBorrell2024,
  author   = {Quer, Jannes and Ribera Borrell, Enric},
  title    = {Connecting Stochastic Optimal Control and Reinforcement Learning},
  journal  = {Journal of Mathematical Physics},
  volume   = {65},
  number   = {8},
  year     = {2024},
  doi      = {10.1063/5.0140665},
  abstract = {In this paper the connection between stochastic optimal control and reinforcement learning is investigated. Our main motivation is to apply importance sampling to the sampling of rare events, which can be reformulated as an optimal control problem. By using a parameterised approach, the optimal control problem becomes a stochastic optimization problem, which still raises open questions regarding how to tackle scalability to high-dimensional problems and how to deal with the intrinsic metastability of the system. To explore new methods, we link the optimal control problem to reinforcement learning, since both share the same underlying framework, namely a Markov Decision Process (MDP). We show how the MDP can be formulated for the optimal control problem. In addition, we discuss how the stochastic optimal control problem can be interpreted in the framework of reinforcement learning. At the end of the article we present the application of two different reinforcement learning algorithms to the optimal control problem and compare the advantages and disadvantages of the two algorithms.},
  language = {en}
}