@inproceedings{RichterBerner, author = {Richter, Lorenz and Berner, Julius}, title = {Robust SDE-Based Variational Formulations for Solving Linear PDEs via Deep Learning}, series = {Proceedings of the 39th International Conference on Machine Learning, PMLR}, volume = {162}, booktitle = {Proceedings of the 39th International Conference on Machine Learning, PMLR}, pages = {18649 -- 18666}, abstract = {The combination of Monte Carlo methods and deep learning has recently led to efficient algorithms for solving partial differential equations (PDEs) in high dimensions. Related learning problems are often stated as variational formulations based on associated stochastic differential equations (SDEs), which allow the minimization of corresponding losses using gradient-based optimization methods. In respective numerical implementations it is therefore crucial to rely on adequate gradient estimators that exhibit low variance in order to reach convergence accurately and swiftly. In this article, we rigorously investigate corresponding numerical aspects that appear in the context of linear Kolmogorov PDEs. In particular, we systematically compare existing deep learning approaches and provide theoretical explanations for their performances. Subsequently, we suggest novel methods that can be shown to be more robust both theoretically and numerically, leading to substantial performance improvements.}, language = {en} } @inproceedings{BernerRichterUllrich, author = {Berner, Julius and Richter, Lorenz and Ullrich, Karen}, title = {An optimal control perspective on diffusion-based generative modeling}, series = {NeurIPS 2022 Workshop on Score-Based Methods}, booktitle = {NeurIPS 2022 Workshop on Score-Based Methods}, abstract = {We establish a connection between stochastic optimal control and generative models based on stochastic differential equations (SDEs) such as recently developed diffusion probabilistic models. In particular, we derive a Hamilton-Jacobi-Bellman equation that governs the evolution of the log-densities of the underlying SDE marginals. This perspective allows us to transfer methods from optimal control theory to generative modeling. First, we show that the evidence lower bound is a direct consequence of the well-known verification theorem from control theory. Further, we develop a novel diffusion-based method for sampling from unnormalized densities -- a problem frequently occurring in statistics and computational sciences.}, language = {en} } @inproceedings{ZinkEkteraiMartinetal., author = {Zink, Christof and Ekterai, Michael and Martin, Dominik and Clemens, William and Maennel, Angela and Mundinger, Konrad and Richter, Lorenz and Crump, Paul and Knigge, Andrea}, title = {Deep-learning-based visual inspection of facets and p-sides for efficient quality control of diode lasers}, series = {High-Power Diode Laser Technology XXI}, volume = {12403}, booktitle = {High-Power Diode Laser Technology XXI}, publisher = {SPIE}, doi = {10.1117/12.2648691}, pages = {94 -- 112}, abstract = {The optical inspection of the surfaces of diode lasers, especially the p-sides and facets, is an essential part of the quality control in the laser fabrication procedure. With reliable, fast, and flexible optical inspection processes, it is possible to identify and eliminate defects, accelerate device selection, reduce production costs, and shorten the cycle time for product development.
Due to a vast range of rapidly changing designs, structures, and coatings, however, it is impossible to realize a practical inspection with conventional software. In this work, we therefore suggest a deep learning based defect detection algorithm that builds on a Faster Regional Convolutional Neural Network (Faster R-CNN) as a core component. While for related, more general object detection problems, the application of such models is straightforward, it turns out that our task exhibits some additional challenges. On the one hand, a sophisticated pre- and postprocessing of the data has to be deployed to make the application of the deep learning model feasible. On the other hand, we find that creating labeled training data is not a trivial task in our scenario, and one has to be extra careful with model evaluation. We can demonstrate in multiple empirical assessments that our algorithm can detect defects in diode lasers accurately and reliably in most cases. We analyze the results of our production-ready pipeline in detail, discuss its limitations and provide some proposals for further improvements.}, language = {en} } @article{WeilandtBehlingGoncalvesetal., author = {Weilandt, Frank and Behling, Robert and Goncalves, Romulo and Madadi, Arash and Richter, Lorenz and Sanona, Tiago and Spengler, Daniel and Welsch, Jona}, title = {Early Crop Classification via Multi-Modal Satellite Data Fusion and Temporal Attention}, series = {Remote Sensing}, volume = {15}, journal = {Remote Sensing}, number = {3}, doi = {10.3390/rs15030799}, pages = {799}, abstract = {In this article, we propose a deep learning-based algorithm for the classification of crop types from Sentinel-1 and Sentinel-2 time series data, which is based on the celebrated transformer architecture. Crucially, we enable our algorithm to do early classification, i.e., predict crop types at arbitrary time points early in the year with a single trained model (progressive intra-season classification). Such early season predictions are of practical relevance, for instance, for yield forecasts or the modeling of agricultural water balances, therefore being important for the public as well as the private sector. Furthermore, we improve the mechanism of combining different data sources for the prediction task, allowing for both optical and radar data as inputs (multi-modal data fusion) without the need for temporal interpolation. We can demonstrate the effectiveness of our approach on an extensive data set from three federal states of Germany, reaching an average F1 score of 0.92 using data of a complete growing season to predict the eight most important crop types and an F1 score above 0.8 when doing early classification at least one month before harvest time. In carefully chosen experiments, we can show that our model generalizes well in time and space.}, language = {en} } @article{NueskenRichter, author = {N{\"u}sken, Nikolas and Richter, Lorenz}, title = {Interpolating between BSDEs and PINNs: deep learning for elliptic and parabolic boundary value problems}, series = {Journal of Machine Learning}, volume = {2}, journal = {Journal of Machine Learning}, number = {1}, doi = {10.4208/jml.220416}, pages = {31 -- 64}, abstract = {Solving high-dimensional partial differential equations is a recurrent challenge in economics, science and engineering. In recent years, a great number of computational approaches have been developed, most of them relying on a combination of Monte Carlo sampling and deep learning based approximation.
For elliptic and parabolic problems, existing methods can broadly be classified into those resting on reformulations in terms of backward stochastic differential equations (BSDEs) and those aiming to minimize a regression-type L2-error (physics-informed neural networks, PINNs). In this paper, we review the literature and suggest a methodology based on the novel diffusion loss that interpolates between BSDEs and PINNs. Our contribution opens the door towards a unified understanding of numerical approaches for high-dimensional PDEs, as well as for implementations that combine the strengths of BSDEs and PINNs. The diffusion loss furthermore bears close similarities to (least squares) temporal difference objectives found in reinforcement learning. We also discuss eigenvalue problems and perform extensive numerical studies, including calculations of the ground state for nonlinear Schr{\"o}dinger operators and committor functions relevant in molecular dynamics.}, language = {en} } @article{RichterBernerLiu, author = {Richter, Lorenz and Berner, Julius and Liu, Guan-Horng}, title = {Improved sampling via learned diffusions}, abstract = {Recently, a series of papers proposed deep learning-based approaches to sample from unnormalized target densities using controlled diffusion processes. In this work, we identify these approaches as special cases of the Schr{\"o}dinger bridge problem, seeking the most likely stochastic evolution between a given prior distribution and the specified target. We further generalize this framework by introducing a variational formulation based on divergences between path space measures of time-reversed diffusion processes. This abstract perspective leads to practical losses that can be optimized by gradient-based algorithms and includes previous objectives as special cases. At the same time, it allows us to consider divergences other than the reverse Kullback-Leibler divergence that is known to suffer from mode collapse. In particular, we propose the so-called log-variance loss, which exhibits favorable numerical properties and leads to significantly improved performance across all considered approaches.}, language = {en} } @incollection{HartmannRichter, author = {Hartmann, Carsten and Richter, Lorenz}, title = {Transgressing the Boundaries: Towards a Rigorous Understanding of Deep Learning and Its (Non-)Robustness}, series = {AI - Limits and Prospects of Artificial Intelligence}, volume = {4}, booktitle = {AI - Limits and Prospects of Artificial Intelligence}, editor = {Klimczak, Peter and Petersen, Christer}, publisher = {transcript Verlag}, doi = {10.1515/9783839457320}, pages = {43 -- 82}, abstract = {The recent advances in machine learning in various fields of applications can be largely attributed to the rise of deep learning (DL) methods and architectures. Despite being a key technology behind autonomous cars, image processing, speech recognition, etc., a notorious problem remains the lack of theoretical understanding of DL and related interpretability and (adversarial) robustness issues. Understanding the specifics of DL, as compared to, say, other forms of nonlinear regression methods or statistical learning, is interesting from a mathematical perspective, but at the same time it is of crucial importance in practice: treating neural networks as mere black boxes might be sufficient in certain cases, but many applications require waterproof performance guarantees and a deeper understanding of what could go wrong and why it could go wrong.
It is probably fair to say that, despite being mathematically well founded as a method to approximate complicated functions, DL is mostly still more like modern alchemy that is firmly in the hands of engineers and computer scientists. Nevertheless, it is evident that certain specifics of DL that could explain its success in applications demand systematic mathematical approaches. In this work, we review robustness issues of DL and particularly bridge concerns and attempts from approximation theory to statistical learning theory. Further, we review Bayesian Deep Learning as a means for uncertainty quantification and rigorous explainability.}, language = {en} } @article{RiberaBorrellQuerRichteretal., author = {Ribera Borrell, Enric and Quer, Jannes and Richter, Lorenz and Sch{\"u}tte, Christof}, title = {Improving control based importance sampling strategies for metastable diffusions via adapted metadynamics}, series = {SIAM Journal on Scientific Computing (SISC)}, journal = {SIAM Journal on Scientific Computing (SISC)}, doi = {10.1137/22M1503464}, pages = {S298 -- S323}, abstract = {Sampling rare events in metastable dynamical systems is often a computationally expensive task and one needs to resort to enhanced sampling methods such as importance sampling. Since we can formulate the problem of finding optimal importance sampling controls as a stochastic optimization problem, this then brings additional numerical challenges, and the convergence of corresponding algorithms may itself suffer from metastability. In this article we address this issue by combining systematic control approaches with the heuristic adaptive metadynamics method. Crucially, we approximate the importance sampling control by a neural network, which makes the algorithm in principle feasible for high-dimensional applications. We can numerically demonstrate in relevant metastable problems that our algorithm is more effective than previous attempts and that only the combination of the two approaches leads to a satisfying convergence and therefore to an efficient sampling in certain metastable settings.}, language = {en} } @article{HartmannRichterNonasymptotic, author = {Hartmann, Carsten and Richter, Lorenz}, title = {Nonasymptotic bounds for suboptimal importance sampling}, language = {en} }