@inproceedings{SteyerCheginiPotseetal.,
  author    = {Steyer, Joshua and Chegini, Fatemeh and Potse, Mark and Loewe, Axel and Weiser, Martin},
  title     = {Continuity of Microscopic Cardiac Conduction in a Computational Cell-by-Cell Model},
  booktitle = {2023 Computing in Cardiology Conference (CinC)},
  series    = {2023 Computing in Cardiology Conference (CinC)},
  volume    = {50},
  publisher = {Computing in Cardiology},
  issn      = {2325-887X},
  doi       = {10.22489/CinC.2023.385},
  abstract  = {Conduction velocity (CV) in cardiac tissue is a crucial electrophysiological parameter for arrhythmia vulnerability. Pathologically reduced CV facilitates arrhythmogenesis because it decreases the wavelength with which re-entry may occur. Computational studies exist on CV and how it changes regionally in models at spatial scales many times larger than actual cardiac cells. However, microscopic conduction within cells and between them has been studied less in simulations. In this work, we study the relation between microscopic conduction patterns and clinically observable macroscopic conduction using an extracellular-membrane-intracellular model, which represents cardiac tissue with these subdomains at subcellular resolution. By considering cell arrangement and non-uniform gap junction distribution, it yields anisotropic excitation propagation. This novel kind of model can, for example, be used to understand how discontinuous conduction on the microscopic level affects the fractionation of electrograms in healthy and fibrotic tissue. Along the membrane of a cell, we observed a continuously propagating activation wavefront. When transitioning from one cell to the neighbouring one, jumps in local activation times occurred, which led to lower global conduction velocities than those observed locally within each cell.},
  language  = {en}
}

@article{NiemannUramWolfetal.,
  author   = {Niemann, Jan-Hendrik and Uram, Samuel and Wolf, Sarah and Conrad, Natasa Djurdjevac and Weiser, Martin},
  title    = {Multilevel Optimization for Policy Design with Agent-Based Epidemic Models},
  series   = {Journal of Computational Science},
  volume   = {77},
  journal  = {Journal of Computational Science},
  doi      = {10.1016/j.jocs.2024.102242},
  pages    = {102242},
  abstract = {Epidemiological models can not only be used to forecast the course of a pandemic like COVID-19, but also to propose and design non-pharmaceutical interventions such as school and work closures. In general, the design of optimal policies leads to nonlinear optimization problems that can be solved by numerical algorithms. Epidemiological models come in different complexities, ranging from systems of simple ordinary differential equations (ODEs) to complex agent-based models (ABMs). The former allow fast and straightforward optimization, but are limited in accuracy, detail, and parameterization, while the latter can resolve spreading processes in detail, but are extremely expensive to optimize. We consider policy optimization in a prototypical situation modeled as both ODE and ABM, review numerical optimization approaches, and propose a heterogeneous multilevel approach based on combining a fine-resolution ABM and a coarse ODE model.
Numerical experiments, in particular with respect to convergence speed, are given for illustrative examples.},
  language = {en}
}

@article{SemlerWeiser,
  author   = {Semler, Phillip and Weiser, Martin},
  title    = {Adaptive Gaussian Process Regression for Efficient Building of Surrogate Models in Inverse Problems},
  series   = {Inverse Problems},
  volume   = {39},
  journal  = {Inverse Problems},
  number   = {12},
  doi      = {10.1088/1361-6420/ad0028},
  pages    = {125003},
  abstract = {In a task where many similar inverse problems must be solved, repeatedly evaluating costly simulations is impractical. Therefore, replacing the model y with a surrogate model y(s) that can be evaluated quickly leads to a significant speedup. The approximation quality of the surrogate model depends strongly on the number, position, and accuracy of the sample points. With an additionally limited computational budget, this leads to a problem of (computer) experimental design. In contrast to the selection of sample points, the trade-off between accuracy and effort has hardly been studied systematically. We therefore propose an adaptive algorithm to find an optimal design in terms of position and accuracy. Pursuing a sequential design by incrementally extending the computational budget leads to a convex and constrained optimization problem. As a surrogate, we construct a Gaussian process regression model. We measure the global approximation error in terms of its impact on the accuracy of the identified parameter and aim for a uniform absolute tolerance, assuming that y(s) is computed by finite element calculations. A priori error estimates and a coarse estimate of the computational effort relate the expected improvement of the surrogate model error to the computational effort, resulting in the most efficient combination of sample point and evaluation tolerance. We also allow for improving the accuracy of already existing sample points by continuing previously truncated finite element solution procedures.},
  language = {en}
}
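The Steyer et al. abstract relates smooth intracellular propagation plus activation-time jumps at cell-cell junctions to a reduced macroscopic conduction velocity. Below is a minimal Python sketch of that relation; the cell length, intracellular CV, and junctional delay are made-up placeholder values, not numbers from the paper.

# Hedged sketch (not from the paper): intercellular activation-time jumps lower the
# global conduction velocity below the intracellular one. All numbers are assumptions.
import numpy as np

cell_length = 100e-6          # m, assumed cell length
n_cells = 50
intra_cv = 0.6                # m/s, assumed conduction velocity inside one cell
junction_delay = 60e-6        # s, assumed activation-time jump at each cell-cell junction

# Local activation time at the distal end of each cell: intracellular travel time
# plus the accumulated junctional delays.
cell_index = np.arange(1, n_cells + 1)
lat_distal = cell_index * cell_length / intra_cv + (cell_index - 1) * junction_delay

global_cv = (n_cells * cell_length) / lat_distal[-1]
print(f"intracellular CV: {intra_cv:.3f} m/s, global CV: {global_cv:.3f} m/s")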
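The Niemann et al. abstract contrasts cheap ODE-based policy optimization with expensive agent-based models. The following Python sketch shows only the cheap coarse level of such a scheme: a single contact-reduction policy parameter optimized on a simple SIR ODE. The model form, parameters, and cost functional are illustrative assumptions, not the paper's setup.

# Hedged sketch (not the authors' code): policy optimization on a coarse SIR ODE model,
# the kind of cheap lower level a multilevel scheme could combine with a costly ABM.
import numpy as np
from scipy.integrate import solve_ivp
from scipy.optimize import minimize_scalar

beta, gamma, horizon = 0.3, 0.1, 200.0   # assumed transmission/recovery rates, days

def peak_infected(u):
    """Peak infected fraction when contacts are reduced by factor u in [0, 1]."""
    def rhs(t, y):
        s, i, r = y
        return [-(1 - u) * beta * s * i, (1 - u) * beta * s * i - gamma * i, gamma * i]
    sol = solve_ivp(rhs, (0.0, horizon), [0.99, 0.01, 0.0], max_step=0.5)
    return sol.y[1].max()

def policy_cost(u, weight=0.2):
    # Trade off epidemic burden against an assumed linear intervention cost.
    return peak_infected(u) + weight * u

res = minimize_scalar(policy_cost, bounds=(0.0, 1.0), method="bounded")
print(f"coarse-model optimal contact reduction: {res.x:.2f}")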
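The Semler and Weiser abstract describes adaptive placement of sample points for a Gaussian process surrogate. The sketch below shows a strongly simplified version of sequential design, greedily adding the candidate point with the largest predictive standard deviation; the paper's coupling of sample position with finite element evaluation tolerance is not reproduced, and the test function and parameters are illustrative assumptions.

# Hedged sketch (simplified, not the paper's algorithm): sequential design for a
# Gaussian process surrogate driven by the predictive standard deviation.
import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF

def forward_model(x):
    # Stand-in for an expensive simulation, e.g. a finite element solve.
    return np.sin(3.0 * x) + 0.5 * x

candidates = np.linspace(0.0, 2.0, 201).reshape(-1, 1)
X = np.array([[0.0], [2.0]])                  # initial design
y = forward_model(X).ravel()

for _ in range(8):                            # assumed evaluation budget
    gp = GaussianProcessRegressor(kernel=RBF(length_scale=0.5), normalize_y=True)
    gp.fit(X, y)
    _, std = gp.predict(candidates, return_std=True)
    x_new = candidates[np.argmax(std)].reshape(1, -1)   # most uncertain candidate
    X = np.vstack([X, x_new])
    y = np.append(y, forward_model(x_new).ravel())

print(f"final design points: {np.sort(X.ravel())}")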