@article{DahlkeSteidlTeschke2004, author = {Dahlke, S. and Steidl, G. and Teschke, G.}, title = {Weighted Coorbit Spaces and Banach Frames on Homogeneous Spaces}, volume = {10}, journal = {Journal of Fourier Analysis and Applications}, number = {5}, pages = {507 -- 539}, year = {2004}, language = {en} } @book{DeuflhardWeiser2011, author = {Deuflhard, Peter and Weiser, Martin}, title = {Numerische Mathematik 3}, publisher = {de Gruyter}, address = {Berlin}, year = {2011}, language = {de} } @book{DeuflhardWeiser2012, author = {Deuflhard, Peter and Weiser, Martin}, title = {Adaptive numerical solution of PDEs}, publisher = {de Gruyter}, address = {Berlin}, year = {2012}, language = {en} } @article{DeuflhardWeiserZachow2006, author = {Deuflhard, Peter and Weiser, Martin and Zachow, Stefan}, title = {Mathematics in Facial Surgery}, volume = {53}, journal = {AMS Notices}, number = {9}, pages = {1012 -- 1016}, year = {2006}, language = {en} } @article{DeuflhardHochmuth2004, author = {Deuflhard, Peter and Hochmuth, Reinhard}, title = {Multiscale analysis of thermoregulation in the human microvascular system}, volume = {27}, journal = {Math. Methods Appl. Sci.}, number = {8}, publisher = {Wiley Verlag}, pages = {971 -- 989}, year = {2004}, language = {en} } @incollection{Deuflhard2003, author = {Deuflhard, Peter}, title = {A Comparison of Related Concepts in Computational Chemistry and Mathematics}, volume = {88}, booktitle = {Chemistry and Mathematics}, number = {330}, pages = {51 -- 66}, year = {2003}, language = {en} } @incollection{Deuflhard2003a, author = {Deuflhard, Peter}, title = {From Molecular Dynamics to Conformational Dynamics in Drug Design}, booktitle = {Trends in Nonlinear Analysis}, editor = {Kirkilionis, Markus and others}, publisher = {Springer Verlag Berlin}, pages = {269 -- 288}, year = {2003}, language = {en} } @misc{DeuflhardZachow2012, author = {Deuflhard, Peter and Zachow, Stefan}, title = {Mathematische Therapie- und Operationsplanung}, publisher = {Berliner Wirtschaftsgespr{\"a}che e.V.}, address = {Berlin}, pages = {89 -- 90}, year = {2012}, language = {de} } @incollection{DeuflhardDoesselLouisetal.2010, author = {Deuflhard, Peter and D{\"o}ssel, Olaf and Louis, Alfred and Zachow, Stefan}, title = {More Mathematics into Medicine!}, booktitle = {Production Factor Mathematics}, publisher = {Springer}, pages = {357 -- 378}, year = {2010}, language = {en} } @article{WeiserZachowDeuflhard2010, author = {Weiser, Martin and Zachow, Stefan and Deuflhard, Peter}, title = {Craniofacial Surgery Planning Based on Virtual Patient Models}, volume = {52}, journal = {it - Information Technology}, number = {5}, publisher = {Oldenbourg Verlagsgruppe}, doi = {10.1524/itit.2010.0600}, pages = {258 -- 263}, year = {2010}, language = {en} } @inproceedings{ZachowHierlErdmann2004, author = {Zachow, Stefan and Hierl, Thomas and Erdmann, Bodo}, title = {A quantitative evaluation of 3D soft tissue prediction in maxillofacial surgery planning}, booktitle = {Proc. 3. Jahrestagung der Deutschen Gesellschaft f{\"u}r Computer- und Roboter-assistierte Chirurgie e.V.}, address = {M{\"u}nchen}, year = {2004}, language = {en} } @inproceedings{WeiserScacchi2017, author = {Weiser, Martin and Scacchi, Simone}, title = {Spectral Deferred Correction methods for adaptive electro-mechanical coupling in cardiac simulation}, booktitle = {G. Russo et al. (eds.)
Progress in Industrial Mathematics at ECMI 2014}, publisher = {Springer}, doi = {10.1007/978-3-319-23413-7_42}, pages = {321 -- 328}, year = {2017}, abstract = {We investigate spectral deferred correction (SDC) methods for time stepping and their interplay with spatio-temporal adaptivity, applied to the solution of the cardiac electro-mechanical coupling model. This model consists of the Monodomain equations, a reaction-diffusion system modeling the cardiac bioelectrical activity, coupled with a quasi-static mechanical model describing the contraction and relaxation of the cardiac muscle. The numerical approximation of the cardiac electro-mechanical coupling is a challenging multiphysics problem, because it exhibits very different spatial and temporal scales. Therefore, spatio-temporal adaptivity is a promising approach to reduce the computational complexity. SDC methods are simple iterative methods for solving collocation systems. We exploit their flexibility for combining them in various ways with spatio-temporal adaptivity. The accuracy and computational complexity of the resulting methods are studied on some numerical examples.}, language = {en} } @misc{LubkollSchielaWeiser2015, author = {Lubkoll, Lars and Schiela, Anton and Weiser, Martin}, title = {An affine covariant composite step method for optimization with PDEs as equality constraints}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-53954}, year = {2015}, abstract = {We propose a composite step method, designed for equality constrained optimization with partial differential equations. Focus is laid on the construction of a globalization scheme, which is based on cubic regularization of the objective and an affine covariant damped Newton method for feasibility. We show finite termination of the inner loop and fast local convergence of the algorithm. We discuss preconditioning strategies for the iterative solution of the arising linear systems with projected conjugate gradient. Numerical results are shown for optimal control problems subject to a nonlinear heat equation and subject to nonlinear elastic equations arising from an implant design problem in craniofacial surgery.}, language = {en} } @misc{WeiserGhosh2016, author = {Weiser, Martin and Ghosh, Sunayana}, title = {Theoretically optimal inexact SDC methods}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-53140}, year = {2016}, abstract = {In several initial value problems with particularly expensive right hand side computation, there is a trade-off between accuracy and computational effort in evaluating the right hand sides. We consider inexact spectral deferred correction (SDC) methods for solving such non-stiff initial value problems. SDC methods are interpreted as fixed point iterations and, due to their corrective iterative nature, allow exploiting the accuracy-work trade-off to reduce the total computational effort. On the one hand, we derive an error model bounding the total error in terms of the right hand side evaluation errors. On the other hand, we define work models describing the computational effort in terms of the evaluation accuracy.
Combining both, a theoretically optimal tolerance selection is worked out by minimizing the total work subject to achieving the requested tolerance.}, language = {en} } @phdthesis{Goetschel2015, author = {G{\"o}tschel, Sebastian}, title = {Adaptive Lossy Trajectory Compression for Optimal Control of Parabolic PDEs}, year = {2015}, abstract = {Optimal control problems governed by nonlinear, time-dependent PDEs on three-dimensional spatial domains are an important tool in many fields, ranging from engineering applications to medicine. For the solution of such optimization problems, methods working on the reduced objective functional are often employed to avoid a full spatio-temporal discretization of the problem. The evaluation of the reduced gradient requires one solve of the state equation forward in time, and one backward solve of the adjoint equation. The state enters into the adjoint equation, requiring the storage of a full 4D data set. If Newton-CG methods are used, two additional trajectories have to be stored. To get numerical results that are accurate enough, in many cases very fine discretizations in time and space are necessary, leading to a significant amount of data to be stored and transmitted to mass storage. This thesis deals with the development and analysis of methods for lossy compression of such finite element solutions. The algorithms are based on a change of basis to reduce correlations in the data, combined with quantization. This is achieved by transforming the finite element coefficient vector from the nodal to the hierarchical basis, followed by rounding the coefficients to a prescribed precision. Due to the inexact reconstruction, and thus inexact data for the adjoint equation, the error induced in the reduced gradient, and reduced Hessian, has to be controlled, to not impede convergence of the optimization. Accuracy requirements of different optimization methods are analyzed, and computable error estimates for the influence of lossy trajectory storage are derived. These tools are used to adaptively control the accuracy of the compressed data. The efficiency of the algorithms is demonstrated on several numerical examples, ranging from a simple linear, scalar equation to a semi-linear system of reaction-diffusion equations. In all examples considerable reductions in storage space and bandwidth requirements are achieved, without significantly influencing the convergence behavior of the optimization methods. Finally, to go beyond pointwise error control, the hierarchical basis transform can be replaced by more sophisticated wavelet transforms. Numerical experiments indicate that choosing suitable norms for error control allows higher compression factors.}, language = {en} } @misc{Freytag2017, author = {Freytag, Yvonne}, title = {Optimal Experimental Design to Estimate the Time of Death in a Bayesian Context}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-62475}, year = {2017}, abstract = {This thesis is devoted to the interdisciplinary work between mathematicians and forensic experts: the modeling of the human body cooling process after death laying the foundation for the estimation of the time of death. An inverse problem needs to be solved. In this thesis the inverse problem computes the time of death given the measured body temperature and the Forward Model that simulates the body cooling process. The Forward Model is based on the heat equation established by Fourier. 
This differential equation is numerically solved by discretization in space with the Finite Element Method and discretization in time with the Implicit Euler Method. The applications in this thesis demand a fast computation time. A model reduction is achieved by the Proper Orthogonal Decomposition in combination with the Galerkin Method. For reasons of simplification, the computations and the measurements are restricted to a cylindrical phantom that is made out of homogeneous polyethylene. The estimate of the time of death is accompanied by an uncertainty. The inverse problem is incorporated by Bayesian inference to interpret the quality of the estimate and the efficiency of the experiment. The uncertainty of the estimate of the time of death is minimized by approaching the Optimal Design of the Experiment. An objective function measures the certainty of the data and lays the foundation of the optimization problem. Solving the optimization problem is successfully done by relaxing the complex discrete NP-hard problem and applying a gradient-based method. The results of this thesis clearly show that the design of an experiment has a great influence on the outcome of the quality of the estimate. The comparison of the estimate and its properties based on different designs and conditions reveals the efficiency of the Design of Experiment in the context of the estimation of the time of death.}, language = {en} } @inproceedings{GoetschelHoehneKolkoorietal.2016a, author = {G{\"o}tschel, Sebastian and H{\"o}hne, Christian and Kolkoori, Sanjeevareddy and Mitzscherling, Steffen and Prager, Jens and Weiser, Martin}, title = {Ray Tracing Boundary Value Problems: Simulation and SAFT Reconstruction for Ultrasonic Testing}, booktitle = {Proceedings 19th World Conference on Non-Destructive Testing (WCNDT 2016)}, year = {2016}, language = {en} } @inproceedings{GoetschelMaierhoferMuelleretal.2016a, author = {G{\"o}tschel, Sebastian and Maierhofer, Christiane and M{\"u}ller, Jan and Rothbart, Nick and Weiser, Martin}, title = {Quantitative Defect Reconstruction in Active Thermography for Fiber-Reinforced Composites}, booktitle = {Proceedings 19th World Conference on Non-Destructive Testing (WCNDT 2016)}, year = {2016}, language = {en} } @inproceedings{MuellerGoetschelMaierhoferetal.2017, author = {M{\"u}ller, Jan and G{\"o}tschel, Sebastian and Maierhofer, Christiane and Weiser, Martin}, title = {Determining the material parameters for the reconstruction of defects in carbon fiber reinforced polymers from data measured by flash thermography}, volume = {1806}, booktitle = {AIP Conference Proceedings}, doi = {10.1063/1.4974671}, year = {2017}, language = {en} } @misc{Schuh2016, type = {Master Thesis}, author = {Schuh, Hannah}, title = {Identification of Anisotropies in Welding Seams Using Ultrasound Measurements}, year = {2016}, language = {en} } @misc{FischerGoetschelWeiser2017, author = {Fischer, Lisa and G{\"o}tschel, Sebastian and Weiser, Martin}, title = {Lossy data compression reduces communication time in hybrid time-parallel integrators}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-63961}, year = {2017}, abstract = {Parallel in time methods for solving initial value problems are a means to increase the parallelism of numerical simulations. Hybrid parareal schemes interleaving the parallel in time iteration with an iterative solution of the individual time steps are among the most efficient methods for general nonlinear problems.
Despite the hiding of communication time behind computation, communication has in certain situations a significant impact on the total runtime. Here we present strict, yet not sharp, error bounds for hybrid parareal methods with inexact communication due to lossy data compression, and derive theoretical estimates of the impact of compression on parallel efficiency of the algorithms. These and some computational experiments suggest that compression is a viable method to make hybrid parareal schemes robust with respect to low bandwidth setups.}, language = {en} } @misc{WeiserErdmannSchenkletal.2017, author = {Weiser, Martin and Erdmann, Bodo and Schenkl, Sebastian and Muggenthaler, Holger and Hubig, Michael and Mall, Gita and Zachow, Stefan}, title = {Uncertainty in Temperature-Based Determination of Time of Death}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-63818}, year = {2017}, abstract = {Temperature-based estimation of time of death (ToD) can be performed either with the help of simple phenomenological models of corpse cooling or with detailed mechanistic (thermodynamic) heat transfer models. The latter are much more complex, but allow a higher accuracy of ToD estimation as in principle all relevant cooling mechanisms can be taken into account. The potentially higher accuracy depends on the accuracy of tissue and environmental parameters as well as on the geometric resolution. We investigate the impact of parameter variations and geometry representation on the estimated ToD based on a highly detailed 3D corpse model that has been segmented and geometrically reconstructed from a computed tomography (CT) data set, differentiating various organs and tissue types. From that we identify the most crucial parameters to measure or estimate, and obtain a local uncertainty quantification for the ToD.}, language = {en} } @misc{GoetschelMinion2017, author = {G{\"o}tschel, Sebastian and Minion, Michael L.}, title = {Parallel-in-Time for Parabolic Optimal Control Problems Using PFASST}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-64989}, year = {2017}, abstract = {In gradient-based methods for parabolic optimal control problems, it is necessary to solve both the state equation and a backward-in-time adjoint equation in each iteration of the optimization method. In order to facilitate fully parallel gradient-type and nonlinear conjugate gradient methods for the solution of such optimal control problems, we discuss the application of the parallel-in-time method PFASST to adjoint gradient computation. In addition to enabling time parallelism, PFASST provides high flexibility for handling nonlinear equations, as well as potential extra computational savings from reusing previous solutions in the optimization loop. The approach is demonstrated here for a model reaction-diffusion optimal control problem.}, language = {en} } @article{FischerGoetschelWeiser2018, author = {Fischer, Lisa and G{\"o}tschel, Sebastian and Weiser, Martin}, title = {Lossy data compression reduces communication time in hybrid time-parallel integrators}, volume = {19}, journal = {Comput. Vis. Sci.}, number = {1}, doi = {10.1007/s00791-018-0293-2}, pages = {19 -- 30}, year = {2018}, abstract = {Parallel in time methods for solving initial value problems are a means to increase the parallelism of numerical simulations.
Hybrid parareal schemes interleaving the parallel in time iteration with an iterative solution of the individual time steps are among the most efficient methods for general nonlinear problems. Despite the hiding of communication time behind computation, communication has in certain situations a significant impact on the total runtime. Here we present strict, yet not sharp, error bounds for hybrid parareal methods with inexact communication due to lossy data compression, and derive theoretical estimates of the impact of compression on parallel efficiency of the algorithms. These and some computational experiments suggest that compression is a viable method to make hybrid parareal schemes robust with respect to low bandwidth setups.}, language = {en} } @misc{Freytag2017a, type = {Master Thesis}, author = {Freytag, Yvonne}, title = {Optimal Experimental Design to Estimate the Time of Death in a Bayesian Context}, school = {Zuse Institute Berlin (ZIB)}, pages = {81}, year = {2017}, abstract = {This thesis is devoted to the interdisciplinary work between mathematicians and forensic experts: the modeling of the human body cooling process after death laying the foundation for the estimation of the time of death. An inverse problem needs to be solved. In this thesis the inverse problem computes the time of death given the measured body temperature and the Forward Model that simulates the body cooling process. The Forward Model is based on the heat equation established by Fourier. This differential equation is numerically solved by discretization in space with the Finite Element Method and discretization in time with the Implicit Euler Method. The applications in this thesis demand a fast computation time. A model reduction is achieved by the Proper Orthogonal Decomposition in combination with the Galerkin Method. For reasons of simplification, the computations and the measurements are restricted to a cylindrical phantom that is made out of homogeneous polyethylene. The estimate of the time of death is accompanied by an uncertainty. The inverse problem is incorporated by Bayesian inference to interpret the quality of the estimate and the efficiency of the experiment. The uncertainty of the estimate of the time of death is minimized by approaching the Optimal Design of the Experiment. An objective function measures the certainty of the data and lays the foundation of the optimization problem. Solving the optimization problem is successfully done by relaxing the complex discrete NP-hard problem and applying a gradient-based method. The results of this thesis clearly show that the design of an experiment has a great influence on the outcome of the quality of the estimate.
The comparison of the estimate and its properties based on different designs and conditions reveals the efficiency of the Design of Experiment in the context of the estimation of the time of death.}, language = {en} } @misc{Fischer2017, type = {Master Thesis}, author = {Fischer, Lisa}, title = {On the convergence of inexact time parallel time integration}, pages = {61}, year = {2017}, language = {en} } @misc{WeiserScacchi2014, author = {Weiser, Martin and Scacchi, Simone}, title = {Spectral Deferred Correction methods for adaptive electro-mechanical coupling in cardiac simulation}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-50695}, year = {2014}, abstract = {We investigate spectral deferred correction (SDC) methods for time stepping and their interplay with spatio-temporal adaptivity, applied to the solution of the cardiac electro-mechanical coupling model. This model consists of the Monodomain equations, a reaction-diffusion system modeling the cardiac bioelectrical activity, coupled with a quasi-static mechanical model describing the contraction and relaxation of the cardiac muscle. The numerical approximation of the cardiac electro-mechanical coupling is a challenging multiphysics problem, because it exhibits very different spatial and temporal scales. Therefore, spatio-temporal adaptivity is a promising approach to reduce the computational complexity. SDC methods are simple iterative methods for solving collocation systems. We exploit their flexibility for combining them in various ways with spatio-temporal adaptivity. The accuracy and computational complexity of the resulting methods are studied on some numerical examples.}, language = {en} } @misc{DeuflhardKornhuberSanderetal.2014, author = {Deuflhard, Peter and Kornhuber, Ralf and Sander, Oliver and Schiela, Anton and Weiser, Martin}, title = {Mathematics cures virtual patients}, volume = {1}, journal = {MATHEON-Mathematics for Key Technologies}, editor = {Deuflhard, Peter and Gr{\"o}tschel, Martin and H{\"o}mberg, Dietmar and Horst, Ulrich and Kramer, J{\"u}rg and Mehrmann, Volker and Polthier, Konrad and Schmidt, Frank and Sch{\"u}tte, Christof and Skutella, Martin and Sprekels, J{\"u}rgen}, publisher = {European Mathematical Society}, pages = {7 -- 25}, year = {2014}, language = {en} } @article{MoualeuNgangueWeiserEhrigetal.2015, author = {Moualeu-Ngangue, Dany Pascal and Weiser, Martin and Ehrig, Rainald and Deuflhard, Peter}, title = {Optimal control for a tuberculosis model with undetected cases in Cameroon}, volume = {20}, journal = {Communications in Nonlinear Science and Numerical Simulation}, number = {3}, doi = {10.1016/j.cnsns.2014.06.037}, pages = {986 -- 1003}, year = {2015}, abstract = {This paper considers the optimal control of tuberculosis through education, diagnosis campaigns and chemoprophylaxis of latently infected individuals. A mathematical model which includes important components such as undiagnosed infectious, diagnosed infectious, latently infected and lost-sight infectious is formulated. The model combines a frequency dependent and a density dependent force of infection for TB transmission. Through optimal control theory and numerical simulations, a cost-effective balance of two different intervention methods is obtained. Seeking to minimize the amount of money the government spends while tuberculosis remains endemic in the Cameroonian population, Pontryagin's maximum principle is used to characterize the optimal control.
The optimality system is derived and solved numerically using the forward-backward sweep method (FBSM). Results provide a framework for designing cost-effective strategies for diseases with multiple intervention methods. It turns out that by combining chemoprophylaxis and education, the burden of TB can be reduced by 80 \% in 10 years.}, language = {en} } @misc{GoetschelWeiserMaierhoferetal.2012, author = {G{\"o}tschel, Sebastian and Weiser, Martin and Maierhofer, Christiane and Richter, Regina}, title = {Data Enhancement for Active Thermography}, journal = {E-book Proceedings, 11th International Conference on Quantitative Infrared Thermography, Naples}, editor = {Cardone, Gennaro}, year = {2012}, abstract = {Pulse thermography is a non-destructive testing method based on infrared imaging of transient thermal patterns. Heating the surface of the structure under test for a short period of time generates a non-stationary temperature distribution and thus a thermal contrast between the defect and the sound material. Due to measurement noise, preprocessing of the experimental data is necessary before reconstruction algorithms can be applied. We propose a decomposition of the measured temperature into Green's function solutions to eliminate noise.}, language = {en} } @article{Weiser2013, author = {Weiser, Martin}, title = {On goal-oriented adaptivity for elliptic optimal control problems}, volume = {28}, journal = {Opt. Meth. Softw.}, number = {13}, pages = {969 -- 992}, year = {2013}, abstract = {The paper proposes goal-oriented error estimation and mesh refinement for optimal control problems with elliptic PDE constraints using the value of the reduced cost functional as quantity of interest. Error representation, hierarchical error estimators, and greedy-style error indicators are derived and compared to their counterparts when using the all-at-once cost functional as quantity of interest. Finally, the efficiency of the error estimator and generated meshes is demonstrated on numerical examples.}, language = {en} } @article{DeuflhardSchielaWeiser2012, author = {Deuflhard, Peter and Schiela, Anton and Weiser, Martin}, title = {Mathematical Cancer Therapy Planning in Deep Regional Hyperthermia}, volume = {21}, journal = {Acta Numerica}, pages = {307 -- 378}, year = {2012}, abstract = {This paper surveys the required mathematics for a typical challenging problem from computational medicine, the cancer therapy planning in deep regional hyperthermia. In the course of many years of close cooperation with clinics, the medical problem gave rise to quite a number of subtle mathematical problems, part of which had been unsolved when the common project started. Efficiency of numerical algorithms, i.e. computational speed and monitored reliability, plays a decisive role for the medical treatment. Off-the-shelf software had turned out not to be sufficient to meet the requirements of medicine. Rather, new mathematical theory as well as new numerical algorithms had to be developed. In order to make our algorithms useful in the clinical environment, new visualization software, a virtual lab, including 3D geometry processing of individual virtual patients had to be designed and implemented. Moreover, before the problems could be attacked by numerical algorithms, careful mathematical modelling had to be done. Finally, parameter identification and constrained optimization for the PDEs had to be newly analyzed and realized over the individual patient's geometry.
Our new techniques had an impact on the specificity of the individual patients' treatment and on the construction of an improved hyperthermia applicator.}, language = {en} } @misc{GoetschelWeiserSchiela2012, author = {G{\"o}tschel, Sebastian and Weiser, Martin and Schiela, Anton}, title = {Solving Optimal Control Problems with the Kaskade 7 Finite Element Toolbox}, journal = {Advances in DUNE}, editor = {Dedner, A. and Flemisch, B. and Kl{\"o}fkorn, R.}, publisher = {Springer}, pages = {101 -- 112}, year = {2012}, abstract = {This paper presents concepts and implementation of the finite element toolbox Kaskade 7, a flexible C++ code for solving elliptic and parabolic PDE systems. Issues such as problem formulation, assembly and adaptivity are discussed using the example of optimal control problems. Trajectory compression for parabolic optimization problems is considered as a case study.}, language = {en} } @article{Weiser2015, author = {Weiser, Martin}, title = {Faster SDC convergence on non-equidistant grids by DIRK sweeps}, volume = {55}, journal = {BIT Numerical Mathematics}, number = {4}, doi = {10.1007/s10543-014-0540-y}, pages = {1219 -- 1241}, year = {2015}, abstract = {Spectral deferred correction methods for solving stiff ODEs are known to converge rapidly towards the collocation limit solution on equidistant grids, but show a much less favourable contraction on non-equidistant grids such as Radau-IIa points. We interpret SDC methods as fixed point iterations for the collocation system and propose new DIRK-type sweeps for stiff problems based on purely linear algebraic considerations. Good convergence is recovered also on non-equidistant grids. The properties of different variants are explored on a couple of numerical examples.}, language = {en} } @article{WeiserGoetschel2012, author = {Weiser, Martin and G{\"o}tschel, Sebastian}, title = {State Trajectory Compression for Optimal Control with Parabolic PDEs}, volume = {34}, journal = {SIAM J. Sci. Comput.}, number = {1}, doi = {10.1137/11082172X}, pages = {A161 -- A184}, year = {2012}, abstract = {In optimal control problems with nonlinear time-dependent 3D PDEs, full 4D discretizations are usually prohibitive due to the storage requirement. For this reason gradient and quasi-Newton methods working on the reduced functional are often employed. The computation of the reduced gradient requires one solve of the state equation forward in time, and one backward solve of the adjoint equation. The state enters into the adjoint equation, again requiring the storage of a full 4D data set. We propose a lossy compression algorithm using an inexact but cheap predictor for the state data, with additional entropy coding of prediction errors. As the data is used inside a discretized, iterative algorithm, lossy coding maintaining an error bound is sufficient.}, language = {en} } @misc{NadobnyWeihrauchWeiseretal.2007, author = {Nadobny, Johanna and Weihrauch, Mirko and Weiser, Martin and Gellermann, Johanna and Wlodarczyk, Waldemar and Budach, Volker and Wust, Peter}, title = {Advances in the Planning and Control of the MR-guided Regional Hyperthermia Applications}, journal = {Proc. Int. Conf.
Electromagnetics in Advanced Applications, ICEAA 2007, Torino, Italy}, pages = {1010 -- 1013}, year = {2007}, language = {en} } @article{GoetschelWeiser2010, author = {G{\"o}tschel, Sebastian and Weiser, Martin}, title = {State Trajectory Compression in Optimal Control}, volume = {10}, journal = {PAMM}, number = {1}, doi = {10.1002/pamm.201010282}, pages = {579 -- 580}, year = {2010}, abstract = {In optimal control problems with nonlinear time-dependent 3D PDEs, the computation of the reduced gradient by adjoint methods requires one solve of the state equation forward in time, and one backward solve of the adjoint equation. Since the state enters into the adjoint equation, the storage of a 4D discretization is necessary. We propose a lossy compression algorithm using a cheap predictor for the state data, with additional entropy coding of prediction errors. Analytical and numerical results indicate that compression factors around 30 can be obtained without exceeding the FE discretization error.}, language = {en} } @misc{Weiser2010, author = {Weiser, Martin}, title = {Delayed Residual Compensation for Bidomain Equations}, volume = {1281}, journal = {AIP Conference Proceedings}, doi = {10.1063/1.3498495}, pages = {419 -- 422}, year = {2010}, abstract = {The bidomain model of cardioelectric excitation consists of a reaction-diffusion equation, an elliptic algebraic constraint, and a set of pointwise ODEs. Fast reaction enforces small time steps, such that for common mesh sizes the reaction-diffusion equation is easily solved implicitly due to a dominating mass matrix. In contrast, the elliptic constraint does not benefit from small time steps and requires a comparably expensive solution. We propose a delayed residual compensation that improves the solution of the elliptic constraint and thus alleviates the need for long iteration times.}, language = {en} } @misc{WeiserErdmannDeuflhard2010, author = {Weiser, Martin and Erdmann, Bodo and Deuflhard, Peter}, title = {On Efficiency and Accuracy in Cardioelectric Simulation}, journal = {Progress in Industrial Mathematics at ECMI 2008}, editor = {Wilson, E. and Fitt, A. and Ockendon, H. and Norbury, J.}, publisher = {Springer}, pages = {371 -- 376}, year = {2010}, abstract = {Reasons for the failure of adaptive methods to deliver improved efficiency when integrating monodomain models for myocardiac excitation are discussed. Two closely related techniques for reducing the computational complexity of linearly implicit integrators, deliberate sparsing and splitting, are investigated with respect to their impact on computing time and accuracy.}, language = {en} } @article{WilhelmsSeemannWeiseretal.2010, author = {Wilhelms, Mathias and Seemann, Gunnar and Weiser, Martin and D{\"o}ssel, Olaf}, title = {Benchmarking Solvers of the Monodomain Equation in Cardiac Electrophysiological Modeling}, volume = {55}, journal = {Biomed. Engineer.}, doi = {10.1515/BMT.2010.712}, pages = {99 -- 102}, year = {2010}, language = {en} } @misc{WustWeihrauchWeiseretal.2010, author = {Wust, Peter and Weihrauch, Mirko and Weiser, Martin and Gellermann, Johanna and Eisenhardt, Steffen and Chobrok, Thorsten and Budach, Volker}, title = {Optimization of clinical radiofrequency hyperthermia by use of MR-thermography in a hybrid system}, journal = {World Congress on Medical Physics and Biomedical Engineering, September 2009, Munich, Germany}, editor = {D{\"o}ssel, O. and Schlegel, W.
and Magjarevic, R.}, publisher = {Springer}, pages = {174 -- 175}, year = {2010}, language = {en} } @article{LubkollSchielaWeiser2014, author = {Lubkoll, Lars and Schiela, Anton and Weiser, Martin}, title = {An optimal control problem in polyconvex hyperelasticity}, volume = {52}, journal = {SIAM J. Control Opt.}, number = {3}, doi = {10.1137/120876629}, pages = {1403 -- 1422}, year = {2014}, abstract = {We consider a shape implant design problem that arises in the context of facial surgery. We introduce a reformulation as an optimal control problem, where the control acts as a boundary force. The state is modelled as a minimizer of a polyconvex hyperelastic energy functional. We show existence of optimal solutions and derive - on a formal level - first order optimality conditions. Finally, preliminary numerical results are presented.}, language = {en} } @article{GuentherLameckerWeiser2013, author = {G{\"u}nther, Andreas and Lamecker, Hans and Weiser, Martin}, title = {Flexible Shape Matching with Finite Element Based LDDMM}, volume = {105}, journal = {International Journal of Computer Vision}, number = {2}, doi = {10.1007/s11263-012-0599-3}, pages = {128 -- 143}, year = {2013}, abstract = {We consider Large Deformation Diffeomorphic Metric Mapping of general \$m\$-currents. After stating an optimization algorithm in the function space of admissible morph generating velocity fields, two innovative aspects in this framework are presented and numerically investigated: First, we spatially discretize the velocity field with conforming adaptive finite elements and discuss advantages of this new approach. Second, we directly compute the temporal evolution of discrete \$m\$-current attributes.}, language = {en} } @misc{GuentherLameckerWeiser2011, author = {G{\"u}nther, Andreas and Lamecker, Hans and Weiser, Martin}, title = {Direct LDDMM of Discrete Currents with Adaptive Finite Elements}, journal = {Proceedings of the Third International Workshop on Mathematical Foundations of Computational Anatomy - Geometrical and Statistical Methods for Modelling Biological Shape Variability}, editor = {Pennec, X. and Joshi, S. and Nielsen, M.}, pages = {1 -- 14}, year = {2011}, abstract = {We consider Large Deformation Diffeomorphic Metric Mapping of general \$m\$-currents. After stating an optimization algorithm in the function space of admissible morph generating velocity fields, two innovative aspects in this framework are presented and numerically investigated: First, we spatially discretize the velocity field with conforming adaptive finite elements and discuss advantages of this new approach. Second, we directly compute the temporal evolution of discrete \$m\$-current attributes.}, language = {en} } @misc{SchielaWeiser2010, author = {Schiela, Anton and Weiser, Martin}, title = {Barrier methods for a control problem from hyperthermia treatment planning}, journal = {Recent Advances in Optimization and its Applications in Engineering (Proceedings of 14th Belgian-French-German Conference on Optimization 2009)}, editor = {Diehl, M. and Glineur, F. and Jarlebring, E. and Michiels, W.}, publisher = {Springer}, pages = {419 -- 428}, year = {2010}, abstract = {We consider an optimal control problem from hyperthermia treatment planning and its barrier regularization. We derive basic results, which lay the groundwork for the computation of optimal solutions via an interior point path-following method.
Further, we report on a numerical implementation of such a method and its performance at an example problem.}, language = {en} } @article{WeiserRoelligArndtetal.2010, author = {Weiser, Martin and R{\"o}llig, Mathias and Arndt, Ralf and Erdmann, Bodo}, title = {Development and test of a numerical model for pulse thermography in civil engineering}, volume = {46}, journal = {Heat and Mass Transfer}, number = {11-12}, pages = {1419 -- 1428}, year = {2010}, abstract = {Pulse thermography of concrete structures is used in civil engineering for detecting voids, honeycombing and delamination. The physical situation is readily modeled by Fourier's law. Despite the simplicity of the PDE structure, quantitatively realistic numerical 3D simulation faces two major obstacles. First, the short heating pulse induces a thin boundary layer at the heated surface which encapsulates all information and therefore has to be resolved faithfully. Even with adaptive mesh refinement techniques, obtaining useful accuracies requires an unsatisfactorily fine discretization. Second, bulk material parameters and boundary conditions are barely known exactly. We address both issues by a semi-analytic reformulation of the heat transport problem and by parameter identification. Numerical results are compared with measurements of test specimens.}, language = {en} } @article{RannebergWeiserWeihrauchetal.2010, author = {Ranneberg, Maximilian and Weiser, Martin and Weihrauch, Mirko and Budach, Volker and Gellermann, Johanna and Wust, Peter}, title = {Regularized Antenna Profile Adaptation in Online Hyperthermia Treatment}, volume = {37}, journal = {Medical Physics}, doi = {10.1118/1.3488896}, pages = {5382 -- 5394}, year = {2010}, language = {en} } @article{LubkollSchielaWeiser2017, author = {Lubkoll, Lars and Schiela, Anton and Weiser, Martin}, title = {An affine covariant composite step method for optimization with PDEs as equality constraints}, volume = {32}, journal = {Optimization Methods and Software}, number = {5}, doi = {10.1080/10556788.2016.1241783}, pages = {1132 -- 1161}, year = {2017}, abstract = {We propose a composite step method, designed for equality constrained optimization with partial differential equations. Focus is laid on the construction of a globalization scheme, which is based on cubic regularization of the objective and an affine covariant damped Newton method for feasibility. We show finite termination of the inner loop and fast local convergence of the algorithm. We discuss preconditioning strategies for the iterative solution of the arising linear systems with projected conjugate gradient. Numerical results are shown for optimal control problems subject to a nonlinear heat equation and subject to nonlinear elastic equations arising from an implant design problem in craniofacial surgery.}, language = {en} } @misc{GoetschelMaierhoferMuelleretal.2016, author = {G{\"o}tschel, Sebastian and Maierhofer, Christiane and M{\"u}ller, Jan P. and Rothbart, Nick and Weiser, Martin}, title = {Quantitative Defect Reconstruction in Active Thermography for Fiber-Reinforced Composites}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-58374}, year = {2016}, abstract = {Carbon-fiber reinforced composites are becoming more and more important in the production of light-weight structures, e.g., in the automotive and aerospace industry. Thermography is often used for non-destructive testing of these products, especially to detect delaminations between different layers of the composite. 
In this presentation, we aim at methods for defect reconstruction from thermographic measurements of such carbon-fiber reinforced composites. The reconstruction results shall not only allow to locate defects, but also give a quantitative characterization of the defect properties. We discuss the simulation of the measurement process using finite element methods, as well as the experimental validation on flat bottom holes. Especially in pulse thermography, thin boundary layers with steep temperature gradients occurring at the heated surface need to be resolved. Here we use the combination of a 1D analytical solution combined with numerical solution of the remaining defect equation. We use the simulations to identify material parameters from the measurements. Finally, fast heuristics for reconstructing defect geometries are applied to the acquired data, and compared for their accuracy and utility in detecting different defects like back surface defects or delaminations.}, language = {en} } @misc{GoetschelHoehneKolkoorietal.2016, author = {G{\"o}tschel, Sebastian and H{\"o}hne, Christian and Kolkoori, Sanjeevareddy and Mitzscherling, Steffen and Prager, Jens and Weiser, Martin}, title = {Ray Tracing Boundary Value Problems: Simulation and SAFT Reconstruction for Ultrasonic Testing}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-58386}, year = {2016}, abstract = {The application of advanced imaging techniques for the ultrasonic inspection of inhomogeneous anisotropic materials like austenitic and dissimilar welds requires information about acoustic wave propagation through the material, in particular travel times between two points in the material. Forward ray tracing is a popular approach to determine traveling paths and arrival times but is ill suited for inverse problems since a large number of rays have to be computed in order to arrive at prescribed end points. In this contribution we discuss boundary value problems for acoustic rays, where the ray path between two given points is determined by solving the eikonal equation. The implementation of such a two point boundary value ray tracer for sound field simulations through an austenitic weld is described and its efficiency as well as the obtained results are compared to those of a forward ray tracer. The results are validated by comparison with experimental results and commercially available UT simulation tools. As an application, we discuss an implementation of the method for SAFT (Synthetic Aperture Focusing Technique) reconstruction. The ray tracer calculates the required travel time through the anisotropic columnar grain structure of the austenitic weld. There, the formulation of ray tracing as a boundary value problem allows a straightforward derivation of the ray path from a given transducer position to any pixel in the reconstruction area and reduces the computational cost considerably.}, language = {en} } @book{Weiser2016, author = {Weiser, Martin}, title = {Inside Finite Elements}, publisher = {De Gruyter}, year = {2016}, abstract = {All relevant implementation aspects of finite element methods are discussed in this book. The focus is on algorithms and data structures as well as on their concrete implementation. 
Theory is covered as far as it gives insight into the construction of algorithms. Throughout the exercises, a complete FE-solver for scalar 2D problems will be implemented in Matlab/Octave.}, language = {en} } @misc{Makarenko2017, type = {Master Thesis}, author = {Makarenko, Oleksandra}, title = {Square Root Approximation und statistische Umgewichtungstechnik f{\"u}r das Faltungsverhalten von Polymerb{\"u}rsten}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-65283}, year = {2017}, language = {de} } @misc{VolkweinWeiser2000, author = {Volkwein, Stefan and Weiser, Martin}, title = {Affine Invariant Convergence Analysis for Inexact Augmented Lagrangian-SQP Methods}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-6243}, number = {00-56}, year = {2000}, abstract = {An affine invariant convergence analysis for inexact augmented Lagrangian-SQP methods is presented. The theory is used for the construction of an accuracy matching between iteration errors and truncation errors, which arise from the inexact linear system solves. The theoretical investigations are illustrated numerically by an optimal control problem for the Burgers equation.}, language = {en} } @misc{WeiserDeuflhard2001, author = {Weiser, Martin and Deuflhard, Peter}, title = {The Central Path towards the Numerical Solution of Optimal Control Problems}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-6380}, number = {01-12}, year = {2001}, abstract = {A new approach to the numerical solution of optimal control problems including control and state constraints is presented. Like hybrid methods, the approach aims at combining the advantages of direct and indirect methods. Unlike hybrid methods, however, our method is directly based on interior-point concepts in function space --- realized via an adaptive multilevel scheme applied to the complementarity formulation and numerical continuation along the central path. Existence of the central path and its continuation towards the solution point is analyzed in some theoretical detail. An adaptive stepsize control with respect to the duality gap parameter is worked out in the framework of affine invariant inexact Newton methods. Finally, the performance of a first version of our new type of algorithm is documented by the successful treatment of the well-known intricate windshear problem.}, language = {en} } @misc{ErdmannKoberLangetal.2001, author = {Erdmann, Bodo and Kober, Cornelia and Lang, Jens and Sader, Robert and Zeilhofer, Hans-Florian and Deuflhard, Peter}, title = {Efficient and Reliable Finite Element Methods for Simulation of the Human Mandible}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-6403}, number = {01-14}, year = {2001}, abstract = {Computed tomography (CT) data reproduce the individual geometry of the mandible quite well, including the separation between cortical and trabecular bone. Using anatomical knowledge about the architecture and the functional potential of the masticatory muscles, realistic situations were approximated. The solution of the underlying partial differential equations describing linear elastic material behaviour is provided by an adaptive finite element method.
Estimations of the discretization error, local grid refinement, and multilevel techniques guarantee the reliability and efficiency of the method.}, language = {en} } @misc{HinzeSchiela2007, author = {Hinze, Michael and Schiela, Anton}, title = {Discretization of Interior Point Methods for State Constrained Elliptic Optimal Control Problems: Optimal Error Estimates and Parameter Adjustment}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-10414}, number = {07-40}, year = {2007}, abstract = {An adjustment scheme for the relaxation parameter of interior point approaches to the numerical solution of pointwise state constrained elliptic optimal control problems is introduced. The method is based on error estimates of an associated finite element discretization of the relaxed problems and optimally selects the relaxation parameter in dependence on the mesh size of discretization. The finite element analysis for the relaxed problems is carried out and a numerical example is presented which confirms our analytical findings.}, language = {en} } @misc{Weiser2007, author = {Weiser, Martin}, title = {Pointwise Nonlinear Scaling for Reaction-Diffusion-Equations}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-10493}, number = {07-45}, year = {2007}, abstract = {Parabolic reaction-diffusion systems may develop sharp moving reaction fronts which pose a challenge even for adaptive finite element methods. We propose a method to transform the equation into an equivalent form that usually exhibits solutions which are easier to discretize, giving higher accuracy for a given number of degrees of freedom. The transformation is realized as an efficiently computable pointwise nonlinear scaling that is optimized for prototypical planar travelling wave solutions of the underlying reaction-diffusion equation. The gain in either performance or accuracy is demonstrated on different numerical examples.}, language = {en} } @misc{SchenkWaechterWeiser2007, author = {Schenk, Olaf and W{\"a}chter, Andreas and Weiser, Martin}, title = {Inertia Revealing Preconditioning For Large-Scale Nonconvex Constrained Optimization}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-10314}, number = {07-32}, year = {2007}, abstract = {Fast nonlinear programming methods following the all-at-once approach usually employ Newton's method for solving linearized Karush-Kuhn-Tucker (KKT) systems. In nonconvex problems, the Newton direction is only guaranteed to be a descent direction if the Hessian of the Lagrange function is positive definite on the nullspace of the active constraints, otherwise some modifications to Newton's method are necessary. This condition can be verified using the signs of the KKT's eigenvalues (inertia), which are usually available from direct solvers for the arising linear saddle point problems. Iterative solvers are mandatory for very large-scale problems, but in general do not provide the inertia. Here we present a preconditioner based on a multilevel incomplete \$LBL^T\$ factorization, from which an approximation of the inertia can be obtained. The suitability of the heuristics for application in optimization methods is verified on an interior point method applied to the CUTE and COPS test problems, on large-scale 3D PDE-constrained optimal control problems, as well as 3D PDE-constrained optimization in biomedical cancer hyperthermia treatment planning. 
The efficiency of the preconditioner is demonstrated on convex and nonconvex problems with \$150^3\$ state variables and \$150^2\$ control variables, both subject to bound constraints.}, language = {en} } @misc{Schiela2008, author = {Schiela, Anton}, title = {An Extended Mathematical Framework for Barrier Methods in Function Space}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-10593}, number = {08-07}, year = {2008}, abstract = {An extended mathematical framework for barrier methods for state constrained optimal control compared to [Schiela, ZIB-Report 07-07] is considered. This allows to apply the results derived there to more general classes of optimal control problems, in particular to boundary control and finite dimensional control.}, language = {en} } @article{SemlerWeiser2023, author = {Semler, Phillip and Weiser, Martin}, title = {Adaptive Gaussian Process Regression for Efficient Building of Surrogate Models in Inverse Problems}, volume = {39}, journal = {Inverse Problems}, number = {12}, arxiv = {http://arxiv.org/abs/2303.05824}, doi = {10.1088/1361-6420/ad0028}, pages = {125003}, year = {2023}, abstract = {In a task where many similar inverse problems must be solved, evaluating costly simulations is impractical. Therefore, replacing the model y with a surrogate model y(s) that can be evaluated quickly leads to a significant speedup. The approximation quality of the surrogate model depends strongly on the number, position, and accuracy of the sample points. With an additional finite computational budget, this leads to a problem of (computer) experimental design. In contrast to the selection of sample points, the trade-off between accuracy and effort has hardly been studied systematically. We therefore propose an adaptive algorithm to find an optimal design in terms of position and accuracy. Pursuing a sequential design by incrementally appending the computational budget leads to a convex and constrained optimization problem. As a surrogate, we construct a Gaussian process regression model. We measure the global approximation error in terms of its impact on the accuracy of the identified parameter and aim for a uniform absolute tolerance, assuming that y(s) is computed by finite element calculations. A priori error estimates and a coarse estimate of computational effort relate the expected improvement of the surrogate model error to computational effort, resulting in the most efficient combination of sample point and evaluation tolerance. We also allow for improving the accuracy of already existing sample points by continuing previously truncated finite element solution procedures.}, language = {en} } @inproceedings{CheginiSteinkeWeiser2022, author = {Chegini, Fatemeh and Steinke, Thomas and Weiser, Martin}, title = {Efficient adaptivity for simulating cardiac electrophysiology with spectral deferred correction methods}, arxiv = {http://arxiv.org/abs/2311.07206}, year = {2022}, abstract = {The locality of solution features in cardiac electrophysiology simulations calls for adaptive methods. Due to the overhead incurred by established mesh refinement and coarsening, however, such approaches failed in accelerating the computations. Here we investigate a different route to spatial adaptivity that is based on nested subset selection for algebraic degrees of freedom in spectral deferred correction methods. 
This combination of algebraic adaptivity and iterative solvers for higher order collocation time stepping realizes a multirate integration with minimal overhead. This leads to moderate but significant speedups in both monodomain and cell-by-cell models of cardiac excitation, as demonstrated on four numerical examples.}, language = {en} } @inproceedings{SteyerCheginiPotseetal.2023, author = {Steyer, Joshua and Chegini, Fatemeh and Potse, Mark and Loewe, Axel and Weiser, Martin}, title = {Continuity of Microscopic Cardiac Conduction in a Computational Cell-by-Cell Model}, volume = {50}, booktitle = {2023 Computing in Cardiology Conference (CinC)}, publisher = {Computing in Cardiology}, issn = {2325-887X}, doi = {10.22489/CinC.2023.385}, year = {2023}, abstract = {Conduction velocity (CV) in cardiac tissue is a crucial electrophysiological parameter for arrhythmia vulnerability. Pathologically reduced conduction velocity facilitates arrhythmogenesis because such conduction velocities decrease the wavelength with which re-entry may occur. Computational studies exist on CV and how it changes regionally in models at spatial scales multiple times larger than actual cardiac cells. However, microscopic conduction within cells and between them has been studied less in simulations. In this work, we study the relation between microscopic conduction patterns and clinically observable macroscopic conduction using an extracellular-membrane-intracellular model which represents cardiac tissue with these subdomains at subcellular resolution. By considering cell arrangement and non-uniform gap junction distribution, it yields anisotropic excitation propagation. This novel kind of model can for example be used to understand how discontinuous conduction on the microscopic level affects fractionation of electrograms in healthy and fibrotic tissue. Along the membrane of a cell, we observed a continuously propagating activation wavefront. When transitioning from one cell to the neighbouring one, jumps in local activation times occurred, which led to lower global conduction velocities than locally within each cell.}, language = {en} } @article{BartelsFisikopoulosWeiser2023, author = {Bartels, Tinko and Fisikopoulos, Vissarion and Weiser, Martin}, title = {Fast Floating-Point Filters for Robust Predicates}, volume = {63}, journal = {BIT Numerical Mathematics}, arxiv = {http://arxiv.org/abs/2208.00497}, doi = {10.1007/s10543-023-00975-x}, year = {2023}, abstract = {Geometric predicates are at the core of many algorithms, such as the construction of Delaunay triangulations, mesh processing and spatial relation tests. These algorithms have applications in scientific computing, geographic information systems and computer-aided design. With floating-point arithmetic, these geometric predicates can incur round-off errors that may lead to incorrect results and inconsistencies, causing computations to fail. This issue has been addressed using a combination of exact arithmetic for robustness and floating-point filters to mitigate the computational cost of exact computations. The implementation of exact computations and floating-point filters can be a difficult task, and code generation tools have been proposed to address this. We present a new C++ meta-programming framework for the generation of fast, robust predicates for arbitrary geometric predicates based on polynomial expressions. We combine and extend different approaches to filtering, branch reduction, and overflow avoidance that have previously been proposed.
We show examples of how this approach produces correct results for data sets that could lead to incorrect predicate results with naive implementations. Our benchmark results demonstrate that our implementation surpasses state-of-the-art implementations.}, language = {en} } @misc{BorndoerferDaneckerWeiser2023, author = {Bornd{\"o}rfer, Ralf and Danecker, Fabian and Weiser, Martin}, title = {Convergence Properties of Newton's Method for Globally Optimal Free Flight Trajectory Optimization}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-91309}, year = {2023}, abstract = {The algorithmic efficiency of Newton-based methods for Free Flight Trajectory Optimization is heavily influenced by the size of the domain of convergence. We provide numerical evidence that the convergence radius is much larger in practice than what the theoretical worst case bounds suggest. The algorithm can be further improved by a convergence-enhancing domain decomposition.}, language = {en} } @misc{SagnolHegeWeiser2016, author = {Sagnol, Guillaume and Hege, Hans-Christian and Weiser, Martin}, title = {Using sparse kernels to design computer experiments with tunable precision}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-59605}, year = {2016}, abstract = {Statistical methods to design computer experiments usually rely on a Gaussian process (GP) surrogate model, and typically aim at selecting design points (combinations of algorithmic and model parameters) that minimize the average prediction variance, or maximize the prediction accuracy for the hyperparameters of the GP surrogate. In many applications, experiments have a tunable precision, in the sense that one software parameter controls the tradeoff between accuracy and computing time (e.g., mesh size in FEM simulations or number of Monte-Carlo samples). We formulate the problem of allocating a budget of computing time over a finite set of candidate points for the goals mentioned above. This is a continuous optimization problem, which is moreover convex whenever the tradeoff function accuracy vs. computing time is concave. On the other hand, using non-concave weight functions can help to identify sparse designs. In addition, using sparse kernel approximations drastically reduces the cost per iteration of the multiplicative weights updates that can be used to solve this problem.}, language = {en} } @misc{BorndoerferDaneckerWeiser2020, author = {Bornd{\"o}rfer, Ralf and Danecker, Fabian and Weiser, Martin}, title = {A Discrete-Continuous Algorithm for Free Flight Planning}, issn = {1438-0064}, doi = {10.3390/a14010004}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-81343}, year = {2020}, abstract = {We propose a hybrid discrete-continuous algorithm for flight planning in free flight airspaces. In a first step, our DisCOptER method (discrete-continuous optimization for enhanced resolution) computes a globally optimal approximate flight path on a discretization of the problem using the A* method. This route initializes a Newton method that converges rapidly to the smooth optimum in a second step. The correctness, accuracy, and complexity of the method are governed by the choice of the crossover point that determines the coarseness of the discretization. We analyze the optimal choice of the crossover point and demonstrate the asymptotic superiority of DisCOptER over a purely discrete approach.}, language = {en} }