@misc{BorndoerferDaneckerWeiserNewton,
  author = {Bornd{\"o}rfer, Ralf and Danecker, Fabian and Weiser, Martin},
  title = {Convergence Properties of Newton's Method for Globally Optimal Free Flight Trajectory Optimization},
  issn = {1438-0064},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-91309},
  abstract = {The algorithmic efficiency of Newton-based methods for Free Flight Trajectory Optimization is heavily influenced by the size of the domain of convergence. We provide numerical evidence that the convergence radius is much larger in practice than the theoretical worst-case bounds suggest. The algorithm can be further improved by a convergence-enhancing domain decomposition.},
  language = {en}
}

@misc{BorndoerferDaneckerWeiserDisCOptER,
  author = {Bornd{\"o}rfer, Ralf and Danecker, Fabian and Weiser, Martin},
  title = {A Discrete-Continuous Algorithm for Free Flight Planning},
  issn = {1438-0064},
  doi = {10.3390/a14010004},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-81343},
  abstract = {We propose a hybrid discrete-continuous algorithm for flight planning in free flight airspaces. In a first step, our DisCOptER method (discrete-continuous optimization for enhanced resolution) computes a globally optimal approximate flight path on a discretization of the problem using the A* method. This route initializes a Newton method that converges rapidly to the smooth optimum in a second step. The correctness, accuracy, and complexity of the method are governed by the choice of the crossover point that determines the coarseness of the discretization. We analyze the optimal choice of the crossover point and demonstrate the asymptotic superiority of DisCOptER over a purely discrete approach.},
  language = {en}
}

@misc{GoetschelWeiserCompressionSurvey,
  author = {G{\"o}tschel, Sebastian and Weiser, Martin},
  title = {Lossy Compression for Large Scale PDE Problems},
  issn = {1438-0064},
  doi = {10.1101/506378},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-73817},
  abstract = {Solvers for partial differential equations (PDEs) are one of the cornerstones of computational science. For large problems, they involve huge amounts of data that need to be stored and transmitted on all levels of the memory hierarchy. Often, bandwidth is the limiting factor due to relatively small arithmetic intensity, and increasingly so due to the growing disparity between computing power and bandwidth. Consequently, data compression techniques have been investigated and tailored towards the specific requirements of PDE solvers over the last decades. This paper surveys data compression challenges and corresponding solution approaches for PDE problems, covering all levels of the memory hierarchy from mass storage up to main memory. As examples, we illustrate the concepts with particular methods and give references to alternatives.},
  language = {en}
}

@misc{GoetschelSchielaWeiser,
  author = {G{\"o}tschel, Sebastian and Schiela, Anton and Weiser, Martin},
  title = {Kaskade 7 -- a Flexible Finite Element Toolbox},
  issn = {1438-0064},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-74616},
  abstract = {Kaskade 7 is a finite element toolbox for the solution of stationary or transient systems of partial differential equations, aimed at supporting application-oriented research in numerical analysis and scientific computing. The library is written in C++ and is based on the Dune interface. The code is independent of spatial dimension and works with different grid managers.
An important feature is the mix-and-match approach to discretizing systems of PDEs with different ansatz and test spaces for all variables. We describe the mathematical concepts behind the library as well as its structure, illustrating its use with several examples along the way.},
  language = {en}
}

@misc{WeiserFreytagErdmannetal.,
  author = {Weiser, Martin and Freytag, Yvonne and Erdmann, Bodo and Hubig, Michael and Mall, Gita},
  title = {Optimal Design of Experiments for Estimating the Time of Death in Forensic Medicine},
  issn = {1438-0064},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-67247},
  abstract = {Estimation of the time of death based on a single measurement of body core temperature is a standard procedure in forensic medicine. Mechanistic models using simulation of heat transport promise higher accuracy than established phenomenological models, in particular in nonstandard situations, but involve many physical parameters that are not known exactly. Identifying both the time of death and the physical parameters from multiple temperature measurements is one way to reduce the uncertainty significantly. In this paper, we consider the inverse problem in a Bayesian setting and perform both local and sampling-based uncertainty quantification, where proper orthogonal decomposition is used as model reduction for fast solution of the forward model. Based on the local uncertainty quantification, optimal design of experiments is performed in order to minimize the uncertainty in the time of death estimate for a given number of measurements. For reasons of practicability, temperature acquisition points are selected from a set of candidates at different spatial and temporal locations. Applied to a real corpse model, a significant accuracy improvement is obtained even with a small number of measurements.},
  language = {en}
}

@misc{FischerGoetschelWeiser,
  author = {Fischer, Lisa and G{\"o}tschel, Sebastian and Weiser, Martin},
  title = {Lossy data compression reduces communication time in hybrid time-parallel integrators},
  issn = {1438-0064},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-63961},
  abstract = {Parallel-in-time methods for solving initial value problems are a means to increase the parallelism of numerical simulations. Hybrid parareal schemes interleaving the parallel-in-time iteration with an iterative solution of the individual time steps are among the most efficient methods for general nonlinear problems. Although communication time can be hidden behind computation, communication has a significant impact on the total runtime in certain situations. Here we present strict, yet not sharp, error bounds for hybrid parareal methods with inexact communication due to lossy data compression, and derive theoretical estimates of the impact of compression on the parallel efficiency of the algorithms.
These bounds and some computational experiments suggest that compression is a viable method to make hybrid parareal schemes robust with respect to low-bandwidth setups.},
  language = {en}
}

@misc{WeiserErdmannSchenkletal.2017,
  author = {Weiser, Martin and Erdmann, Bodo and Schenkl, Sebastian and Muggenthaler, Holger and Hubig, Michael and Mall, Gita and Zachow, Stefan},
  title = {Uncertainty in Temperature-Based Determination of Time of Death},
  issn = {1438-0064},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-63818},
  year = {2017},
  abstract = {Temperature-based estimation of the time of death (ToD) can be performed either with the help of simple phenomenological models of corpse cooling or with detailed mechanistic (thermodynamic) heat transfer models. The latter are much more complex, but allow higher accuracy of ToD estimation, as in principle all relevant cooling mechanisms can be taken into account. The potentially higher accuracy depends on the accuracy of tissue and environmental parameters as well as on the geometric resolution. We investigate the impact of parameter variations and geometry representation on the estimated ToD based on a highly detailed 3D corpse model that has been segmented and geometrically reconstructed from a computed tomography (CT) data set, differentiating various organs and tissue types. From that we identify the most crucial parameters to measure or estimate, and obtain a local uncertainty quantification for the ToD.},
  language = {en}
}

@misc{SagnolHegeWeiser,
  author = {Sagnol, Guillaume and Hege, Hans-Christian and Weiser, Martin},
  title = {Using sparse kernels to design computer experiments with tunable precision},
  issn = {1438-0064},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-59605},
  abstract = {Statistical methods to design computer experiments usually rely on a Gaussian process (GP) surrogate model, and typically aim at selecting design points (combinations of algorithmic and model parameters) that minimize the average prediction variance, or maximize the prediction accuracy for the hyperparameters of the GP surrogate. In many applications, experiments have a tunable precision, in the sense that one software parameter controls the tradeoff between accuracy and computing time (e.g., mesh size in FEM simulations or the number of Monte Carlo samples). We formulate the problem of allocating a budget of computing time over a finite set of candidate points for the goals mentioned above. This is a continuous optimization problem, which is moreover convex whenever the tradeoff function of accuracy versus computing time is concave. On the other hand, using non-concave weight functions can help to identify sparse designs. In addition, using sparse kernel approximations drastically reduces the cost per iteration of the multiplicative weights updates that can be used to solve this problem.},
  language = {en}
}

@misc{GoetschelMaierhoferMuelleretal.,
  author = {G{\"o}tschel, Sebastian and Maierhofer, Christiane and M{\"u}ller, Jan P. and Rothbart, Nick and Weiser, Martin},
  title = {Quantitative Defect Reconstruction in Active Thermography for Fiber-Reinforced Composites},
  issn = {1438-0064},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-58374},
  abstract = {Carbon-fiber reinforced composites are becoming more and more important in the production of lightweight structures, e.g., in the automotive and aerospace industry.
Thermography is often used for non-destructive testing of these products, especially to detect delaminations between different layers of the composite. In this presentation, we aim at methods for defect reconstruction from thermographic measurements of such carbon-fiber reinforced composites. The reconstruction results shall not only locate defects, but also give a quantitative characterization of the defect properties. We discuss the simulation of the measurement process using finite element methods, as well as the experimental validation on flat bottom holes. Especially in pulse thermography, thin boundary layers with steep temperature gradients occurring at the heated surface need to be resolved. Here we combine a 1D analytical solution with a numerical solution of the remaining defect equation. We use the simulations to identify material parameters from the measurements. Finally, fast heuristics for reconstructing defect geometries are applied to the acquired data, and compared for their accuracy and utility in detecting different defects like back surface defects or delaminations.},
  language = {en}
}

@misc{GoetschelHoehneKolkoorietal.,
  author = {G{\"o}tschel, Sebastian and H{\"o}hne, Christian and Kolkoori, Sanjeevareddy and Mitzscherling, Steffen and Prager, Jens and Weiser, Martin},
  title = {Ray Tracing Boundary Value Problems: Simulation and SAFT Reconstruction for Ultrasonic Testing},
  issn = {1438-0064},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-58386},
  abstract = {The application of advanced imaging techniques for the ultrasonic inspection of inhomogeneous anisotropic materials like austenitic and dissimilar welds requires information about acoustic wave propagation through the material, in particular travel times between two points in the material. Forward ray tracing is a popular approach to determine traveling paths and arrival times, but is ill-suited for inverse problems, since a large number of rays have to be computed in order to arrive at prescribed end points. In this contribution we discuss boundary value problems for acoustic rays, where the ray path between two given points is determined by solving the eikonal equation. The implementation of such a two-point boundary value ray tracer for sound field simulations through an austenitic weld is described, and its efficiency as well as the obtained results are compared to those of a forward ray tracer. The results are validated by comparison with experimental results and commercially available UT simulation tools. As an application, we discuss an implementation of the method for SAFT (Synthetic Aperture Focusing Technique) reconstruction. The ray tracer calculates the required travel time through the anisotropic columnar grain structure of the austenitic weld. There, the formulation of ray tracing as a boundary value problem allows a straightforward derivation of the ray path from a given transducer position to any pixel in the reconstruction area and reduces the computational cost considerably.},
  language = {en}
}

@misc{WeiserGhosh,
  author = {Weiser, Martin and Ghosh, Sunayana},
  title = {Theoretically optimal inexact SDC methods},
  issn = {1438-0064},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-53140},
  abstract = {In several initial value problems with particularly expensive right hand side computation, there is a trade-off between accuracy and computational effort in evaluating the right hand sides.
We consider inexact spectral deferred correction (SDC) methods for solving such non-stiff initial value problems. SDC methods are interpreted as fixed point iterations and, due to their corrective iterative nature, allow exploiting the accuracy-work tradeoff to reduce the total computational effort. On the one hand, we derive an error model bounding the total error in terms of the right hand side evaluation errors. On the other hand, we define work models describing the computational effort in terms of the evaluation accuracy. Combining both, a theoretically optimal tolerance selection is worked out by minimizing the total work subject to achieving the requested tolerance.},
  language = {en}
}

@misc{LubkollSchielaWeiserCompositeStep,
  author = {Lubkoll, Lars and Schiela, Anton and Weiser, Martin},
  title = {An affine covariant composite step method for optimization with PDEs as equality constraints},
  issn = {1438-0064},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-53954},
  abstract = {We propose a composite step method, designed for equality constrained optimization with partial differential equations. The focus lies on the construction of a globalization scheme, which is based on cubic regularization of the objective and an affine covariant damped Newton method for feasibility. We show finite termination of the inner loop and fast local convergence of the algorithm. We discuss preconditioning strategies for the iterative solution of the arising linear systems with projected conjugate gradients. Numerical results are shown for optimal control problems subject to a nonlinear heat equation and subject to nonlinear elastic equations arising from an implant design problem in craniofacial surgery.},
  language = {en}
}

@misc{WeiserScacchi,
  author = {Weiser, Martin and Scacchi, Simone},
  title = {Spectral Deferred Correction methods for adaptive electro-mechanical coupling in cardiac simulation},
  issn = {1438-0064},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-50695},
  abstract = {We investigate spectral deferred correction (SDC) methods for time stepping and their interplay with spatio-temporal adaptivity, applied to the solution of the cardiac electro-mechanical coupling model. This model consists of the Monodomain equations, a reaction-diffusion system modeling the cardiac bioelectrical activity, coupled with a quasi-static mechanical model describing the contraction and relaxation of the cardiac muscle. The numerical approximation of the cardiac electro-mechanical coupling is a challenging multiphysics problem, because it exhibits very different spatial and temporal scales. Therefore, spatio-temporal adaptivity is a promising approach to reduce the computational complexity. SDC methods are simple iterative methods for solving collocation systems. We exploit their flexibility for combining them in various ways with spatio-temporal adaptivity. The accuracy and computational complexity of the resulting methods are studied on some numerical examples.},
  language = {en}
}

@misc{GoetschelvonTycowiczPolthieretal.,
  author = {G{\"o}tschel, Sebastian and von Tycowicz, Christoph and Polthier, Konrad and Weiser, Martin},
  title = {Reducing Memory Requirements in Scientific Computing and Optimal Control},
  issn = {1438-0064},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-42695},
  abstract = {In high accuracy numerical simulations and optimal control of time-dependent processes, often both many time steps and fine spatial discretizations are needed.
Adjoint gradient computation, or post-processing of simulation results, requires the storage of the solution trajectories over the whole time horizon, if necessary together with the adaptively refined spatial grids. In this paper we discuss various techniques to reduce the memory requirements, focusing first on the storage of the solution data, which typically are double precision floating point values. We highlight advantages and disadvantages of the different approaches. Moreover, we present an algorithm for the efficient storage of adaptively refined, hierarchic grids, and its integration with the compressed storage of solution data.},
  language = {en}
}

@misc{GoetschelNagaiahKunischetal.,
  author = {G{\"o}tschel, Sebastian and Nagaiah, Chamakuri and Kunisch, Karl and Weiser, Martin},
  title = {Lossy Compression in Optimal Control of Cardiac Defibrillation},
  issn = {1438-0064},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-18566},
  abstract = {This paper presents efficient computational techniques for solving an optimization problem in cardiac defibrillation governed by the monodomain equations. Time-dependent electrical currents injected at different spatial positions act as the control. Inexact Newton-CG methods are used, with reduced gradient computation by adjoint solves. In order to reduce the computational complexity, adaptive mesh refinement for state and adjoint equations is performed. To reduce the high storage and bandwidth demand imposed by adjoint gradient and Hessian-vector evaluations, a lossy compression technique for storing trajectory data is applied. An adaptive choice of quantization tolerance based on error estimates is developed in order to ensure convergence. The efficiency of the proposed approach is demonstrated on numerical examples.},
  language = {en}
}

@misc{GoetschelWeiserCompressionControl,
  author = {G{\"o}tschel, Sebastian and Weiser, Martin},
  title = {Lossy Compression for PDE-constrained Optimization: Adaptive Error Control},
  issn = {1438-0064},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-18575},
  abstract = {For the solution of optimal control problems governed by nonlinear parabolic PDEs, methods working on the reduced objective functional are often employed to avoid a full spatio-temporal discretization of the problem. The evaluation of the reduced gradient requires one solve of the state equation forward in time, and one backward solve of the adjoint equation. The state enters the adjoint equation, requiring the storage of a full 4D data set. If Newton-CG methods are used, two additional trajectories have to be stored. In many cases, very fine discretizations in time and space are necessary to obtain sufficiently accurate numerical results, which leads to a significant amount of data to be stored and transmitted to mass storage. Lossy compression methods were developed to overcome the storage problem by reducing the accuracy of the stored trajectories. The inexact data induce errors in the reduced gradient and reduced Hessian.
In this paper, we analyze the influence of such a lossy trajectory compression method on Newton-CG methods for optimal control of parabolic PDEs and design an adaptive strategy for choosing appropriate quantization tolerances.},
  language = {en}
}

@misc{Weiser,
  author = {Weiser, Martin},
  title = {Faster SDC convergence on non-equidistant grids by DIRK sweeps},
  issn = {1438-0064},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-18662},
  abstract = {Spectral deferred correction methods for solving stiff ODEs are known to converge rapidly towards the collocation limit solution on equidistant grids, but show a much less favourable contraction on non-equidistant grids such as Radau-IIa points. We interpret SDC methods as fixed point iterations for the collocation system and propose new DIRK-type sweeps for stiff problems based on purely linear algebraic considerations. Good convergence is recovered also on non-equidistant grids. The properties of different variants are explored on a couple of numerical examples.},
  language = {en}
}

@misc{MoualeuNgangueWeiserEhrigetal.,
  author = {Moualeu-Ngangue, Dany Pascal and Weiser, Martin and Ehrig, Rainald and Deuflhard, Peter},
  title = {Optimal control for a tuberculosis model with undetected cases in Cameroon},
  issn = {1438-0064},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-43142},
  abstract = {This paper considers the optimal control of tuberculosis through education, diagnosis campaigns, and chemoprophylaxis of latently infected individuals. A mathematical model which includes important components such as undiagnosed infectious, diagnosed infectious, latently infected, and lost-sight infectious individuals is formulated. The model combines a frequency-dependent and a density-dependent force of infection for TB transmission. Through optimal control theory and numerical simulations, a cost-effective balance of two different intervention methods is obtained. Seeking to minimize the amount of money the government spends while tuberculosis remains endemic in the Cameroonian population, Pontryagin's maximum principle is used to characterize the optimal control. The optimality system is derived and solved numerically using the forward-backward sweep method (FBSM). The results provide a framework for designing cost-effective strategies for diseases with multiple intervention methods. It turns out that by combining chemoprophylaxis and education, the burden of TB can be reduced by 80 \% within 10 years.},
  language = {en}
}

@misc{GoetschelWeiserMaierhoferetal.,
  author = {G{\"o}tschel, Sebastian and Weiser, Martin and Maierhofer, Christiane and Richter, Regina},
  title = {Data Enhancement for Active Thermography},
  issn = {1438-0064},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-15243},
  abstract = {Pulse thermography is a non-destructive testing method based on infrared imaging of transient thermal patterns. Heating the surface of the structure under test for a short period of time generates a non-stationary temperature distribution and thus a thermal contrast between the defect and the sound material. Due to measurement noise, preprocessing of the experimental data is necessary before reconstruction algorithms can be applied.
We propose a decomposition of the measured temperature into Green's function solutions in order to eliminate noise.},
  language = {en}
}

@misc{LubkollSchielaWeiserHyperelasticity,
  author = {Lubkoll, Lars and Schiela, Anton and Weiser, Martin},
  title = {An optimal control problem in polyconvex hyperelasticity},
  issn = {1438-0064},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-14745},
  number = {12-08},
  abstract = {We consider an implant shape design problem that arises in the context of facial surgery. We introduce a reformulation as an optimal control problem, where the control acts as a boundary force. The state is modelled as a minimizer of a polyconvex hyperelastic energy functional. We show existence of optimal solutions and derive, on a formal level, first order optimality conditions. Finally, preliminary numerical results are presented.},
  language = {en}
}