@misc{KaplanLauferProhaskaetal., author = {Kaplan, Bernhard and Laufer, Jan and Prohaska, Steffen and Buchmann, Jens}, title = {Monte-Carlo-based inversion scheme for 3D quantitative photoacoustic tomography}, issn = {1438-0064}, doi = {10.1117/12.2251945}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-62318}, abstract = {The goal of quantitative photoacoustic tomography (qPAT) is to recover maps of the chromophore distributions from multiwavelength images of the initial pressure. Model-based inversions that incorporate the physical processes underlying the photoacoustic (PA) signal generation represent a promising approach. Monte-Carlo models of the light transport are computationally expensive, but provide accurate predictions of the fluence distribution, especially in the ballistic and quasi-ballistic regimes. Here, we focus on the inverse problem of 3D qPAT of blood oxygenation and investigate the application of the Monte-Carlo method in a model-based inversion scheme. A forward model combining light transport based on the MCX simulator with acoustic propagation modeled by the k-Wave toolbox was used to generate a PA image data set as acquired in a tissue phantom over a planar detection geometry. The combination of the optical and acoustic models is shown to account for limited-view artifacts. In addition, the errors in the fluence due to, for example, partial volume artifacts and absorbers immediately adjacent to the region of interest are investigated. To accomplish large-scale inversions in 3D, the number of degrees of freedom is reduced by applying image segmentation to the initial pressure distribution to extract a limited number of regions with homogeneous optical parameters. The absorber concentrations in the tissue phantom were estimated using a coordinate descent parameter search based on the comparison between measured and modeled PA spectra. The relative concentrations estimated with this approach lie within 5\% of the known concentrations. Finally, we discuss the feasibility of this approach to recover the blood oxygenation from experimental data.}, language = {en} } @misc{TateiwaShinanoYasudaetal., author = {Tateiwa, Nariaki and Shinano, Yuji and Yasuda, Masaya and Kaji, Shizuo and Yamamura, Keiichiro and Fujisawa, Katsuki}, title = {Massively parallel sharing lattice basis reduction}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-85209}, abstract = {For the cryptanalysis of lattice-based schemes, the performance evaluation of lattice basis reduction on high-performance computers is becoming increasingly important for determining security levels. We propose a distributed and asynchronous parallel reduction algorithm based on randomization and DeepBKZ, which is an improved variant of the block Korkine-Zolotarev (BKZ) reduction algorithm. Randomized copies of a lattice basis are distributed to up to 103,680 cores and independently reduced in parallel, while some basis vectors are shared asynchronously among all processes via MPI. There is a trade-off between randomization and information sharing; if a substantial amount of information is shared, all processes will work on the same problem, thereby diminishing the benefit of parallelization. To monitor this balance between randomness and sharing, we propose a metric to quantify the variety of lattice bases. We empirically find an optimal sharing parameter for high-dimensional lattices.
We demonstrate the efficacy of our proposed parallel algorithm and implementation with respect to both performance and scalability through our experiments.}, language = {en} } @misc{ClasenPaarProhaska, author = {Clasen, Malte and Paar, Philip and Prohaska, Steffen}, title = {Level of Detail for Trees Using Clustered Ellipsoids}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-14251}, number = {11-41}, abstract = {We present a level-of-detail method for trees based on ellipsoids and lines. We leverage the Expectation-Maximization algorithm with a Gaussian mixture model to create a hierarchy of high-quality leaf clusterings, while the branches are simplified using agglomerative bottom-up clustering to preserve connectivity. The simplification runs in a preprocessing step and requires no human interaction. For a flyby over and through a scene of 10k trees, our method renders at 40 ms/frame on average, up to 6 times faster than billboard clouds with comparable artifacts.}, language = {en} } @misc{Krause, type = {Master Thesis}, author = {Krause, Jan}, title = {Investigation of Options to Handle 3D MRI Data via Convolutional Neural Networks: Application in Knee Osteoarthritis Classification}, pages = {127}, language = {en} } @misc{RiberaBorrellQuerRichteretal., author = {Ribera Borrell, Enric and Quer, Jannes and Richter, Lorenz and Sch{\"u}tte, Christof}, title = {Improving control based importance sampling strategies for metastable diffusions via adapted metadynamics}, issn = {1438-0064}, abstract = {Sampling rare events in metastable dynamical systems is often a computationally expensive task, and one needs to resort to enhanced sampling methods such as importance sampling. Since the problem of finding optimal importance sampling controls can be formulated as a stochastic optimization problem, this brings additional numerical challenges, and the convergence of the corresponding algorithms may itself suffer from metastability. In this article we address this issue by combining systematic control approaches with the heuristic adaptive metadynamics method. Crucially, we approximate the importance sampling control by a neural network, which makes the algorithm in principle feasible for high-dimensional applications. We demonstrate numerically on relevant metastable problems that our algorithm is more effective than previous attempts and that only the combination of the two approaches leads to satisfactory convergence and therefore to efficient sampling in certain metastable settings.}, language = {en} } @misc{MasingLindnerEbert, author = {Masing, Berenike and Lindner, Niels and Ebert, Patricia}, title = {Forward and Line-Based Cycle Bases for Periodic Timetabling}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-89731}, abstract = {The optimization of periodic timetables is an indispensable planning task in public transport. Although the periodic event scheduling problem (PESP) provides an elegant mathematical formulation of the periodic timetabling problem that has led to many insights for primal heuristics, it is notoriously hard to solve to optimality. One reason is that for the standard mixed-integer linear programming formulations, the linear programming relaxations are weak, and the integer variables are of a purely technical nature and in general do not correlate with the objective value. While the first problem has been addressed by developing several families of cutting planes, we focus on the second aspect.
We discuss integral forward cycle bases as a concept to compute improved dual bounds for PESP instances. To this end, we develop the theory of forward cycle bases on general digraphs. Specifically for the application of timetabling, we devise a generic procedure to construct line-based event-activity networks, and give a simple recipe for an integral forward cycle basis on such networks. Finally, we analyze the 16 railway instances of the benchmark library PESPlib, match them to the line-based structure, and use forward cycle bases to compute better dual bounds for 14 out of the 16 instances.}, language = {en} } @misc{ShinanoHeinzVigerskeetal., author = {Shinano, Yuji and Heinz, Stefan and Vigerske, Stefan and Winkler, Michael}, title = {FiberSCIP - A shared memory parallelization of SCIP}, issn = {1438-0064}, doi = {10.1287/ijoc.2017.0762}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-42595}, abstract = {Recently, parallel computing environments have become increasingly popular. In order to benefit from them, we have to adapt our programs to exploit them effectively. This paper focuses on a parallelization of SCIP (Solving Constraint Integer Programs), a MIP solver and constraint integer programming framework available in source code. There is a parallel extension of SCIP named ParaSCIP, which parallelizes SCIP on massively parallel distributed-memory computing environments. This paper describes FiberSCIP, yet another parallel extension of SCIP, which utilizes multi-threaded parallel computation on shared-memory computing environments, and makes the following contributions: First, the basic concept of having two parallel extensions, and their relationship to the parallelization framework provided by UG (Ubiquity Generator), is presented, including an implementation of deterministic parallelization. Second, the difficulties of achieving good performance that utilizes all resources on an actual computing environment, and of evaluating the performance of the parallel solvers, are discussed. Third, a way to evaluate the performance of new algorithms and parameter settings of the parallel extensions is presented. Finally, the current performance of FiberSCIP for solving mixed-integer linear programs (MIPs) and mixed-integer nonlinear programs (MINLPs) in parallel is demonstrated.}, language = {en} } @misc{EhlkeRammLameckeretal., author = {Ehlke, Moritz and Ramm, Heiko and Lamecker, Hans and Hege, Hans-Christian and Zachow, Stefan}, title = {Fast Generation of Virtual X-ray Images from Deformable Tetrahedral Meshes}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-41896}, abstract = {We propose a novel GPU-based approach to render virtual X-ray projections of deformable tetrahedral meshes. These meshes represent the shape and the internal density distribution of a particular anatomical structure and are derived from statistical shape and intensity models (SSIMs). We apply our method to improve the geometric reconstruction of 3D anatomy (e.g.\ the pelvic bone) from 2D X-ray images. For that purpose, the shape and density of a tetrahedral mesh are varied and virtual X-ray projections are generated within an optimization process until the similarity between the computed virtual X-ray and the respective anatomy depicted in a given clinical X-ray is maximized. The OpenGL implementation presented in this work deforms and projects tetrahedral meshes of high resolution (200,000+ tetrahedra) at interactive rates.
It generates virtual X-rays that accurately depict the density distribution of the anatomy of interest. Compared to existing methods that accumulate X-ray attenuation in deformable meshes, our novel approach significantly boosts the deformation/projection performance. The proposed projection algorithm scales better with respect to mesh resolution and complexity of the density distribution, and the combined deformation and projection on the GPU scales better with respect to the number of deformation parameters. The gain in performance allows for a larger number of cycles in the optimization process and consequently reduces the risk of getting stuck in a local optimum. We believe that our approach contributes to orthopedic surgery, where 3D anatomy information needs to be extracted from 2D X-rays to support surgeons in better planning joint replacements.}, language = {en} } @misc{OrlowskiWernerWessaely, author = {Orlowski, Sebastian and Werner, Axel and Wess{\"a}ly, Roland}, title = {Estimating trenching costs in FTTx network planning}, issn = {1438-0064}, doi = {10.1007/978-3-642-29210-1_15}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-14884}, abstract = {In this paper we assess to what extent the trenching costs of an FTTx network are unavoidable, even if technical side constraints are neglected. For that purpose we present an extended Steiner tree model. Using a variety of realistic problem instances, we demonstrate that the total trenching cost can only be reduced by about 5 percent in realistic scenarios. This work has been funded by BMBF (German Federal Ministry of Education and Research) within the program "KMU-innovativ".}, language = {en} } @misc{Paskin, type = {Master Thesis}, author = {Paskin, Martha}, title = {Estimating 3D Shape of the Head Skeleton of Basking Sharks Using Annotated Landmarks on a 2D Image}, abstract = {Basking sharks are thought to be one of the most efficient filter-feeding fish in terms of the throughput of water filtered through their gills. Details of the underlying morphology of their branchial region have not been studied due to various challenges in acquiring real-world data. The present thesis aims to facilitate this by developing a mathematical shape model that constructs the 3D structure of the head skeleton of a basking shark from annotated landmarks on a single 2D image. This is an ill-posed problem, as estimating the depth of a 3D object from a single 2D view is, in general, not possible. To reduce this ambiguity, we create a set of pre-defined training shapes in 3D from CT scans of basking sharks. First, the damaged structures of the sharks in the scans are corrected by solving a set of optimization problems, before using them as accurate 3D representations of the object. Then, two approaches are employed for the 2D-to-3D shape fitting problem: an Active Shape Model approach and a Kendall's Shape Space approach. The former represents a shape as a point in a high-dimensional Euclidean space, whereas the latter represents a shape as an equivalence class of points in this Euclidean space. The Kendall's shape space approach is a novel technique that has not yet been applied in this context, and a comprehensive comparison of the two approaches suggests it to be superior for the problem at hand. This can be credited to an improved interpolation of the training shapes.}, language = {en} }