@inproceedings{SchnauberSchallBounouaretal.2019, author = {Schnauber, Peter and Schall, Johannes and Bounouar, Samir and H{\"o}hne, Theresa and Park, Suk-In and Ryu, Geun-Hwan and Heindel, Tobias and Burger, Sven and Song, Jin-Dong and Rodt, Sven and Reitzenstein, Stephan}, title = {Deterministic integration of quantum dots into on-chip multi-mode interference couplers via in-situ electron beam lithography}, booktitle = {Conference on Lasers and Electro-Optics Europe and European Quantum Electronics Conference (CLEO/EQEC)}, doi = {10.1109/CLEOE-EQEC.2019.8872583}, pages = {EB2.2}, year = {2019}, language = {en} } @article{Pulaj2019, author = {Pulaj, Jonad}, title = {Cutting planes for families implying Frankl's conjecture}, journal = {Mathematics of Computation}, doi = {10.1090/mcom/3461}, year = {2019}, abstract = {Using an algorithmic framework, we find previously unknown families of sets which ensure that Frankl's conjecture holds for all families that contain them. The conjecture states that for any nonempty finite union-closed (UC) family there exists an element of the ground set in at least half the sets of the considered UC family. Poonen's Theorem characterizes the existence of weights which determine whether a given UC family implies the conjecture for all UC families which contain it. We design a cutting-plane method that computes the explicit weights which satisfy the existence conditions of Poonen's Theorem. This method enables us to answer several open questions regarding structural properties of UC families, including the construction of a counterexample to a conjecture of Morris from 2006.}, language = {en} } @misc{SahinAhmadiBorndoerferetal.2019, author = {Sahin, Guvenc and Ahmadi, Amin and Bornd{\"o}rfer, Ralf and Schlechte, Thomas}, title = {Multi-Period Line Planning with Resource Transfers}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-74662}, year = {2019}, abstract = {Urban transportation systems are subject to a high level of variation and fluctuation in demand over the day. When this variation and fluctuation are observed in both time and space, it is crucial to develop line plans that are responsive to demand. A multi-period line planning approach that considers a changing demand during the planning horizon is proposed. If such systems are also subject to limitations of resources, a dynamic transfer of resources from one line to another throughout the planning horizon should also be considered. A mathematical modelling framework is developed to solve the line planning problem with transfer of resources during a finite length planning horizon of multiple periods. We analyze whether or not multi-period solutions outperform single period solutions in terms of feasibility and relevant costs. The impact of demand variation on multi-period solutions is investigated. We evaluate the impact of resource transfer constraints on the effectiveness of solutions.
We also study the effect of line type designs and question the choice of period lengths along with the problem parameters that are significant for and sensitive to the optimality of solutions.}, language = {en} } @article{ŻołnaczMusiałSrockaetal.2019, author = {Żołnacz, Kinga and Musiał, Anna and Srocka, Nicole and Große, Jan and Schl{\"o}singer, Maximilian and Schneider, Philipp-Immanuel and Kravets, Oleh and Mikulicz, Monika and Olszewski, Jacek and Poturaj, Krzysztof and W{\'o}jcik, Grzegorz and Mergo, Paweł and Dybka, Kamil and Dyrkacz, Mariusz and Dłubek, Michał and Rodt, Sven and Burger, Sven and Zschiedrich, Lin and Sęk, Grzegorz and Reitzenstein, Stephan and Urbańczyk, Wacław}, title = {Method for direct coupling of a semiconductor quantum dot to an optical fiber for single-photon source applications}, volume = {27}, journal = {Opt. Express}, doi = {10.1364/OE.27.026772}, pages = {26772}, year = {2019}, language = {en} } @misc{GotzesHoppmann2019, author = {Gotzes, Uwe and Hoppmann, Kai}, title = {Bounds for the final ranks during a round robin tournament}, journal = {Operational Research - An International Journal (ORIJ)}, doi = {10.1007/s12351-020-00546-w}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-74638}, year = {2019}, abstract = {This article answers two kinds of questions regarding the Bundesliga, which is Germany's primary football (soccer) competition having the highest average stadium attendance worldwide. First, "At any point of the season, what final rank will a certain team definitely reach?" and second, "At any point of the season, what final rank can a certain team at most reach?". Although we focus especially on the Bundesliga, the models that we use to answer the two questions can easily be adapted to league systems that are similar to that of the Bundesliga.}, language = {en} } @misc{NavaYazdaniHegevonTycowicz2019, author = {Nava-Yazdani, Esfandiar and Hege, Hans-Christian and von Tycowicz, Christoph}, title = {A Geodesic Mixed Effects Model in Kendall's Shape Space}, journal = {Proc. 7th MICCAI workshop on Mathematical Foundations of Computational Anatomy (MFCA)}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-74621}, year = {2019}, abstract = {In many applications, geodesic hierarchical models are adequate for the study of temporal observations. We apply such a model, derived for manifold-valued data, to Kendall's shape space. In particular, instead of the Sasaki metric, we adapt a functional-based metric, which increases the computational efficiency and does not require the implementation of the curvature tensor. We propose the corresponding variational time discretization of geodesics and apply the approach to the estimation of group trends and statistical testing of 3D shapes derived from an open access longitudinal imaging study on osteoarthritis.}, language = {en} } @misc{GoetschelSchielaWeiser2019, author = {G{\"o}tschel, Sebastian and Schiela, Anton and Weiser, Martin}, title = {Kaskade 7 -- a Flexible Finite Element Toolbox}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-74616}, year = {2019}, abstract = {Kaskade 7 is a finite element toolbox for the solution of stationary or transient systems of partial differential equations, aimed at supporting application-oriented research in numerical analysis and scientific computing. The library is written in C++ and is based on the Dune interface. The code is independent of spatial dimension and works with different grid managers.
An important feature is the mix-and-match approach to discretizing systems of PDEs with different ansatz and test spaces for all variables. We describe the mathematical concepts behind the library as well as its structure, illustrating its use with several examples along the way.}, language = {en} } @article{FroehlerElberfeldMoelleretal.2019, author = {Fr{\"o}hler, Bernhard and Elberfeld, Tim and M{\"o}ller, Torsten and Hege, Hans-Christian and Weissenb{\"o}ck, Johannes and De Beenhouwer, Jan and Sijbers, Jan and Kastner, Johann and Heinzl, Christoph}, title = {A Visual Tool for the Analysis of Algorithms for Tomographic Fiber Reconstruction in Materials Science}, volume = {38}, journal = {Computer Graphics Forum}, number = {3}, doi = {10.1111/cgf.13688}, pages = {273 -- 283}, year = {2019}, abstract = {We present visual analysis methods for the evaluation of tomographic fiber reconstruction algorithms by means of analysis, visual debugging and comparison of reconstructed fibers in materials science. The methods are integrated in a tool (FIAKER) that supports the entire workflow. It enables the analysis of various fiber reconstruction algorithms, of differently parameterized fiber reconstruction algorithms and of individual steps in iterative fiber reconstruction algorithms. Insight into the performance of fiber reconstruction algorithms is obtained by a list-based ranking interface. A 3D view offers interactive visualization techniques to gain deeper insight, e.g., into the aggregated quality of the examined fiber reconstruction algorithms and parameterizations. The tool was designed in close collaboration with researchers who work with fiber-reinforced polymers on a daily basis and develop algorithms for tomographic reconstruction and characterization of such materials. We evaluate the tool using synthetic datasets as well as tomograms of real materials. Five case studies certify the usefulness of the tool, showing that it significantly accelerates the analysis and provides valuable insights that make it possible to improve the fiber reconstruction algorithms. The main contribution of the paper is the well-considered combination of methods and their seamless integration into a visual tool that supports the entire workflow. Further findings result from the analysis of (dis-)similarity measures for fibers as well as from the discussion of design decisions. It is also shown that the generality of the analytical methods allows a wider range of applications, such as the application in pore space analysis.}, language = {en} } @article{GoubergritsHellmeierBrueningetal.2019, author = {Goubergrits, Leonid and Hellmeier, Florian and Bruening, Jan Joris and Spuler, Andreas and Hege, Hans-Christian and Voss, Samuel and Janiga, G{\'a}bor and Saalfeld, Sylvia and Beuing, Oliver and Berg, Philipp}, title = {Multiple Aneurysms AnaTomy CHallenge 2018 (MATCH): Uncertainty Quantification of Geometric Rupture Risk Parameters}, volume = {18}, journal = {BioMedical Engineering OnLine}, number = {35}, doi = {10.1186/s12938-019-0657-y}, year = {2019}, abstract = {Background Geometric parameters have been proposed for prediction of cerebral aneurysm rupture risk. Predicting the rupture risk for incidentally detected unruptured aneurysms could help clinicians in their treatment decision. However, assessment of geometric parameters depends on several factors, including the spatial resolution of the imaging modality used and the chosen reconstruction procedure.
The aim of this study was to investigate the uncertainty of a variety of previously proposed geometric parameters for rupture risk assessment, caused by variability of reconstruction procedures. Materials 26 research groups provided segmentations and surface reconstructions of five cerebral aneurysms as part of the Multiple Aneurysms AnaTomy CHallenge (MATCH) 2018. Forty dimensional and non-dimensional geometric parameters, describing aneurysm size, neck size, and irregularity of aneurysm shape, were computed. The medians as well as the absolute and relative uncertainties of the parameters were calculated. Additionally, linear regression analysis was performed on the absolute uncertainties and the median parameter values. Results A large variability of relative uncertainties in the range between 3.9 and 179.8\% was found. Linear regression analysis indicates that some parameters capture similar geometric aspects. The lowest uncertainties < 6\% were found for the non-dimensional parameters isoperimetric ratio, convexity ratio, and ellipticity index. Uncertainty of 2D and 3D size parameters was significantly higher than uncertainty of 1D parameters. The most extreme uncertainties > 80\% were found for some curvature parameters. Conclusions Uncertainty analysis is essential on the road to clinical translation and use of rupture risk prediction models. Uncertainty quantification of geometric rupture risk parameters provided by this study may help support development of future rupture risk prediction models.}, language = {en} } @misc{AmbellanZachowvonTycowicz2019, author = {Ambellan, Felix and Zachow, Stefan and von Tycowicz, Christoph}, title = {An as-invariant-as-possible GL+(3)-based Statistical Shape Model}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-74566}, year = {2019}, abstract = {We describe a novel nonlinear statistical shape model based on differential coordinates viewed as elements of GL+(3). We adopt an as-invariant-as-possible framework comprising a bi-invariant Lie group mean and a tangent principal component analysis based on a unique GL+(3)-left-invariant, O(3)-right-invariant metric. Contrary to earlier work that equips the coordinates with a specifically constructed group structure, our method employs the inherent geometric structure of the group-valued data and therefore features an improved statistical power in identifying shape differences. We demonstrate this in experiments on two anatomical datasets including comparison to the standard Euclidean as well as recent state-of-the-art nonlinear approaches to statistical shape modeling.}, language = {en} } @article{LangFischerWeberetal.2019, author = {Lang, Annemarie and Fischer, Lisa and Weber, Marie-Christin and Gaber, Timo and Ehrig, Rainald and R{\"o}blitz, Susanna and Buttgereit, Frank}, title = {Combining in vitro simulation and in silico modelling towards a sophisticated human osteoarthritis model}, volume = {27}, journal = {Osteoarthritis and Cartilage}, doi = {10.1016/j.joca.2019.02.277}, pages = {S183}, year = {2019}, abstract = {Our project aimed at building an in silico model based on our recently developed in vitro osteoarthritis (OA) model, seeking refinement of the model to enhance validity and translatability towards the more sophisticated simulation of OA. In detail, the previously developed 3D in vitro model is based on 3D chondrogenic constructs generated solely from human bone marrow derived mesenchymal stromal cells (hMSCs).
Besides studying the normal state of the model over 3 weeks, the in vitro model was treated with interleukin-1β (IL-1β) and tumor necrosis factor alpha (TNFα) to mimic an OA-like environment.}, language = {en} } @incollection{Baum2019, author = {Baum, Daniel}, title = {An Evaluation of Color Maps for Visual Data Exploration}, booktitle = {Science in Color: Visualizing Achromatic Knowledge}, editor = {Bock von W{\"u}lfingen, Bettina}, publisher = {De Gruyter}, address = {Berlin}, pages = {147 -- 161}, year = {2019}, language = {en} } @misc{AmbellanZachowvonTycowicz2019, author = {Ambellan, Felix and Zachow, Stefan and von Tycowicz, Christoph}, title = {A Surface-Theoretic Approach for Statistical Shape Modeling}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-74497}, year = {2019}, abstract = {We present a novel approach for nonlinear statistical shape modeling that is invariant under Euclidean motion and thus alignment-free. By analyzing metric distortion and curvature of shapes as elements of Lie groups in a consistent Riemannian setting, we construct a framework that reliably handles large deformations. Due to the explicit character of Lie group operations, our non-Euclidean method is very efficient, allowing for fast and numerically robust processing. This facilitates Riemannian analysis of large shape populations accessible through longitudinal and multi-site imaging studies providing increased statistical power. We evaluate the performance of our model w.r.t. shape-based classification of pathological malformations of the human knee and show that it outperforms the standard Euclidean as well as a recent nonlinear approach especially in the presence of sparse training data. To provide insight into the model's ability to capture natural biological shape variability, we carry out an analysis of specificity and generalization ability.}, language = {en} } @inproceedings{AmbellanZachowvonTycowicz2019, author = {Ambellan, Felix and Zachow, Stefan and von Tycowicz, Christoph}, title = {An as-invariant-as-possible GL+(3)-based Statistical Shape Model}, volume = {11846}, booktitle = {Proc. 7th MICCAI workshop on Mathematical Foundations of Computational Anatomy (MFCA)}, publisher = {Springer}, doi = {10.1007/978-3-030-33226-6_23}, pages = {219 -- 228}, year = {2019}, abstract = {We describe a novel nonlinear statistical shape model based on differential coordinates viewed as elements of GL+(3). We adopt an as-invariant-as-possible framework comprising a bi-invariant Lie group mean and a tangent principal component analysis based on a unique GL+(3)-left-invariant, O(3)-right-invariant metric. Contrary to earlier work that equips the coordinates with a specifically constructed group structure, our method employs the inherent geometric structure of the group-valued data and therefore features an improved statistical power in identifying shape differences.
We demonstrate this in experiments on two anatomical datasets including comparison to the standard Euclidean as well as recent state-of-the-art nonlinear approaches to statistical shape modeling.}, language = {en} } @phdthesis{Mangalgiri2019, author = {Mangalgiri, Gauri}, title = {Development of Titanium Dioxide Metasurfaces and Nanosoupbowls for Optically Enhancing Silicon Photocathodes}, doi = {10.18452/20160}, year = {2019}, language = {en} } @inproceedings{HammerschmidtZschiedrichSchneideretal.2019, author = {Hammerschmidt, Martin and Zschiedrich, Lin and Schneider, Philipp-Immanuel and Binkowski, Felix and Burger, Sven}, title = {Numerical optimization of resonant photonic devices}, volume = {11057}, booktitle = {Proc. SPIE}, doi = {10.1117/12.2534348}, pages = {1105702}, year = {2019}, language = {en} } @misc{OezelKulkarniHasanetal.2019, author = {{\"O}zel, M. Neset and Kulkarni, Abhishek and Hasan, Amr and Brummer, Josephine and Moldenhauer, Marian and Daumann, Ilsa-Maria and Wolfenberg, Heike and Dercksen, Vincent J. and Kiral, F. Ridvan and Weiser, Martin and Prohaska, Steffen and von Kleist, Max and Hiesinger, Peter Robin}, title = {Serial synapse formation through filopodial competition for synaptic seeding factors}, issn = {1438-0064}, doi = {10.1016/j.devcel.2019.06.014}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-74397}, year = {2019}, abstract = {Following axon pathfinding, growth cones transition from stochastic filopodial exploration to the formation of a limited number of synapses. How the interplay of filopodia and synapse assembly ensures robust connectivity in the brain has remained a challenging problem. Here, we developed a new 4D analysis method for filopodial dynamics and a data-driven computational model of synapse formation for R7 photoreceptor axons in developing Drosophila brains. Our live data support a 'serial synapse formation' model, where at any time point only a single 'synaptogenic' filopodium suppresses the synaptic competence of other filopodia through competition for synaptic seeding factors. Loss of the synaptic seeding factors Syd-1 and Liprin-α leads to a loss of this suppression, filopodial destabilization and reduced synapse formation, which is sufficient to cause the destabilization of entire axon terminals. Our model provides a filopodial 'winner-takes-all' mechanism that ensures the formation of an appropriate number of synapses.}, language = {en} } @inproceedings{JaegerJoštSutteretal.2019, author = {J{\"a}ger, Klaus and Jošt, Marko and Sutter, Johannes and Tockhorn, Philipp and K{\"o}hnen, Eike and Eisenhauer, David and Manley, Phillip and Albrecht, Steve and Becker, Christiane}, title = {Improving Monolithic Perovskite/Silicon Tandem Solar Cells From an Optical Viewpoint}, booktitle = {OSA Advanced Photonics Congress}, doi = {10.1364/PVLED.2019.PM4C.2}, pages = {PM4C.2}, year = {2019}, language = {en} } @phdthesis{Beckenbach2019, author = {Beckenbach, Isabel}, title = {Matchings and Flows in Hypergraphs}, year = {2019}, abstract = {In this dissertation, we study matchings and flows in hypergraphs using combinatorial methods. These two problems are among the best studied in the field of combinatorial optimization. As hypergraphs are a very general concept, not many results on graphs can be generalized to arbitrary hypergraphs. Therefore, we consider special classes of hypergraphs, which admit more structure, to transfer results from graph theory to hypergraph theory. 
In Chapter 2, we investigate the perfect matching problem on different classes of hypergraphs generalizing bipartite graphs. First, we give a polynomial time approximation algorithm for the maximum weight matching problem on so-called partitioned hypergraphs, whose approximation factor is best possible up to a constant. Afterwards, we look at the theorems of K{\"o}nig and Hall and their relation. Our main result is a condition for the existence of perfect matchings in normal hypergraphs that generalizes Hall's condition for bipartite graphs. In Chapter 3, we consider perfect f-matchings, f-factors, and (g,f)-matchings. We prove conditions for the existence of (g,f)-matchings in unimodular hypergraphs, perfect f-matchings in uniform Mengerian hypergraphs, and f-factors in uniform balanced hypergraphs. In addition, we give an overview of the complexity of the (g,f)-matching problem on different classes of hypergraphs generalizing bipartite graphs. In Chapter 4, we study the structure of hypergraphs that admit a perfect matching. We show that these hypergraphs can be decomposed along special cuts. For graphs it is known that the resulting decomposition is unique, which does not hold for hypergraphs in general. However, we prove the uniqueness of this decomposition (up to parallel hyperedges) for uniform hypergraphs. In Chapter 5, we investigate flows on directed hypergraphs, where we focus on graph-based directed hypergraphs, which means that every hyperarc is the union of a set of pairwise disjoint ordinary arcs. We define a residual network, which can be used to decide whether a given flow is optimal or not. Our main result in this chapter is an algorithm that computes a minimum cost flow on a graph-based directed hypergraph. This algorithm is a generalization of the network simplex algorithm.}, language = {en} } @article{WeberFischerDamerauetal.2019, author = {Weber, Marie-Christin and Fischer, Lisa and Damerau, Alexandra and Ponomarev, Igor and Pfeiffenberger, Moritz and Gaber, Timo and G{\"o}tschel, Sebastian and Lang, Jens and R{\"o}blitz, Susanna and Buttgereit, Frank and Ehrig, Rainald and Lang, Annemarie}, title = {In vitro and in silico modeling of cellular and matrix-related changes during the early phase of osteoarthritis}, journal = {BioRxiv}, doi = {10.1101/725317}, year = {2019}, abstract = {Understanding the pathophysiological processes of osteoarthritis (OA) requires adequate model systems. Although different in vitro or in vivo models have been described, further comprehensive approaches are needed to study specific parts of the disease. This study aimed to combine in vitro and in silico modeling to describe cellular and matrix-related changes during the early phase of OA. We developed an in vitro OA model based on scaffold-free cartilage-like constructs (SFCCs), which was mathematically modeled using a partial differential equation (PDE) system to resemble the processes during the onset of OA. SFCCs were produced from mesenchymal stromal cells and analyzed weekly by histology and qPCR to characterize the cellular and matrix-related composition. To simulate the early phase of OA, SFCCs were treated with interleukin-1β (IL-1β) and tumor necrosis factor α (TNFα) and examined after 3 weeks or cultivated for another 3 weeks without inflammatory cytokines to validate the regeneration potential. Mathematical modeling was performed in parallel to the in vitro experiments.
SFCCs expressed cartilage-specific markers, and after stimulation an increased expression of inflammatory markers and matrix-degrading enzymes, a loss of collagen II (Col-2), and a reduced cell density were observed, which could be partially reversed by withdrawal of the stimulation. Based on the PDEs, the distribution processes within the SFCCs, including those of IL-1β, Col-2 degradation and cell number reduction, were simulated. By combining in vitro and in silico methods, we aimed to develop a valid, efficient alternative approach to examine and predict disease progression and new therapeutic strategies.}, language = {en} } @article{KraemerMaggioniBrissonetal.2019, author = {Kr{\"a}mer, Martin and Maggioni, Marta and Brisson, Nicholas and Zachow, Stefan and Teichgr{\"a}ber, Ulf and Duda, Georg and Reichenbach, J{\"u}rgen}, title = {T1 and T2* mapping of the human quadriceps and patellar tendons using ultra-short echo-time (UTE) imaging and bivariate relaxation parameter-based volumetric visualization}, volume = {63}, journal = {Magnetic Resonance Imaging}, number = {11}, doi = {10.1016/j.mri.2019.07.015}, pages = {29 -- 36}, year = {2019}, abstract = {Quantification of magnetic resonance (MR)-based relaxation parameters of tendons and ligaments is challenging due to their very short transverse relaxation times, requiring application of ultra-short echo-time (UTE) imaging sequences. We quantify both T1 and T2* in the quadriceps and patellar tendons of healthy volunteers at a field strength of 3 T and visualize the results based on 3D segmentation by using bivariate histogram analysis. We applied a 3D ultra-short echo-time imaging sequence with either variable repetition times (VTR) or variable flip angles (VFA) for T1 quantification in combination with multi-echo acquisition for extracting T2*. The values of both relaxation parameters were subsequently binned for bivariate histogram analysis and corresponding cluster identification, which were subsequently visualized. Based on manually-drawn regions of interest in the tendons on the relaxation parameter maps, T1 and T2* boundaries were selected in the bivariate histogram to segment the quadriceps and patellar tendons and visualize the relaxation times by 3D volumetric rendering. Segmentation of bone marrow, fat, muscle and tendons was successfully performed based on the bivariate histogram analysis. Based on the segmentation results mean T2* relaxation times, over the entire tendon volumes averaged over all subjects, were 1.8 ms ± 0.1 ms and 1.4 ms ± 0.2 ms for the patellar and quadriceps tendons, respectively. The mean T1 value of the patellar tendon, averaged over all subjects, was 527 ms ± 42 ms and 476 ms ± 40 ms for the VFA and VTR acquisitions, respectively. The quadriceps tendon had higher mean T1 values of 662 ms ± 97 ms (VFA method) and 637 ms ± 40 ms (VTR method) compared to the patellar tendon. 3D volumetric visualization of the relaxation times revealed that T1 values are not constant over the volume of both tendons, but vary locally. This work provided additional data to build upon the scarce literature available on relaxation times in the quadriceps and patellar tendons.
We were able to segment both tendons and to visualize the relaxation parameter distributions over the entire tendon volumes.}, language = {en} } @inproceedings{NeumannHellwichZachow2019, author = {Neumann, Mario and Hellwich, Olaf and Zachow, Stefan}, title = {Localization and Classification of Teeth in Cone Beam CT using Convolutional Neural Networks}, booktitle = {Proc. of the 18th annual conference on Computer- and Robot-assisted Surgery (CURAC)}, isbn = {978-3-00-063717-9}, pages = {182 -- 188}, year = {2019}, abstract = {In dentistry, software-based medical image analysis and visualization provide efficient and accurate diagnostic and therapy planning capabilities. We present an approach for the automatic recognition of tooth types and positions in digital volume tomography (DVT). By using deep learning techniques in combination with dimensionality reduction through non-planar reformatting of the jaw anatomy, DVT data can be efficiently processed and teeth reliably recognized and classified, even in the presence of imaging artefacts, missing or dislocated teeth. We evaluated our approach, which is based on 2D Convolutional Neural Networks (CNNs), on 118 manually annotated cases of clinical DVT datasets. Our proposed method correctly classifies teeth with an accuracy of 94\% within a limit of 2mm distance to ground truth labels.}, language = {en} } @inproceedings{JoachimskyMaIckingetal.2019, author = {Joachimsky, Robert and Ma, Lihong and Icking, Christian and Zachow, Stefan}, title = {A Collision-Aware Articulated Statistical Shape Model of the Human Spine}, booktitle = {Proc. of the 18th annual conference on Computer- and Robot-assisted Surgery (CURAC)}, pages = {58 -- 64}, year = {2019}, abstract = {Statistical Shape Models (SSMs) are a proven means for model-based 3D anatomy reconstruction from medical image data. In orthopaedics and biomechanics, SSMs are increasingly employed to individualize measurement data or to create individualized anatomical models to which implants can be adapted or on which functional tests can be performed. For modeling and analysis of articulated structures, so-called articulated SSMs (aSSMs) have been developed. However, a missing feature of aSSMs is the consideration of collisions in the course of individual fitting and articulation. The aim of our work was to develop aSSMs that handle collisions between components correctly. That way it becomes possible to adjust shape and articulation in view of a physically and geometrically plausible individualization. To be able to apply collision-aware aSSMs in simulation and optimisation, our approach is based on an efficient collision detection method employing Graphics Processing Units (GPUs).}, language = {en} } @misc{Joachimsky2019, type = {Master Thesis}, author = {Joachimsky, Robert}, title = {Approaching Spinal Kinematics using a Collision-Aware Articulated Deformable Model}, pages = {84}, year = {2019}, abstract = {Statistical Shape Models (SSMs) allow for a compact representation of shape and shape variation and they are a proven means for model-based 3D anatomy reconstruction from medical image data. In orthopaedics and biomechanics, SSMs are increasingly employed to individualize measurement data or to create individualized anatomical models. The human spine is a versatile and complex articulated structure and thus is an interesting candidate to be modeled using an advanced type of SSMs. For modeling and analysis of articulated structures, so-called articulated SSMs (aSSMs) have been developed.
However, a missing feature of aSSMs is the consideration of collisions in the course of individual fitting and articulation. The aim of this thesis is to develop an aSSM of two adjacent vertebrae that handles collisions between components correctly. The model will incorporate the two major aspects of variability: the shape of a single vertebra and the relative positioning of neighboring vertebrae. That way it becomes possible to adjust shape and articulation in view of a physically and geometrically plausible individualization. To be able to apply collision-aware aSSMs in simulation and optimisation in future work, the approach is based on a parallelized collision detection method employing Graphics Processing Units (GPUs).}, language = {en} } @misc{BreugemBorndoerferSchlechteetal.2019, author = {Breugem, Thomas and Bornd{\"o}rfer, Ralf and Schlechte, Thomas and Schulz, Christof}, title = {A Three-Phase Heuristic for Cyclic Crew Rostering with Fairness Requirements}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-74297}, year = {2019}, abstract = {In this paper, we consider the Cyclic Crew Rostering Problem with Fairness Requirements (CCRP-FR). In this problem, attractive cyclic rosters have to be constructed for groups of employees, considering multiple, a priori determined, fairness levels. The attractiveness follows from the structure of the rosters (e.g., sufficient rest times and variation in work), whereas fairness is based on the work allocation among the different roster groups. We propose a three-phase heuristic for the CCRP-FR, which combines the strength of column generation techniques with a large-scale neighborhood search algorithm. The design of the heuristic assures that good solutions for all fairness levels are obtained quickly, and can still be further improved if additional running time is available. We evaluate the performance of the algorithm using real-world data from Netherlands Railways, and show that the heuristic finds close to optimal solutions for many of the considered instances. In particular, we show that the heuristic is able to quickly find major improvements upon the current sequential practice: For most instances, the heuristic is able to increase the attractiveness by at least 20\% in just a few minutes.}, language = {en} } @misc{MahnkeArltBaumetal.2019, author = {Mahnke, Heinz-Eberhard and Arlt, Tobias and Baum, Daniel and Hege, Hans-Christian and Herter, Felix and Lindow, Norbert and Manke, Ingo and Siopi, Tzulia and Menei, Eve and Etienne, Marc and Lepper, Verena}, title = {Virtual unfolding of folded papyri}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-74338}, year = {2019}, abstract = {The historical importance of ancient manuscripts is unique since they provide information about the heritage of ancient cultures. Often texts are hidden in rolled or folded documents. Due to recent improvements in sensitivity and resolution, spectacular disclosures of rolled hidden texts were made possible by X-ray tomography. However, revealing text on folded manuscripts is even more challenging. Manual unfolding is often too risky in view of the fragile condition of fragments, as it can lead to the total loss of the document. X-ray tomography allows for virtual unfolding and enables non-destructive access to hidden texts. We have recently demonstrated the procedure and tested unfolding algorithms on a mockup sample.
Here, we present results on unfolding ancient papyrus packages from the papyrus collection of the Mus{\'e}e du Louvre, among them objects folded along approximately orthogonal folding lines. In one of the packages, the first identification of a word was achieved, the Coptic word for "Lord".}, language = {en} } @misc{RehfeldtHobbieSchoenheitetal.2019, author = {Rehfeldt, Daniel and Hobbie, Hannes and Sch{\"o}nheit, David and Gleixner, Ambros and Koch, Thorsten and M{\"o}st, Dominik}, title = {A massively parallel interior-point solver for linear energy system models with block structure}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-74321}, year = {2019}, abstract = {Linear energy system models are often a crucial component of system design and operations, as well as energy policy consulting. Such models can lead to large-scale linear programs, which can be intractable even for state-of-the-art commercial solvers; already the available memory on a desktop machine might not be sufficient. Against this backdrop, this article introduces an interior-point solver that exploits common structures of linear energy system models to efficiently run in parallel on distributed memory systems. The solver is designed for linear programs with doubly bordered block-diagonal constraint matrix and makes use of a Schur complement based decomposition. Special effort has been put into handling large numbers of linking constraints and variables as commonly observed in energy system models. In order to handle this strong linkage, a distributed preconditioning of the Schur complement is used. In addition, the solver features a number of more generic techniques such as parallel matrix scaling and structure-preserving presolving. The implementation is based on the existing parallel interior-point solver PIPS-IPM. We evaluate the computational performance on energy system models with up to 700 million non-zero entries in the constraint matrix, and with more than 200 million columns and 250 million rows. This article mainly concentrates on the energy system model ELMOD, which is a linear optimization model representing the European electricity markets by the use of a nodal pricing market clearing. It has been widely applied in the literature on energy system analyses in recent years. However, it will be demonstrated that the new solver is also applicable to other energy system models.}, language = {en} } @misc{Baum2019, author = {Baum, Daniel}, title = {An Evaluation of Color Maps for Visual Data Exploration}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-74259}, year = {2019}, language = {en} } @article{BaumWeaverZlotnikovetal.2019, author = {Baum, Daniel and Weaver, James C. and Zlotnikov, Igor and Kn{\"o}tel, David and Tomholt, Lara and Dean, Mason N.}, title = {High-Throughput Segmentation of Tiled Biological Structures using Random-Walk Distance Transforms}, journal = {Integrative And Comparative Biology}, doi = {10.1093/icb/icz117}, year = {2019}, abstract = {Various 3D imaging techniques are routinely used to examine biological materials, the results of which are usually a stack of grayscale images. In order to quantify structural aspects of the biological materials, however, they must first be extracted from the dataset in a process called segmentation. If the individual structures to be extracted are in contact or very close to each other, distance-based segmentation methods utilizing the Euclidean distance transform are commonly employed.
Major disadvantages of the Euclidean distance transform, however, are its susceptibility to noise (very common in biological data), which often leads to incorrect segmentations (i.e. poor separation of objects of interest), and its limitation of being only effective for roundish objects. In the present work, we propose an alternative distance transform method, the random-walk distance transform, and demonstrate its effectiveness in high-throughput segmentation of three microCT datasets of biological tilings (i.e. structures composed of a large number of similar repeating units). In contrast to the Euclidean distance transform, this random-walk approach represents the global, rather than the local, geometric character of the objects to be segmented and, thus, is less susceptible to noise. In addition, it is directly applicable to structures with anisotropic shape characteristics. Using three case studies—stingray tessellated cartilage, starfish dermal endoskeleton, and the prismatic layer of bivalve mollusc shell—we provide a typical workflow for the segmentation of tiled structures, describe core image processing concepts that are underused in biological research, and show that for each study system, large amounts of biologically-relevant data can be rapidly segmented, visualized and analyzed.}, language = {en} } @inproceedings{Paetsch2019, author = {Paetsch, Olaf}, title = {Possibilities and Limitations of Automatic Feature Extraction shown by the Example of Crack Detection in 3D-CT Images of Concrete Specimen}, booktitle = {iCT 2019}, year = {2019}, abstract = {To assess the influence of the alkali-silica reaction (ASR) on pavement concrete, 3D-CT imaging has been applied to concrete samples. Prior to imaging, these samples have been drilled out of a concrete beam pre-damaged by fatigue loading. The resulting high resolution 3D-CT images consist of several gigabytes of voxels. Current desktop computers can visualize such big datasets without problems, but a visual inspection or manual segmentation of features such as cracks by experts can only be carried out on a few slices. A quantitative analysis of cracks requires a segmentation of the whole specimen, which could only be done by an automatic feature detection. This raises the question of the reliability of an automatic crack detection algorithm, its certainty and limitations. Does the algorithm find all cracks? Does it find too many cracks? Can parameters of that algorithm, once identified as good, be applied to other samples as well? Can ensemble computing with many crack parameters overcome the difficulties with parameter finding? By means of a crack detection algorithm based on shape recognition (template matching) these questions will be discussed. Since the author has no access to reliable ground truth data of cracks, the assessment of the certainty of the automatic crack detection is restricted to visual inspection by experts. Therefore, an artificial dataset based on a combination of manually segmented cracks processed together with simple image processing algorithms is used to quantify the accuracy of the crack detection algorithm. Part of the evaluation of cracks in concrete samples is the knowledge of the surrounding material. The surrounding material can be used to assess the detected cracks, e.g. micro-cracks within the aggregate-matrix interface may be starting points for cracks on a macro scale.
Furthermore, the knowledge of the surrounding material can help to find better parameter sets for the crack detection itself, because crack characteristics may vary depending on their surrounding material. Therefore, in addition to the crack detection, a complete segmentation of the sample into the components of concrete, such as aggregates, cement matrix and pores, is needed. Since such a segmentation task cannot be done manually due to the amount of data, an approach utilizing convolutional neural networks stemming from a medical application has been applied. The learning phase requires a ground truth, i.e. a segmentation of the components. This has to be created manually in a time-consuming task. However, this segmentation can be used for a quantitative evaluation of the automatic segmentation afterwards. Even though that work has been performed as a short-term subtask of a bigger project funded by the German Research Foundation (DFG), this paper discusses problems which may arise in similar projects, too.}, language = {en} } @inproceedings{HartungSchintke2019, author = {Hartung, Marc and Schintke, Florian}, title = {Learned Clause Minimization in Parallel SAT Solvers}, booktitle = {Pragmatics of SAT 2019}, arxiv = {http://arxiv.org/abs/arXiv:1908.01624v1}, pages = {1 -- 11}, year = {2019}, abstract = {Learned clause minimization (LCM) has led to performance improvements of modern SAT solvers, especially in solving hard SAT instances. Despite the success of LCM approaches in sequential solvers, they are not widely incorporated in parallel SAT solvers. In this paper we explore the potential of LCM for parallel SAT solvers by defining multiple LCM approaches based on clause vivification, comparing their runtime in different SAT solvers and discussing reasons for performance gains and losses. Results show that LCM only boosts performance of parallel SAT solvers on a fraction of SAT instances. More commonly, applying LCM decreases performance. Only certain LCM approaches are able to improve the overall performance of parallel SAT solvers.}, language = {en} } @article{WerdehausenBurgerStaudeetal.2019, author = {Werdehausen, Daniel and Burger, Sven and Staude, Isabelle and Pertsch, Thomas and Decker, Manuel}, title = {Dispersion-engineered nanocomposites enable achromatic diffractive optical elements}, volume = {6}, journal = {Optica}, doi = {10.1364/OPTICA.6.001031}, pages = {1031}, year = {2019}, language = {en} } @inproceedings{SkrzypczakSchintkeSchuett2019, author = {Skrzypczak, Jan and Schintke, Florian and Sch{\"u}tt, Thorsten}, title = {Linearizable State Machine Replication of State-Based CRDTs without Logs}, booktitle = {Proceedings of the 2019 ACM Symposium on Principles of Distributed Computing, PODC 2019}, doi = {10.1145/3293611.3331568}, pages = {455 -- 457}, year = {2019}, abstract = {General solutions of state machine replication have to ensure that all replicas apply the same commands in the same order, even in the presence of failures. Such strict ordering incurs high synchronization costs due to the use of distributed consensus or a leader. This paper presents a protocol for linearizable state machine replication of conflict-free replicated data types (CRDTs) that neither requires consensus nor a leader. By leveraging the properties of state-based CRDTs—in particular the monotonic growth of a join semilattice—synchronization overhead is greatly reduced. In addition, updates just need a single round trip and modify the state 'in-place' without the need for a log.
Furthermore, the message size overhead for coordination consists of a single counter per message. While reads in the presence of concurrent updates are not wait-free without a coordinator, we show that more than 97 \% of reads can be handled in one or two round trips under highly concurrent accesses. Our protocol achieves high throughput without auxiliary processes such as command log management or leader election. It is well suited for all practical scenarios that need linearizable access on CRDT data on a fine-granular scale.}, language = {en} } @article{MrowinskiSchnauberGutscheetal.2019, author = {Mrowinski, Paweł and Schnauber, Peter and Gutsche, Philipp and Kaganskiy, Arsenty and Schall, Johannes and Burger, Sven and Rodt, Sven and Reitzenstein, Stephan}, title = {Directional emission of a deterministically fabricated quantum dot - Bragg reflection multi-mode waveguide system}, volume = {6}, journal = {ACS Photonics}, arxiv = {http://arxiv.org/abs/1902.01905}, doi = {10.1021/acsphotonics.9b00369}, pages = {2231}, year = {2019}, language = {en} } @misc{Roessig2019, author = {R{\"o}ssig, Ansgar}, title = {Verification of Neural Networks}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-74174}, year = {2019}, language = {en} } @misc{GleixnerKempkeKochetal.2019, author = {Gleixner, Ambros and Kempke, Nils-Christian and Koch, Thorsten and Rehfeldt, Daniel and Uslu, Svenja}, title = {First Experiments with Structure-Aware Presolving for a Parallel Interior-Point Method}, issn = {1438-0064}, doi = {10.1007/978-3-030-48439-2_13}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-74084}, year = {2019}, abstract = {In linear optimization, matrix structure can often be exploited algorithmically. However, beneficial presolving reductions sometimes destroy the special structure of a given problem. In this article, we discuss structure-aware implementations of presolving as part of a parallel interior-point method to solve linear programs with block-diagonal structure, including both linking variables and linking constraints. While presolving reductions are often mathematically simple, their implementation in a high-performance computing environment is a complex endeavor. We report results on impact, performance, and scalability of the resulting presolving routines on real-world energy system models with up to 700 million nonzero entries in the constraint matrix.}, language = {en} } @misc{Neumann2019, type = {Master Thesis}, author = {Neumann, Mario}, title = {Localization and Classification of Teeth in Cone Beam Computed Tomography using 2D CNNs}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-74045}, pages = {77}, year = {2019}, abstract = {In dentistry, software-based medical image analysis and visualization provide efficient and accurate diagnostic and therapy planning capabilities. We present an approach for the automatic recognition of tooth types and positions in digital volume tomography (DVT). By using deep learning techniques in combination with dimension reduction through non-planar reformatting of the jaw anatomy, DVT data can be efficiently processed and teeth reliably recognized and classified, even in the presence of imaging artefacts, missing or dislocated teeth. We evaluated our approach, which is based on 2D Convolutional Neural Networks (CNNs), on 118 manually annotated cases of clinical DVT datasets.
Our proposed method correctly classifies teeth with an accuracy of 94\% within a limit of 2mm distance to ground truth landmarks.}, language = {en} } @misc{Serrano2019, author = {Serrano, Felipe}, title = {Visible points, the separation problem, and applications to MINLP}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-74016}, year = {2019}, abstract = {In this paper we introduce a technique to produce tighter cutting planes for mixed-integer non-linear programs. Usually, a cutting plane is generated to cut off a specific infeasible point. The underlying idea is to use the infeasible point to restrict the feasible region in order to obtain a tighter domain. To ensure validity, we require that every valid cut separating the infeasible point from the restricted feasible region is still valid for the original feasible region. We translate this requirement in terms of the separation problem and the reverse polar. In particular, if the reverse polar of the restricted feasible region is the same as the reverse polar of the feasible region, then any cut valid for the restricted feasible region that \emph{separates} the infeasible point is valid for the feasible region. We show that the reverse polar of the \emph{visible points} of the feasible region from the infeasible point coincides with the reverse polar of the feasible region. In the special case where the feasible region is described by a single non-convex constraint intersected with a convex set, we provide a characterization of the visible points. Furthermore, when the non-convex constraint is quadratic, the characterization is particularly simple. We also provide an extended formulation for a relaxation of the visible points when the non-convex constraint is a general polynomial. Finally, we give some conditions under which for a given set there is an inclusion-wise smallest set, in some predefined family of sets, whose reverse polars coincide.}, language = {en} } @article{BorndoerferGrimmReutheretal.2019, author = {Bornd{\"o}rfer, Ralf and Grimm, Boris and Reuther, Markus and Schlechte, Thomas}, title = {Optimization of handouts for rolling stock rotations}, journal = {Journal of Rail Transport Planning \& Management}, number = {10}, doi = {10.1016/j.jrtpm.2019.02.001}, pages = {1 -- 8}, year = {2019}, abstract = {A railway operator creates (rolling stock) rotations in order to have a precise master plan for the operation of a timetable by railway vehicles. A rotation is considered as a cycle that multiply traverses a set of operational days while covering trips of the timetable. As it is well known, the proper creation of rolling stock rotations by, e.g., optimization algorithms is challenging and still a topical research subject. Nevertheless, we study a completely different but strongly related question in this paper, i.e.: How to visualize a rotation? For this purpose, we introduce a basic handout concept, which directly leads to the visualization, i.e., handout of a rotation. In our industrial application at DB Fernverkehr AG, the handout is exactly as important as the rotation itself. Moreover, it turns out that also other European railway operators use exactly the same methodology (but not terminology). Since a rotation can have many handouts of different quality, we show how to compute optimal ones through an integer program (IP) by standard software. In addition, a construction as well as an improvement heuristic are presented.
Our computational results show that the heuristics are a very reliable standalone approach to quickly find near-optimal and even optimal handouts. The efficiency of the heuristics is shown via a computational comparison to the IP approach.}, language = {en} } @inproceedings{BorndoerferGrimmSchlechte2019, author = {Bornd{\"o}rfer, Ralf and Grimm, Boris and Schlechte, Thomas}, title = {Re-optimizing ICE Rotations after a Tunnel Breakdown near Rastatt}, volume = {Link{\"o}ping Electronic Conference Proceedings}, booktitle = {Proceedings of the 8th International Conference on Railway Operations Modelling and Analysis - RailNorrk{\"o}ping 2019}, number = {069}, publisher = {Link{\"o}ping University Electronic Press, Link{\"o}pings universitet}, address = {Link{\"o}ping, Sweden}, isbn = {978-91-7929-992-7}, issn = {1650-3686}, pages = {160 -- 168}, year = {2019}, abstract = {Planning rolling stock movements in industrial passenger railway applications is a long-term process based on timetables which are also often valid for long periods of time. For these timetables, rotation plans, i.e., plans of railway vehicle movements, are constructed as templates for these periods. During operation, the rotation plans are affected by all kinds of unplanned events. An unusual example of this is the collapse of a tunnel ceiling near Rastatt in southern Germany due to construction works related to the renewal of the central station in Stuttgart. As a result, the main railway connection between Stuttgart and Frankfurt am Main, located on top of the tunnel, had to be closed from August 12th to October 2nd 2017. This had a major impact on the railway network in southern Germany. Hence, all rotation plans and train schedules for both passenger and cargo traffic had to be revised. In this paper we focus on a case study for this situation and compute new rotation plans via mixed integer programming for the ICE high-speed fleet of DB Fernverkehr AG, one of the largest passenger railway companies in Europe. In our approach we take care of some side constraints to ensure a smooth continuation of the rotation plans after the disruption has ended.}, language = {en} } @misc{GrimmBorndoerferSchlechte2019, author = {Grimm, Boris and Bornd{\"o}rfer, Ralf and Schlechte, Thomas}, title = {Re-optimizing ICE Rotations after a Tunnel Breakdown near Rastatt}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-73976}, year = {2019}, abstract = {Planning rolling stock movements in industrial passenger railway applications is a long-term process based on timetables which are also often valid for long periods of time. For these timetables, rotation plans, i.e., plans of railway vehicle movements, are constructed as templates for these periods. During operation, the rotation plans are affected by all kinds of unplanned events. An unusual example of this is the collapse of a tunnel ceiling near Rastatt in southern Germany due to construction works related to the renewal of the central station in Stuttgart. As a result, the main railway connection between Stuttgart and Frankfurt am Main, located on top of the tunnel, had to be closed from August 12th to October 2nd 2017. This had a major impact on the railway network in southern Germany. Hence, all rotation plans and train schedules for both passenger and cargo traffic had to be revised.
In this paper we focus on a case study for this situation and compute new rotation plans via mixed integer programming for the ICE high-speed fleet of DB Fernverkehr AG, one of the largest passenger railway companies in Europe. In our approach we take care of some side constraints to ensure a smooth continuation of the rotation plans after the disruption has ended.}, language = {en} } @inproceedings{BertholdGrimmReutheretal.2019, author = {Berthold, Timo and Grimm, Boris and Reuther, Markus and Schade, Stanley and Schlechte, Thomas}, title = {Strategic Planning of Rolling Stock Rotations for Public Tenders}, volume = {Link{\"o}ping Electronic Conference Proceedings}, booktitle = {Proceedings of the 8th International Conference on Railway Operations Modelling and Analysis - RailNorrk{\"o}ping 2019}, number = {069}, publisher = {Link{\"o}ping University Electronic Press, Link{\"o}pings universitet}, isbn = {978-91-7929-992-7}, issn = {1650-3686}, pages = {148 -- 159}, year = {2019}, abstract = {Since railway companies have to apply for long-term public contracts to operate railway lines in public tenders, the question of how they can adequately estimate the operating cost for long-term periods arises naturally. We consider a rolling stock rotation problem for a time period of ten years, which is based on a real-world instance provided by an industry partner. We use a two-stage approach for the cost estimation of the required rolling stock. In the first stage, we determine a weekly rotation plan. In the second stage, we roll out this weekly rotation plan for a longer time period and incorporate scheduled maintenance treatments. We present a heuristic approach and a mixed integer programming model to implement the process of the second stage. Finally, we discuss computational results for a real-world tendering scenario.}, language = {en} } @inproceedings{FroehlerdaCunhaMeloWeissenboecketal.2019, author = {Fr{\"o}hler, Bernhard and da Cunha Melo, Lucas and Weissenb{\"o}ck, Johannes and Kastner, Johann and M{\"o}ller, Torsten and Hege, Hans-Christian and Gr{\"o}ller, Eduard M. and Sanctorum, Jonathan and De Beenhouwer, Jan and Sijbers, Jan and Heinzl, Christoph}, title = {Tools for the analysis of datasets from X-ray computed tomography based on Talbot-Lau grating interferometry}, booktitle = {Proceedings of iCT 2019, (9th Conference on Industrial Computed Tomography, Padova, Italy - iCT 2019, February 13-15, 2019)}, number = {paper 52}, pages = {8}, year = {2019}, abstract = {This work introduces methods for analyzing the three imaging modalities delivered by Talbot-Lau grating interferometry X-ray computed tomography (TLGI-XCT). The first problem we address is providing a quick way to show a fusion of all three modalities. For this purpose the tri-modal transfer function widget is introduced. The widget controls a mixing function that uses the output of the transfer functions of all three modalities, allowing the user to create one customized fused image. A second problem prevalent in processing TLGI-XCT data is a lack of tools for analyzing the segmentation process of such multimodal data. We address this by providing methods for computing three types of uncertainty: from probabilistic segmentation algorithms, from the voxel neighborhoods, as well as from a collection of results. We furthermore introduce a linked views interface to explore this data. The techniques are evaluated on a TLGI-XCT scan of a carbon-fiber reinforced dataset with impact damage.
We show that the transfer function widget accelerates and facilitates the exploration of this dataset, while the uncertainty analysis methods give insights into how to tweak and improve segmentation algorithms for more suitable results.}, language = {en} } @article{FarchminHammerschmidtSchneideretal.2019, author = {Farchmin, Nando and Hammerschmidt, Martin and Schneider, Philipp-Immanuel and Wurm, Matthias and B{\"a}r, Markus and Heidenreich, Sebastian}, title = {Efficient global sensitivity analysis for silicon line gratings using polynomial chaos}, volume = {11057}, journal = {Proc. SPIE}, doi = {10.1117/12.2525978}, pages = {110570J}, year = {2019}, language = {en} } @article{AndrleHoenickeSchneideretal.2019, author = {Andrle, Anna and H{\"o}nicke, Philipp and Schneider, Philipp-Immanuel and Kayser, Yves and Hammerschmidt, Martin and Burger, Sven and Scholze, Frank and Beckhoff, Burkhard and Soltwisch, Victor}, title = {Grazing incidence x-ray fluorescence based characterization of nanostructures for element sensitive profile reconstruction}, volume = {11057}, journal = {Proc. SPIE}, arxiv = {http://arxiv.org/abs/2104.13749}, doi = {10.1117/12.2526082}, pages = {110570M}, year = {2019}, language = {en} } @misc{LoebelLindnerBorndoerfer2019, author = {L{\"o}bel, Fabian and Lindner, Niels and Bornd{\"o}rfer, Ralf}, title = {The Restricted Modulo Network Simplex Method for Integrated Periodic Timetabling and Passenger Routing}, issn = {1438-0064}, doi = {https://doi.org/10.1007/978-3-030-48439-2_92}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-73868}, year = {2019}, abstract = {The Periodic Event Scheduling Problem is a well-studied NP-hard problem with applications in public transportation to find good periodic timetables. Among the most powerful heuristics to solve the periodic timetabling problem is the modulo network simplex method. In this paper, we consider the more difficult version with integrated passenger routing and propose a refined integrated variant to solve this problem on real-world-based instances.}, language = {en} } @misc{LindnerLiebchen2019, author = {Lindner, Niels and Liebchen, Christian}, title = {New Perspectives on PESP: T-Partitions and Separators}, issn = {1438-0064}, doi = {10.4230/OASIcs.ATMOS.2019.2}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-73853}, year = {2019}, abstract = {In the planning process of public transportation companies, designing the timetable is among the core planning steps. In particular in the case of periodic (or cyclic) services, the Periodic Event Scheduling Problem (PESP) is well-established to compute high-quality periodic timetables. We are considering algorithms for computing good solutions for the very basic PESP with no additional extra features as add-ons. The first of these algorithms generalizes several primal heuristics that had been proposed in the past, such as single-node cuts and the modulo network simplex algorithm. We consider partitions of the graph, and identify so-called delay cuts as a structure that allows us to generalize several previous heuristics. In particular, when no more improving delay cut can be found, we already know that the other heuristics could not improve either.
The second of these algorithms turns a strategy that had been discussed in the past upside down: Instead of gluing together the network line-by-line in a bottom-up way, we develop a divide-and-conquer-like top-down approach to separate the initial problem into two easier subproblems such that the information loss along their cutset edges is as small as possible. We are aware that there may be PESP instances that do not fit the separator setting well. Yet, on the RxLy-instances of PESPlib in our experimental computations, we come up with good primal solutions and dual bounds. In particular, on the largest instance (R4L4), this new separator approach, which applies a state-of-the-art solver as a subroutine, is able to come up with better dual bounds than purely applying this state-of-the-art solver in the very same time.}, language = {en} } @misc{BaumWeaverZlotnikovetal.2019, author = {Baum, Daniel and Weaver, James C. and Zlotnikov, Igor and Kn{\"o}tel, David and Tomholt, Lara and Dean, Mason N.}, title = {High-Throughput Segmentation of Tiled Biological Structures using Random-Walk Distance Transforms}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-73841}, year = {2019}, abstract = {Various 3D imaging techniques are routinely used to examine biological materials, the results of which are usually a stack of grayscale images. In order to quantify structural aspects of the biological materials, however, they must first be extracted from the dataset in a process called segmentation. If the individual structures to be extracted are in contact or very close to each other, distance-based segmentation methods utilizing the Euclidean distance transform are commonly employed. Major disadvantages of the Euclidean distance transform, however, are its susceptibility to noise (very common in biological data), which often leads to incorrect segmentations (i.e. poor separation of objects of interest), and its limitation of being only effective for roundish objects. In the present work, we propose an alternative distance transform method, the random-walk distance transform, and demonstrate its effectiveness in high-throughput segmentation of three microCT datasets of biological tilings (i.e. structures composed of a large number of similar repeating units). In contrast to the Euclidean distance transform, this random-walk approach represents the global, rather than the local, geometric character of the objects to be segmented and, thus, is less susceptible to noise. In addition, it is directly applicable to structures with anisotropic shape characteristics. Using three case studies—stingray tessellated cartilage, starfish dermal endoskeleton, and the prismatic layer of bivalve mollusc shell—we provide a typical workflow for the segmentation of tiled structures, describe core image processing concepts that are underused in biological research, and show that for each study system, large amounts of biologically relevant data can be rapidly segmented, visualized and analyzed.}, language = {en} } @misc{GoetschelWeiser2019, author = {G{\"o}tschel, Sebastian and Weiser, Martin}, title = {Lossy Compression for Large Scale PDE Problems}, issn = {1438-0064}, doi = {10.1101/506378}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-73817}, year = {2019}, abstract = {Solvers for partial differential equations (PDE) are one of the cornerstones of computational science.
For large problems, they involve huge amounts of data that need to be stored and transmitted on all levels of the memory hierarchy. Often, bandwidth is the limiting factor due to relatively small arithmetic intensity, and increasingly so due to the growing disparity between computing power and bandwidth. Consequently, data compression techniques have been investigated and tailored towards the specific requirements of PDE solvers during the last decades. This paper surveys data compression challenges and corresponding solution approaches for PDE problems, covering all levels of the memory hierarchy from mass storage up to main memory. As examples, we illustrate the concepts for particular methods and give references to alternatives.}, language = {en} } @article{Sullivan2019, author = {Sullivan, T. J.}, title = {Contributed discussion on the article "A Bayesian conjugate gradient method"}, volume = {14}, journal = {Bayesian Analysis}, number = {3}, arxiv = {http://arxiv.org/abs/1906.10240}, doi = {10.1214/19-BA1145}, pages = {985 -- 989}, year = {2019}, abstract = {The recent article "A Bayesian conjugate gradient method" by Cockayne, Oates, Ipsen, and Girolami proposes an approximately Bayesian iterative procedure for the solution of a system of linear equations, based on the conjugate gradient method, that gives a sequence of Gaussian/normal estimates for the exact solution. The purpose of the probabilistic enrichment is that the covariance structure is intended to provide a posterior measure of uncertainty or confidence in the solution mean. This note gives some comments on the article, poses some questions, and suggests directions for further research.}, language = {en} } @misc{GamrathPetkovic2019, author = {Gamrath, Inken and Petkovic, Milena}, title = {Prediction of Intermitted Flows in Large Gas Networks}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-73717}, year = {2019}, language = {en} } @misc{BorndoerferElijazyferSchwartz2019, author = {Bornd{\"o}rfer, Ralf and Elijazyfer, Ziena and Schwartz, Stephan}, title = {Approximating Balanced Graph Partitions}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-73675}, year = {2019}, abstract = {We consider the problem of partitioning a weighted graph into k connected components of similar weight. In particular, we consider the two classical objectives to maximize the lightest part or to minimize the heaviest part. For a partitioning of the vertex set and for both objectives, we give the first known approximation results on general graphs. Specifically, we give a \$\Delta\$-approximation where \$\Delta\$ is the maximum degree of an arbitrary spanning tree of the given graph. Concerning the edge partition case, we even obtain a 2-approximation for the min-max and the max-min problem, by using the claw-freeness of line graphs.}, language = {en} } @misc{HenningsAndersonHoppmannetal.2019, author = {Hennings, Felix and Anderson, Lovis and Hoppmann, Kai and Turner, Mark and Koch, Thorsten}, title = {Controlling transient gas flow in real-world pipeline intersection areas}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-73645}, year = {2019}, abstract = {Compressor stations are the heart of every high-pressure gas transport network. Located at intersection areas of the network, they are contained in huge complex plants, where they are, in combination with valves and regulators, responsible for routing and pushing the gas through the network.
Due to their complexity and lack of data, compressor stations are usually dealt with in the scientific literature in a highly simplified and idealized manner. As part of an ongoing project with one of Germany's largest Transmission System Operators to develop a decision support system for their dispatching center, we investigated how to automatize control of compressor stations. Each station has to be in a particular configuration, leading in combination with the other nearby elements to a discrete set of up to 2000 possible feasible operation modes in the intersection area. Since the desired performance of the station changes over time, the configuration of the station has to adapt. Our goal is to minimize the necessary changes in the overall operation modes and related elements over time, while fulfilling a preset performance envelope or demand scenario. This article describes the chosen model and the implemented mixed integer programming based algorithms to tackle this challenge. By presenting extensive computational results on real world data we demonstrate the performance of our approach.}, language = {en} } @misc{HoppmannHenningsLenzetal.2019, author = {Hoppmann, Kai and Hennings, Felix and Lenz, Ralf and Gotzes, Uwe and Heinecke, Nina and Spreckelsen, Klaus and Koch, Thorsten}, title = {Optimal Operation of Transient Gas Transport Networks}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-73639}, year = {2019}, language = {en} } @misc{PetkovicChenGamrathetal.2019, author = {Petkovic, Milena and Chen, Ying and Gamrath, Inken and Gotzes, Uwe and Hadjidimitriou, Natalia Selini and Zittel, Janina and Xu, Xiaofei and Koch, Thorsten}, title = {A Hybrid Approach for High Precision Prediction of Gas Flows}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-73525}, year = {2019}, abstract = {About 20\% of the German energy demand is supplied by natural gas. Additionally, for about twice the amount, Germany serves as a transit country. Thereby, the German network represents a central hub in the European natural gas transport network. The transport infrastructure is operated by so-called transmission system operators or TSOs. The number one priority of the TSOs is to ensure security of supply. However, the TSOs have no knowledge of the intentions and planned actions of the shippers (traders). Open Grid Europe (OGE), one of Germany's largest TSOs, operates a high-pressure transport network of about 12,000 km length. Since flexibility and security of supply are of utmost importance to the German Energy Transition ("Energiewende"), especially with the introduction of peak-load gas power stations, being able to predict in- and out-flow of the network is of great importance. In this paper we introduce a new hybrid forecast method applied to gas flows at the boundary nodes of a transport network. The new method employs optimized feature minimization and selection. We use a combination of an FAR, LSTM DNN and mathematical programming to achieve robust, high-quality forecasts on real world data for different types of network nodes.
Keywords: Gas Forecast, Time series, Hybrid Method, FAR, LSTM, Mathematical Optimisation}, language = {en} } @inproceedings{ShinanoRehfeldtGally2019, author = {Shinano, Yuji and Rehfeldt, Daniel and Gally, Tristan}, title = {An Easy Way to Build Parallel State-of-the-art Combinatorial Optimization Problem Solvers: A Computational Study on Solving Steiner Tree Problems and Mixed Integer Semidefinite Programs by using ug[SCIP-*,*]-libraries}, booktitle = {Proceedings of the 9th IEEE Workshop Parallel / Distributed Combinatorics and Optimization}, publisher = {IEEE}, doi = {10.1109/IPDPSW.2019.00095}, pages = {530 -- 541}, year = {2019}, abstract = {Branch-and-bound (B\&B) is an algorithmic framework for solving NP-hard combinatorial optimization problems. Although several well-designed software frameworks for parallel B\&B have been developed over the last two decades, there is very little literature about successfully solving previously intractable combinatorial optimization problem instances to optimality by using such frameworks. The main reason for this limited impact of parallel solvers is that the algorithmic improvements for specific problem types are significantly greater than performance gains obtained by parallelization in general. Therefore, in order to solve hard problem instances for the first time, one needs to accelerate state-of-the-art algorithm implementations. In this paper, we present a computational study for solving Steiner tree problems and mixed integer semidefinite programs in parallel. These state-of-the-art algorithm implementations are based on SCIP and were parallelized via the ug[SCIP-*,*]-libraries---by adding less than 200 lines of glue code. Despite the ease of their parallelization, these solvers have the potential to solve previously intractable instances. In this paper, we demonstrate the convenience of such a parallelization and present results for previously unsolvable instances from the well-known PUC benchmark set, widely regarded as the most difficult Steiner tree test set in the literature.}, language = {en} } @inproceedings{NoackFochtSteinke2019, author = {Noack, Matthias and Focht, Erich and Steinke, Thomas}, title = {Heterogeneous Active Messages for Offloading on the NEC SX-Aurora TSUBASA}, booktitle = {2019 IEEE International Parallel and Distributed Processing Symposium Workshops (IPDPSW), Heterogeneity in Computing Workshop (HCW 2019)}, year = {2019}, abstract = {The NEC SX-Aurora TSUBASA is a new generation of vector processing architectures that combines a standard Intel Xeon host with the newly developed NEC Vector Engine co-processor cards. One way to use these co-processors is offloading suitable parts of the program from the host to the Vector Engines. Currently, the only vendor-provided offloading solutions are the low-level Vector Engine Offloading (VEO) library, and a builtin reverse-offloading mechanism named VHcall. In this work, we extend the portable Heterogeneous Active Messages (HAM) based HAM-Offload framework with support for the NEC SX-Aurora TSUBASA. Therefore, we design, implement, and evaluate two messaging protocols aimed at minimising offloading cost. This sheds some light on how to achieve fast communication between the host CPU and the Vector Engines of the NEC SX-Aurora TSUBASA. Compared with VEO, the DMA-based protocol reduces offloading overhead by a factor of 13×.
The resulting framework enables users to write portable offload applications with low overhead that neither require a language extension like OpenMP nor a special language like OpenCL. Existing HAM-Offload applications are now ready to run on the NEC SX-Aurora TSUBASA.}, language = {en} } @inproceedings{Noack2019, author = {Noack, Matthias}, title = {Heterogeneous Active Messages (HAM) — Implementing Lightweight Remote Procedure Calls in C++}, booktitle = {Proceedings of the 5th International Workshop on OpenCL, The Distributed \& Heterogeneous Programming in C/C++ (DHPCC++ 2019) Conference}, doi = {10.1145/3318170.3318195}, year = {2019}, abstract = {We present HAM (Heterogeneous Active Messages), a C++-only active messaging solution for heterogeneous distributed systems. Combined with a communication protocol, HAM can be used as a generic Remote Procedure Call (RPC) mechanism. It has been used in HAM-Offload to implement a low-overhead offloading framework for inter- and intra-node offloading between different architectures including accelerators like the Intel Xeon Phi x100 series and the NEC SX-Aurora TSUBASA Vector Engine. HAM uses template meta-programming to implicitly generate active message types and their corresponding handler functions. Heterogeneity is enabled by providing an efficient address translation mechanism between the individual handler code addresses of processes running different binaries on different architectures, as well as hooks to inject serialisation and deserialisation code on a per-type basis. Implementing such a solution in modern C++ sheds some light on the shortcomings and grey areas of the C++ standard when it comes to distributed and heterogeneous environments.}, language = {en} } @inproceedings{ShinanoRehfeldtKoch2019, author = {Shinano, Yuji and Rehfeldt, Daniel and Koch, Thorsten}, title = {Building Optimal Steiner Trees on Supercomputers by Using up to 43,000 Cores}, volume = {11494}, booktitle = {Integration of Constraint Programming, Artificial Intelligence, and Operations Research. CPAIOR 2019}, publisher = {Springer}, doi = {10.1007/978-3-030-19212-9_35}, pages = {529 -- 539}, year = {2019}, abstract = {SCIP-JACK is a customized, branch-and-cut based solver for Steiner tree and related problems. ug [SCIP-JACK, MPI] extends SCIP-JACK to a massively parallel solver by using the Ubiquity Generator (UG) framework. ug [SCIP-JACK, MPI] was the only solver that could run on a distributed environment at the (latest) 11th DIMACS Challenge in 2014. Furthermore, it could solve three well-known open instances and updated 14 best-known solutions to instances from the benchmark library STEINLIB. After the DIMACS Challenge, SCIP-JACK has been considerably improved. However, the improvements were not reflected in ug [SCIP-JACK, MPI]. This paper describes an updated version of ug [SCIP-JACK, MPI], especially branching on constraints and a customized racing ramp-up. Furthermore, the different stages of the solution process on a supercomputer are described in detail.
We also show the latest results on open instances from the STEINLIB.}, language = {en} } @misc{OmariLangePloentzkeetal.2019, author = {Omari, Mohamed and Lange, Alexander and Pl{\"o}ntzke, Julia and R{\"o}blitz, Susanna}, title = {A Mathematical Model for the Influence of Glucose-Insulin Dynamics on the Estrous Cycle in Dairy Cows}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-73475}, year = {2019}, abstract = {Nutrition plays a crucial role in regulating reproductive hormones and follicular development in cattle. This is visible particularly during the time of negative energy balance at the onset of milk production after calving. Here, elongated periods of anovulation have been observed, resulting from alterations in luteinizing hormone concentrations, likely caused by lower glucose and insulin concentrations in the blood. The mechanisms that result in a reduced fertility are not completely understood, although a close relationship to the glucose-insulin metabolism is widely supported. Following this idea, a mathematical model of the hormonal network combining reproductive hormones and hormones that are coupled to the glucose compartments within the body of the cow was developed. The model is built on ordinary differential equations and relies on previously introduced models on the bovine estrous cycle and the glucose-insulin dynamics. Necessary modifications and coupling mechanisms are thoroughly discussed. Depending on the composition and the amount of food, in particular the glucose content in the dry matter, the model quantifies reproductive hormones and follicular development over time. Simulation results for different nutritional regimes in lactating and non-lactating dairy cows are examined and compared with experimental studies. Regarding its applicability, this work is an early attempt towards developing in silico feeding strategies and may eventually help refining and reducing animal experiments.}, language = {en} } @misc{HelfmannDjurdjevacConradDjurdjevacetal.2019, author = {Helfmann, Luzie and Djurdjevac Conrad, Natasa and Djurdjevac, Ana and Winkelmann, Stefanie and Sch{\"u}tte, Christof}, title = {From interacting agents to density-based modeling with stochastic PDEs}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-73456}, year = {2019}, abstract = {Many real-world processes can naturally be modeled as systems of interacting agents. However, the long-term simulation of such agent-based models is often intractable when the system becomes too large. In this paper, starting from a stochastic spatio-temporal agent-based model (ABM), we present a reduced model in terms of stochastic PDEs that describes the evolution of agent number densities for large populations. We discuss the algorithmic details of both approaches; regarding the SPDE model, we apply Finite Element discretization in space which not only ensures efficient simulation but also serves as a regularization of the SPDE.
Illustrative examples for the spreading of an innovation among agents are given and used for comparing ABM and SPDE models.}, language = {en} } @inproceedings{WerdehausenBurgerStaudeetal.2019, author = {Werdehausen, Daniel and Burger, Sven and Staude, Isabelle and Pertsch, Thomas and Decker, Manuel}, title = {Nanocomposites - A Route to better and smaller optical Elements?}, booktitle = {Optical Design and Fabrication 2019}, doi = {10.1364/OFT.2019.OT2A.2}, pages = {OT2A.2}, year = {2019}, language = {en} } @article{BinkowskiZschiedrichHammerschmidtetal.2019, author = {Binkowski, Felix and Zschiedrich, Lin and Hammerschmidt, Martin and Burger, Sven}, title = {Modal analysis for nanoplasmonics with nonlocal material properties}, volume = {100}, journal = {Phys. Rev. B}, arxiv = {http://arxiv.org/abs/1906.01941}, doi = {10.1103/PhysRevB.100.155406}, pages = {155406}, year = {2019}, language = {en} } @inproceedings{AmbellanZachowvonTycowicz2019, author = {Ambellan, Felix and Zachow, Stefan and von Tycowicz, Christoph}, title = {A Surface-Theoretic Approach for Statistical Shape Modeling}, volume = {11767}, booktitle = {Proc. Medical Image Computing and Computer Assisted Intervention (MICCAI), Part IV}, publisher = {Springer}, doi = {10.1007/978-3-030-32251-9_3}, pages = {21 -- 29}, year = {2019}, abstract = {We present a novel approach for nonlinear statistical shape modeling that is invariant under Euclidean motion and thus alignment-free. By analyzing metric distortion and curvature of shapes as elements of Lie groups in a consistent Riemannian setting, we construct a framework that reliably handles large deformations. Due to the explicit character of Lie group operations, our non-Euclidean method is very efficient allowing for fast and numerically robust processing. This facilitates Riemannian analysis of large shape populations accessible through longitudinal and multi-site imaging studies providing increased statistical power. We evaluate the performance of our model w.r.t. shape-based classification of pathological malformations of the human knee and show that it outperforms the standard Euclidean as well as a recent nonlinear approach especially in the presence of sparse training data. To provide insight into the model's ability of capturing natural biological shape variability, we carry out an analysis of specificity and generalization ability.}, language = {en} } @article{ZhangKlusConradetal.2019, author = {Zhang, Wei and Klus, Stefan and Conrad, Tim and Sch{\"u}tte, Christof}, title = {Learning chemical reaction networks from trajectory data}, volume = {18}, journal = {SIAM Journal on Applied Dynamical Systems (SIADS)}, number = {4}, arxiv = {http://arxiv.org/abs/1902.04920}, doi = {10.1137/19M1265880}, pages = {2000 -- 2046}, year = {2019}, abstract = {We develop a data-driven method to learn chemical reaction networks from trajectory data. Modeling the reaction system as a continuous-time Markov chain and assuming the system is fully observed, our method learns the propensity functions of the system with predetermined basis functions by maximizing the likelihood function of the trajectory data under l^1 sparse regularization.
We demonstrate our method with numerical examples using synthetic data and carry out an asymptotic analysis of the proposed learning procedure in the infinite-data limit.}, language = {en} } @article{ThielDjurdjevacConradNtinietal.2019, author = {Thiel, Denise and Djurdjevac Conrad, Natasa and Ntini, Evgenia and Peschutter, Ria and Siebert, Heike and Marsico, Annalisa}, title = {Identifying lncRNA-mediated regulatory modules via ChIA-PET network analysis}, volume = {20}, journal = {BMC Bioinformatics}, number = {1471-2105}, doi = {10.1186/s12859-019-2900-8}, year = {2019}, abstract = {Background: Although several studies have provided insights into the role of long non-coding RNAs (lncRNAs), the majority of them have unknown function. Recent evidence has shown the importance of both lncRNAs and chromatin interactions in transcriptional regulation. Although network-based methods, mainly exploiting gene-lncRNA co-expression, have been applied to characterize lncRNA of unknown function by means of 'guilt-by-association', no strategy exists so far which identifies mRNA-lncRNA functional modules based on the 3D chromatin interaction graph. Results: To better understand the function of chromatin interactions in the context of lncRNA-mediated gene regulation, we have developed a multi-step graph analysis approach to examine the RNA polymerase II ChIA-PET chromatin interaction network in the K562 human cell line. We have annotated the network with gene and lncRNA coordinates, and chromatin states from the ENCODE project. We used centrality measures, as well as an adaptation of our previously developed Markov State Models (MSM) clustering method, to gain a better understanding of lncRNAs in transcriptional regulation. The novelty of our approach resides in the detection of fuzzy regulatory modules based on network properties and their optimization based on co-expression analysis between genes and gene-lncRNA pairs. This results in our method returning more bona fide regulatory modules than other state-of-the-art approaches for clustering on graphs. Conclusions: Interestingly, we find that lncRNA network hubs tend to be significantly enriched in evolutionarily conserved lncRNAs and enhancer-like functions. We validated regulatory functions for well-known lncRNAs, such as MALAT1 and the enhancer-like lncRNA FALEC. In addition, by investigating the modular structure of bigger components we mine putative regulatory functions for uncharacterized lncRNAs.}, language = {en} } @article{SkrzypczakSchintkeSchuett2019, author = {Skrzypczak, Jan and Schintke, Florian and Sch{\"u}tt, Thorsten}, title = {Linearizable State Machine Replication of State-Based CRDTs without Logs}, journal = {arXiv}, arxiv = {http://arxiv.org/abs/1905.08733v1}, year = {2019}, abstract = {General solutions of state machine replication have to ensure that all replicas apply the same commands in the same order, even in the presence of failures. Such strict ordering incurs high synchronization costs caused by distributed consensus or by the use of a leader. This paper presents a protocol for linearizable state machine replication of conflict-free replicated data types (CRDTs) that neither requires consensus nor a leader. By leveraging the properties of state-based CRDTs - in particular the monotonic growth of a join semilattice - synchronization overhead is greatly reduced. In addition, updates just need a single round trip and modify the state `in-place' without the need for a log.
Furthermore, the message size overhead for coordination consists of a single counter per message. While reads in the presence of concurrent updates are not wait-free without a coordinator, we show that more than 97\% of reads can be handled in one or two round trips under highly concurrent accesses. Our protocol achieves high throughput without auxiliary processes like command log management or leader election. It is well suited for all practical scenarios that need linearizable access on CRDT data on a fine-granular scale.}, language = {en} } @inproceedings{AlonsoBinnigPandisetal.2019, author = {Alonso, Gustavo and Binnig, Carsten and Pandis, Ippokratis and Salem, Kenneth and Skrzypczak, Jan and Stutsman, Ryan and Thostrup, Lasse and Wang, Tianzheng and Wang, Zeke and Ziegler, Tobias}, title = {DPI: The Data Processing Interface for Modern Networks}, booktitle = {9th Biennial Conference on Innovative Data Systems Research}, year = {2019}, abstract = {As data processing evolves towards large scale, distributed platforms, the network will necessarily play a substantial role in achieving efficiency and performance. Increasingly, switches, network cards, and protocols are becoming more flexible while programmability at all levels (aka, software defined networks) opens up many possibilities to tailor the network to data processing applications and to push processing down to the network elements. In this paper, we propose DPI, an interface providing a set of simple yet powerful abstractions flexible enough to exploit features of modern networks (e.g., RDMA or in-network processing) suitable for data processing. Mirroring the concept behind the Message Passing Interface (MPI) used extensively in high-performance computing, DPI is an interface definition rather than an implementation so as to be able to bridge different networking technologies and to evolve with them. In the paper we motivate and discuss key primitives of the interface and present a number of use cases that show the potential of DPI for data-intensive applications, such as analytic engines and distributed database systems.}, language = {en} } @misc{Hoppmann2019, author = {Hoppmann, Kai}, title = {On the Complexity of the Maximum Minimum Cost Flow Problem}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-73359}, year = {2019}, abstract = {Consider a flow network, i.e., a directed graph where each arc has a nonnegative capacity and an associated length, together with nonempty supply-intervals for the sources and nonempty demand-intervals for the sinks. The goal of the Maximum Minimum Cost Flow Problem (MMCF) is to find fixed supply and demand values within these intervals, such that the optimal objective value of the induced Minimum Cost Flow Problem (MCF) is maximized. In this paper, we show that MMCF is APX-hard and remains NP-hard in the uncapacitated case.}, language = {en} } @inproceedings{SalemSchuettSchintkeetal.2019, author = {Salem, Farouk and Sch{\"u}tt, Thorsten and Schintke, Florian and Reinefeld, Alexander}, title = {Scheduling Data Streams for Low Latency and High Throughput on a Cray XC40 Using Libfabric}, booktitle = {CUG Conference Proceedings}, year = {2019}, abstract = {Achieving efficient many-to-many communication on a given network topology is a challenging task when many data streams from different sources have to be scattered concurrently to many destinations with low variance in arrival times. 
In such scenarios, it is critical to saturate but not to congest the bisectional bandwidth of the network topology in order to achieve a good aggregate throughput. When there are many concurrent point-to-point connections, the communication pattern needs to be dynamically scheduled in a fine-grained manner to avoid network congestion (links, switches), overload in the node's incoming links, and receive buffer overflow. Motivated by the use case of the Compressed Baryonic Matter experiment (CBM), we study the performance and variance of such communication patterns on a Cray XC40 with different routing schemes and scheduling approaches. We present a distributed Data Flow Scheduler (DFS) that reduces the variance of arrival times from all sources at least 30 times and increases the achieved aggregate bandwidth by up to 50\%.}, language = {en} } @misc{SerranoSchwarzGleixner2019, author = {Serrano, Felipe and Schwarz, Robert and Gleixner, Ambros}, title = {On the Relation between the Extended Supporting Hyperplane Algorithm and Kelley's Cutting Plane Algorithm}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-73253}, year = {2019}, abstract = {Recently, Kronqvist et al. (2016) rediscovered the supporting hyperplane algorithm of Veinott (1967) and demonstrated its computational benefits for solving convex mixed-integer nonlinear programs. In this paper we derive the algorithm from a geometric point of view. This enables us to show that the supporting hyperplane algorithm is equivalent to Kelley's cutting plane algorithm applied to a particular reformulation of the problem. As a result, we extend the applicability of the supporting hyperplane algorithm to convex problems represented by general, not necessarily convex, differentiable functions that satisfy a mild condition.}, language = {en} } @incollection{AmbellanLameckervonTycowiczetal.2019, author = {Ambellan, Felix and Lamecker, Hans and von Tycowicz, Christoph and Zachow, Stefan}, title = {Statistical Shape Models - Understanding and Mastering Variation in Anatomy}, volume = {3}, booktitle = {Biomedical Visualisation}, number = {1156}, editor = {Rea, Paul M.}, edition = {1}, publisher = {Springer Nature Switzerland AG}, isbn = {978-3-030-19384-3}, doi = {10.1007/978-3-030-19385-0_5}, pages = {67 -- 84}, year = {2019}, abstract = {In our chapter we are describing how to reconstruct three-dimensional anatomy from medical image data and how to build Statistical 3D Shape Models out of many such reconstructions yielding a new kind of anatomy that not only allows quantitative analysis of anatomical variation but also a visual exploration and educational visualization. Future digital anatomy atlases will not only show a static (average) anatomy but also its normal or pathological variation in three or even four dimensions, hence, illustrating growth and/or disease progression. Statistical Shape Models (SSMs) are geometric models that describe a collection of semantically similar objects in a very compact way. SSMs represent an average shape of many three-dimensional objects as well as their variation in shape. The creation of SSMs requires a correspondence mapping, which can be achieved e.g. by parameterization with a respective sampling. If a corresponding parameterization over all shapes can be established, variation between individual shape characteristics can be mathematically investigated. We will explain what Statistical Shape Models are and how they are constructed. 
Extensions of Statistical Shape Models will be motivated for articulated coupled structures. In addition to shape, the appearance of objects will also be integrated into the concept. Appearance is a visual feature independent of shape that depends on observers or imaging techniques. Typical appearances are for instance the color and intensity of a visual surface of an object under particular lighting conditions, or measurements of material properties with computed tomography (CT) or magnetic resonance imaging (MRI). A combination of (articulated) statistical shape models with statistical models of appearance leads to articulated Statistical Shape and Appearance Models (a-SSAMs). After giving various examples of SSMs for human organs, skeletal structures, faces, and bodies, we will briefly describe clinical applications where such models have been successfully employed. Statistical Shape Models are the foundation for the analysis of anatomical cohort data, where characteristic shapes are correlated to demographic or epidemiologic data. SSMs consisting of several thousands of objects offer, in combination with statistical methods or machine learning techniques, the possibility to identify characteristic clusters, thus being the foundation for advanced diagnostic disease scoring.}, language = {en} } @article{GelssKlusEisertetal.2019, author = {Gelß, Patrick and Klus, Stefan and Eisert, Jens and Sch{\"u}tte, Christof}, title = {Multidimensional Approximation of Nonlinear Dynamical Systems}, volume = {14}, journal = {Journal of Computational and Nonlinear Dynamics}, number = {6}, doi = {10.1115/1.4043148}, year = {2019}, abstract = {A key task in the field of modeling and analyzing nonlinear dynamical systems is the recovery of unknown governing equations from measurement data only. There is a wide range of application areas for this important instance of system identification, ranging from industrial engineering and acoustic signal processing to stock market models. In order to find appropriate representations of underlying dynamical systems, various data-driven methods have been proposed by different communities. However, if the given data sets are high-dimensional, then these methods typically suffer from the curse of dimensionality. To significantly reduce the computational costs and storage consumption, we propose the method multidimensional approximation of nonlinear dynamical systems (MANDy) which combines data-driven methods with tensor network decompositions. The efficiency of the introduced approach will be illustrated with the aid of several high-dimensional nonlinear dynamical systems.}, language = {en} } @article{ReuterFackeldeyWeber2019, author = {Reuter, Bernhard and Fackeldey, Konstantin and Weber, Marcus}, title = {Generalized Markov modeling of nonreversible molecular kinetics}, volume = {17}, journal = {The Journal of Chemical Physics}, number = {150}, doi = {10.1063/1.5064530}, pages = {174103}, year = {2019}, abstract = {Markov state models are to date the gold standard for modeling molecular kinetics since they enable the identification and analysis of metastable states and related kinetics in a very instructive manner. The state-of-the-art Markov state modeling methods and tools are very well developed for the modeling of reversible processes in closed equilibrium systems. On the contrary, they are largely not well suited to deal with nonreversible or even nonautonomous processes of nonequilibrium systems.
Thus, we generalized the common Robust Perron Cluster Cluster Analysis (PCCA+) method to enable straightforward modeling of nonequilibrium systems as well. The resulting Generalized PCCA (G-PCCA) method readily handles equilibrium as well as nonequilibrium data by utilizing real Schur vectors instead of eigenvectors. This is implemented in the G-PCCA algorithm that enables the semiautomatic coarse graining of molecular kinetics. G-PCCA is not limited to the detection of metastable states but also enables the identification and modeling of cyclic processes. This is demonstrated by three typical examples of nonreversible systems.}, language = {en} } @article{ErnstFackeldeyVolkameretal.2019, author = {Ernst, Natalia and Fackeldey, Konstantin and Volkamer, Andrea and Opatz, Oliver and Weber, Marcus}, title = {Computation of temperature-dependent dissociation rates of metastable protein-ligand complexes}, volume = {45}, journal = {Molecular Simulation}, number = {11}, doi = {10.1080/08927022.2019.1610949}, pages = {904 -- 911}, year = {2019}, abstract = {Molecular simulations are often used to analyse the stability of protein-ligand complexes. The stability can be characterised by exit rates or using the exit time approach, i.e. by computing the expected holding time of the complex before its dissociation. However, determining exit rates by straightforward molecular dynamics methods can be challenging for stochastic processes in which the exit event occurs very rarely. Finding a low variance procedure for collecting rare event statistics is still an open problem. In this work we discuss a novel method for computing exit rates which uses results of Robust Perron Cluster Analysis (PCCA+). This clustering method gives the possibility to define a fuzzy set by a membership function, which provides additional information of the kind 'the process is about to leave the set'. Thus, the derived approach is not based on the exit event occurrence and, therefore, is also applicable in the case of rare events. The novel method can be used to analyse the temperature effect of protein-ligand systems through the differences in exit rates, and, thus, open up new drug design strategies and therapeutic applications.}, language = {en} } @article{SagnolPauwels2019, author = {Sagnol, Guillaume and Pauwels, Edouard}, title = {An unexpected connection between Bayes A-optimal designs and the group lasso}, volume = {60}, journal = {Statistical Papers}, number = {2}, doi = {10.1007/s00362-018-01062-y}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-73059}, pages = {215 -- 234}, year = {2019}, abstract = {We show that the A-optimal design optimization problem over m design points in R^n is equivalent to minimizing a quadratic function plus a group lasso sparsity inducing term over n x m real matrices. This observation allows us to describe several new algorithms for A-optimal design based on splitting and block coordinate decomposition. These techniques are well known and have proved powerful for treating large scale problems in machine learning and signal processing communities. The proposed algorithms come with rigorous convergence guarantees and convergence rate estimates stemming from the optimization literature.
Performance is illustrated on synthetic benchmarks and compared to existing methods for solving the optimal design problem.}, language = {en} } @article{DelleSiteKrekelerWhittakeretal.2019, author = {Delle Site, Luigi and Krekeler, Christian and Whittaker, John and Agarwal, Animesh and Klein, Rupert and H{\"o}fling, Felix}, title = {Molecular Dynamics of Open Systems: Construction of a Mean-Field Particle Reservoir}, volume = {2}, journal = {Advanced Theory and Simulations}, arxiv = {http://arxiv.org/abs/1902.07067}, doi = {10.1002/adts.201900014}, pages = {1900014}, year = {2019}, abstract = {The simulation of open molecular systems requires explicit or implicit reservoirs of energy and particles. Whereas full atomistic resolution is desired in the region of interest, there is some freedom in the implementation of the reservoirs. Here, a combined, explicit reservoir is constructed by interfacing the atomistic region with regions of point-like, non-interacting particles (tracers) embedded in a thermodynamic mean field. The tracer molecules acquire atomistic resolution upon entering the atomistic region and equilibrate with this environment, while atomistic molecules become tracers governed by an effective mean-field potential after crossing the atomistic boundary. The approach is extensively tested on thermodynamic, structural, and dynamic properties of liquid water. Conceptual and numerical advantages of the procedure as well as new perspectives are highlighted and discussed.}, language = {en} } @misc{BertholdGamrathSalvagnin2019, author = {Berthold, Timo and Gamrath, Gerald and Salvagnin, Domenico}, title = {Exploiting Dual Degeneracy in Branching}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-73028}, year = {2019}, abstract = {Branch-and-bound methods for mixed-integer programming (MIP) are traditionally based on solving a linear programming (LP) relaxation and branching on a variable which takes a fractional value in the (single) computed relaxation optimum. In this paper, we study branching strategies for mixed-integer programs that exploit the knowledge of multiple alternative optimal solutions (a cloud) of the current LP relaxation. These strategies naturally extend common methods like most infeasible branching, strong branching, pseudocost branching, and their hybrids, but we also propose a novel branching rule called cloud diameter branching. We show that dual degeneracy, a requirement for alternative LP optima, is present for many instances from common MIP test sets. Computational experiments show significant improvements in the quality of branching decisions as well as reduced branching effort when using our modifications of existing branching rules. We discuss different ways to generate a cloud of solutions and present extensive computational results showing that through a careful implementation, cloud modifications can speed up full strong branching by more than 10 \% on standard test sets.
Additionally, by exploiting degeneracy, we are also able to improve the state-of-the-art hybrid branching rule and reduce the solving time on affected instances by almost 20 \% on average.}, language = {en} } @misc{BuchmannKaplanPowelletal.2019, author = {Buchmann, Jens and Kaplan, Bernhard and Powell, Samuel and Prohaska, Steffen and Laufer, Jan}, title = {3D quantitative photoacoustic tomography using an adjoint radiance Monte Carlo model and gradient descent}, issn = {1438-0064}, doi = {10.1117/1.JBO.24.6.066001}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-72995}, year = {2019}, abstract = {Quantitative photoacoustic tomography aims to recover maps of the local concentrations of tissue chromophores from multispectral images. While model-based inversion schemes are promising approaches, major challenges to their practical implementation include the unknown fluence distribution and the scale of the inverse problem. This paper describes an inversion scheme based on a radiance Monte Carlo model and an adjoint-assisted gradient optimization that incorporates fluence-dependent step sizes and adaptive moment estimation. The inversion is shown to recover absolute chromophore concentrations, blood oxygen saturation and the Gr{\"u}neisen parameter from in silico 3D phantom images for different radiance approximations. The scattering coefficient was assumed to be homogeneous and known a priori.}, language = {en} } @article{BuchmannKaplanPowelletal.2019, author = {Buchmann, Jens and Kaplan, Bernhard and Powell, Samuel and Prohaska, Steffen and Laufer, Jan}, title = {3D quantitative photoacoustic tomography using an adjoint radiance Monte Carlo model and gradient descent}, volume = {24}, journal = {Journal of Biomedical Optics}, number = {6}, doi = {10.1117/1.JBO.24.6.066001}, pages = {066001}, year = {2019}, abstract = {Quantitative photoacoustic tomography aims to recover maps of the local concentrations of tissue chromophores from multispectral images. While model-based inversion schemes are promising approaches, major challenges to their practical implementation include the unknown fluence distribution and the scale of the inverse problem. This paper describes an inversion scheme based on a radiance Monte Carlo model and an adjoint-assisted gradient optimization that incorporates fluence-dependent step sizes and adaptive moment estimation. The inversion is shown to recover absolute chromophore concentrations, blood oxygen saturation and the Gr{\"u}neisen parameter from in silico 3D phantom images for different radiance approximations. The scattering coefficient was assumed to be homogeneous and known a priori.}, language = {en} } @article{GrafFeisSantiagoetal.2019, author = {Graf, Florian and Feis, Joshua and Santiago, Xavier Garcia and Wegener, Martin and Rockstuhl, Carsten and Fernandez-Corbaton, Ivan}, title = {Achiral, Helicity Preserving, and Resonant Structures for Enhanced Sensing of Chiral Molecules}, volume = {6}, journal = {ACS Photonics}, doi = {10.1021/acsphotonics.8b01454}, pages = {482}, year = {2019}, language = {en} } @article{BinkowskiZschiedrichBurger2019, author = {Binkowski, Felix and Zschiedrich, Lin and Burger, Sven}, title = {An auxiliary field approach for computing optical resonances in dispersive media}, volume = {15}, journal = {J. Eur. Opt. 
Soc.-Rapid}, doi = {10.1186/s41476-019-0098-z}, pages = {3}, year = {2019}, language = {en} } @misc{Niemann2019, type = {Master Thesis}, author = {Niemann, Jan-Hendrik}, title = {A Dynamic Memory Approach for Consensus Problems with Packet Loss}, pages = {64}, year = {2019}, language = {en} } @article{SchneiderHammerschmidtZschiedrichetal.2019, author = {Schneider, Philipp-Immanuel and Hammerschmidt, Martin and Zschiedrich, Lin and Burger, Sven}, title = {Using Gaussian process regression for efficient parameter reconstruction}, volume = {10959}, journal = {Proc. SPIE}, arxiv = {http://arxiv.org/abs/1903.12128}, doi = {10.1117/12.2513268}, pages = {1095911}, year = {2019}, language = {en} } @misc{ShinanoRehfeldtGalley2019, author = {Shinano, Yuji and Rehfeldt, Daniel and Gally, Tristan}, title = {An Easy Way to Build Parallel State-of-the-art Combinatorial Optimization Problem Solvers: A Computational Study on Solving Steiner Tree Problems and Mixed Integer Semidefinite Programs by using ug[SCIP-*,*]-libraries}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-72804}, year = {2019}, abstract = {Branch-and-bound (B\&B) is an algorithmic framework for solving NP-hard combinatorial optimization problems. Although several well-designed software frameworks for parallel B\&B have been developed over the last two decades, there is very little literature about successfully solving previously intractable combinatorial optimization problem instances to optimality by using such frameworks. The main reason for this limited impact of parallel solvers is that the algorithmic improvements for specific problem types are significantly greater than performance gains obtained by parallelization in general. Therefore, in order to solve hard problem instances for the first time, one needs to accelerate state-of-the-art algorithm implementations. In this paper, we present a computational study for solving Steiner tree problems and mixed integer semidefinite programs in parallel. These state-of-the-art algorithm implementations are based on SCIP and were parallelized via the ug[SCIP-*,*]-libraries---by adding less than 200 lines of glue code. Despite the ease of their parallelization, these solvers have the potential to solve previously intractable instances. In this paper, we demonstrate the convenience of such a parallelization and present results for previously unsolvable instances from the well-known PUC benchmark set, widely regarded as the most difficult Steiner tree test set in the literature.}, language = {en} } @article{KramerLaeuter2019, author = {Kramer, Tobias and L{\"a}uter, Matthias}, title = {Outgassing induced acceleration of comet 67P/Churyumov-Gerasimenko}, volume = {630}, journal = {Astronomy \& Astrophysics}, arxiv = {http://arxiv.org/abs/1902.02701}, doi = {10.1051/0004-6361/201935229}, pages = {A4}, year = {2019}, abstract = {Cometary activity affects the orbital motion and rotation state due to sublimation induced forces. The availability of precise rotation-axis orientation and position data from the Rosetta mission allows one to accurately determine the outgassing of comet Churyumov-Gerasimenko/67P (67P). We derive the observed non-gravitational acceleration of 67P directly from the Rosetta spacecraft trajectory. From the non-gravitational acceleration we recover the diurnal outgassing variations and study a possible delay of the sublimation response with respect to the peak solar illumination.
This allows us to compare the non-gravitational acceleration of 67P with expectations based on empirical models and common assumptions about the sublimation process. We use an iterative orbit refinement and Fourier decomposition of the diurnal activity to derive the outgassing induced non-gravitational acceleration. The uncertainties of the data reduction are established by a sensitivity analysis of an ensemble of best-fit orbits for comet 67P. We find that the Marsden non-gravitational acceleration parameters reproduce part of the non-gravitational acceleration but need to be augmented by an analysis of the nucleus geometry and surface illumination to draw conclusions about the sublimation process on the surface. The non-gravitational acceleration follows closely the subsolar latitude (seasonal illumination), with a small lag angle with respect to local noon around perihelion. The observed minor changes of the rotation axis do not favor forced precession models for the non-gravitational acceleration. In contrast to the sublimation induced torques, the non-gravitational acceleration does not put strong constraints on localized active areas on the nucleus. We find a close agreement of the orbit deduced non-gravitational acceleration and the water production independently derived from Rosetta in-situ measurement.}, language = {en} } @misc{MuellerSerranoGleixner2019, author = {M{\"u}ller, Benjamin and Serrano, Felipe and Gleixner, Ambros}, title = {Using two-dimensional Projections for Stronger Separation and Propagation of Bilinear Terms}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-72759}, year = {2019}, abstract = {One of the most fundamental ingredients in mixed-integer nonlinear programming solvers is the well-known McCormick relaxation for a product of two variables x and y over a box-constrained domain. The starting point of this paper is the fact that the convex hull of the graph of xy can be much tighter when computed over a strict, non-rectangular subset of the box. In order to exploit this in practice, we propose to compute valid linear inequalities for the projection of the feasible region onto the x-y-space by solving a sequence of linear programs akin to optimization-based bound tightening. These valid inequalities allow us to employ results from the literature to strengthen the classical McCormick relaxation. As a consequence, we obtain a stronger convexification procedure that exploits problem structure and can benefit from supplementary information obtained during the branch-and-bound algorithm such as an objective cutoff. We complement this by a new bound tightening procedure that efficiently computes the best possible bounds for x, y, and xy over the available projections.
Our computational evaluation using the academic solver SCIP shows that the proposed methods are applicable to a large portion of the public test library MINLPLib and help to improve performance significantly.}, language = {en} } @article{TawfikLimbourg2019, author = {Tawfik, Christine and Limbourg, Sabine}, title = {A Bilevel Model for Network Design and Pricing Based on a Level-of-Service Assessment}, volume = {53}, journal = {Transportation Science}, number = {6}, publisher = {Informs}, address = {Transportation Science}, doi = {10.1287/trsc.2019.0906}, pages = {1609 -- 1626}, year = {2019}, abstract = {Within a wide view to stimulate intermodal transport, this paper is devoted to the examination of the intrinsically related problems of designing freight carrying services and determining their associated prices as observed by the shipper firms. A path-based multicommodity formulation is developed for a medium-term planning horizon, from the perspective of an intermodal operator. In the quest of incorporating nonprice attributes, two approaches are proposed to depict a realistic assessment of the service quality. First, frequency delay constraints are added to the upper level problem. Second, based on a random utility model, behavioural concepts are integrated in the expression of the lower level as a logistics costs minimization problem. Exact tests are invoked on real-world instances, demonstrating the capability of the presented approaches of reaching reasonable results within acceptable computation times and optimality gaps. The broader level-of-service perspective imposes additional costs on the service providers, although to a lesser extent on long-distance freight corridors, as indicated by the computed market share and net profit. Further experiments are conducted to test the impact of certain transport management instruments (e.g. subsidies and service capacities) on the modal split, as well as to assess the intermodality's future based on a scenario analysis methodology.}, language = {en} } @misc{AmbellanTackEhlkeetal.2019, author = {Ambellan, Felix and Tack, Alexander and Ehlke, Moritz and Zachow, Stefan}, title = {Automated Segmentation of Knee Bone and Cartilage combining Statistical Shape Knowledge and Convolutional Neural Networks: Data from the Osteoarthritis Initiative}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-72704}, year = {2019}, abstract = {We present a method for the automated segmentation of knee bones and cartilage from magnetic resonance imaging (MRI) that combines a priori knowledge of anatomical shape with Convolutional Neural Networks (CNNs). The proposed approach incorporates 3D Statistical Shape Models (SSMs) as well as 2D and 3D CNNs to achieve a robust and accurate segmentation of even highly pathological knee structures. The shape models and neural networks employed are trained using data from the Osteoarthritis Initiative (OAI) and the MICCAI grand challenge "Segmentation of Knee Images 2010" (SKI10), respectively. We evaluate our method on 40 validation and 50 submission datasets from the SKI10 challenge. For the first time, an accuracy equivalent to the inter-observer variability of human readers is achieved in this challenge. Moreover, the quality of the proposed method is thoroughly assessed using various measures for data from the OAI, i.e. 507 manual segmentations of bone and cartilage, and 88 additional manual segmentations of cartilage. Our method yields sub-voxel accuracy for both OAI datasets.
We make the 507 manual segmentations as well as our experimental setup publicly available to further aid research in the field of medical image segmentation. In conclusion, combining localized classification via CNNs with statistical anatomical knowledge via SSMs results in a state-of-the-art segmentation method for knee bones and cartilage from MRI data.}, language = {en} } @misc{AmbellanLameckervonTycowiczetal.2019, author = {Ambellan, Felix and Lamecker, Hans and von Tycowicz, Christoph and Zachow, Stefan}, title = {Statistical Shape Models - Understanding and Mastering Variation in Anatomy}, issn = {1438-0064}, doi = {10.1007/978-3-030-19385-0_5}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-72699}, year = {2019}, abstract = {In our chapter we describe how to reconstruct three-dimensional anatomy from medical image data and how to build Statistical 3D Shape Models out of many such reconstructions, yielding a new kind of anatomy that not only allows quantitative analysis of anatomical variation but also visual exploration and educational visualization. Future digital anatomy atlases will not only show a static (average) anatomy but also its normal or pathological variation in three or even four dimensions, hence illustrating growth and/or disease progression. Statistical Shape Models (SSMs) are geometric models that describe a collection of semantically similar objects in a very compact way. SSMs represent an average shape of many three-dimensional objects as well as their variation in shape. The creation of SSMs requires a correspondence mapping, which can be achieved e.g. by parameterization with a respective sampling. If a corresponding parameterization over all shapes can be established, variation between individual shape characteristics can be mathematically investigated. We will explain what Statistical Shape Models are and how they are constructed. Extensions of Statistical Shape Models will be motivated for articulated coupled structures. In addition to shape, the appearance of objects will also be integrated into the concept. Appearance is a visual feature independent of shape that depends on observers or imaging techniques. Typical appearances are for instance the color and intensity of a visual surface of an object under particular lighting conditions, or measurements of material properties with computed tomography (CT) or magnetic resonance imaging (MRI). A combination of (articulated) statistical shape models with statistical models of appearance leads to articulated Statistical Shape and Appearance Models (a-SSAMs). After giving various examples of SSMs for human organs, skeletal structures, faces, and bodies, we will briefly describe clinical applications where such models have been successfully employed. Statistical Shape Models are the foundation for the analysis of anatomical cohort data, where characteristic shapes are correlated to demographic or epidemiologic data.
SSMs consisting of several thousand objects offer, in combination with statistical methods or machine learning techniques, the possibility to identify characteristic clusters, thus being the foundation for advanced diagnostic disease scoring.}, language = {en} } @misc{AndersonHendelLeBodicetal.2019, author = {Anderson, Daniel and Hendel, Gregor and Le Bodic, Pierre and Viernickel, Jan Merlin}, title = {Clairvoyant Restarts in Branch-and-Bound Search Using Online Tree-Size Estimation}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-72653}, year = {2019}, abstract = {We propose a simple and general online method to measure the search progress within the Branch-and-Bound algorithm, from which we estimate the size of the remaining search tree. We then show how this information can help solvers algorithmically at runtime by designing a restart strategy for Mixed-Integer Programming (MIP) solvers that decides whether to restart the search based on the current estimate of the number of remaining nodes in the tree. We refer to this type of algorithm as clairvoyant. Our clairvoyant restart strategy outperforms a state-of-the-art solver on a large set of publicly available MIP benchmark instances. It is implemented in the MIP solver SCIP and will be available in future releases.}, language = {en} } @article{RettigHaasePletnyovetal.2019, author = {Rettig, Anika and Haase, Tobias and Pletnyov, Alexandr and Kohl, Benjamin and Ertel, Wolfgang and von Kleist, Max and Sunkara, Vikram}, title = {SLCV - A Supervised Learning - Computer Vision combined strategy for automated muscle fibre detection in cross sectional images}, journal = {PeerJ}, publisher = {PeerJ}, address = {PeerJ}, doi = {10.7717/peerj.7053}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-72639}, year = {2019}, abstract = {Muscle fibre cross sectional area (CSA) is an important biomedical measure used to determine the structural composition of skeletal muscle, and it is relevant for tackling research questions in many different fields of research. To date, time-consuming and tedious manual delineation of muscle fibres is often used to determine the CSA. Few methods are able to automatically detect muscle fibres in muscle fibre cross sections to quantify CSA due to challenges posed by variation of brightness and noise in the staining images. In this paper, we introduce SLCV, a robust semi-automatic pipeline for muscle fibre detection, which combines supervised learning (SL) with computer vision (CV). SLCV is adaptable to different staining methods and is quickly and intuitively tunable by the user. We are the first to perform an error analysis with respect to cell count and area, based on which we compare SLCV to the best purely CV-based pipeline in order to identify the contribution of SL and CV steps to muscle fibre detection. Our results obtained on 27 fluorescence-stained cross sectional images of varying staining quality suggest that combining SL and CV performs significantly better than both SL-based and CV-based methods with regard to both the cell separation and the area reconstruction error. Furthermore, applying SLCV to our test set images yielded fibre detection results of very high quality, with average sensitivity values of 0.93 or higher on different cluster sizes and an average Dice Similarity Coefficient (DSC) of 0.9778.}, language = {en} } @article{AchterbergBixbyGuetal.2019, author = {Achterberg, Tobias and Bixby, Robert E.
and Gu, Zonghao and Rothberg, Edward and Weninger, Dieter}, title = {Presolve Reductions in Mixed Integer Programming}, journal = {INFORMS Journal on Computing}, year = {2019}, abstract = {Mixed integer programming has become a very powerful tool for modeling and solving real-world planning and scheduling problems, with the breadth of applications appearing to be almost unlimited. A critical component in the solution of these mixed-integer programs is a set of routines commonly referred to as presolve. Presolve can be viewed as a collection of preprocessing techniques that reduce the size of and, more importantly, improve the ``strength'' of the given model formulation, that is, the degree to which the constraints of the formulation accurately describe the underlying polyhedron of integer-feasible solutions. As our computational results will show, presolve is a key factor in the speed with which we can solve mixed-integer programs, and is often the difference between a model being intractable and solvable, in some cases easily solvable. In this paper we describe the presolve functionality in the Gurobi commercial mixed-integer programming code. This includes an overview, or taxonomy of the different methods that are employed, as well as more-detailed descriptions of several of the techniques, with some of them appearing, to our knowledge, for the first time in the literature.}, language = {en} } @misc{SakuraiOnoCarretal.2019, author = {Sakurai, Daisuke and Ono, Kenji and Carr, Hamish and Nonaka, Jorji and Kawanabe, Tomohiro}, title = {Flexible Fiber Surfaces: A Reeb-Free Approach}, journal = {Topological Methods in Data Analysis and Visualization V}, editor = {Carr, Hamish and Fujishiro, Issei and Sadlo, Filip and Takahashi, Shigeo}, publisher = {Springer}, pages = {14}, year = {2019}, abstract = {The fiber surface generalizes the popular isosurface to multi-fields, so that pre-images can be visualized as surfaces. As with the isosurface, however, the fiber surface suffers from visual occlusion. We propose to avoid such occlusion by restricting the components to only the relevant ones with a new component-wise flexing algorithm. The approach, flexible fiber surface, generalizes the manipulation idea found in the flexible isosurface for the fiber surface. The flexible isosurface in the original form, however, relies on the contour tree. For the fiber surface, this corresponds to the Reeb space, which is challenging for both the computation and user interaction. We thus take a Reeb-free approach, in which one does not compute the Reeb space. Under this constraint, we generalize a few selected interactions in the flexible isosurface and discuss the implication of the restriction.}, language = {en} } @misc{SakuraiOnoCarretal.2019, author = {Sakurai, Daisuke and Ono, Kenji and Carr, Hamish and Nonaka, Jorji and Kawanabe, Tomohiro}, title = {Flexible Fiber Surfaces: A Reeb-Free Approach}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-72599}, year = {2019}, abstract = {The fiber surface generalizes the popular isosurface to multi-fields, so that pre-images can be visualized as surfaces. As with the isosurface, however, the fiber surface suffers from visual occlusion. We propose to avoid such occlusion by restricting the components to only the relevant ones with a new component-wise flexing algorithm. The approach, flexible fiber surface, generalizes the manipulation idea found in the flexible isosurface for the fiber surface. 
The flexible isosurface in the original form, however, relies on the contour tree. For the fiber surface, this corresponds to the Reeb space, which is challenging for both the computation and user interaction. We thus take a Reeb-free approach, in which one does not compute the Reeb space. Under this constraint, we generalize a few selected interactions in the flexible isosurface and discuss the implication of the restriction.}, language = {en} } @article{MunguiaOxberryRajanetal.2019, author = {Munguia, Lluis-Miquel and Oxberry, Geoffrey and Rajan, Deepak and Shinano, Yuji}, title = {Parallel PIPS-SBB: Multi-Level Parallelism For Stochastic Mixed-Integer Programs}, journal = {Computational Optimization and Applications}, doi = {10.1007/s10589-019-00074-0}, year = {2019}, abstract = {PIPS-SBB is a distributed-memory parallel solver with a scalable data distribution paradigm. It is designed to solve MIPs with a dual-block angular structure, which is characteristic of deterministic-equivalent Stochastic Mixed-Integer Programs (SMIPs). In this paper, we present two different parallelizations of Branch \& Bound (B\&B), implementing both as extensions of PIPS-SBB, thus adding an additional layer of parallelism. In the first of the proposed frameworks, PIPS-PSBB, the coordination and load-balancing of the different optimization workers is done in a decentralized fashion. This new framework is designed to ensure all available cores are processing the most promising parts of the B\&B tree. The second, ug[PIPS-SBB,MPI], is a parallel implementation using the Ubiquity Generator (UG), a universal framework for parallelizing B\&B tree search that has been successfully applied to other MIP solvers. We show the effects of leveraging multiple levels of parallelism in potentially improving scaling performance beyond thousands of cores.}, language = {en} } @inproceedings{Serrano2019, author = {Serrano, Felipe}, title = {Intersection cuts for factorable MINLP}, volume = {11480}, booktitle = {A. Lodi, V. Nagarajan (eds), Integer Programming and Combinatorial Optimization: 20th International Conference, IPCO 2019}, doi = {10.1007/978-3-030-17953-3_29}, pages = {385 -- 398}, year = {2019}, abstract = {Given a factorable function f, we propose a procedure that constructs a concave underestimator of f that is tight at a given point. These underestimators can be used to generate intersection cuts. A peculiarity of these underestimators is that they do not rely on a bounded domain. We propose a strengthening procedure for the intersection cuts that exploits the bounds of the domain.
Finally, we propose an extension of monoidal strengthening to take advantage of the integrality of the non-basic variables.}, language = {en} } @masterthesis{Prendke2019, type = {Bachelor Thesis}, author = {Prendke, Mona}, title = {Comparison of 2D and 3D CNNs for Classification of Knee MRI}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-72439}, pages = {53}, year = {2019}, language = {en} } @article{HildebrandtBrueningSchmidtetal.2019, author = {Hildebrandt, Thomas and Bruening, Jan Joris and Schmidt, Nora Laura and Lamecker, Hans and Heppt, Werner and Zachow, Stefan and Goubergrits, Leonid}, title = {The Healthy Nasal Cavity - Characteristics of Morphology and Related Airflow Based on a Statistical Shape Model Viewed from a Surgeon's Perspective}, volume = {35}, journal = {Facial Plastic Surgery}, number = {1}, doi = {10.1055/s-0039-1677721}, pages = {9 -- 13}, year = {2019}, abstract = {Functional surgery on the nasal framework requires referential criteria to objectively assess nasal breathing for indication and follow-up. This motivated us to generate a mean geometry of the nasal cavity based on a statistical shape model. In this study, the authors could demonstrate that the introduced nasal cavity's mean geometry features characteristics of the inner shape and airflow, which are commonly observed in symptom-free subjects. Therefore, the mean geometry might serve as a reference-like model when one considers qualitative aspects. However, to facilitate quantitative considerations and statistical inference, further research is necessary. Additionally, the authors were able to obtain details about the importance of the isthmus nasi and the inferior turbinate for the intranasal airstream.}, language = {en} } @article{HildebrandtBrueningLameckeretal.2019, author = {Hildebrandt, Thomas and Bruening, Jan Joris and Lamecker, Hans and Zachow, Stefan and Heppt, Werner and Schmidt, Nora and Goubergrits, Leonid}, title = {Digital Analysis of Nasal Airflow Facilitating Decision Support in Rhinosurgery}, volume = {35}, journal = {Facial Plastic Surgery}, number = {1}, doi = {10.1055/s-0039-1677720}, pages = {1 -- 8}, year = {2019}, abstract = {Successful functional surgery on the nasal framework requires reliable and comprehensive diagnosis. In this regard, the authors introduce a new methodology: Digital Analysis of Nasal Airflow (diANA). It is based on computational fluid dynamics, a statistical shape model of the healthy nasal cavity and rhinologic expertise. diANA necessitates an anonymized tomographic dataset of the paranasal sinuses including the complete nasal cavity and, when available, clinical information. The principle of diANA is to compare the morphology and the respective airflow of an individual nose with those of a reference. This enables morphometric aberrations and consecutive flow field anomalies to be localized and quantified within a patient's nasal cavity. Finally, an elaborated expert opinion with instructive visualizations is provided. Using diANA might support surgeons in decision-making, help avoid unnecessary surgery, and provide more precision and target orientation for indicated operations.}, language = {en} }