@misc{FujiiItoKimetal., author = {Fujii, Koichi and Ito, Naoki and Kim, Sunyoung and Kojima, Masakazu and Shinano, Yuji and Toh, Kim-Chuan}, title = {Solving Challenging Large Scale QAPs}, issn = {1438-0064}, doi = {10.12752/8130}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-81303}, abstract = {We report our progress on the project for solving larger-scale quadratic assignment problems (QAPs). Our main approach to solving large-scale NP-hard combinatorial optimization problems such as QAPs is a parallel branch-and-bound method efficiently implemented on a powerful computer system using the Ubiquity Generator (UG) framework, which can utilize more than 100,000 cores. Lower bounding procedures incorporated in the branch-and-bound method play a crucial role in solving the problems. As a strong lower bounding procedure, we employ the Lagrangian doubly nonnegative (DNN) relaxation and the Newton-bracketing method developed by the authors' group. In this report, we describe some basic tools used in the project, including the lower bounding procedure and branching rules, and present some preliminary numerical results. Our next target is QAPs with dimension at least 50, now that we have succeeded in solving tai30a and sko42 from QAPLIB for the first time.}, language = {en} }

@misc{Keidel, type = {Master Thesis}, author = {Keidel, Stefan}, title = {Snapshots in Scalaris}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-42282}, school = {Zuse Institute Berlin (ZIB)}, pages = {87}, abstract = {One of the biggest obstacles to the practical use of Scalaris, a scalable implementation of a distributed hash table with transaction support, is the lack of a method for capturing a consistent state of the entire system. In this thesis, we present a simple protocol that accomplishes this task and that, owing to the approach we have chosen, is easy to implement. As a starting point, we select, from a range of "classical" snapshot algorithms, a method designed by Mattern in 1993 that builds on the algorithm of Lai and Yang. This choice is based on a thorough analysis of the protocols, taking into account the architecture of the existing software. In the next step, we use our full knowledge of the internals of the Scalaris transaction system to simplify the method with regard to usability and implementation complexity, without weakening the requirements on the captured state. Instead of a loose collection of the local states of the individual participating nodes, we can ultimately produce a single large key-value table that is consistent, easy to process further, and corresponds to a state the system could actually have been in at some point. After implementing the method in software, we evaluate the results with respect to the impact on the performance of the overall system and discuss possible further developments.}, language = {de} }

@misc{OezelKulkarniHasanetal., author = {{\"O}zel, M. Neset and Kulkarni, Abhishek and Hasan, Amr and Brummer, Josephine and Moldenhauer, Marian and Daumann, Ilsa-Maria and Wolfenberg, Heike and Dercksen, Vincent J. and Kiral, F.
Ridvan and Weiser, Martin and Prohaska, Steffen and von Kleist, Max and Hiesinger, Peter Robin}, title = {Serial synapse formation through filopodial competition for synaptic seeding factors}, issn = {1438-0064}, doi = {10.1016/j.devcel.2019.06.014}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-74397}, abstract = {Following axon pathfinding, growth cones transition from stochastic filopodial exploration to the formation of a limited number of synapses. How the interplay of filopodia and synapse assembly ensures robust connectivity in the brain has remained a challenging problem. Here, we developed a new 4D analysis method for filopodial dynamics and a data-driven computational model of synapse formation for R7 photoreceptor axons in developing Drosophila brains. Our live data support a 'serial synapse formation' model, in which at any time point only a single 'synaptogenic' filopodium suppresses the synaptic competence of the other filopodia through competition for synaptic seeding factors. Loss of the synaptic seeding factors Syd-1 and Liprin-α leads to a loss of this suppression, filopodial destabilization and reduced synapse formation, which is sufficient to cause the destabilization of entire axon terminals. Our model provides a filopodial 'winner-takes-all' mechanism that ensures the formation of an appropriate number of synapses.}, language = {en} }

@misc{BleyD'AndreagiovanniKarch, author = {Bley, Andreas and D'Andreagiovanni, Fabio and Karch, Daniel}, title = {Scheduling technology migration in WDM Networks}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-42654}, abstract = {The rapid technological evolution of telecommunication networks demands that service providers regularly update their technology, with the aim of remaining competitive in the marketplace. However, upgrading the technology in a network is not a trivial task. New hardware components need to be installed in the network, and during the installation network connectivity may be temporarily compromised. The Wavelength Division Multiplexing (WDM) technology, whose upgrade is considered here, shares fiber links among several optical connections, and tearing down a single link may disrupt several optical connections at once. When the upgrades involve large parts of a network, typically not all links can be upgraded in parallel, which may lead to unavoidably longer disruptions of some connections. Poor scheduling of the overall endeavor, however, can dramatically increase the disconnection time of parts of the network, causing extended service disruption. In this contribution, we study the problem of finding a schedule of the fiber link upgrades that minimizes the total service disruption time. To the best of our knowledge, this problem has not yet been formalized and investigated. The aim of our work is to close this gap by presenting a mathematical optimization model for the problem and an innovative solution algorithm that tackles its intrinsic difficulties. Computational experience on realistic instances completes our study.
Our original investigations have been driven by the real needs of DFN, the operator of the German National Research and Education Network and our partner in the BMBF research project ROBUKOM (http://www.robukom.de/).}, language = {en} }

@misc{BaumLindowHegeetal., author = {Baum, Daniel and Lindow, Norbert and Hege, Hans-Christian and Lepper, Verena and Siopi, Tzulia and Kutz, Frank and Mahlow, Kristin and Mahnke, Heinz-Eberhard}, title = {Revealing hidden text in rolled and folded papyri}, issn = {1438-0064}, doi = {10.1007/s00339-017-0808-6}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-61826}, abstract = {Ancient Egyptian papyri are often folded, rolled up or kept as small packages, sometimes even sealed. Physically unrolling or unfolding these packages might severely damage them. We demonstrate a way to gain access to the hidden script without physical unfolding by employing computed tomography and mathematical algorithms for virtual unrolling and unfolding. Our algorithmic approaches are combined with manual interaction. This provides the necessary flexibility to enable the unfolding of even complicated and partly damaged papyrus packages. In addition, it allows us to cope with challenges posed by the structure of ancient papyrus, which is rather irregular compared to other writing substrates like metallic foils or parchment. Unfolding of packages is done in two stages. In the first stage, we virtually invert the physical folding process step by step until the partially unfolded package is topologically equivalent to a scroll or a papyrus sheet folded only along one fold line. To minimize distortions at this stage, we apply the method of moving least squares. In the second stage, the papyrus is simply flattened, which requires the definition of a medial surface. We have applied our software framework to several papyri. In this work, we present the results of applying our approaches to mockup papyri that were either rolled or folded along perpendicular fold lines. In the case of the folded papyrus, our approach represents the first attempt to address the unfolding of such complicated folds.}, language = {en} }

@misc{Witzig, type = {Master Thesis}, author = {Witzig, Jakob}, title = {Reoptimization Techniques in MIP Solvers}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-54067}, pages = {176}, abstract = {Many optimization problems can be modeled as Mixed Integer Programs (MIPs). In general, MIPs cannot be solved efficiently, since solving MIPs is NP-hard; see, e.g., Schrijver, 2003. Common methods for solving NP-hard problems are branch-and-bound and column generation. In the case of column generation, the original problem is decomposed or reformulated into one or more smaller subproblems, which are easier to solve. Each of these subproblems is solved separately and recurrently, which can be interpreted as solving a sequence of optimization problems. In this thesis, we consider a sequence of MIPs that differ only in their respective objective functions. Furthermore, we assume that each of these MIPs is solved with a branch-and-bound algorithm. This thesis aims to determine whether the solving process of a given sequence of MIPs can be accelerated by reoptimization. By reoptimization, we mean starting the solving process of a MIP in this sequence from a given frontier of a search tree corresponding to another MIP in the sequence. We begin by introducing an LP-based branch-and-bound algorithm.
This algorithm is inspired by the reoptimizing algorithm of Hiller, Klug, and the author of this thesis, 2013. Since most state-of-the-art MIP solvers make decisions based on dual information, which leads to the loss of feasible solutions after the objective function changes, we present a technique that guarantees optimality despite the use of such information. A decision is based on dual information if it is valid for at least one feasible solution, whereas a decision is based on primal information if it is valid for all feasible solutions. Afterwards, we consider representing the search frontier of the tree by a set of nodes of a given size. We call this the Tree Compression Problem. Moreover, we present a criterion characterizing the similarity of two objective functions. To evaluate our reoptimization approach, we extend the well-known and well-maintained MIP solver SCIP to an LP-based branch-and-bound framework and introduce two heuristics for solving the Tree Compression Problem as well as a primal heuristic especially tailored to column generation. Finally, we present computational experiments on several problem classes, e.g., Vertex Coloring and the k-Constrained Shortest Path problem. Our experiments show that straightforward reoptimization, i.e., without additional heuristics, provides no benefit in general. However, in combination with the techniques and methods presented in this thesis, we can accelerate the solving of a given sequence by a factor of up to 14. For this purpose, it is essential to take the differences between the objective functions into account and to restart the reoptimization, i.e., solve the subproblem from scratch, if the objective functions are not similar enough. Finally, we discuss the possibility of parallelizing the solving of the search frontier at the beginning of each solving process.}, language = {en} }

@misc{LieSullivanTeckentrup, author = {Lie, Han Cheng and Sullivan, T. J. and Teckentrup, Aretha}, title = {Random forward models and log-likelihoods in Bayesian inverse problems}, series = {SIAM/ASA Journal on Uncertainty Quantification}, volume = {6}, journal = {SIAM/ASA Journal on Uncertainty Quantification}, number = {4}, issn = {1438-0064}, doi = {10.1137/18M1166523}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-66324}, pages = {1600 -- 1629}, abstract = {We consider the use of randomised forward models and log-likelihoods within the Bayesian approach to inverse problems. Such random approximations to the exact forward model or log-likelihood arise naturally when a computationally expensive model is approximated using a cheaper stochastic surrogate, as in Gaussian process emulation (kriging), or in the field of probabilistic numerical methods. We show that the Hellinger distance between the exact and approximate Bayesian posteriors is bounded by moments of the difference between the true and approximate log-likelihoods. Example applications of these stability results are given for randomised misfit models in large data applications and the probabilistic solution of ordinary differential equations.}, language = {en} }

@misc{HillerVredeveld, author = {Hiller, Benjamin and Vredeveld, Tjark}, title = {Probabilistic alternatives for competitive analysis}, issn = {1438-0064}, doi = {10.1007/s00450-011-0149-1}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-15131}, abstract = {In the last 20 years, competitive analysis has become the main tool for analyzing the quality of online algorithms.
Despite this, competitive analysis has also been criticized: it sometimes cannot discriminate between algorithms that exhibit significantly different empirical behavior, or it even favors an algorithm that is worse from an empirical point of view. Therefore, there have been several approaches to circumvent these drawbacks. In this survey, we discuss probabilistic alternatives for competitive analysis.}, language = {en} }

@misc{GriewankStreubelLehmannetal., author = {Griewank, Andreas and Streubel, Tom and Lehmann, Lutz and Hasenfelder, Richard and Radons, Manuel}, title = {Piecewise linear secant approximation via Algorithmic Piecewise Differentiation}, issn = {1438-0064}, doi = {10.1080/10556788.2017.1387256}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-61642}, abstract = {It is shown how piecewise differentiable functions \(F: \mathbb{R}^n \to \mathbb{R}^m\) that are defined by evaluation programs can be approximated locally by a piecewise linear model based on a pair of sample points \(\check{x}\) and \(\hat{x}\). We show that the discrepancy between function and model at any point \(x\) is of the bilinear order \(O(\|x - \check{x}\| \, \|x - \hat{x}\|)\). This is a little surprising since \(x \in \mathbb{R}^n\) may vary over the whole Euclidean space, and we utilize only two function samples \(\check{F} = F(\check{x})\) and \(\hat{F} = F(\hat{x})\), as well as the intermediates computed during their evaluation. As an application of the piecewise linearization procedure, we devise a generalized Newton's method based on successive piecewise linearization and prove sufficient conditions for its convergence, with convergence rates equaling those of semismooth Newton. We conclude with the derivation of formulas for the numerically stable implementation of the piecewise linearization methods developed here.}, language = {en} }

@misc{WiebelVosHege, author = {Wiebel, Alexander and Vos, Frans M. and Hege, Hans-Christian}, title = {Perception-Oriented Picking of Structures in Direct Volumetric Renderings}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-14343}, number = {11-45}, abstract = {Radiologists from all application areas are trained to read slice-based visualizations of 3D medical image data. Despite the numerous examples of sophisticated three-dimensional renderings, especially all variants of direct volume rendering, such methods are often considered not very useful by radiologists, who prefer slice-based visualization. Only recently have there been attempts to bridge this gap between 2D and 3D renderings. These attempts include specialized techniques for volume picking that result in repositioning slices. In this paper, we present a new volume picking technique that, in contrast to previous work, does not require pre-segmented data or metadata. The positions picked by our method are based solely on the data itself, the transfer function and, most importantly, on the way the volumetric rendering is perceived by viewers. To demonstrate the usefulness of the proposed method, we apply it to automatically reposition slices in an abdominal MRI scan, a data set from a flow simulation and a number of other volumetric scalar fields.
Furthermore, we discuss how the method can be implemented in combination with various volumetric rendering techniques.}, language = {en} }

@misc{LindnerReisch, author = {Lindner, Niels and Reisch, Julian}, title = {Parameterized Complexity of Periodic Timetabling}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-78314}, abstract = {Public transportation networks are typically operated with a periodic timetable. The Periodic Event Scheduling Problem (PESP) is the standard mathematical modelling tool for periodic timetabling. Since PESP can be solved in linear time on trees, it is a natural question to ask whether there are polynomial-time algorithms for input networks of bounded treewidth. We show that deciding the feasibility of a PESP instance is NP-hard even when the treewidth is 2, the branchwidth is 2, or the carvingwidth is 3. Analogous results hold for the optimization of reduced PESP instances, where the feasibility problem is trivial. To complete the picture, we present two pseudo-polynomial-time dynamic programming algorithms solving PESP on input networks with bounded tree- or branchwidth. We further analyze the parameterized complexity of PESP with bounded cyclomatic number, diameter, or vertex cover number. For event-activity networks with a special -- but standard -- structure, we give explicit and sharp bounds on the branchwidth in terms of the maximum degree and the carvingwidth of an underlying line network. Finally, we investigate several parameters on the smallest instance of the benchmarking library PESPlib.}, language = {en} }

@misc{MunguiaOxberryRajanetal., author = {Munguia, Lluis-Miquel and Oxberry, Geoffrey and Rajan, Deepak and Shinano, Yuji}, title = {Parallel PIPS-SBB: Multi-Level Parallelism For Stochastic Mixed-Integer Programs}, number = {ZIB-Report 17-58}, issn = {1438-0064}, doi = {10.1007/s10589-019-00074-0}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-65517}, abstract = {PIPS-SBB is a distributed-memory parallel solver with a scalable data distribution paradigm. It is designed to solve MIPs with a dual-block angular structure, which is characteristic of deterministic-equivalent Stochastic Mixed-Integer Programs (SMIPs). In this paper, we present two different parallelizations of Branch \& Bound (B\&B), implementing both as extensions of PIPS-SBB and thus adding an additional layer of parallelism. In the first of the proposed frameworks, PIPS-PSBB, the coordination and load-balancing of the different optimization workers is done in a decentralized fashion. This new framework is designed to ensure that all available cores are processing the most promising parts of the B\&B tree. The second, ug[PIPS-SBB,MPI], is a parallel implementation using the Ubiquity Generator (UG), a universal framework for parallelizing B\&B tree search that has been successfully applied to other MIP solvers. We show how leveraging multiple levels of parallelism can potentially improve scaling performance beyond thousands of cores.}, language = {en} }

@misc{BauschertBuesingD'Andreagiovannietal., author = {Bauschert, Thomas and B{\"u}sing, Christina and D'Andreagiovanni, Fabio and Koster, Arie M.C.A.
and Kutschka, Manuel and Steglich, Uwe}, title = {Network Planning under Demand Uncertainty with Robust Optimization}, issn = {1438-0064}, doi = {10.1109/MCOM.2014.6736760}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-42557}, abstract = {The planning of a communication network inevitably depends on the quality of both the planning tool and the demand forecast used. In this article, we show by example how the emerging area of Robust Optimization can advance network planning through a more accurate mathematical description of demand uncertainty. After a general introduction of the concept and its application to a basic network design problem, we present two applications: multi-layer and mixed-line-rate network design. We conclude with a discussion of extensions of the robustness concept to increase the accuracy of handling uncertainties.}, language = {en} }

@misc{CostaMantonOstrovskyetal., author = {Costa, Marta and Manton, James D. and Ostrovsky, Aaron D. and Prohaska, Steffen and Jefferis, Gregory S.X.E.}, title = {NBLAST: Rapid, sensitive comparison of neuronal structure and construction of neuron family databases}, issn = {1438-0064}, doi = {10.1016/j.neuron.2016.06.012}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-59672}, abstract = {Neural circuit mapping is generating datasets of tens of thousands of labeled neurons. New computational tools are needed to search and organize these data. We present NBLAST, a sensitive and rapid algorithm for measuring pairwise neuronal similarity. NBLAST considers both position and local geometry, decomposing neurons into short segments; matched segments are scored using a probabilistic scoring matrix defined by the statistics of matches and non-matches. We validated NBLAST on a published dataset of 16,129 single Drosophila neurons. NBLAST can distinguish neuronal types down to the finest level (single identified neurons) without a priori information. Cluster analysis of extensively studied neuronal classes identified new types and unreported topographical features. Fully automated clustering organized the validation dataset into 1052 clusters, many of which map onto previously described neuronal types. NBLAST supports additional query types, including searching neurons against transgene expression patterns. Finally, we show that NBLAST is effective with data from other invertebrates and zebrafish.}, language = {en} }

@misc{D'AndreagiovanniRaymond, author = {D'Andreagiovanni, Fabio and Raymond, Annie}, title = {Multiband Robust Optimization and its Adoption in Harvest Scheduling}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-43380}, abstract = {A central assumption in classical optimization is that all the input data of a problem are exact. However, in many real-world problems, the input data are subject to uncertainty. In such situations, neglecting uncertainty may lead to nominally optimal solutions that are actually suboptimal or even infeasible. Robust optimization offers a remedy for optimization under uncertainty by considering only the subset of solutions protected against the data deviations. In this paper, we provide an overview of the main theoretical results of multiband robustness, a new robust optimization model that extends and refines the classical theory introduced by Bertsimas and Sim.
After introducing some new results for the special case of pure binary programs, we focus on the harvest scheduling problem and show how multiband robustness can be adopted to tackle the uncertainty affecting the volume of produced timber and to reduce the price of robustness.}, language = {en} }

@misc{AmbellanHanikvonTycowicz, author = {Ambellan, Felix and Hanik, Martin and von Tycowicz, Christoph}, title = {Morphomatics: Geometric morphometrics in non-Euclidean shape spaces}, doi = {10.12752/8544}, abstract = {Morphomatics is an open-source Python library for (statistical) shape analysis developed within the geometric data analysis and processing research group at Zuse Institute Berlin. It contains prototype implementations of intrinsic manifold-based methods that are highly consistent and avoid the influence of unwanted effects such as bias due to arbitrary choices of coordinates.}, language = {en} }

@misc{KaplanLauferProhaskaetal., author = {Kaplan, Bernhard and Laufer, Jan and Prohaska, Steffen and Buchmann, Jens}, title = {Monte-Carlo-based inversion scheme for 3D quantitative photoacoustic tomography}, issn = {1438-0064}, doi = {10.1117/12.2251945}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-62318}, abstract = {The goal of quantitative photoacoustic tomography (qPAT) is to recover maps of the chromophore distributions from multiwavelength images of the initial pressure. Model-based inversions that incorporate the physical processes underlying the photoacoustic (PA) signal generation represent a promising approach. Monte-Carlo models of the light transport are computationally expensive, but provide accurate predictions of the fluence distribution, especially in the ballistic and quasi-ballistic regimes. Here, we focus on the inverse problem of 3D qPAT of blood oxygenation and investigate the application of the Monte-Carlo method in a model-based inversion scheme. A forward model of the light transport based on the MCX simulator and acoustic propagation modeled by the k-Wave toolbox was used to generate a PA image data set acquired in a tissue phantom over a planar detection geometry. The combination of the optical and acoustic models is shown to account for limited-view artifacts. In addition, the errors in the fluence due to, for example, partial volume artifacts and absorbers immediately adjacent to the region of interest are investigated. To accomplish large-scale inversions in 3D, the number of degrees of freedom is reduced by applying image segmentation to the initial pressure distribution to extract a limited number of regions with homogeneous optical parameters. The absorber concentration in the tissue phantom was estimated using a coordinate descent parameter search based on the comparison between measured and modeled PA spectra. The relative concentrations estimated using this approach lie within 5 \% of the known concentrations.
Finally, we discuss the feasibility of this approach to recover the blood oxygenation from experimental data.}, language = {en} }

@misc{TateiwaShinanoYasudaetal., author = {Tateiwa, Nariaki and Shinano, Yuji and Yasuda, Masaya and Kaji, Shizuo and Yamamura, Keiichiro and Fujisawa, Katsuki}, title = {Massively parallel sharing lattice basis reduction}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-85209}, abstract = {For cryptanalysis of lattice-based schemes, the performance evaluation of lattice basis reduction using high-performance computers is becoming increasingly important for determining the security level. We propose a distributed and asynchronous parallel reduction algorithm based on randomization and DeepBKZ, which is an improved variant of the block Korkine-Zolotarev (BKZ) reduction algorithm. Randomized copies of a lattice basis are distributed to up to 103,680 cores and independently reduced in parallel, while some basis vectors are shared asynchronously among all processes via MPI. There is a trade-off between randomization and information sharing; if a substantial amount of information is shared, all processes will work on the same problem, thereby diminishing the benefit of parallelization. To monitor this balance between randomness and sharing, we propose a metric to quantify the variety of lattice bases. We empirically find an optimal sharing parameter for high-dimensional lattices. We demonstrate the efficacy of our proposed parallel algorithm and implementation with respect to both performance and scalability through our experiments.}, language = {en} }

@misc{ClasenPaarProhaska, author = {Clasen, Malte and Paar, Philip and Prohaska, Steffen}, title = {Level of Detail for Trees Using Clustered Ellipsoids}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-14251}, number = {11-41}, abstract = {We present a level-of-detail method for trees based on ellipsoids and lines. We leverage the Expectation Maximization algorithm with a Gaussian Mixture Model to create a hierarchy of high-quality leaf clusterings, while the branches are simplified using agglomerative bottom-up clustering to preserve connectivity. The simplification runs in a preprocessing step and requires no human interaction. For a fly-by over and through a scene of 10k trees, our method renders at 40 ms/frame on average, up to 6 times faster than billboard clouds, with comparable artifacts.}, language = {en} }

@misc{Krause, type = {Master Thesis}, author = {Krause, Jan}, title = {Investigation of Options to Handle 3D MRI Data via Convolutional Neural Networks: Application in Knee Osteoarthritis Classification}, pages = {127}, language = {en} }