@misc{ClasenPaarProhaska2011,
  author = {Clasen, Malte and Paar, Philip and Prohaska, Steffen},
  title = {Level of Detail for Trees Using Clustered Ellipsoids},
  issn = {1438-0064},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-14251},
  number = {11-41},
  year = {2011},
  abstract = {We present a level of detail method for trees based on ellipsoids and lines. We leverage the Expectation Maximization algorithm with a Gaussian Mixture Model to create a hierarchy of high-quality leaf clusterings, while the branches are simplified using agglomerative bottom-up clustering to preserve the connectivity. The simplification runs in a preprocessing step and requires no human interaction. For a fly-by over and through a scene of 10k trees, our method renders at 40 ms/frame on average, up to 6 times faster than billboard clouds with comparable artifacts.},
  language = {en}
}

@misc{WiebelVosHege2011,
  author = {Wiebel, Alexander and Vos, Frans M. and Hege, Hans-Christian},
  title = {Perception-Oriented Picking of Structures in Direct Volumetric Renderings},
  issn = {1438-0064},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-14343},
  number = {11-45},
  year = {2011},
  abstract = {Radiologists from all application areas are trained to read slice-based visualizations of 3D medical image data. Despite the numerous examples of sophisticated three-dimensional renderings, especially all variants of direct volume rendering, such methods are often considered not very useful by radiologists, who prefer slice-based visualization. Just recently there have been attempts to bridge this gap between 2D and 3D renderings. These attempts include specialized techniques for volume picking that result in repositioning slices. In this paper, we present a new volume picking technique that, in contrast to previous work, does not require pre-segmented data or metadata. The positions picked by our method are based solely on the data itself, the transfer function and, most importantly, on the way the volumetric rendering is perceived by viewers. To demonstrate the usefulness of the proposed method, we apply it to automatically reposition slices in an abdominal MRI scan, a data set from a flow simulation and a number of other volumetric scalar fields. Furthermore, we discuss how the method can be implemented in combination with various volumetric rendering techniques.},
  language = {en}
}

@misc{EhlkeRammLameckeretal.2013,
  author = {Ehlke, Moritz and Ramm, Heiko and Lamecker, Hans and Hege, Hans-Christian and Zachow, Stefan},
  title = {Fast Generation of Virtual X-ray Images from Deformable Tetrahedral Meshes},
  issn = {1438-0064},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-41896},
  year = {2013},
  abstract = {We propose a novel GPU-based approach to render virtual X-ray projections of deformable tetrahedral meshes. These meshes represent the shape and the internal density distribution of a particular anatomical structure and are derived from statistical shape and intensity models (SSIMs). We apply our method to improve the geometric reconstruction of 3D anatomy (e.g.\ the pelvic bone) from 2D X-ray images. For that purpose, the shape and density of a tetrahedral mesh are varied and virtual X-ray projections are generated within an optimization process until the similarity between the computed virtual X-ray and the respective anatomy depicted in a given clinical X-ray is maximized.
The OpenGL implementation presented in this work deforms and projects tetrahedral meshes of high resolution (200,000+ tetrahedra) at interactive rates. It generates virtual X-rays that accurately depict the density distribution of the anatomy of interest. Compared to existing methods that accumulate X-ray attenuation in deformable meshes, our novel approach significantly boosts the deformation/projection performance. The proposed projection algorithm scales better with respect to mesh resolution and the complexity of the density distribution, and the combined deformation and projection on the GPU scales better with respect to the number of deformation parameters. The gain in performance allows for a larger number of cycles in the optimization process and consequently reduces the risk of getting stuck in a local optimum. We believe that our approach can contribute to orthopedic surgery, where 3D anatomy information needs to be extracted from 2D X-rays to support surgeons in better planning joint replacements.},
  language = {en}
}

@misc{WendeSteinke2013,
  author = {Wende, Florian and Steinke, Thomas},
  title = {Swendsen-Wang Multi-Cluster Algorithm for the 2D/3D Ising Model on Xeon Phi and GPU},
  issn = {1438-0064},
  doi = {10.1145/2503210.2503254},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-42187},
  year = {2013},
  abstract = {Simulations of the critical Ising model by means of local update algorithms suffer from critical slowing down. One way to partially compensate for the influence of this phenomenon on the runtime of simulations is to use increasingly faster and parallel computer hardware. Another approach is to use algorithms that do not suffer from critical slowing down, such as cluster algorithms. This paper reports on the Swendsen-Wang multi-cluster algorithm on the Intel Xeon Phi coprocessor 5110P, the Nvidia Tesla M2090 GPU, and x86 multi-core CPUs. We present shared-memory versions of this algorithm for the simulation of the two- and three-dimensional Ising model. We use a combination of local cluster search and global label reduction by means of atomic hardware primitives. Further, we describe MPI versions of the algorithm for both Xeon Phi and CPU. Significant performance improvements over known implementations of the Swendsen-Wang algorithm are demonstrated.},
  language = {en}
}

@misc{HaslerPetersKottig2013,
  author = {Hasler, Tim and Peters-Kottig, Wolfgang},
  title = {Vorschrift oder Thunfisch? - Zur Langzeitverf{\"u}gbarkeit von Forschungsdaten},
  issn = {1438-0064},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-43010},
  year = {2013},
  abstract = {"I'm going to make him an offer he can't refuse." This statement from an entirely different context can quite aptly be carried over to describe the wish of service providers, and the purpose of services, for data producers in research data management: pressure to hand over data is not conducive, but opening up an option very much is. This article is concerned with understanding the sustainability of research and its data, drawing on the insights and experiences from the first phase of the DFG project EWIG. [Fn 01] A selection of pitfalls in research data management is presented, based on findings from expert interviews and our own experience in building workflows for long-term archiving (LZA).
Initial concepts in EWIG for transferring data from differently structured data sources into the "Langfristige Dom{\"a}ne" (long-term domain) are described.},
  language = {de}
}

@misc{DercksenHegeOberlaender2013,
  author = {Dercksen, Vincent J. and Hege, Hans-Christian and Oberlaender, Marcel},
  title = {The Filament Editor: An Interactive Software Environment for Visualization, Proof-Editing and Analysis of 3D Neuron Morphology},
  issn = {1438-0064},
  doi = {10.1007/s12021-013-9213-2},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-43157},
  year = {2013},
  abstract = {Neuroanatomical analysis, such as classification of cell types, depends on reliable reconstruction of large numbers of complete 3D dendrite and axon morphologies. At present, the majority of neuron reconstructions are obtained from preparations in a single tissue slice in vitro, thus suffering from cut-off dendrites and, more dramatically, cut-off axons. In general, axons can innervate volumes of several cubic millimeters and may reach path lengths of tens of centimeters. Thus, their complete reconstruction requires in vivo labeling, histological sectioning and imaging of large fields of view. Unfortunately, anisotropic background conditions across such large tissue volumes, as well as faintly labeled thin neurites, result in incomplete or erroneous automated tracings and even lead experts to make annotation errors during manual reconstructions. Consequently, tracing reliability remains the major bottleneck for reconstructing complete 3D neuron morphologies. Here, we present a novel set of tools, integrated into a software environment named 'Filament Editor', for creating reliable neuron tracings from sparsely labeled in vivo datasets. The Filament Editor allows for simultaneous visualization of complex neuronal tracings and image data in a 3D viewer, proof-editing of neuronal tracings, alignment and interconnection across sections, and morphometric analysis in relation to 3D anatomical reference structures. We illustrate the functionality of the Filament Editor using the example of in vivo labeled axons and demonstrate that for the exemplary dataset the final tracing results after proof-editing are independent of the expertise of the human operator.},
  language = {en}
}

@misc{QuerDonatiKelleretal.2017,
  author = {Quer, Jannes and Donati, Luca and Keller, Bettina and Weber, Marcus},
  title = {An automatic adaptive importance sampling algorithm for molecular dynamics in reaction coordinates},
  issn = {1438-0064},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-62075},
  year = {2017},
  abstract = {In this article we propose an adaptive importance sampling scheme for dynamical quantities of high-dimensional complex systems which are metastable. The main idea is to combine Metadynamics, a method from molecular dynamics simulation, with Girsanov's theorem from stochastic analysis. The proposed algorithm has two advantages compared to a standard estimator of dynamic quantities: firstly, it is possible to produce estimators with a lower variance, and secondly, we can speed up the sampling. One of the main problems in building importance sampling schemes for metastable systems is finding the metastable region in order to manipulate the potential accordingly.
Our method circumvents this problem by using an adapted version of the Metadynamics algorithm and thus creates a non-equilibrium dynamics that is used to sample the equilibrium quantities.},
  language = {en}
}

@misc{BaumLindowHegeetal.2017,
  author = {Baum, Daniel and Lindow, Norbert and Hege, Hans-Christian and Lepper, Verena and Siopi, Tzulia and Kutz, Frank and Mahlow, Kristin and Mahnke, Heinz-Eberhard},
  title = {Revealing hidden text in rolled and folded papyri},
  issn = {1438-0064},
  doi = {10.1007/s00339-017-0808-6},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-61826},
  year = {2017},
  abstract = {Ancient Egyptian papyri are often folded, rolled up or kept as small packages, sometimes even sealed. Physically unrolling or unfolding these packages might severely damage them. We demonstrate a way to gain access to the hidden script without physical unfolding by employing computed tomography and mathematical algorithms for virtual unrolling and unfolding. Our algorithmic approaches are combined with manual interaction. This provides the necessary flexibility to enable the unfolding of even complicated and partly damaged papyrus packages. In addition, it allows us to cope with challenges posed by the structure of ancient papyrus, which is rather irregular compared to other writing substrates like metallic foils or parchment. Unfolding of packages is done in two stages. In the first stage, we virtually invert the physical folding process step by step until the partially unfolded package is topologically equivalent to a scroll or a papyrus sheet folded only along one fold line. To minimize distortions at this stage, we apply the method of moving least squares. In the second stage, the papyrus is simply flattened, which requires the definition of a medial surface. We have applied our software framework to several papyri. In this work, we present the results of applying our approaches to mockup papyri that were either rolled or folded along perpendicular fold lines. In the case of the folded papyrus, our approach represents the first attempt to address the unfolding of such complicated folds.},
  language = {en}
}

@misc{CostaMantonOstrovskyetal.2016,
  author = {Costa, Marta and Manton, James D. and Ostrovsky, Aaron D. and Prohaska, Steffen and Jefferis, Gregory S.X.E.},
  title = {NBLAST: Rapid, sensitive comparison of neuronal structure and construction of neuron family databases},
  issn = {1438-0064},
  doi = {10.1016/j.neuron.2016.06.012},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-59672},
  year = {2016},
  abstract = {Neural circuit mapping is generating datasets of 10,000s of labeled neurons. New computational tools are needed to search and organize these data. We present NBLAST, a sensitive and rapid algorithm for measuring pairwise neuronal similarity. NBLAST considers both position and local geometry, decomposing neurons into short segments; matched segments are scored using a probabilistic scoring matrix defined by statistics of matches and non-matches. We validated NBLAST on a published dataset of 16,129 single Drosophila neurons. NBLAST can distinguish neuronal types down to the finest level (single identified neurons) without a priori information. Cluster analysis of extensively studied neuronal classes identified new types and unreported topographical features. Fully automated clustering organized the validation dataset into 1052 clusters, many of which map onto previously described neuronal types.
NBLAST supports additional query types, including searching neurons against transgene expression patterns. Finally, we show that NBLAST is effective with data from other invertebrates and zebrafish.},
  language = {en}
}

@misc{OezelKulkarniHasanetal.2019,
  author = {{\"O}zel, M. Neset and Kulkarni, Abhishek and Hasan, Amr and Brummer, Josephine and Moldenhauer, Marian and Daumann, Ilsa-Maria and Wolfenberg, Heike and Dercksen, Vincent J. and Kiral, F. Ridvan and Weiser, Martin and Prohaska, Steffen and von Kleist, Max and Hiesinger, Peter Robin},
  title = {Serial synapse formation through filopodial competition for synaptic seeding factors},
  issn = {1438-0064},
  doi = {10.1016/j.devcel.2019.06.014},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-74397},
  year = {2019},
  abstract = {Following axon pathfinding, growth cones transition from stochastic filopodial exploration to the formation of a limited number of synapses. How the interplay of filopodia and synapse assembly ensures robust connectivity in the brain has remained a challenging problem. Here, we developed a new 4D analysis method for filopodial dynamics and a data-driven computational model of synapse formation for R7 photoreceptor axons in developing Drosophila brains. Our live data support a 'serial synapse formation' model, where at any time point only a single 'synaptogenic' filopodium suppresses the synaptic competence of other filopodia through competition for synaptic seeding factors. Loss of the synaptic seeding factors Syd-1 and Liprin-α leads to a loss of this suppression, filopodial destabilization, and reduced synapse formation, which is sufficient to cause the destabilization of entire axon terminals. Our model provides a filopodial 'winner-takes-all' mechanism that ensures the formation of an appropriate number of synapses.},
  language = {en}
}