@misc{EhlkeRammLameckeretal.2013,
  author = {Ehlke, Moritz and Ramm, Heiko and Lamecker, Hans and Hege, Hans-Christian and Zachow, Stefan},
  title = {Fast Generation of Virtual X-ray Images from Deformable Tetrahedral Meshes},
  issn = {1438-0064},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-41896},
  year = {2013},
  abstract = {We propose a novel GPU-based approach to render virtual X-ray projections of deformable tetrahedral meshes. These meshes represent the shape and the internal density distribution of a particular anatomical structure and are derived from statistical shape and intensity models (SSIMs). We apply our method to improve the geometric reconstruction of 3D anatomy (e.g.\ pelvic bone) from 2D X-ray images. For that purpose, shape and density of a tetrahedral mesh are varied and virtual X-ray projections are generated within an optimization process until the similarity between the computed virtual X-ray and the respective anatomy depicted in a given clinical X-ray is maximized. The OpenGL implementation presented in this work deforms and projects tetrahedral meshes of high resolution (200,000+ tetrahedra) at interactive rates. It generates virtual X-rays that accurately depict the density distribution of an anatomy of interest. Compared to existing methods that accumulate X-ray attenuation in deformable meshes, our novel approach significantly boosts the deformation/projection performance. The proposed projection algorithm scales better with respect to mesh resolution and complexity of the density distribution, and the combined deformation and projection on the GPU scales better with respect to the number of deformation parameters. The gain in performance allows for a larger number of cycles in the optimization process and consequently reduces the risk of getting stuck in a local optimum. We believe that our approach contributes to orthopedic surgery, where 3D anatomy information needs to be extracted from 2D X-rays to support surgeons in better planning joint replacements.},
  language = {en}
}

@misc{WendeSteinke2013,
  author = {Wende, Florian and Steinke, Thomas},
  title = {Swendsen-Wang Multi-Cluster Algorithm for the 2D/3D Ising Model on Xeon Phi and GPU},
  issn = {1438-0064},
  doi = {10.1145/2503210.2503254},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-42187},
  year = {2013},
  abstract = {Simulations of the critical Ising model by means of local update algorithms suffer from critical slowing down. One way to partially compensate for the influence of this phenomenon on the runtime of simulations is to use increasingly faster and parallel computer hardware. Another approach is to use algorithms that do not suffer from critical slowing down, such as cluster algorithms. This paper reports on the Swendsen-Wang multi-cluster algorithm on the Intel Xeon Phi coprocessor 5110P, the Nvidia Tesla M2090 GPU, and x86 multi-core CPUs. We present shared-memory versions of this algorithm for the simulation of the two- and three-dimensional Ising model. We use a combination of local cluster search and global label reduction by means of atomic hardware primitives. Furthermore, we describe an MPI version of the algorithm on Xeon Phi and CPU, respectively. Significant performance improvements over known implementations of the Swendsen-Wang algorithm are demonstrated.},
  language = {en}
}
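The Swendsen-Wang abstract above summarizes the parallelization strategy (local cluster search plus global label reduction via atomic primitives) without restating the underlying update rule. For reference, the following minimal serial sketch shows one Swendsen-Wang sweep for the 2D Ising model; it is only an illustration of the textbook algorithm, not the Xeon Phi/GPU implementation of the paper, and all function names and parameters are our own assumptions.

# Illustrative serial reference sketch of one Swendsen-Wang sweep for the 2D Ising
# model with periodic boundaries; NOT the shared-memory/MPI code described in the paper.
import math
import random

def swendsen_wang_sweep(spins, L, beta, J=1.0):
    p_bond = 1.0 - math.exp(-2.0 * beta * J)    # bond activation probability
    parent = list(range(L * L))                  # union-find forest over lattice sites

    def find(i):
        while parent[i] != i:
            parent[i] = parent[parent[i]]        # path halving
            i = parent[i]
        return i

    def union(i, j):
        ri, rj = find(i), find(j)
        if ri != rj:
            parent[ri] = rj

    # activate bonds between equally oriented nearest neighbours
    for x in range(L):
        for y in range(L):
            i = x * L + y
            for j in (((x + 1) % L) * L + y, x * L + (y + 1) % L):
                if spins[i] == spins[j] and random.random() < p_bond:
                    union(i, j)

    # flip every cluster independently with probability 1/2
    flip = {}
    for i in range(L * L):
        r = find(i)
        if r not in flip:
            flip[r] = random.random() < 0.5
        if flip[r]:
            spins[i] = -spins[i]
    return spins

# toy usage near the 2D critical point, beta_c = ln(1 + sqrt(2)) / 2
L = 16
spins = [random.choice((-1, 1)) for _ in range(L * L)]
spins = swendsen_wang_sweep(spins, L, beta=0.4406868)

A parallel version along the lines of the paper would replace the serial union-find by per-block cluster search followed by a global label reduction based on atomic hardware primitives.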
@misc{HaslerPetersKottig2013,
  author = {Hasler, Tim and Peters-Kottig, Wolfgang},
  title = {Vorschrift oder Thunfisch? - Zur Langzeitverf{\"u}gbarkeit von Forschungsdaten},
  issn = {1438-0064},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-43010},
  year = {2013},
  abstract = {"I'm going to make him an offer he can't refuse." This statement from an entirely different context can be transferred quite aptly to the wish of service providers, and the purpose of services, for data producers in research data management. While pressure to hand over data is not helpful, opening up an option very much is. This article is concerned with the sustainability of research and its data, based on the insights and experiences gained in the first phase of the DFG project EWIG. [Fn 01] A selection of pitfalls in research data management is presented, drawing on findings from expert interviews and on our own experience in building long-term archiving (LZA) workflows. First concepts developed in EWIG for transferring data from differently structured data sources into the "long-term domain" are described.},
  language = {de}
}

@misc{DercksenHegeOberlaender2013,
  author = {Dercksen, Vincent J. and Hege, Hans-Christian and Oberlaender, Marcel},
  title = {The Filament Editor: An Interactive Software Environment for Visualization, Proof-Editing and Analysis of 3D Neuron Morphology},
  issn = {1438-0064},
  doi = {10.1007/s12021-013-9213-2},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-43157},
  year = {2013},
  abstract = {Neuroanatomical analysis, such as classification of cell types, depends on reliable reconstruction of large numbers of complete 3D dendrite and axon morphologies. At present, the majority of neuron reconstructions are obtained from preparations in a single tissue slice in vitro, thus suffering from cut-off dendrites and, more dramatically, cut-off axons. In general, axons can innervate volumes of several cubic millimeters and may reach path lengths of tens of centimeters. Thus, their complete reconstruction requires in vivo labeling, histological sectioning and imaging of large fields of view. Unfortunately, anisotropic background conditions across such large tissue volumes, as well as faintly labeled thin neurites, result in incomplete or erroneous automated tracings and even lead experts to make annotation errors during manual reconstructions. Consequently, tracing reliability remains the major bottleneck for reconstructing complete 3D neuron morphologies. Here, we present a novel set of tools, integrated into a software environment named 'Filament Editor', for creating reliable neuron tracings from sparsely labeled in vivo datasets. The Filament Editor allows for simultaneous visualization of complex neuronal tracings and image data in a 3D viewer, proof-editing of neuronal tracings, alignment and interconnection across sections, and morphometric analysis in relation to 3D anatomical reference structures. We illustrate the functionality of the Filament Editor using the example of in vivo labeled axons and demonstrate that, for the exemplary dataset, the final tracing results after proof-editing are independent of the expertise of the human operator.},
  language = {en}
}
@misc{Keidel2013,
  type = {Master Thesis},
  author = {Keidel, Stefan},
  title = {Snapshots in Scalaris},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-42282},
  school = {Zuse Institute Berlin (ZIB)},
  pages = {87},
  year = {2013},
  abstract = {One of the biggest obstacles to the practical use of Scalaris, a scalable implementation of a distributed hash table with support for transactions, is the lack of a procedure for capturing a consistent state of the entire system. In this thesis we present a simple protocol that accomplishes this task and that, owing to the approach we have chosen, is easy to implement. As a starting point we select, from a number of "classical" snapshot algorithms, a procedure designed by Mattern in 1993 that is based on the algorithm of Lai and Yang. This decision rests on a thorough analysis of the protocols, taking the architecture of the existing software into account. In the next step we use our complete knowledge of the internals of the Scalaris transaction system to simplify the procedure with respect to usability and implementation complexity, without weakening the requirements on the captured state. Instead of a loose collection of the local states of the individual participating nodes, we can ultimately produce one large key-value table as the result, which is consistent, easy to process further, and corresponds to a state the system could once have been in. After implementing the procedure in software, we evaluate the results with respect to the impact on the performance of the overall system and discuss possible further developments.},
  language = {de}
}

@misc{Hoffmann2013,
  type = {Master Thesis},
  author = {Hoffmann, Marie},
  title = {Approximate Algorithms for Distributed Systems},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-42370},
  school = {Zuse Institute Berlin (ZIB)},
  pages = {75},
  year = {2013},
  abstract = {Peer-to-peer (P2P) systems form a special class of distributed systems. Typically, nodes in a P2P system are organized in a flat hierarchy and share the same responsibilities. In this thesis we focus on three problems that occur in P2P systems: the storage of data replicas, quantile computation on distributed data streams, and churn rate estimation. Data replication is one of the oldest techniques to maintain stored data in a P2P system and to reply to read requests. Applications that use data replication include distributed databases. They are part of an abstract overlay network and do not see the underlying network topology. The question is how to place a set of data replicas in a distributed system such that response times and failure probabilities become minimal, without a priori knowledge of the topology of the underlying hardware nodes. We show how to utilize an agglomerative clustering procedure to reach this goal. State-of-the-art algorithms for the aggregation of distributed data or data streams require synchronization at some point, or merge data aggregates hierarchically, which does not fit the basic principle of P2P systems. We test whether randomized communication and merging of data aggregates are able to produce the same results. These data aggregates serve to answer quantile queries. Constituting and maintaining a P2P overlay network requires frequent message passing. One goal is to minimize the number of maintenance messages, since they consume bandwidth that may be missing for other applications. The lower bound on the frequency of maintenance messages depends strongly on the churn rate of peers. We show how to estimate the mean lifetime of peers and how to reduce the frequency of maintenance messages without destabilizing the infrastructure of the constituting overlay.},
  language = {en}
}
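The Hoffmann abstract above mentions an agglomerative clustering procedure for placing data replicas without a priori knowledge of the underlying topology. The following is a minimal sketch of such a placement step, assuming that pairwise latency measurements between nodes are available; the function names, the medoid rule and the use of SciPy are our own assumptions for illustration, not the algorithm of the thesis.

# Illustrative sketch: place replicas via average-linkage agglomerative clustering of
# pairwise node latencies, then pick one medoid per cluster. Not the thesis implementation.
import numpy as np
from scipy.cluster.hierarchy import linkage, fcluster
from scipy.spatial.distance import squareform

def place_replicas(latency, n_replicas):
    """latency: symmetric (n x n) matrix of pairwise node latencies with zero diagonal."""
    condensed = squareform(latency, checks=False)    # condensed distance vector
    tree = linkage(condensed, method='average')      # agglomerative (hierarchical) clustering
    labels = fcluster(tree, t=n_replicas, criterion='maxclust')
    replicas = []
    for c in np.unique(labels):
        members = np.where(labels == c)[0]
        # medoid: the member with the smallest total latency to the rest of its cluster
        sub = latency[np.ix_(members, members)]
        replicas.append(int(members[np.argmin(sub.sum(axis=1))]))
    return replicas

# toy usage with a random symmetric latency matrix for 8 nodes and 3 replicas
rng = np.random.default_rng(0)
m = rng.uniform(1, 50, size=(8, 8))
lat = np.triu(m, 1)
lat = lat + lat.T
print(place_replicas(lat, n_replicas=3))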
@misc{D'AndreagiovanniKrolikowskiPulaj2013,
  author = {D'Andreagiovanni, Fabio and Krolikowski, Jonatan and Pulaj, Jonad},
  title = {A hybrid primal heuristic for Robust Multiperiod Network Design},
  issn = {1438-0064},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-44081},
  year = {2013},
  abstract = {We investigate the Robust Multiperiod Network Design Problem, a generalization of the classical Capacitated Network Design Problem that additionally considers multiple design periods and provides solutions protected against traffic uncertainty. Given the intrinsic difficulty of the problem, which proves challenging even for state-of-the-art commercial solvers, we propose a hybrid primal heuristic based on the combination of ant colony optimization and an exact large neighborhood search. Computational experiments on a set of realistic instances from the SNDlib show that our heuristic can find solutions of extremely good quality with a low optimality gap.},
  language = {en}
}

@misc{D'AndreagiovanniRaymond2013,
  author = {D'Andreagiovanni, Fabio and Raymond, Annie},
  title = {Multiband Robust Optimization and its Adoption in Harvest Scheduling},
  issn = {1438-0064},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-43380},
  year = {2013},
  abstract = {A central assumption in classical optimization is that all the input data of a problem are exact. However, in many real-world problems the input data are subject to uncertainty. In such situations, neglecting uncertainty may lead to nominally optimal solutions that are actually suboptimal or even infeasible. Robust optimization offers a remedy for optimization under uncertainty by considering only the subset of solutions protected against the data deviations. In this paper, we provide an overview of the main theoretical results of multiband robustness, a new robust optimization model that extends and refines the classical theory introduced by Bertsimas and Sim. After introducing some new results for the special case of pure binary programs, we focus on the harvest scheduling problem and show how multiband robustness can be adopted to tackle the uncertainty affecting the volume of produced timber and to reduce the price of robustness.},
  language = {en}
}
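The multiband robustness entries above and below refer to the Gamma-robustness model of Bertsimas and Sim as the starting point. The following LaTeX fragment sketches, for a single uncertain constraint, the classical Gamma-robust counterpart and the kind of generalization that multiband robustness pursues; the notation (band deviations d_j^k, band cardinality bounds l_k, u_k) is purely illustrative and is not taken verbatim from these papers.

% Bertsimas-Sim (Gamma-robust) counterpart of a constraint \sum_j a_j x_j <= b,
% with nonnegative deviations \hat{a}_j and deviation budget \Gamma:
\[
  \sum_{j \in J} a_j x_j
  + \max_{S \subseteq J,\ |S| \le \Gamma} \sum_{j \in S} \hat{a}_j x_j
  \;\le\; b .
\]
% Multiband robustness (illustrative notation): the deviation range of each
% coefficient is partitioned into K bands with band values d_j^k, and the number
% of coefficients deviating in band k is bounded between l_k and u_k:
\[
  \sum_{j \in J} a_j x_j
  + \max \Bigl\{ \sum_{k=1}^{K} \sum_{j \in S_k} d_j^{k} x_j \;:\;
      S_1,\dots,S_K \subseteq J \text{ pairwise disjoint},\;
      l_k \le |S_k| \le u_k \ \forall k \Bigr\}
  \;\le\; b .
\]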
@misc{BuesingD'AndreagiovanniRaymond2013,
  author = {B{\"u}sing, Christina and D'Andreagiovanni, Fabio and Raymond, Annie},
  title = {0-1 Multiband Robust Optimization},
  issn = {1438-0064},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-44093},
  year = {2013},
  abstract = {We provide an overview of new theoretical results that we obtained while further investigating multiband robust optimization, a new model for robust optimization that we recently proposed to tackle uncertainty in mixed-integer linear programming. This new model extends and refines the classical Gamma-robustness model of Bertsimas and Sim and is particularly useful in the common case of arbitrary asymmetric distributions of the uncertainty. Here, we focus on uncertain 0-1 programs and analyze their robust counterparts when the uncertainty is represented through a multiband set. Our investigations were inspired by the needs of our industrial partners in the research project ROBUKOM.},
  language = {en}
}

@misc{BleyD'AndreagiovanniKarch2013,
  author = {Bley, Andreas and D'Andreagiovanni, Fabio and Karch, Daniel},
  title = {Scheduling technology migration in WDM Networks},
  issn = {1438-0064},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-42654},
  year = {2013},
  abstract = {The rapid technological evolution of telecommunication networks requires service providers to regularly update their technology, with the aim of remaining competitive in the marketplace. However, upgrading the technology in a network is not a trivial task. New hardware components need to be installed in the network, and during the installation network connectivity may be temporarily compromised. The Wavelength Division Multiplexing (WDM) technology, whose upgrade is considered here, shares fiber links among several optical connections, so tearing down a single link may disrupt several optical connections at once. When the upgrades involve large parts of a network, typically not all links can be upgraded in parallel, which may lead to an unavoidably longer disruption of some connections. A bad schedule for the overall endeavor, however, can dramatically increase the disconnection time of parts of the network, causing extended service disruption. In this contribution, we study the problem of finding a schedule of the fiber link upgrades that minimizes the total service disruption time. To the best of our knowledge, this problem has not yet been formalized and investigated. The aim of our work is to close this gap by presenting a mathematical optimization model for the problem and an innovative solution algorithm that tackles the intrinsic difficulties of the problem. Computational experience on realistic instances completes our study. Our investigations have been driven by the real needs of DFN, the operator of the German National Research and Education Network and our partner in the BMBF research project ROBUKOM (http://www.robukom.de/).},
  language = {en}
}
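The migration-scheduling abstract above states the objective (upgrade every fiber link while minimizing the total service disruption time) but, as an abstract, does not reproduce the model. Purely as an illustration of this kind of problem, and explicitly not the formulation of Bley, D'Andreagiovanni and Karch, a generic time-indexed sketch could read as follows; all symbols are our own assumptions.

% Illustrative time-indexed sketch (our notation, not the paper's model):
% y_{e,t} = 1 if link e is upgraded in period t, z_{c,t} = 1 if connection c is
% disrupted in period t; P(c) is the set of links used by connection c, and at
% most U links can be upgraded per period.
\[
  \min \sum_{c \in C} \sum_{t \in T} z_{c,t}
\]
\[
  \sum_{t \in T} y_{e,t} = 1 \quad \forall e \in E, \qquad
  \sum_{e \in E} y_{e,t} \le U \quad \forall t \in T,
\]
\[
  z_{c,t} \ge y_{e,t} \quad \forall c \in C,\ e \in P(c),\ t \in T, \qquad
  y_{e,t},\, z_{c,t} \in \{0,1\}.
\]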