@misc{BuesingD'Andreagiovanni, author = {B{\"u}sing, Christina and D'Andreagiovanni, Fabio}, title = {A new theoretical framework for Robust Optimization under multi-band uncertainty}, issn = {1438-0064}, doi = {10.1007/978-3-319-00795-3_17}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-42644}, abstract = {We provide an overview of our main results on Linear Programming problems whose coefficient matrix is subject to uncertainty, where the uncertainty is modeled through a multi-band set. Such an uncertainty set generalizes the classical one proposed by Bertsimas and Sim and is particularly suitable in the common case of arbitrary non-symmetric distributions of the parameters. Our investigations were inspired by the practical needs of our industrial partner in ongoing projects focused on the design of robust telecommunications networks.}, language = {en} } @misc{BleyD'AndreagiovanniKarch, author = {Bley, Andreas and D'Andreagiovanni, Fabio and Karch, Daniel}, title = {Scheduling technology migration in WDM Networks}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-42654}, abstract = {The rapid technological evolution of telecommunication networks requires service providers to regularly update their technology in order to remain competitive in the marketplace. However, upgrading the technology in a network is not a trivial task. New hardware components need to be installed in the network, and during the installation network connectivity may be temporarily compromised. The Wavelength Division Multiplexing (WDM) technology, whose upgrade is considered here, shares fiber links among several optical connections, and tearing down a single link may disrupt several optical connections at once. When the upgrades involve large parts of a network, typically not all links can be upgraded in parallel, which may lead to an unavoidably longer disruption of some connections. A bad schedule for the overall endeavor, however, can dramatically increase the disconnection time of parts of the network, causing extended service disruption. In this contribution, we study the problem of finding a schedule of the fiber link upgrades that minimizes the total service disruption time. To the best of our knowledge, this problem has not yet been formalized and investigated. The aim of our work is to close this gap by presenting a mathematical optimization model for the problem and an innovative solution algorithm that tackles the intrinsic difficulties of the problem. Computational experience on realistic instances completes our study. Our investigations have been driven by the real needs of DFN, the operator of the German National Research and Education Network and our partner in the BMBF research project ROBUKOM (http://www.robukom.de/).}, language = {en} } @misc{ZakrzewskaD'AndreagiovanniRueppetal., author = {Zakrzewska, Anna and D'Andreagiovanni, Fabio and Ruepp, Sarah and Berger, Michael S.}, title = {Biobjective Optimization of Radio Access Technology Selection and Resource Allocation in Heterogeneous Wireless Networks}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-42675}, abstract = {We propose a novel optimization model for resource assignment in heterogeneous wireless networks. The model adopts two objective functions, simultaneously maximizing the number of served users and the minimum granted utility. A distinctive feature of our new model is that it considers two consecutive time slots, in order to include handover as an additional decision dimension.
Furthermore, the solution algorithm that we propose refines a heuristic solution approach recently proposed in the literature by performing a true joint optimization of the resources involved. The simulation study shows that the new model leads to a significant reduction in handover frequency when compared to a traditional scheme based on maximum SNR.}, language = {en} } @misc{D'AndreagiovanniRaymond, author = {D'Andreagiovanni, Fabio and Raymond, Annie}, title = {Multiband Robust Optimization and its Adoption in Harvest Scheduling}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-43380}, abstract = {A central assumption in classical optimization is that all the input data of a problem are exact. However, in many real-world problems, the input data are subject to uncertainty. In such situations, neglecting uncertainty may lead to nominally optimal solutions that are actually suboptimal or even infeasible. Robust optimization offers a remedy for optimization under uncertainty by considering only the subset of solutions protected against the data deviations. In this paper, we provide an overview of the main theoretical results of multiband robustness, a new robust optimization model that extends and refines the classical theory introduced by Bertsimas and Sim. After introducing some new results for the special case of pure binary programs, we focus on the harvest scheduling problem and show how multiband robustness can be adopted to tackle the uncertainty affecting the volume of produced timber and to obtain a reduction in the price of robustness.}, language = {en} } @misc{BauschertBuesingD'Andreagiovannietal., author = {Bauschert, Thomas and B{\"u}sing, Christina and D'Andreagiovanni, Fabio and Koster, Arie M.C.A. and Kutschka, Manuel and Steglich, Uwe}, title = {Network Planning under Demand Uncertainty with Robust Optimization}, issn = {1438-0064}, doi = {10.1109/MCOM.2014.6736760}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-42557}, abstract = {The planning of a communication network inevitably depends on the quality of both the planning tool and the demand forecast used. In this article, we show by example how the emerging area of Robust Optimization can advance network planning through a more accurate mathematical description of demand uncertainty. After a general introduction of the concept and its application to a basic network design problem, we present two applications: multi-layer and mixed-line-rate network design. We conclude with a discussion of extensions of the robustness concept to increase the accuracy of handling uncertainties.}, language = {en} } @misc{D'AndreagiovanniKrolikowskiPulaj, author = {D'Andreagiovanni, Fabio and Krolikowski, Jonatan and Pulaj, Jonad}, title = {A hybrid primal heuristic for Robust Multiperiod Network Design}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-44081}, abstract = {We investigate the Robust Multiperiod Network Design Problem, a generalization of the classical Capacitated Network Design Problem that additionally considers multiple design periods and provides solutions protected against traffic uncertainty. Given the intrinsic difficulty of the problem, which proves challenging even for state-of-the-art commercial solvers, we propose a hybrid primal heuristic based on the combination of ant colony optimization and an exact large neighborhood search.
Computational experiments on a set of realistic instances from the SNDlib show that our heuristic can find solutions of extremely good quality with a low optimality gap.}, language = {en} } @misc{BuesingD'AndreagiovanniRaymond, author = {B{\"u}sing, Christina and D'Andreagiovanni, Fabio and Raymond, Annie}, title = {0-1 Multiband Robust Optimization}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-44093}, abstract = {We provide an overview of new theoretical results that we obtained while further investigating multiband robust optimization, a new model for robust optimization that we recently proposed to tackle uncertainty in mixed-integer linear programming. This new model extends and refines the classical Gamma-robustness model of Bertsimas and Sim and is particularly useful in the common case of arbitrary asymmetric distributions of the uncertainty. Here, we focus on uncertain 0-1 programs and analyze their robust counterparts when the uncertainty is represented through a multiband set. Our investigations were inspired by the needs of our industrial partners in the research project ROBUKOM.}, language = {en} } @misc{EhlkeRammLameckeretal., author = {Ehlke, Moritz and Ramm, Heiko and Lamecker, Hans and Hege, Hans-Christian and Zachow, Stefan}, title = {Fast Generation of Virtual X-ray Images from Deformable Tetrahedral Meshes}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-41896}, abstract = {We propose a novel GPU-based approach to render virtual X-ray projections of deformable tetrahedral meshes. These meshes represent the shape and the internal density distribution of a particular anatomical structure and are derived from statistical shape and intensity models (SSIMs). We apply our method to improve the geometric reconstruction of 3D anatomy (e.g.\ pelvic bone) from 2D X-ray images. For that purpose, shape and density of a tetrahedral mesh are varied and virtual X-ray projections are generated within an optimization process until the similarity between the computed virtual X-ray and the respective anatomy depicted in a given clinical X-ray is maximized. The OpenGL implementation presented in this work deforms and projects tetrahedral meshes of high resolution (200,000+ tetrahedra) at interactive rates. It generates virtual X-rays that accurately depict the density distribution of an anatomy of interest. Compared to existing methods that accumulate X-ray attenuation in deformable meshes, our novel approach significantly boosts the deformation/projection performance. The proposed projection algorithm scales better with respect to mesh resolution and complexity of the density distribution, and the combined deformation and projection on the GPU scales better with respect to the number of deformation parameters. The gain in performance allows for a larger number of cycles in the optimization process and consequently reduces the risk of getting stuck in a local optimum. We believe that our approach can contribute to orthopedic surgery, where 3D anatomy information needs to be extracted from 2D X-rays to support surgeons in better planning joint replacements.}, language = {en} } @misc{Hoffmann, type = {Master Thesis}, author = {Hoffmann, Marie}, title = {Approximate Algorithms for Distributed Systems}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-42370}, school = {Zuse Institute Berlin (ZIB)}, pages = {75}, abstract = {Peer-to-peer (P2P) systems form a special class of distributed systems.
Typically, nodes in a P2P system are organized in a flat hierarchy and share the same responsibilities. In this thesis we focus on three problems that occur in P2P systems: the storage of data replicas, quantile computation on distributed data streams, and churn rate estimation. Data replication is one of the oldest techniques to maintain stored data in a P2P system and to reply to read requests. Applications that use data replication include distributed databases. They are part of an abstract overlay network and do not see the underlying network topology. The question is how to place a set of data replicas in a distributed system such that response times and failure probabilities become minimal without a priori knowledge of the topology of the underlying hardware nodes. We show how to utilize an agglomerative clustering procedure to reach this goal. State-of-the-art algorithms for the aggregation of distributed data or data streams require synchronization at some point, or merge data aggregates hierarchically, which does not fit the basic principle of P2P systems. We test whether randomized communication and merging of data aggregates are able to produce the same results. These data aggregates serve to answer quantile queries. Constituting and maintaining a P2P overlay network requires frequent message passing. It is a goal to minimize the number of maintenance messages, since they consume bandwidth that might be missing for other applications. The lower bound on the frequency of maintenance messages is highly dependent on the churn rate of peers. We show how to estimate the mean lifetime of peers and how to reduce the frequency of maintenance messages without destabilizing the infrastructure of the overlay.}, language = {en} } @misc{Keidel, type = {Master Thesis}, author = {Keidel, Stefan}, title = {Snapshots in Scalaris}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-42282}, school = {Zuse Institute Berlin (ZIB)}, pages = {87}, abstract = {One of the biggest obstacles to the practical use of Scalaris, a scalable implementation of a distributed hash table with support for transactions, is the lack of a procedure for capturing a consistent state of the entire system. In this thesis we present a simple protocol that accomplishes this task and that, owing to the approach we have chosen, is easy to implement. As a starting point, we select from a number of "classical" snapshot algorithms a procedure designed by Mattern in 1993, which is based on the algorithm of Lai and Yang. This decision is based on a thorough analysis of the protocols, taking the architecture of the existing software into account. In the next step, we use our complete knowledge of the internals of the transaction system of Scalaris to simplify the procedure with regard to usability and implementation complexity, without weakening the requirements on the captured state. Instead of a loose collection of local states of the individual participating nodes, we can in the end produce one large key-value table as the result, which is consistent, easy to process further, and corresponds to a state in which the system could once have been.
After implementing the procedure in software, we evaluate the results with regard to their impact on the performance of the overall system and discuss possible further developments.}, language = {de} } @misc{ShinanoHeinzVigerskeetal., author = {Shinano, Yuji and Heinz, Stefan and Vigerske, Stefan and Winkler, Michael}, title = {FiberSCIP - A shared memory parallelization of SCIP}, issn = {1438-0064}, doi = {10.1287/ijoc.2017.0762}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-42595}, abstract = {Recently, parallel computing environments have become increasingly popular. In order to benefit from parallel computing environments, we have to deploy our programs on them effectively. This paper focuses on a parallelization of SCIP (Solving Constraint Integer Programs), which is a MIP solver and constraint integer programming framework available in source code. There is a parallel extension of SCIP named ParaSCIP, which parallelizes SCIP on massively parallel distributed memory computing environments. This paper describes FiberSCIP, yet another parallel extension of SCIP, which utilizes multi-threaded parallel computation on shared memory computing environments, and makes the following contributions: First, the basic concept of having two parallel extensions, and the relationship between them and the parallelization framework provided by UG (Ubiquity Generator), is presented, including an implementation of deterministic parallelization. Second, the difficulties of achieving good performance that utilizes all resources of an actual computing environment, and the difficulties of evaluating the performance of the parallel solvers, are discussed. Third, a way to evaluate the performance of new algorithms and parameter settings of the parallel extensions is presented. Finally, the current performance of FiberSCIP for solving mixed-integer linear programs (MIPs) and mixed-integer non-linear programs (MINLPs) in parallel is demonstrated.}, language = {en} } @misc{HaslerPetersKottig, author = {Hasler, Tim and Peters-Kottig, Wolfgang}, title = {Vorschrift oder Thunfisch? - Zur Langzeitverf{\"u}gbarkeit von Forschungsdaten}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-43010}, abstract = {"I'm going to make him an offer he can't refuse." This statement from an entirely different context can be aptly transferred to the wishes of service providers and the purpose of services for data producers in research data management. While pressure to hand over data is hardly conducive, opening up an option very much is. This article deals with the understanding of the sustainability of research and its data, based on the insights and experiences gained in the first phase of the DFG project EWIG. [Fn 01] A selection of pitfalls in research data management is presented, drawing on findings from expert interviews and our own experience in setting up workflows for long-term preservation. First concepts developed in EWIG for transferring data from differently structured data sources into the "Langfristige Dom{\"a}ne" (long-term domain) are described.}, language = {de} } @misc{DercksenHegeOberlaender2013, author = {Dercksen, Vincent J. and Hege, Hans-Christian and Oberlaender, Marcel},
title = {The Filament Editor: An Interactive Software Environment for Visualization, Proof-Editing and Analysis of 3D Neuron Morphology}, issn = {1438-0064}, doi = {10.1007/s12021-013-9213-2}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-43157}, year = {2013}, abstract = {Neuroanatomical analysis, such as classification of cell types, depends on reliable reconstruction of large numbers of complete 3D dendrite and axon morphologies. At present, the majority of neuron reconstructions are obtained from preparations in a single tissue slice in vitro, thus suffering from cut-off dendrites and, more dramatically, cut-off axons. In general, axons can innervate volumes of several cubic millimeters and may reach path lengths of tens of centimeters. Thus, their complete reconstruction requires in vivo labeling, histological sectioning, and imaging of large fields of view. Unfortunately, anisotropic background conditions across such large tissue volumes, as well as faintly labeled thin neurites, result in incomplete or erroneous automated tracings and even lead experts to make annotation errors during manual reconstructions. Consequently, tracing reliability constitutes the major bottleneck for reconstructing complete 3D neuron morphologies. Here, we present a novel set of tools, integrated into a software environment named 'Filament Editor', for creating reliable neuron tracings from sparsely labeled in vivo datasets. The Filament Editor allows for simultaneous visualization of complex neuronal tracings and image data in a 3D viewer, proof-editing of neuronal tracings, alignment and interconnection across sections, and morphometric analysis in relation to 3D anatomical reference structures. We illustrate the functionality of the Filament Editor on the example of in vivo labeled axons and demonstrate that for the exemplary dataset the final tracing results after proof-editing are independent of the expertise of the human operator.}, language = {en} } @misc{WendeSteinke, author = {Wende, Florian and Steinke, Thomas}, title = {Swendsen-Wang Multi-Cluster Algorithm for the 2D/3D Ising Model on Xeon Phi and GPU}, issn = {1438-0064}, doi = {10.1145/2503210.2503254}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-42187}, abstract = {Simulations of the critical Ising model by means of local update algorithms suffer from critical slowing down. One way to partially compensate for the influence of this phenomenon on the runtime of simulations is to use increasingly fast and parallel computer hardware. Another approach is to use algorithms that do not suffer from critical slowing down, such as cluster algorithms. This paper reports on the Swendsen-Wang multi-cluster algorithm on the Intel Xeon Phi coprocessor 5110P, the Nvidia Tesla M2090 GPU, and x86 multi-core CPUs. We present shared memory versions of this algorithm for the simulation of the two- and three-dimensional Ising model. We use a combination of local cluster search and global label reduction by means of atomic hardware primitives. Further, we describe an MPI version of the algorithm on Xeon Phi and CPU, respectively.
Significant performance improvements over known implementations of the Swendsen-Wang algorithm are demonstrated.}, language = {en} } @masterthesis{Witzig, type = {Bachelor Thesis}, author = {Witzig, Jakob}, title = {Effiziente Reoptimierung in Branch\&Bound-Verfahren f{\"u}r die Steuerung von Aufz{\"u}gen}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-42210}, school = {Zuse Institute Berlin (ZIB)}, pages = {111}, abstract = {Nowadays, a large number of multi-story buildings are equipped with elevator groups. The so-called conventional systems are familiar to all of us: each arriving passenger presses one of the two direction buttons and thereby tells the underlying control algorithm his desired start floor and direction of travel. The passenger then boards the first elevator that arrives at the start floor with the same direction of travel and sufficient capacity. The desired destination floor is communicated to the system only after the passenger has entered the cabin. Besides these conventional systems, there are elevator groups with destination call control. The distinctive feature of a destination call system is that an arriving passenger specifies his desired destination floor already at the start floor and receives feedback from the system as to which elevator to use. This assignment by the system aims to minimize the waiting and travel times of the passengers. A key factor in the computation of schedules with minimal waiting and travel times is the current traffic pattern. Traffic scenarios are most easily classified for office buildings. For instance, it is typical for the morning hours that every passenger starts his trip on an entrance level and that all passengers travel in the same direction. An entrance level is, for example, the main entrance or a parking deck. Another essential aspect of destination call systems is the way in which the system assigns passengers. On the one hand, there are immediately assigning (UZ) systems. In a UZ system, after each arrival of a passenger a snapshot of the current traffic is taken, and replanning and assignment take place. In the following, we will refer to such a snapshot as a snapshot problem. After the snapshot problem has been solved, each passenger is notified by the system, e.g., via a display, which elevator to use. On the other hand, there are delayed assigning (VZ) systems. In these systems, the creation and solution of a snapshot problem is delayed until shortly before an elevator arrives at a floor. In a VZ system, the system announces to all waiting passengers the planned destination floors of the arriving elevator. Every passenger who has placed a call and wants to travel to one of these destination floors may then board this elevator. Due to the delay, a far larger set of passengers has to be assigned than in a UZ system, which can make the solution process considerably more expensive. The advantage of a VZ system, on the other hand, is the greater degree of freedom in the optimization, since the late assignment makes it possible to take the further development of the traffic into account.
Since VZ systems are, owing to this greater degree of freedom, of interest for practice, in this thesis we concern ourselves with a more efficient solution of this type of snapshot problem. It suffices to consider the solution process of a single snapshot problem; the goal is to reduce the required computation time. By reoptimization we mean the construction of feasible columns in the respective iteration rounds of the column generation procedure within a snapshot problem. An iteration round produces a set of feasible tours with negative reduced costs. Efficient reoptimization is characterized by the reuse and preparation of information from previous iteration rounds of the same snapshot problem. This important information includes the search tree constructed in the previous iteration round, with its fathomed (pruned) leaves, as well as tours or columns that were constructed but did not contribute to solving the column generation subproblem in the iteration round of their construction. We call such reuse and preparation of information a warm start.}, language = {de} }