@misc{Rehfeldt, type = {Master Thesis}, author = {Rehfeldt, Daniel}, title = {A Generic Approach to Solving the Steiner Tree Problem and Variants}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-57817}, pages = {185}, abstract = {Spawned by practical applications, numerous variations of the classical Steiner tree problem in graphs have been studied during the last decades. Despite the strong relationship between the different variants, solution approaches employed so far have been prevalently problem-specific. In contrast, we pursue a general-purpose strategy resulting in a solver able to solve both the classical Steiner tree problem and ten of its variants without modification. These variants include well-known problems such as the prize-collecting Steiner tree problem, the maximum-weight connected subgraph problem, or the rectilinear minimum Steiner tree problem. Bolstered by a variety of new methods, most notably reduction techniques, our solver is not only of unprecedented versatility, but also competitive or even superior to specialized state-of-the-art programs for several Steiner problem variants.}, language = {en} }
@misc{Keidel, type = {Master Thesis}, author = {Keidel, Stefan}, title = {Snapshots in Scalaris}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-42282}, school = {Zuse Institute Berlin (ZIB)}, pages = {87}, abstract = {One of the biggest obstacles to the practical use of Scalaris, a scalable implementation of a distributed hash table with transaction support, is the lack of a method for capturing a consistent state of the entire system. In this thesis we present a simple protocol that accomplishes this task and that, owing to the approach we have chosen, is easy to implement. As a starting point we select, from a number of ``classical'' snapshot algorithms, a method designed by Mattern in 1993, which is based on the algorithm of Lai and Yang. This choice rests on a thorough analysis of the protocols, taking the architecture of the existing software into account. In the next step we use our complete knowledge of the internals of the Scalaris transaction system to simplify the method with respect to usability and implementation complexity, without weakening the requirements on the captured state. Instead of a loose collection of the local states of the individual participating nodes, we can ultimately produce one large key-value table as the result, which is consistent, easy to process further, and corresponds to a state the system could once have been in.
After implementing the method in software, we evaluate the results with respect to their impact on the performance of the overall system and discuss possible further developments.}, language = {de} }
@misc{PfetschFuegenschuhGeissleretal., author = {Pfetsch, Marc and F{\"u}genschuh, Armin and Geißler, Bj{\"o}rn and Geißler, Nina and Gollmer, Ralf and Hiller, Benjamin and Humpola, Jesco and Koch, Thorsten and Lehmann, Thomas and Martin, Alexander and Morsi, Antonio and R{\"o}vekamp, Jessica and Schewe, Lars and Schmidt, Martin and Schultz, R{\"u}diger and Schwarz, Robert and Schweiger, Jonas and Stangl, Claudia and Steinbach, Marc and Vigerske, Stefan and Willert, Bernhard}, title = {Validation of Nominations in Gas Network Optimization: Models, Methods, and Solutions}, issn = {1438-0064}, doi = {10.1080/10556788.2014.888426}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-16531}, abstract = {In this article we investigate methods to solve a fundamental task in gas transportation, namely the validation of nomination problem: Given a gas transmission network consisting of passive pipelines and active, controllable elements and given an amount of gas at every entry and exit point of the network, find operational settings for all active elements such that there exists a network state meeting all physical, technical, and legal constraints. We describe a two-stage approach to solve the resulting complex and numerically difficult mixed-integer non-convex nonlinear feasibility problem. The first phase consists of four distinct algorithms applying mixed-integer linear, mixed-integer nonlinear, reduced nonlinear, and complementarity constrained methods to compute possible settings for the discrete decisions. The second phase employs a precise continuous nonlinear programming model of the gas network. Using this setup, we are able to compute high-quality solutions to real-world industrial instances whose size is significantly larger than networks that have appeared in the literature previously.}, language = {en} }
@misc{Schlechte, author = {Schlechte, Thomas}, title = {Railway Track Allocation - Simulation and Optimization}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-13632}, number = {11-32}, abstract = {Today, the railway timetabling process and the track allocation are among the most challenging problems a railway infrastructure provider has to solve. Especially due to the deregulation of the transport market in recent years, several suppliers of railway traffic have entered the market, which leads to an increase in slot requests and, naturally, to conflicts among them. Furthermore, railway infrastructure networks consist of very expensive assets; moreover, they are rigid due to the long-term upgrade process. In order to make the best use of this valuable infrastructure and to ensure economic operation, efficient planning of the railway operation is indispensable. Mathematical optimization models and algorithmic methodology can help to automate and tackle these challenges.
Our contribution in this paper is to present a renewed planning process, reflecting the liberalization in Europe, and a general framework to support the integration of simulation and optimization for railway capacity allocation.}, language = {en} }
@misc{ShinanoHeinzVigerskeetal., author = {Shinano, Yuji and Heinz, Stefan and Vigerske, Stefan and Winkler, Michael}, title = {FiberSCIP - A shared memory parallelization of SCIP}, issn = {1438-0064}, doi = {10.1287/ijoc.2017.0762}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-42595}, abstract = {Recently, parallel computing environments have become significantly popular. In order to benefit from them, programs have to be deployed on them effectively. This paper focuses on a parallelization of SCIP (Solving Constraint Integer Programs), which is a MIP solver and constraint integer programming framework available in source code. There is a parallel extension of SCIP named ParaSCIP, which parallelizes SCIP on massively parallel distributed memory computing environments. This paper describes FiberSCIP, yet another parallel extension of SCIP, which utilizes multi-threaded parallel computation on shared memory computing environments, and makes the following contributions: First, the basic concept of having two parallel extensions, and the relationship between them and the parallelization framework provided by UG (Ubiquity Generator), is presented, including an implementation of deterministic parallelization. Second, the difficulties of achieving good performance that utilizes all resources of an actual computing environment, and of evaluating the performance of the parallel solvers, are discussed. Third, a way to evaluate the performance of new algorithms and parameter settings of the parallel extensions is presented. Finally, the current performance of FiberSCIP for solving mixed-integer linear programs (MIPs) and mixed-integer nonlinear programs (MINLPs) in parallel is demonstrated.}, language = {en} }
@misc{AchterbergBixbyGuetal., author = {Achterberg, Tobias and Bixby, Robert E. and Gu, Zonghao and Rothberg, Edward and Weninger, Dieter}, title = {Presolve Reductions in Mixed Integer Programming}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-60370}, abstract = {Mixed integer programming has become a very powerful tool for modeling and solving real-world planning and scheduling problems, with the breadth of applications appearing to be almost unlimited. A critical component in the solution of these mixed-integer programs is a set of routines commonly referred to as presolve. Presolve can be viewed as a collection of preprocessing techniques that reduce the size of and, more importantly, improve the ``strength'' of the given model formulation, that is, the degree to which the constraints of the formulation accurately describe the underlying polyhedron of integer-feasible solutions. As our computational results will show, presolve is a key factor in the speed with which we can solve mixed-integer programs, and is often the difference between a model being intractable and solvable, in some cases easily solvable. In this paper we describe the presolve functionality in the Gurobi commercial mixed-integer programming code.
This includes an overview, or taxonomy, of the different methods that are employed, as well as more detailed descriptions of several of the techniques, with some of them appearing, to our knowledge, for the first time in the literature.}, language = {en} }
@misc{ShinanoAchterbergBertholdetal., author = {Shinano, Yuji and Achterberg, Tobias and Berthold, Timo and Heinz, Stefan and Koch, Thorsten and Winkler, Michael}, title = {Solving Previously Unsolved MIP Instances with ParaSCIP on Supercomputers by using up to 80,000 Cores}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-78393}, abstract = {The mixed-integer programming (MIP) problem is arguably among the hardest classes of optimization problems. This paper describes how we solved 21 previously unsolved MIP instances from the MIPLIB benchmark sets. To achieve these results we used an enhanced version of ParaSCIP, setting a new record for the largest-scale MIP computation: up to 80,000 cores in parallel on the Titan supercomputer. In this paper, we describe the basic parallelization mechanism of ParaSCIP, improvements of the dynamic load balancing, and novel techniques to exploit the power of parallelization for MIP solving. We give a detailed overview of computing times and statistics for solving open MIPLIB instances.}, language = {en} }
@misc{ShinanoAchterbergBertholdetal., author = {Shinano, Yuji and Achterberg, Tobias and Berthold, Timo and Heinz, Stefan and Koch, Thorsten and Winkler, Michael}, title = {Solving Open MIP Instances with ParaSCIP on Supercomputers using up to 80,000 Cores}, issn = {1438-0064}, doi = {10.1109/IPDPS.2016.56}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-56404}, abstract = {This paper describes how we solved 12 previously unsolved mixed-integer programming (MIP) instances from the MIPLIB benchmark sets. To achieve these results we used an enhanced version of ParaSCIP, setting a new record for the largest-scale MIP computation: up to 80,000 cores in parallel on the Titan supercomputer. In this paper we describe the basic parallelization mechanism of ParaSCIP, improvements of the dynamic load balancing, and novel techniques to exploit the power of parallelization for MIP solving. We give a detailed overview of computing times and statistics for solving open MIPLIB instances.}, language = {en} }
@misc{DresslerSteinke, author = {Dreßler, Sebastian and Steinke, Thomas}, title = {A Novel Hybrid Approach to Automatically Determine Kernel Interface Data Volumes}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-15569}, abstract = {Scheduling algorithms for heterogeneous platforms make scheduling decisions based on several metrics. One of these metrics is the amount of data to be transferred from and to the accelerator. However, the automated determination of this metric is not a simple task. A few schedulers and runtime systems solve this problem by using regression models, which are, however, imprecise. Our novel approach for the determination of data volumes removes this limitation and thus provides a solution to obtain exact information.}, language = {en} }
@misc{DercksenHegeOberlaender2013, author = {Dercksen, Vincent J. and Hege, Hans-Christian and Oberlaender, Marcel},
title = {The Filament Editor: An Interactive Software Environment for Visualization, Proof-Editing and Analysis of 3D Neuron Morphology}, issn = {1438-0064}, doi = {10.1007/s12021-013-9213-2}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-43157}, year = {2013}, abstract = {Neuroanatomical analysis, such as classification of cell types, depends on reliable reconstruction of large numbers of complete 3D dendrite and axon morphologies. At present, the majority of neuron reconstructions are obtained from preparations in a single tissue slice in vitro, thus suffering from cut off dendrites and, more dramatically, cut off axons. In general, axons can innervate volumes of several cubic millimeters and may reach path lengths of tens of centimeters. Thus, their complete reconstruction requires in vivo labeling, histological sectioning and imaging of large fields of view. Unfortunately, anisotropic background conditions across such large tissue volumes, as well as faintly labeled thin neurites, result in incomplete or erroneous automated tracings and even lead experts to make annotation errors during manual reconstructions. Consequently, tracing reliability remains the major bottleneck for reconstructing complete 3D neuron morphologies. Here, we present a novel set of tools, integrated into a software environment named 'Filament Editor', for creating reliable neuron tracings from sparsely labeled in vivo datasets. The Filament Editor allows for simultaneous visualization of complex neuronal tracings and image data in a 3D viewer, proof-editing of neuronal tracings, alignment and interconnection across sections, and morphometric analysis in relation to 3D anatomical reference structures. We illustrate the functionality of the Filament Editor on the example of in vivo labeled axons and demonstrate that for the exemplary dataset the final tracing results after proof-editing are independent of the expertise of the human operator.}, language = {en} }
@misc{Shinano, author = {Shinano, Yuji}, title = {The Ubiquity Generator Framework: 7 Years of Progress in Parallelizing Branch-and-Bound}, issn = {1438-0064}, doi = {10.1007/978-3-319-89920-6_20}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-65545}, abstract = {Mixed integer linear programming (MIP) is a general form to model combinatorial optimization problems and has many industrial applications. The performance of MIP solvers has improved tremendously in the last two decades and these solvers have been used to solve many real-world problems. However, against the backdrop of modern computer technology, parallelization is of pivotal importance. In this regard, ParaSCIP is the most successful parallel MIP solver in terms of solving previously unsolvable instances from the well-known benchmark instance set MIPLIB by using supercomputers. It solved two instances from MIPLIB2003 and 12 from MIPLIB2010 for the first time to optimality by using up to 80,000 cores on supercomputers. ParaSCIP has been developed by using the Ubiquity Generator (UG) framework, which is a general software package to parallelize any state-of-the-art branch-and-bound based solver.
This paper discusses 7 years of progress in parallelizing branch-and-bound solvers with UG.}, language = {en} }
@misc{BreuerBussieckCaoetal., author = {Breuer, Thomas and Bussieck, Michael and Cao, Karl-Kien and Cebulla, Felix and Fiand, Frederik and Gils, Hans Christian and Gleixner, Ambros and Khabi, Dmitry and Koch, Thorsten and Rehfeldt, Daniel and Wetzel, Manuel}, title = {Optimizing Large-Scale Linear Energy System Problems with Block Diagonal Structure by Using Parallel Interior-Point Methods}, issn = {1438-0064}, doi = {10.1007/978-3-319-89920-6_85}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-66183}, abstract = {Current linear energy system models (ESM) aiming to provide sufficient detail and reliability frequently give rise to problems of both high intricacy and increasing scale. Unfortunately, the size and complexity of these problems often prove to be intractable even for commercial state-of-the-art linear programming solvers. This article describes an interdisciplinary approach to exploit the intrinsic structure of these large-scale linear problems in order to solve them on massively parallel high-performance computers. A key aspect is a set of extensions to the parallel interior-point solver PIPS-IPM, which was originally developed for stochastic optimization problems. Furthermore, a newly developed GAMS interface to the solver as well as some GAMS language extensions to model block-structured problems are described.}, language = {en} }
@misc{Sagnol, author = {Sagnol, Guillaume}, title = {Picos Documentation. Release 0.1.1.}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-17396}, abstract = {PICOS is a user-friendly interface to several conic and integer programming solvers, very much like YALMIP under MATLAB. The main motivation for PICOS is to make it possible to enter an optimization problem as a high-level model and to solve it with several different solvers. Multidimensional and matrix variables are handled in a natural fashion, which makes it painless to formulate an SDP or an SOCP. This is very useful for educational purposes and for quickly implementing models and testing their validity on simple examples. Furthermore, with PICOS you can take advantage of the Python programming language to read and write data, construct a list of constraints by using Python list comprehensions, take slices of multidimensional variables, etc.}, language = {en} }
@misc{GamrathKochMaheretal., author = {Gamrath, Gerald and Koch, Thorsten and Maher, Stephen J. and Rehfeldt, Daniel and Shinano, Yuji}, title = {SCIP-Jack - A solver for STP and variants with parallelization extensions}, issn = {1438-0064}, doi = {10.1007/s12532-016-0114-x}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-60170}, abstract = {The Steiner tree problem in graphs is a classical problem that commonly arises in practical applications as one of many variants. While often a strong relationship between different Steiner tree problem variants can be observed, solution approaches employed so far have been prevalently problem-specific. In contrast, this paper introduces a general-purpose solver that can be used to solve both the classical Steiner tree problem and many of its variants without modification. This versatility is achieved by transforming various problem variants into a general form and solving them by using a state-of-the-art MIP framework.
The result is a high-performance solver that can be employed in massively parallel environments and is capable of solving previously unsolved instances.}, language = {en} }
@misc{RehfeldtKochMaher, author = {Rehfeldt, Daniel and Koch, Thorsten and Maher, Stephen J.}, title = {Reduction Techniques for the Prize-Collecting Steiner Tree Problem and the Maximum-Weight Connected Subgraph Problem}, issn = {1438-0064}, doi = {10.1002/net.21857}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-60420}, abstract = {The concept of reduction has frequently distinguished itself as a pivotal ingredient of exact solving approaches for the Steiner tree problem in graphs. In this paper we broaden the focus and consider reduction techniques for three Steiner problem variants that have been extensively discussed in the literature and entail various practical applications: The prize-collecting Steiner tree problem, the rooted prize-collecting Steiner tree problem and the maximum-weight connected subgraph problem. By introducing and subsequently deploying numerous new reduction methods, we are able to drastically decrease the size of a large number of benchmark instances, already solving more than 90 percent of them to optimality. Furthermore, we demonstrate the impact of these techniques on exact solving, using the example of the state-of-the-art Steiner problem solver SCIP-Jack.}, language = {en} }
@misc{TateiwaShinanoYasudaetal., author = {Tateiwa, Nariaki and Shinano, Yuji and Yasuda, Masaya and Kaji, Shizuo and Yamamura, Keiichiro and Fujisawa, Katsuki}, title = {Massively parallel sharing lattice basis reduction}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-85209}, abstract = {For cryptanalysis of lattice-based schemes, the performance evaluation of lattice basis reduction using high-performance computers is becoming increasingly important for the determination of the security level. We propose a distributed and asynchronous parallel reduction algorithm based on randomization and DeepBKZ, which is an improved variant of the block Korkine-Zolotarev (BKZ) reduction algorithm. Randomized copies of a lattice basis are distributed to up to 103,680 cores and independently reduced in parallel, while some basis vectors are shared asynchronously among all processes via MPI. There is a trade-off between randomization and information sharing; if a substantial amount of information is shared, all processes will work on the same problem, thereby diminishing the benefit of parallelization. To monitor this balance between randomness and sharing, we propose a metric to quantify the variety of lattice bases. We empirically find an optimal sharing parameter for high-dimensional lattices. We demonstrate the efficacy of our proposed parallel algorithm and implementation with respect to both performance and scalability through our experiments.}, language = {en} }