@misc{TateiwaShinanoYamamuraetal., author = {Tateiwa, Nariaki and Shinano, Yuji and Yamamura, Keiichiro and Yoshida, Akihiro and Kaji, Shizuo and Yasuda, Masaya and Fujisawa, Katsuki}, title = {CMAP-LAP: Configurable Massively Parallel Solver for Lattice Problems}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-82802}, abstract = {Lattice problems are a class of optimization problems that are notably hard: no classical or quantum algorithms are known to solve them efficiently. Their hardness has made lattices a major cryptographic primitive for post-quantum cryptography. Several different approaches have been used for lattice problems, with different computational profiles; some suffer from super-exponential time, and others require exponential space. This motivated us to develop a novel lattice problem solver, CMAP-LAP, based on the clever coordination of different algorithms that run massively in parallel. With our flexible framework, heterogeneous modules run asynchronously in parallel on a large-scale distributed system while exchanging information, which drastically boosts the overall performance. We also implement full checkpoint-and-restart functionality, which is vital for high-dimensional lattice problems. Through numerical experiments with up to 103,680 cores, we evaluated the performance and stability of our system and demonstrated its high capability for future massive-scale experiments.}, language = {en} } @misc{BorndoerferEgererKarbsteinetal., author = {Bornd{\"o}rfer, Ralf and Egerer, Ascan and Karbstein, Marika and Messerschmidt, Ralf and Perez, Marc and Pfisterer, Steven and Strauß, Petra}, title = {Kombil{\"o}sung: Optimierung des Liniennetzes in Karlsruhe}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-69677}, abstract = {We describe the optimization of the public transport network of the city of Karlsruhe in connection with the construction works of the so-called Kombil{\"o}sung.}, language = {de} } @misc{ItoShinano, author = {Ito, Satoshi and Shinano, Yuji}, title = {Calculation of clinch and elimination numbers for sports leagues with multiple tiebreaking criteria}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-70591}, abstract = {The clinch (elimination) number is the minimal number of future wins (losses) needed to clinch (to be eliminated from) a specified place in a sports league. Several optimization models and computational results are presented for calculating clinch and elimination numbers in the presence of multiple predefined tiebreaking criteria. The main contribution is a general algorithmic framework, based on integer programming, that utilizes possibly multilayered upper and lower bounds.}, language = {en} } @misc{ReutherBorndoerferSchlechteetal., author = {Reuther, Markus and Bornd{\"o}rfer, Ralf and Schlechte, Thomas and Weider, Steffen}, title = {Integrated Optimization of Rolling Stock Rotations for Intercity Railways}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-16424}, abstract = {This paper provides a highly integrated solution approach for rolling stock planning problems in the context of intercity passenger traffic. The main contributions are a generic hypergraph-based mixed integer programming model and an integrated algorithm for the considered rolling stock rotation planning problem.
The newly developed approach is able to handle a very large set of industrial railway requirements, such as vehicle composition, maintenance constraints, infrastructure capacity, and regularity aspects. By integrating this large bundle of technical railway aspects, we show that our approach has the power to produce implementable rolling stock rotations for our industrial cooperation partner DB Fernverkehr. This is the first time that the rolling stock rotations at DB Fernverkehr could be optimized by an automated system utilizing advanced mathematical programming techniques.}, language = {en} } @misc{GuentherKuhnHegeetal., author = {G{\"u}nther, Tobias and Kuhn, Alexander and Hege, Hans-Christian and Theisel, Holger}, title = {MCFTLE: Monte Carlo Rendering of Finite-Time Lyapunov Exponent Fields}, issn = {1438-0064}, doi = {10.1111/cgf.12914}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-59054}, abstract = {Traditionally, Lagrangian fields such as finite-time Lyapunov exponents (FTLE) are precomputed on a discrete grid and are ray-cast afterwards. This, however, introduces both grid discretization errors and sampling errors during ray marching. In this work, we apply a progressive, view-dependent Monte Carlo-based approach for the visualization of such Lagrangian fields in time-dependent flows. Our approach avoids grid discretization and ray marching errors completely, is consistent, and has a low memory consumption. The system provides noisy previews that converge over time to an accurate high-quality visualization. Compared to traditional approaches, the proposed system avoids explicitly predefined fieldline seeding structures, and uses a Monte Carlo sampling strategy named Woodcock tracking to distribute samples along the view ray. An acceleration of this sampling strategy requires local upper bounds for the FTLE values, which we progressively acquire during the rendering. Our approach is tailored for high-quality visualizations of complex FTLE fields and is guaranteed to faithfully represent detailed ridge surface structures as indicators for Lagrangian coherent structures (LCS). We demonstrate the effectiveness of our approach using a set of analytic test cases and real-world numerical simulations.}, language = {en} } @misc{KuhnEngelkeFlatkenetal., author = {Kuhn, Alexander and Engelke, Wito and Flatken, Markus and Hege, Hans-Christian and Hotz, Ingrid}, title = {Topology-based Analysis for Multimodal Atmospheric Data of Volcano Eruptions}, issn = {1438-0064}, doi = {10.1007/978-3-319-44684-4_2}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-57043}, abstract = {Many scientific applications deal with data from a multitude of different sources, e.g., measurements, imaging, and simulations. Each source provides an additional perspective on the phenomenon of interest, but also comes with specific limitations, e.g., regarding accuracy or spatial and temporal availability. Effectively combining and analyzing such multimodal and partially incomplete data of limited accuracy in an integrated way is challenging. In this work, we outline an approach for an integrated analysis and visualization of the atmospheric impact of volcano eruptions. The data sets comprise observation and imaging data from satellites as well as results from numerical particle simulations. To analyze the clouds from the volcano eruption in the spatiotemporal domain, we apply topological methods. Extremal structures reveal features in the data that support clustering and comparison.
We further discuss the robustness of these methods with respect to different properties of the data and different parameter setups. Finally, we outline open challenges for effective integrated visualization using topological methods.}, language = {en} } @misc{ScheumannFuegenschuhSchenkeretal., author = {Scheumann, Rene and F{\"u}genschuh, Armin and Schenker, Sebastian and Vierhaus, Ingmar and Bornd{\"o}rfer, Ralf and Finkbeiner, Matthias}, title = {Global Manufacturing: How to Use Mathematical Optimisation Methods to Transform to Sustainable Value Creation}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-15703}, abstract = {It is clear that a transformation to sustainable value creation is needed, because business as usual is not an option for preserving the competitive advantages of leading industries. What does that mean? This contribution proposes possible approaches for a shift in existing manufacturing paradigms. In a first step, sustainability aspects from the German Sustainability Strategy and from the tools of life cycle sustainability assessment are chosen to match areas of a value creation process. Within these aspects are indicators, which can be measured within a manufacturing process. Once these data are obtained, they can be used to set up a mathematical linear pulse model of manufacturing in order to analyse the evolution of the system over time, that is, the transition process, using a system dynamics approach. An increase of technology development by a factor of 2 leads to an increase in manufacturing, but also to an increase in climate change. Compensation measures need to be taken; this can be done, e.g., by taking money from the GDP (as an indicator of the aspect ``macroeconomic performance''), in which case the value of the arc from that building block towards climate change must be increased by a factor of 10. The choice of independent and representative indicators or aspects should be validated and double-checked for significance with the help of multi-criteria mixed-integer programming optimisation methods.}, language = {en} } @misc{Pulaj, author = {Pulaj, Jonad}, title = {Cutting Planes for Families Implying Frankl's Conjecture}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-60626}, abstract = {Using an algorithmic framework, we find previously unknown families which imply Frankl's conjecture. The conjecture states that for any non-empty union-closed (or Frankl) family there exists an element in at least half of the sets. Poonen's Theorem characterizes the existence of weights which determine whether a given Frankl family implies the conjecture for all Frankl families that contain it. A Frankl family is Non-Frankl-Complete (Non-FC) if it does not imply the conjecture in its elements for some Frankl family that contains it. We design a cutting-plane method that computes the explicit weights which imply the existence conditions of Poonen's Theorem. This method allows us to find a counterexample to a ten-year-old conjecture by R. Morris about the structure of generators for Non-FC families.}, language = {en} } @misc{Shinano, author = {Shinano, Yuji}, title = {The Ubiquity Generator Framework: 7 Years of Progress in Parallelizing Branch-and-Bound}, issn = {1438-0064}, doi = {10.1007/978-3-319-89920-6_20}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-65545}, abstract = {Mixed integer linear programming (MIP) is a general form for modeling combinatorial optimization problems and has many industrial applications.
The performance of MIP solvers has improved tremendously in the last two decades, and these solvers have been used to solve many real-world problems. However, against the backdrop of modern computer technology, parallelization is of pivotal importance. In this context, ParaSCIP is the most successful parallel MIP solver in terms of solving previously unsolvable instances from the well-known benchmark instance set MIPLIB by using supercomputers. It solved two instances from MIPLIB2003 and 12 from MIPLIB2010 for the first time to optimality by using up to 80,000 cores on supercomputers. ParaSCIP has been developed using the Ubiquity Generator (UG) framework, a general software package for parallelizing any state-of-the-art branch-and-bound based solver. This paper discusses 7 years of progress in parallelizing branch-and-bound solvers with UG.}, language = {en} } @misc{FujiiItoKimetal., author = {Fujii, Koichi and Ito, Naoki and Kim, Sunyoung and Kojima, Masakazu and Shinano, Yuji and Toh, Kim-Chuan}, title = {大規模二次割当問題への挑戦}, series = {統計数理研究所共同研究リポート 453 最適化:モデリングとアルゴリズム33 2022年3月 「大規模二次割当問題への挑戦」 p.84-p.92}, journal = {統計数理研究所共同研究リポート 453 最適化:モデリングとアルゴリズム33 2022年3月 「大規模二次割当問題への挑戦」 p.84-p.92}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-86779}, abstract = {The quadratic assignment problem is known to have weak linear relaxations, and a variety of relaxation techniques have been devised to strengthen them. We introduce one of these, the doubly nonnegative (DNN) relaxation, together with the Newton-bracketing method, which has recently been studied as a method for solving it, and report on an implementation of a branch-and-bound method based on them as well as the results of numerical experiments.}, language = {ja} } @misc{Lehmann, type = {Master Thesis}, author = {Lehmann, Felix}, title = {Inexaktheit in Newton-Lagrange-Verfahren f{\"u}r Optimierungsprobleme mit Elliptischen PDGL-Nebenbedingungen}, pages = {73}, abstract = {In the numerical solution of optimal control problems with elliptic partial differential equations as constraints, discretization and iteration errors inevitably occur. For reasons of computational effort, one would like to avoid having to make these errors very small. As a consequence, the linearized constraints in a composite-step method are not satisfied exactly. This thesis investigates the influence of this inexactness on the convergence behavior of Newton-Lagrange methods, discussing several pertinent local convergence results. A concrete composite-step method is then formulated in which the accuracy of the inner iterative solvers can be controlled adaptively. Finally, the close agreement between the analytical predictions and the actual performance of the presented methods is demonstrated on two model problems.}, language = {de} } @masterthesis{Wende, type = {Bachelor Thesis}, author = {Wende, Florian}, title = {Dynamic Load Balancing on Massively Parallel Computer Architectures}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-42166}, school = {Zuse Institute Berlin (ZIB)}, pages = {100}, abstract = {This thesis reports on using dynamic load balancing methods on massively parallel computers in the context of multithreaded computations. In particular, we investigate the applicability of a randomized work stealing algorithm to ray tracing and breadth-first search as representatives of real-world applications with dynamic work creation. For our considerations we made use of current massively parallel hardware accelerators: the Nvidia Tesla M2090 and the Intel Xeon Phi. For both we demonstrate the suitability of the work stealing scheme for these real-world applications.
We also illustrate the necessity of dynamic load balancing for irregular computations on such hardware.}, language = {en} } @misc{WendeSteinkeReinefeld, author = {Wende, Florian and Steinke, Thomas and Reinefeld, Alexander}, title = {The Impact of Process Placement and Oversubscription on Application Performance: A Case Study for Exascale Computing}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-53560}, abstract = {With the growing number of hardware components and the increasing software complexity in the upcoming exascale computers, system failures will become the norm rather than the exception for long-running applications. Fault tolerance can be achieved by the creation of checkpoints during the execution of a parallel program. Checkpoint/Restart (C/R) mechanisms allow for both task migration (even if there were no hardware faults) and restarting of tasks after the occurrence of hardware faults. Affected tasks are then migrated to other nodes, which may result in unfortunate process placement and/or oversubscription of compute resources. In this paper, we analyze the impact of unfortunate process placement and oversubscription of compute resources on the performance and scalability of two typical HPC application workloads, CP2K and MOM5. Results are given for a Cray XC30/40 with Aries dragonfly topology. Our results indicate that unfortunate process placement has only a small negative impact, while oversubscription substantially degrades the performance. The latter might only be (partially) beneficial when placing multiple applications with different computational characteristics on the same node.}, language = {en} } @misc{Wende, author = {Wende, Florian}, title = {SIMD Enabled Functions on Intel Xeon CPU and Intel Xeon Phi Coprocessor}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-54163}, abstract = {To achieve high floating point compute performance, modern processors draw on short vector SIMD units, as found e.g. in Intel CPUs (SSE, AVX1, AVX2 as well as AVX-512 on the roadmap) and the Intel Xeon Phi coprocessor, to operate on an increasingly large number of operands simultaneously. Making use of SIMD vector operations is therefore essential to get close to the processor's floating point peak performance. Two approaches are typically used by programmers to utilize the vector units: compiler-driven vectorization via directives and code annotations, and manual vectorization by means of SIMD intrinsic operations or assembly. In this paper, we investigate the capabilities of the current Intel compiler (version 15 and later) to generate vector code for non-trivial coding patterns within loops. Besides the more or less uniform data-parallel standard loops or loop nests, which are typical candidates for SIMDfication, the occurrence of, e.g., (conditional) function calls including branching, and early returns from functions may pose difficulties regarding the effective use of vector operations. Recent improvements of the compiler's capabilities involve the generation of SIMD-enabled functions. We study the effectiveness of the vector code generated by the compiler by comparing it against hand-coded intrinsics versions of different kinds of functions that are invoked within innermost loops.}, language = {en} } @misc{HosodaMaherShinanoetal., author = {Hosoda, Junko and Maher, Stephen J.
and Shinano, Yuji and Villumsen, Jonas Christoffer}, title = {A parallel branch-and-bound heuristic for the integrated long-haul and local vehicle routing problem on an adaptive transportation network}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-89700}, abstract = {Consolidation of commodities and coordination of vehicle routes are fundamental features of supply chain management problems. While locations for consolidation and coordination are typically known a priori, in adaptive transportation networks this is not the case; the identification of such consolidation locations forms part of the decision-making process. Supply chain management problems that integrate the designation of consolidation locations with the coordination of long-haul and local vehicle routing are not only challenging to solve, but also very difficult to formulate mathematically. In this paper, the first mathematical model integrating location clustering with long-haul and local vehicle routing is proposed. This mathematical formulation is used to develop algorithms to find high-quality solutions. A novel parallel framework is developed that combines exact and heuristic methods to improve the search for high-quality solutions and to provide valid bounds. The results demonstrate that using exact methods to guide heuristic search is an effective approach to finding high-quality solutions for difficult supply chain management problems.}, language = {en} } @misc{MunguiaOxberryRajanetal., author = {Munguia, Lluis-Miquel and Oxberry, Geoffrey and Rajan, Deepak and Shinano, Yuji}, title = {Parallel PIPS-SBB: Multi-Level Parallelism For Stochastic Mixed-Integer Programs}, number = {ZIB-Report 17-58}, issn = {1438-0064}, doi = {10.1007/s10589-019-00074-0}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-65517}, abstract = {PIPS-SBB is a distributed-memory parallel solver with a scalable data distribution paradigm. It is designed to solve MIPs with a dual-block angular structure, which is characteristic of deterministic-equivalent Stochastic Mixed-Integer Programs (SMIPs). In this paper, we present two different parallelizations of Branch \& Bound (B\&B), implementing both as extensions of PIPS-SBB and thus adding an additional layer of parallelism. In the first of the proposed frameworks, PIPS-PSBB, the coordination and load balancing of the different optimization workers are done in a decentralized fashion. This new framework is designed to ensure that all available cores are processing the most promising parts of the B\&B tree. The second, ug[PIPS-SBB,MPI], is a parallel implementation using the Ubiquity Generator (UG), a universal framework for parallelizing B\&B tree search that has been successfully applied to other MIP solvers. We show the effects of leveraging multiple levels of parallelism in potentially improving scaling performance beyond thousands of cores.}, language = {en} } @misc{TateiwaShinanoYasudaetal., author = {Tateiwa, Nariaki and Shinano, Yuji and Yasuda, Masaya and Kaji, Shizuo and Yamamura, Keiichiro and Fujisawa, Katsuki}, title = {Massively parallel sharing lattice basis reduction}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-85209}, abstract = {For cryptanalysis of lattice-based schemes, the performance evaluation of lattice basis reduction using high-performance computers is becoming increasingly important for the determination of the security level.
We propose a distributed and asynchronous parallel reduction algorithm based on randomization and DeepBKZ, an improved variant of the block Korkine-Zolotarev (BKZ) reduction algorithm. Randomized copies of a lattice basis are distributed to up to 103,680 cores and independently reduced in parallel, while some basis vectors are shared asynchronously among all processes via MPI. There is a trade-off between randomization and information sharing: if a substantial amount of information is shared, all processes will work on the same problem, thereby diminishing the benefit of parallelization. To monitor this balance between randomness and sharing, we propose a metric to quantify the variety of lattice bases, and we empirically find an optimal sharing parameter for high-dimensional lattices. We demonstrate the efficacy of our proposed parallel algorithm and implementation with respect to both performance and scalability through our experiments.}, language = {en} } @misc{AndersonHiller, author = {Anderson, Lovis and Hiller, Benjamin}, title = {A Sweep-Plane Algorithm for the Computation of the Volume of a Union of Polytopes}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-69489}, abstract = {Optimization models often feature disjunctions of polytopes as submodels. Such a disjunctive set is initially (at best) relaxed to its convex hull, which is then refined by branching. To measure the error of the convex relaxation, the (relative) difference between the volume of the convex hull and the volume of the disjunctive set may be used. This requires a method to compute the volume of the disjunctive set. Naively, this can be done via inclusion/exclusion, leveraging existing code for the volume of polytopes. However, this is often inefficient. We propose a revised variant of an old algorithm by Bieri and Nef (1983) for this purpose. The algorithm uses a sweep-plane to incrementally calculate the volume of the disjunctive set as a function of the offset parameter of the sweep-plane.}, language = {en} } @misc{KuhnEngelkeRoessletal., author = {Kuhn, Alexander and Engelke, Wito and R{\"o}ssl, Christian and Hadwiger, Markus and Theisel, Holger}, title = {Time Line Cell Tracking for the Approximation of Lagrangian Coherent Structures with Subgrid Accuracy}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-43050}, abstract = {Lagrangian Coherent Structures (LCS) have become a widespread and powerful method to describe dynamic motion patterns in time-dependent flow fields. The standard way to extract LCS is to compute height ridges in the Finite-Time Lyapunov Exponent (FTLE) field. In this work, we present an alternative method to approximate Lagrangian features for 2D unsteady flow fields that achieves subgrid accuracy without additional particle sampling. We obtain this by a geometric reconstruction of the flow map using additional material constraints for the available samples. In comparison to the standard method, this allows for a more accurate global approximation of LCS on sparse grids and for long integration intervals. The proposed algorithm works directly on a set of given particle trajectories and does not require additional flow map derivatives. We demonstrate its application for a set of computational fluid dynamics examples, as well as trajectories acquired by Lagrangian methods, and discuss its benefits and limitations.}, language = {en} }