@misc{PfeufferWerner, author = {Pfeuffer, Frank and Werner, Axel}, title = {Adaptive telecommunication network operation with a limited number of reconfigurations}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-55547}, abstract = {Rising traffic in telecommunication networks leads to rising energy costs for the network operators. Meanwhile, increased flexibility of the networking hardware may help to realize load-adaptive operation of the networks to cut operation costs. To meet network operators' concerns over stability, we propose to switch network configurations only a limited number of times per day. We present a method for the integrated computation of optimal switching times and network configurations that alternately solves mixed-integer programs and constrained shortest cycle problems in a certain graph. Similarly to the Branch \& Bound Algorithm, it uses lower and upper bounds on the optimum value and allows for pivoting strategies to guide the computation and avoid the solution of irrelevant subproblems. The algorithm can act as a framework to be adapted and applied to suitable problems of different origin.}, language = {en} } @misc{Paskin, type = {Master Thesis}, author = {Paskin, Martha}, title = {Estimating 3D Shape of the Head Skeleton of Basking Sharks Using Annotated Landmarks on a 2D Image}, abstract = {Basking sharks are thought to be one of the most efficient filter-feeding fish in terms of the throughput of water filtered through their gills. Details about the underlying morphology of their branchial region have not been studied due to various challenges in acquiring real-world data. The present thesis aims to facilitate this by developing a mathematical shape model that constructs the 3D structure of the head skeleton of a basking shark using annotated landmarks on a single 2D image. This is an ill-posed problem, as estimating the depth of a 3D object from a single 2D view is, in general, not possible. To reduce this ambiguity, we create a set of pre-defined training shapes in 3D from CT scans of basking sharks. First, the damaged structures of the sharks in the scans are corrected by solving a set of optimization problems, before using them as accurate 3D representations of the object. Then, two approaches are employed for the 2D-to-3D shape fitting problem: an Active Shape Model approach and a Kendall's shape space approach. The former represents a shape as a point in a high-dimensional Euclidean space, whereas the latter represents a shape as an equivalence class of points in this Euclidean space. Kendall's shape space approach is a novel technique that has not yet been applied in this context, and a comprehensive comparison of the two approaches suggests the latter to be superior for the problem at hand. This can be credited to an improved interpolation of the training shapes.}, language = {en} } @misc{OrlowskiWernerWessaely, author = {Orlowski, Sebastian and Werner, Axel and Wess{\"a}ly, Roland}, title = {Estimating trenching costs in FTTx network planning}, issn = {1438-0064}, doi = {10.1007/978-3-642-29210-1_15}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-14884}, abstract = {In this paper we assess to what extent trenching costs of an FTTx network are unavoidable, even if technical side constraints are neglected. For that purpose we present an extended Steiner tree model. Using a variety of realistic problem instances we demonstrate that the total trenching cost can only be reduced by about 5 percent in realistic scenarios. 
This work has been funded by BMBF (German Federal Ministry of Education and Research) within the program "KMU-innovativ".}, language = {en} } @misc{NielsenWeber, author = {Nielsen, Adam and Weber, Marcus}, title = {Computing the nearest reversible Markov chain}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-53292}, abstract = {Reversible Markov chains are the basis of many applications. However, computing transition probabilities by a finite sampling of a Markov chain can lead to truncation errors. Even if the original Markov chain is reversible, the approximated Markov chain might be non-reversible and will lose important properties, such as the real-valued spectrum. In this paper, we show how to find the closest reversible Markov chain to a given transition matrix. It turns out that this matrix can be computed by solving a convex minimization problem.}, language = {en} } @misc{MunguiaOxberryRajanetal., author = {Munguia, Lluis-Miquel and Oxberry, Geoffrey and Rajan, Deepak and Shinano, Yuji}, title = {Parallel PIPS-SBB: Multi-Level Parallelism For Stochastic Mixed-Integer Programs}, number = {ZIB-Report 17-58}, issn = {1438-0064}, doi = {10.1007/s10589-019-00074-0}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-65517}, abstract = {PIPS-SBB is a distributed-memory parallel solver with a scalable data distribution paradigm. It is designed to solve MIPs with a dual-block angular structure, which is characteristic of deterministic-equivalent Stochastic Mixed-Integer Programs (SMIPs). In this paper, we present two different parallelizations of Branch \& Bound (B\&B), implementing both as extensions of PIPS-SBB, thus adding an additional layer of parallelism. In the first of the proposed frameworks, PIPS-PSBB, the coordination and load-balancing of the different optimization workers are done in a decentralized fashion. This new framework is designed to ensure all available cores are processing the most promising parts of the B\&B tree. The second, ug[PIPS-SBB,MPI], is a parallel implementation using the Ubiquity Generator (UG), a universal framework for parallelizing B\&B tree search that has been successfully applied to other MIP solvers. We show the effects of leveraging multiple levels of parallelism in potentially improving scaling performance beyond thousands of cores.}, language = {en} } @misc{MasingLindnerEbert, author = {Masing, Berenike and Lindner, Niels and Ebert, Patricia}, title = {Forward and Line-Based Cycle Bases for Periodic Timetabling}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-89731}, abstract = {The optimization of periodic timetables is an indispensable planning task in public transport. Although the periodic event scheduling problem (PESP) provides an elegant mathematical formulation of the periodic timetabling problem that has led to many insights for primal heuristics, it is notoriously hard to solve to optimality. One reason is that for the standard mixed-integer linear programming formulations, the linear programming relaxations are weak and the integer variables are of a purely technical nature and in general do not correlate with the objective value. While the first problem has been addressed by developing several families of cutting planes, we focus on the second aspect. We discuss integral forward cycle bases as a concept to compute improved dual bounds for PESP instances. To this end, we develop the theory of forward cycle bases on general digraphs. 
Specifically for the application of timetabling, we devise a generic procedure to construct line-based event-activity networks, and give a simple recipe for an integral forward cycle basis on such networks. Finally, we analyze the 16 railway instances of the benchmark library PESPlib, match them to the line-based structure and use forward cycle bases to compute better dual bounds for 14 out of the 16 instances.}, language = {en} } @misc{LindnerReisch, author = {Lindner, Niels and Reisch, Julian}, title = {Parameterized Complexity of Periodic Timetabling}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-78314}, abstract = {Public transportation networks are typically operated with a periodic timetable. The Periodic Event Scheduling Problem (PESP) is the standard mathematical modelling tool for periodic timetabling. Since PESP can be solved in linear time on trees, it is a natural question to ask whether there are polynomial-time algorithms for input networks of bounded treewidth. We show that deciding the feasibility of a PESP instance is NP-hard even when the treewidth is 2, the branchwidth is 2, or the carvingwidth is 3. Analogous results hold for the optimization of reduced PESP instances, where the feasibility problem is trivial. To complete the picture, we present two pseudo-polynomial-time dynamic programming algorithms solving PESP on input networks with bounded tree- or branchwidth. We further analyze the parameterized complexity of PESP with bounded cyclomatic number, diameter, or vertex cover number. For event-activity networks with a special -- but standard -- structure, we give explicit and sharp bounds on the branchwidth in terms of the maximum degree and the carvingwidth of an underlying line network. Finally, we investigate several parameters on the smallest instance of the benchmarking library PESPlib.}, language = {en} } @misc{LieSullivanTeckentrup, author = {Lie, Han Cheng and Sullivan, T. J. and Teckentrup, Aretha}, title = {Random forward models and log-likelihoods in Bayesian inverse problems}, series = {SIAM/ASA Journal on Uncertainty Quantification}, volume = {6}, journal = {SIAM/ASA Journal on Uncertainty Quantification}, number = {4}, issn = {1438-0064}, doi = {10.1137/18M1166523}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-66324}, pages = {1600 -- 1629}, abstract = {We consider the use of randomised forward models and log-likelihoods within the Bayesian approach to inverse problems. Such random approximations to the exact forward model or log-likelihood arise naturally when a computationally expensive model is approximated using a cheaper stochastic surrogate, as in Gaussian process emulation (kriging), or in the field of probabilistic numerical methods. We show that the Hellinger distance between the exact and approximate Bayesian posteriors is bounded by moments of the difference between the true and approximate log-likelihoods. 
Example applications of these stability results are given for randomised misfit models in large data applications and the probabilistic solution of ordinary differential equations.}, language = {en} } @misc{Keidel, type = {Master Thesis}, author = {Keidel, Stefan}, title = {Snapshots in Scalaris}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-42282}, school = {Zuse Institute Berlin (ZIB)}, pages = {87}, abstract = {One of the biggest obstacles to the practical use of Scalaris, a scalable implementation of a distributed hash table with support for transactions, is the lack of a procedure for capturing a consistent state of the entire system. In this thesis we present a simple protocol that accomplishes this task and that, owing to the approach we have chosen, is easy to implement. As a starting point we select, from a number of "classical" snapshot algorithms, a procedure designed by Mattern in 1993, which is based on the algorithm of Lai and Yang. This decision rests on a thorough analysis of the protocols, taking into account the architecture of the existing software. In the next step we use our full knowledge of the internals of the Scalaris transaction system to simplify the procedure with respect to usability and implementation complexity, without weakening the requirements on the captured state. Instead of a loose collection of local states of the individual participating nodes, we can in the end produce a single large key-value table as the result, which is consistent, easy to process further, and corresponds to a state the system could have been in at some point. After implementing the procedure in software, we evaluate the results with respect to the impact on the performance of the overall system and discuss possible further developments.}, language = {de} } @misc{KaplanLauferProhaskaetal., author = {Kaplan, Bernhard and Laufer, Jan and Prohaska, Steffen and Buchmann, Jens}, title = {Monte-Carlo-based inversion scheme for 3D quantitative photoacoustic tomography}, issn = {1438-0064}, doi = {10.1117/12.2251945}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-62318}, abstract = {The goal of quantitative photoacoustic tomography (qPAT) is to recover maps of the chromophore distributions from multiwavelength images of the initial pressure. Model-based inversions that incorporate the physical processes underlying the photoacoustic (PA) signal generation represent a promising approach. Monte-Carlo models of the light transport are computationally expensive, but provide accurate predictions of the fluence distributions, especially in the ballistic and quasi-ballistic regimes. Here, we focus on the inverse problem of 3D qPAT of blood oxygenation and investigate the application of the Monte-Carlo method in a model-based inversion scheme. A forward model of the light transport based on the MCX simulator and acoustic propagation modeled by the k-Wave toolbox was used to generate a PA image data set acquired in a tissue phantom over a planar detection geometry. The combination of the optical and acoustic models is shown to account for limited-view artifacts. In addition, the errors in the fluence due to, for example, partial volume artifacts and absorbers immediately adjacent to the region of interest are investigated. 
To accomplish large-scale inversions in 3D, the number of degrees of freedom is reduced by applying image segmentation to the initial pressure distribution to extract a limited number of regions with homogeneous optical parameters. The absorber concentration in the tissue phantom was estimated using a coordinate descent parameter search based on the comparison between measured and modeled PA spectra. The relative concentrations estimated with this approach lie within 5 \% of the known concentrations. Finally, we discuss the feasibility of this approach for recovering the blood oxygenation from experimental data.}, language = {en} } @misc{ItoShinano, author = {Ito, Satoshi and Shinano, Yuji}, title = {Calculation of clinch and elimination numbers for sports leagues with multiple tiebreaking criteria}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-70591}, abstract = {The clinch (elimination) number is the minimal number of future wins (losses) needed to clinch (to be eliminated from) a specified place in a sports league. Several optimization models and computational results are shown in this paper for calculating clinch and elimination numbers in the presence of predefined multiple tiebreaking criteria. The main aim of this paper is to provide a general algorithmic framework, based on integer programming, that utilizes possibly multilayered upper and lower bounds.}, language = {en} } @misc{HosodaMaherShinanoetal., author = {Hosoda, Junko and Maher, Stephen J. and Shinano, Yuji and Villumsen, Jonas Christoffer}, title = {A parallel branch-and-bound heuristic for the integrated long-haul and local vehicle routing problem on an adaptive transportation network}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-89700}, abstract = {Consolidation of commodities and coordination of vehicle routes are fundamental features of supply chain management problems. While locations for consolidation and coordination are typically known a priori, in adaptive transportation networks this is not the case. The identification of such consolidation locations forms part of the decision-making process. Supply chain management problems integrating the designation of consolidation locations with the coordination of long-haul and local vehicle routing are not only challenging to solve, but also very difficult to formulate mathematically. In this paper, the first mathematical model integrating location clustering with long-haul and local vehicle routing is proposed. This mathematical formulation is used to develop algorithms to find high-quality solutions. A novel parallel framework is developed that combines exact and heuristic methods to improve the search for high-quality solutions and provide valid bounds. The results demonstrate that using exact methods to guide heuristic search is an effective approach to find high-quality solutions for difficult supply chain management problems.}, language = {en} } @misc{Hoffmann, type = {Master Thesis}, author = {Hoffmann, Marie}, title = {Approximate Algorithms for Distributed Systems}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-42370}, school = {Zuse Institute Berlin (ZIB)}, pages = {75}, abstract = {Peer-to-peer (P2P) systems form a special class of distributed systems. Typically, the node hierarchy in a P2P system is flat and all nodes share the same responsibilities. 
In this thesis we focus on three problems that occur in P2P systems: the storage of data replicates, quantile computation on distributed data streams, and churn rate estimation. Data replication is one of the oldest techniques to maintain stored data in a P2P system and to reply to read requests. Applications that use data replication include distributed databases. They are part of an abstract overlay network and do not see the underlying network topology. The question is how to place a set of data replicates in a distributed system such that response times and failure probabilities become minimal, without a priori knowledge of the topology of the underlying hardware nodes. We show how to utilize an agglomerative clustering procedure to reach this goal. State-of-the-art algorithms for the aggregation of distributed data or data streams require synchronization at some point, or merge data aggregates hierarchically, which does not fit the basic principle of P2P systems. We test whether randomized communication and merging of data aggregates are able to produce the same results. These data aggregates serve for quantile queries. Constituting and maintaining a P2P overlay network requires frequent message passing. One goal is to minimize the number of maintenance messages, since they consume bandwidth which might be missing for other applications. The lower bound on the frequency of maintenance messages depends strongly on the churn rate of peers. We show how to estimate the mean lifetime of peers and to reduce the frequency of maintenance messages without destabilizing the infrastructure of the overlay.}, language = {en} } @misc{HillerVredeveld, author = {Hiller, Benjamin and Vredeveld, Tjark}, title = {Stochastic dominance analysis of Online Bin Coloring algorithms}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-16502}, abstract = {This paper proposes a new method for the probabilistic analysis of online algorithms. It is based on the notion of stochastic dominance. We develop the method for the online bin coloring problem introduced by Krumke et al. (2008). Using methods for the stochastic comparison of Markov chains we establish the result that the performance of the online algorithm GreedyFit is stochastically better than the performance of the algorithm OneBin for any number of items processed. This result gives a more realistic picture than competitive analysis and explains the behavior observed in simulations.}, language = {en} } @misc{HillerVredeveld, author = {Hiller, Benjamin and Vredeveld, Tjark}, title = {Probabilistic alternatives for competitive analysis}, issn = {1438-0064}, doi = {10.1007/s00450-011-0149-1}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-15131}, abstract = {In the last 20 years competitive analysis has become the main tool for analyzing the quality of online algorithms. Despite this, competitive analysis has also been criticized: it sometimes cannot discriminate between algorithms that exhibit significantly different empirical behavior, or it even favors an algorithm that is worse from an empirical point of view. Therefore, there have been several approaches to circumvent these drawbacks. In this survey, we discuss probabilistic alternatives for competitive analysis.}, language = {en} } @misc{HaslerPetersKottig, author = {Hasler, Tim and Peters-Kottig, Wolfgang}, title = {Vorschrift oder Thunfisch? 
- Zur Langzeitverf{\"u}gbarkeit von Forschungsdaten}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-43010}, abstract = {"I'm going to make him an offer he can't refuse." This statement from an entirely different context translates quite aptly to the wish of service providers and the purpose of services for data producers in research data management. While pressure to hand over data is not helpful, opening up an option for doing so very much is. This article is concerned with understanding the sustainability of research and its data, based on the insights and experiences gained in the first phase of the DFG project EWIG. [Fn 01] A selection of pitfalls in research data management is presented, drawing on insights from expert interviews and on our own experience in building long-term archiving workflows. Initial concepts in EWIG for transferring data from differently structured data sources into the "long-term domain" are described.}, language = {de} } @misc{GriewankStreubelLehmannetal., author = {Griewank, Andreas and Streubel, Tom and Lehmann, Lutz and Hasenfelder, Richard and Radons, Manuel}, title = {Piecewise linear secant approximation via Algorithmic Piecewise Differentiation}, issn = {1438-0064}, doi = {10.1080/10556788.2017.1387256}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-61642}, abstract = {It is shown how piecewise differentiable functions \(F: \mathbb{R}^n \to \mathbb{R}^m\) that are defined by evaluation programs can be approximated locally by a piecewise linear model based on a pair of sample points \(\check{x}\) and \(\hat{x}\). We show that the discrepancy between function and model at any point \(x\) is of the bilinear order \(O(\|x - \check{x}\| \, \|x - \hat{x}\|)\). This is a little surprising since \(x \in \mathbb{R}^n\) may vary over the whole Euclidean space, and we utilize only two function samples \(\check{F} = F(\check{x})\) and \(\hat{F} = F(\hat{x})\), as well as the intermediates computed during their evaluation. As an application of the piecewise linearization procedure we devise a generalized Newton's method based on successive piecewise linearization and prove for it sufficient conditions for convergence and convergence rates equaling those of semismooth Newton. We conclude with the derivation of formulas for the numerically stable implementation of the piecewise linearization methods developed here.}, language = {en} } @misc{FujiiKimKojimaetal., author = {Fujii, Koichi and Kim, Sunyoung and Kojima, Masakazu and Mittelmann, Hans D. and Shinano, Yuji}, title = {An Exceptionally Difficult Binary Quadratic Optimization Problem with Symmetry: a Challenge for The Largest Unsolved QAP Instance Tai256c}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-93072}, abstract = {Tai256c is the largest unsolved quadratic assignment problem (QAP) instance in QAPLIB. It is known that QAP tai256c can be converted into a 256-dimensional binary quadratic optimization problem (BQOP) with a single cardinality constraint which requires the sum of the binary variables to be 92. As the BQOP is much simpler than the original QAP, the conversion increases the possibility of solving the QAP. Solving the BQOP exactly, however, is still very difficult. Indeed, a 1.48\% gap remains between the best known upper bound (UB) and lower bound (LB) of the unknown optimal value. This paper shows that the BQOP admits a nontrivial symmetry, a property that makes the BQOP very hard to solve. The symmetry induces equivalent subproblems in branch and bound (BB) methods. 
To effectively improve the LB, we propose an efficient BB method that incorporates a doubly nonnegative relaxation, the standard orbit branching, and a technique to prune equivalent subproblems. With this BB method, a new LB with a 1.25\% gap is successfully obtained, and computing an LB with a 1.0\% gap is shown to be still quite difficult.}, language = {en} } @misc{FujiiItoKimetal., author = {Fujii, Koichi and Ito, Naoki and Kim, Sunyoung and Kojima, Masakazu and Shinano, Yuji and Toh, Kim-Chuan}, title = {Solving Challenging Large Scale QAPs}, issn = {1438-0064}, doi = {10.12752/8130}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-81303}, abstract = {We report our progress on the project for solving larger-scale quadratic assignment problems (QAPs). Our main approach to solving large-scale NP-hard combinatorial optimization problems such as QAPs is a parallel branch-and-bound method efficiently implemented on a powerful computer system using the Ubiquity Generator (UG) framework that can utilize more than 100,000 cores. Lower bounding procedures incorporated in the branch-and-bound method play a crucial role in solving the problems. For a strong lower bounding procedure, we employ the Lagrangian doubly nonnegative (DNN) relaxation and the Newton-bracketing method developed by the authors' group. In this report, we describe some basic tools used in the project, including the lower bounding procedure and branching rules, and present some preliminary numerical results. Our next target problem is QAPs with dimension at least 50, as we have succeeded in solving tai30a and sko42 from QAPLIB for the first time.}, language = {en} } @misc{FujiiItoKimetal., author = {Fujii, Koichi and Ito, Naoki and Kim, Sunyoung and Kojima, Masakazu and Shinano, Yuji and Toh, Kim-Chuan}, title = {大規模二次割当問題への挑戦}, series = {統計数理研究所共同研究リポート 453 最適化:モデリングとアルゴリズム33 2022年3月 「大規模二次割当問題への挑戦」 p.84-p.92}, journal = {統計数理研究所共同研究リポート 453 最適化:モデリングとアルゴリズム33 2022年3月 「大規模二次割当問題への挑戦」 p.84-p.92}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-86779}, abstract = {The quadratic assignment problem is known to have a weak linear relaxation, and a variety of relaxation techniques have been devised to strengthen it. We introduce one of them, the doubly nonnegative (DNN) relaxation, together with the Newton-bracketing method, a solution approach for it that has been studied intensively in recent years, and report on the implementation of a branch-and-bound method based on them as well as on numerical results.}, language = {ja} }