@misc{KaplanLauferProhaskaetal.2017, author = {Kaplan, Bernhard and Laufer, Jan and Prohaska, Steffen and Buchmann, Jens}, title = {Monte-Carlo-based inversion scheme for 3D quantitative photoacoustic tomography}, issn = {1438-0064}, doi = {10.1117/12.2251945}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-62318}, year = {2017}, abstract = {The goal of quantitative photoacoustic tomography (qPAT) is to recover maps of the chromophore distributions from multiwavelength images of the initial pressure. Model-based inversions that incorporate the physical processes underlying the photoacoustic (PA) signal generation represent a promising approach. Monte-Carlo models of the light transport are computationally expensive, but provide accurate predictions of the fluence distribution, especially in the ballistic and quasi-ballistic regimes. Here, we focus on the inverse problem of 3D qPAT of blood oxygenation and investigate the application of the Monte-Carlo method in a model-based inversion scheme. A forward model consisting of light transport based on the MCX simulator and acoustic propagation modeled by the k-Wave toolbox was used to generate a PA image data set acquired in a tissue phantom over a planar detection geometry. The combination of the optical and acoustic models is shown to account for limited-view artifacts. In addition, the errors in the fluence due to, for example, partial volume artifacts and absorbers immediately adjacent to the region of interest are investigated. To accomplish large-scale inversions in 3D, the number of degrees of freedom is reduced by applying image segmentation to the initial pressure distribution to extract a limited number of regions with homogeneous optical parameters. The absorber concentration in the tissue phantom was estimated using a coordinate descent parameter search based on the comparison between measured and modeled PA spectra. The relative concentrations estimated using this approach lie within 5 \% of the known concentrations. Finally, we discuss the feasibility of this approach to recover the blood oxygenation from experimental data.}, language = {en} } @misc{Keidel2013, type = {Master Thesis}, author = {Keidel, Stefan}, title = {Snapshots in Scalaris}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-42282}, school = {Zuse Institute Berlin (ZIB)}, pages = {87}, year = {2013}, abstract = {One of the biggest obstacles to the practical use of Scalaris, a scalable implementation of a distributed hash table with support for transactions, is the lack of a procedure for capturing a consistent state of the entire system. In this thesis we present a simple protocol that accomplishes this task and that, owing to the approach we have chosen, is easy to implement. As a starting point, we select from a number of "classical" snapshot algorithms a procedure designed by Mattern in 1993, which is based on the algorithm of Lai and Yang. This decision rests on a thorough analysis of these protocols, taking the architecture of the existing software into account. In the next step, we use our complete knowledge of the internals of the transaction system of Scalaris to simplify the procedure with respect to usability and implementation complexity, without weakening the requirements on the captured state.
Statt einer losen Anh{\"a}ufung lokaler Zust{\"a}nde der einzelnen Teilnehmerknoten k{\"o}nnen wir am Ende eine große Schl{\"u}ssel-Wert-Tabelle als Ergebnis erzeugen, die konsistent ist, sich leicht weiterverarbeiten l{\"a}sst und die einem Zustand entspricht, in dem sich das System einmal befunden haben k{\"o}nnte. Nachdem wir das Verfahren dann in Software umgesetzt haben, werten wir die Ergebnisse hinsichtlich des Einflusses auf die Performanz des Gesamtsystems aus und diskutieren m{\"o}gliche Weiterentwicklungen.}, language = {de} } @misc{Hoffmann2013, type = {Master Thesis}, author = {Hoffmann, Marie}, title = {Approximate Algorithms for Distributed Systems}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-42370}, school = {Zuse Institute Berlin (ZIB)}, pages = {75}, year = {2013}, abstract = {Peer-to-peer (P2P) systems form a special class of distributed systems. Typically, nodes in a P2P system are flat and share the same responsabilities. In this thesis we focus on three problems that occur in P2P systems: the storage of data replicates, quantile computation on distributed data streams, and churn rate estimation. Data replication is one of the oldest techniques to maintain stored data in a P2P system and to reply to read requests. Applications, which use data replication are distributed databases. They are part of an abstract overlay network and do not see the underlying network topology. The question is how to place a set of data replicates in a distributed system such that response times and failure probabilities become minimal without a priori knowledge of the topology of the underlying hardware nodes? We show how to utilize an agglomerative clustering procedure to reach this goal. State-of-the-art algorithms for aggregation of distributed data or data streams require at some point synchronization, or merge data aggregates hierarchically, which does not accompany the basic principle of P2P systems. We test whether randomized communication and merging of data aggregates are able to produce the same results. These data aggregates serve for quantile queries. Constituting and maintaining a P2P overlay network requires frequent message passing. It is a goal to minimize the number of maintenance messages since they consume bandwidth which might be missing for other applications. The lower bound of the frequency for mainte- nance messages is highly dependent on the churn rate of peers. We show how to estimate the mean lifetime of peers and to reduce the frequency for maintenance messages without destabilizing the infrastructure of the constituting overlay.}, language = {en} } @misc{LieSullivanTeckentrup2018, author = {Lie, Han Cheng and Sullivan, T. J. and Teckentrup, Aretha}, title = {Random forward models and log-likelihoods in Bayesian inverse problems}, volume = {6}, journal = {SIAM/ASA Journal on Uncertainty Quantification}, number = {4}, issn = {1438-0064}, arxiv = {http://arxiv.org/abs/1712.05717}, doi = {10.1137/18M1166523}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-66324}, pages = {1600 -- 1629}, year = {2018}, abstract = {We consider the use of randomised forward models and log-likelihoods within the Bayesian approach to inverse problems. Such random approximations to the exact forward model or log-likelihood arise naturally when a computationally expensive model is approximated using a cheaper stochastic surrogate, as in Gaussian process emulation (kriging), or in the field of probabilistic numerical methods. 
We show that the Hellinger distance between the exact and approximate Bayesian posteriors is bounded by moments of the difference between the true and approximate log-likelihoods. Example applications of these stability results are given for randomised misfit models in large data applications and the probabilistic solution of ordinary differential equations.}, language = {en} } @misc{Shinano2020, author = {Shinano, Yuji}, title = {UG - Ubiquity Generator Framework v0.9.1}, doi = {10.12752/8508}, year = {2020}, abstract = {UG is a generic framework to parallelize branch-and-bound based solvers (e.g., MIP, MINLP, ExactIP) in a distributed or shared memory computing environment. It exploits the powerful performance of state-of-the-art "base solvers", such as SCIP, CPLEX, etc., without the need for base solver parallelization. The UG framework, ParaSCIP (ug[SCIP,MPI]), and FiberSCIP (ug[SCIP,Pthreads]) are available as a beta version. For MIP solving, ParaSCIP and FiberSCIP are well debugged and should be stable. For MINLP solving, they are relatively stable, but not as thoroughly debugged. This release version should handle branch-and-cut approaches where subproblems are defined by variable bounds and also by constraints for ug[SCIP,*] (ParaSCIP and FiberSCIP). Therefore, problem classes other than MIP or MINLP can be handled, but they have not been tested yet. v0.9.1: Update orbitope cip files.}, language = {en} } @misc{RiberaBorrellQuerRichteretal.2021, author = {Ribera Borrell, Enric and Quer, Jannes and Richter, Lorenz and Sch{\"u}tte, Christof}, title = {Improving control based importance sampling strategies for metastable diffusions via adapted metadynamics}, issn = {1438-0064}, year = {2021}, abstract = {Sampling rare events in metastable dynamical systems is often a computationally expensive task and one needs to resort to enhanced sampling methods such as importance sampling. Since the problem of finding optimal importance sampling controls can be formulated as a stochastic optimization problem, this brings additional numerical challenges, and the convergence of the corresponding algorithms can itself suffer from metastability. In this article we address this issue by combining systematic control approaches with the heuristic adaptive metadynamics method. Crucially, we approximate the importance sampling control by a neural network, which makes the algorithm in principle feasible for high-dimensional applications. We demonstrate numerically on relevant metastable problems that our algorithm is more effective than previous attempts and that only the combination of the two approaches leads to satisfactory convergence and therefore to efficient sampling in certain metastable settings.}, language = {en} } @misc{Paskin2022, type = {Master Thesis}, author = {Paskin, Martha}, title = {Estimating 3D Shape of the Head Skeleton of Basking Sharks Using Annotated Landmarks on a 2D Image}, year = {2022}, abstract = {Basking sharks are thought to be one of the most efficient filter-feeding fish in terms of the throughput of water filtered through their gills. Details about the underlying morphology of their branchial region have not been studied due to various challenges in acquiring real-world data. The present thesis aims to facilitate this by developing a mathematical shape model which constructs the 3D structure of the head skeleton of a basking shark from annotated landmarks on a single 2D image.
This is an ill-posed problem, as estimating the depth of a 3D object from a single 2D view is, in general, not possible. To reduce this ambiguity, we create a set of pre-defined training shapes in 3D from CT scans of basking sharks. First, the damaged structures of the sharks in the scans are corrected by solving a set of optimization problems, before using them as accurate 3D representations of the object. Then, two approaches are employed for the 2D-to-3D shape fitting problem: an Active Shape Model approach and a Kendall's Shape Space approach. The former represents a shape as a point in a high-dimensional Euclidean space, whereas the latter represents a shape as an equivalence class of points in this Euclidean space. Kendall's shape space approach is a novel technique that has not yet been applied in this context, and a comprehensive comparison of the two approaches suggests it to be superior for the problem at hand. This can be credited to an improved interpolation of the training shapes.}, language = {en} } @misc{HillerVredeveld2012, author = {Hiller, Benjamin and Vredeveld, Tjark}, title = {Probabilistic alternatives for competitive analysis}, issn = {1438-0064}, doi = {10.1007/s00450-011-0149-1}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-15131}, year = {2012}, abstract = {In the last 20 years competitive analysis has become the main tool for analyzing the quality of online algorithms. Despite this, competitive analysis has also been criticized: it sometimes cannot discriminate between algorithms that exhibit significantly different empirical behavior, or it even favors an algorithm that is worse from an empirical point of view. Therefore, there have been several approaches to circumvent these drawbacks. In this survey, we discuss probabilistic alternatives for competitive analysis.}, language = {en} } @misc{D'AndreagiovanniKrolikowskiPulaj2013, author = {D'Andreagiovanni, Fabio and Krolikowski, Jonatan and Pulaj, Jonad}, title = {A hybrid primal heuristic for Robust Multiperiod Network Design}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-44081}, year = {2013}, abstract = {We investigate the Robust Multiperiod Network Design Problem, a generalization of the classical Capacitated Network Design Problem that additionally considers multiple design periods and provides solutions protected against traffic uncertainty. Given the intrinsic difficulty of the problem, which proves challenging even for state-of-the-art commercial solvers, we propose a hybrid primal heuristic based on the combination of ant colony optimization and an exact large neighborhood search. Computational experiments on a set of realistic instances from SNDlib show that our heuristic can find solutions of extremely good quality with a low optimality gap.}, language = {en} } @misc{D'AndreagiovanniRaymond2013, author = {D'Andreagiovanni, Fabio and Raymond, Annie}, title = {Multiband Robust Optimization and its Adoption in Harvest Scheduling}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-43380}, year = {2013}, abstract = {A central assumption in classical optimization is that all the input data of a problem are exact. However, in many real-world problems, the input data are subject to uncertainty. In such situations, neglecting uncertainty may lead to nominally optimal solutions that are actually suboptimal or even infeasible.
Robust optimization offers a remedy for optimization under uncertainty by considering only the subset of solutions protected against the data deviations. In this paper, we provide an overview of the main theoretical results of multiband robustness, a new robust optimization model that extends and refines the classical theory introduced by Bertsimas and Sim. After introducing some new results for the special case of pure binary programs, we focus on the harvest scheduling problem and show how multiband robustness can be adopted to tackle the uncertainty affecting the volume of produced timber and to obtain a reduction in the price of robustness.}, language = {en} } @misc{BuesingD'AndreagiovanniRaymond2013, author = {B{\"u}sing, Christina and D'Andreagiovanni, Fabio and Raymond, Annie}, title = {0-1 Multiband Robust Optimization}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-44093}, year = {2013}, abstract = {We provide an overview of new theoretical results that we obtained while further investigating multiband robust optimization, a new model for robust optimization that we recently proposed to tackle uncertainty in mixed-integer linear programming. This new model extends and refines the classical Gamma-robustness model of Bertsimas and Sim and is particularly useful in the common case of arbitrary asymmetric distributions of the uncertainty. Here, we focus on uncertain 0-1 programs and we analyze their robust counterparts when the uncertainty is represented through a multiband set. Our investigations were inspired by the needs of our industrial partners in the research project ROBUKOM.}, language = {en} } @misc{BleyD'AndreagiovanniKarch2013, author = {Bley, Andreas and D'Andreagiovanni, Fabio and Karch, Daniel}, title = {Scheduling technology migration in WDM Networks}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-42654}, year = {2013}, abstract = {The rapid technological evolution of telecommunication networks demands that service providers regularly update their technology, with the aim of remaining competitive in the marketplace. However, upgrading the technology in a network is not a trivial task. New hardware components need to be installed in the network, and during the installation network connectivity may be temporarily compromised. The Wavelength Division Multiplexing (WDM) technology, whose upgrade is considered here, shares fiber links among several optical connections, and tearing down a single link may disrupt several optical connections at once. When the upgrades involve large parts of a network, typically not all links can be upgraded in parallel, which may lead to an unavoidably longer disruption of some connections. A bad scheduling of the overall endeavor, however, can dramatically increase the disconnection time of parts of the network, causing extended service disruption. In this contribution, we study the problem of finding a schedule of the fiber link upgrades that minimizes the total service disruption time. To the best of our knowledge, this problem has not yet been formalized and investigated. The aim of our work is to close this gap by presenting a mathematical optimization model for the problem and an innovative solution algorithm that tackles the intrinsic difficulties of the problem. Computational experience on realistic instances completes our study.
Our original investigations have been driven by the real needs of DFN, operator of the German National Research and Education Network and our partner in the BMBF research project ROBUKOM (http://www.robukom.de/).}, language = {en} } @misc{BuesingD'Andreagiovanni2013, author = {B{\"u}sing, Christina and D'Andreagiovanni, Fabio}, title = {A new theoretical framework for Robust Optimization under multi-band uncertainty}, issn = {1438-0064}, doi = {10.1007/978-3-319-00795-3_17}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-42644}, year = {2013}, abstract = {We provide an overview of our main results on Linear Programming problems whose coefficient matrix is subject to uncertainty, where the uncertainty is modeled through a multi-band set. Such an uncertainty set generalizes the classical one proposed by Bertsimas and Sim and is particularly suitable in the common case of arbitrary non-symmetric distributions of the parameters. Our investigations were inspired by the practical needs of our industrial partner in ongoing projects focused on the design of robust telecommunication networks.}, language = {en} } @misc{ZakrzewskaD'AndreagiovanniRueppetal.2013, author = {Zakrzewska, Anna and D'Andreagiovanni, Fabio and Ruepp, Sarah and Berger, Michael S.}, title = {Biobjective Optimization of Radio Access Technology Selection and Resource Allocation in Heterogeneous Wireless Networks}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-42675}, year = {2013}, abstract = {We propose a novel optimization model for resource assignment in heterogeneous wireless networks. The model adopts two objective functions, maximizing the number of served users and the minimum granted utility at once. A distinctive feature of our new model is to consider two consecutive time slots, in order to include handover as an additional decision dimension. Furthermore, the solution algorithm that we propose refines a heuristic solution approach recently proposed in the literature, by considering a true joint optimization of the considered resources. The simulation study shows that the new model leads to a significant reduction in handover frequency when compared to a traditional scheme based on maximum SNR.}, language = {en} } @misc{BauschertBuesingD'Andreagiovannietal.2013, author = {Bauschert, Thomas and B{\"u}sing, Christina and D'Andreagiovanni, Fabio and Koster, Arie M.C.A. and Kutschka, Manuel and Steglich, Uwe}, title = {Network Planning under Demand Uncertainty with Robust Optimization}, issn = {1438-0064}, doi = {10.1109/MCOM.2014.6736760}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-42557}, year = {2013}, abstract = {The planning of a communication network inevitably depends on the quality of both the planning tool and the demand forecast used. In this article, we show by example how the emerging area of Robust Optimization can advance network planning through a more accurate mathematical description of the demand uncertainty. After a general introduction of the concept and its application to a basic network design problem, we present two applications: multi-layer and mixed-line-rate network design.
We conclude with a discussion of extensions of the robustness concept to increase the accuracy of handling uncertainties.}, language = {en} } @masterthesis{Witzig2013, type = {Bachelor Thesis}, author = {Witzig, Jakob}, title = {Effiziente Reoptimierung in Branch\&Bound-Verfahren f{\"u}r die Steuerung von Aufz{\"u}gen}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-42210}, school = {Zuse Institute Berlin (ZIB)}, pages = {111}, year = {2013}, abstract = {Nowadays, a large number of multi-storey buildings are equipped with elevator groups. We are well acquainted with so-called conventional systems: every arriving passenger presses one of the two direction buttons and thereby tells the underlying control algorithm the desired start floor and direction of travel. The passenger boards the first elevator that arrives at the start floor with the same direction of travel and sufficient capacity; the destination floor is communicated to the system only after the passenger has entered the cabin. Besides these conventional systems, there are elevator groups with destination call control. The distinguishing feature of a destination call system is that an arriving passenger specifies the desired destination floor already on the start floor and receives feedback from the system about which elevator to use. This assignment by the system aims to minimize the waiting and travel times of the passengers. An essential factor in computing schedules with minimal waiting and travel times is the current traffic pattern. Traffic scenarios are most easily classified for office buildings: it is typical for the morning hours that every passenger starts the journey on an entrance floor and that all passengers travel in the same direction. An entrance floor is, for example, the main entrance or a parking deck. Another essential aspect of destination call systems is the way the system assigns the passengers. On the one hand, there are immediately assigning systems. In an immediately assigning system, a snapshot of the current traffic is taken after each passenger arrival, followed by replanning and assignment. Later on, we will refer to such a snapshot as a snapshot problem. After the snapshot problem has been solved, each passenger is notified by the system, e.g., via a display, which elevator to use. On the other hand, there are delayed assigning systems. In these systems, the creation and solution of a snapshot problem is delayed until shortly before an elevator arrives at a floor. In a delayed assigning system, the system announces the planned destination floors of the arriving elevator to all waiting passengers. Every passenger who has placed a call and wants to travel to one of these destination floors may then board this elevator. Owing to the delay, a much larger set of passengers must be assigned than in an immediately assigning system, which can make the solution process considerably more expensive. The advantage of a delayed assigning system, however, is the greater degree of freedom in the optimization, since the late assignment allows the further traffic development to be taken into account.
Since delayed assigning systems are of practical interest because of this greater degree of freedom, this thesis is concerned with solving this kind of snapshot problem more efficiently. It suffices to consider the solution process of a single snapshot problem; the goal is a reduction of the required computation time. By reoptimization we mean the construction of feasible columns in the respective iteration rounds of the column generation within a snapshot problem, where each iteration round yields a set of feasible tours with negative reduced costs. Efficient reoptimization is characterized by the reuse and preparation of information from previous iteration rounds of the same snapshot problem. The important information includes the search tree constructed in the previous iteration round with its fathomed (pruned) leaves, as well as constructed tours, i.e., columns, that did not contribute to the solution of the column generation subproblem in the round of their construction. We call such reuse and preparation of information a warm start.}, language = {de} } @misc{ShinanoHeinzVigerskeetal.2013, author = {Shinano, Yuji and Heinz, Stefan and Vigerske, Stefan and Winkler, Michael}, title = {FiberSCIP - A shared memory parallelization of SCIP}, issn = {1438-0064}, doi = {10.1287/ijoc.2017.0762}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-42595}, year = {2013}, abstract = {Recently, parallel computing environments have become significantly popular. In order to obtain the benefit of using them, we have to deploy our programs on them effectively. This paper focuses on a parallelization of SCIP (Solving Constraint Integer Programs), which is a MIP solver and constraint integer programming framework available in source code. There is a parallel extension of SCIP named ParaSCIP, which parallelizes SCIP on massively parallel distributed memory computing environments. This paper describes FiberSCIP, which is yet another parallel extension of SCIP, utilizing multi-threaded parallel computation on shared memory computing environments, and makes the following contributions: First, the basic concept of having two parallel extensions, and the relationship between them and the parallelization framework provided by UG (Ubiquity Generator), is presented, including an implementation of deterministic parallelization. Second, the difficulties of achieving good performance that utilizes all resources on an actual computing environment, and the difficulties of evaluating the performance of parallel solvers, are discussed. Third, a way to evaluate the performance of new algorithms and parameter settings of the parallel extensions is presented. Finally, the current performance of FiberSCIP for solving mixed-integer linear programs (MIPs) and mixed-integer non-linear programs (MINLPs) in parallel is demonstrated.}, language = {en} } @misc{MasingLindnerEbert2023, author = {Masing, Berenike and Lindner, Niels and Ebert, Patricia}, title = {Forward and Line-Based Cycle Bases for Periodic Timetabling}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-89731}, year = {2023}, abstract = {The optimization of periodic timetables is an indispensable planning task in public transport.
Although the periodic event scheduling problem (PESP) provides an elegant mathematical formulation of the periodic timetabling problem that has led to many insights for primal heuristics, it is notoriously hard to solve to optimality. One reason is that for the standard mixed-integer linear programming formulations, the linear programming relaxations are weak and the integer variables are of a purely technical nature, in general not correlating with the objective value. While the first problem has been addressed by developing several families of cutting planes, we focus on the second aspect. We discuss integral forward cycle bases as a concept to compute improved dual bounds for PESP instances. To this end, we develop the theory of forward cycle bases on general digraphs. Specifically for the application of timetabling, we devise a generic procedure to construct line-based event-activity networks, and give a simple recipe for an integral forward cycle basis on such networks. Finally, we analyze the 16 railway instances of the benchmark library PESPlib, match them to the line-based structure, and use forward cycle bases to compute better dual bounds for 14 out of the 16 instances.}, language = {en} } @misc{Witzig2014, type = {Master Thesis}, author = {Witzig, Jakob}, title = {Reoptimization Techniques in MIP Solvers}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-54067}, pages = {176}, year = {2014}, abstract = {Many optimization problems can be modeled as Mixed Integer Programs (MIPs). In general, MIPs cannot be solved efficiently, since solving MIPs is NP-hard; see, e.g., Schrijver, 2003. Common methods for solving NP-hard problems are branch-and-bound and column generation. In the case of column generation, the original problem is decomposed or reformulated into one or more smaller subproblems, which are easier to solve. Each of these subproblems is solved separately and recurrently, which can be interpreted as solving a sequence of optimization problems. In this thesis, we consider a sequence of MIPs which differ only in their respective objective functions. Furthermore, we assume that each of these MIPs is solved with a branch-and-bound algorithm. This thesis aims to determine whether the solving process of a given sequence of MIPs can be accelerated by reoptimization. By reoptimization we mean starting the solving process of a MIP of this sequence at a given frontier of a search tree corresponding to another MIP of this sequence. At the beginning, we introduce an LP-based branch-and-bound algorithm. This algorithm is inspired by the reoptimizing algorithm of Hiller, Klug, and the author of this thesis, 2013. Since most state-of-the-art MIP solvers make decisions based on dual information, which leads to the loss of feasible solutions after changing the objective function, we present a technique to guarantee optimality despite using this information. A decision is based on dual information if it is valid for at least one feasible solution, whereas a decision is based on primal information if it is valid for all feasible solutions. Afterwards, we consider representing the search frontier of the tree by a set of nodes of a given size; we call this the Tree Compression Problem. Moreover, we present a criterion characterizing the similarity of two objective functions.
To evaluate our reoptimization approach, we extend the well-known and well-maintained MIP solver SCIP to an LP-based branch-and-bound framework, introduce two heuristics for solving the Tree Compression Problem, and add a primal heuristic that is especially suited to column generation. Finally, we present computational experiments on several problem classes, e.g., Vertex Coloring and k-Constrained Shortest Path. Our experiments show that a straightforward reoptimization, i.e., without additional heuristics, provides no benefit in general. However, in combination with the techniques and methods presented in this thesis, we can accelerate the solving of a given sequence by a factor of up to 14. For this purpose, it is essential to take the differences of the objective functions into account and to restart the reoptimization, i.e., to solve the subproblem from scratch, if the objective functions are not similar enough. Finally, we discuss the possibility of parallelizing the solving of the search frontier at the beginning of each solving process.}, language = {en} } @misc{Shinano2017, author = {Shinano, Yuji}, title = {The Ubiquity Generator Framework: 7 Years of Progress in Parallelizing Branch-and-Bound}, issn = {1438-0064}, doi = {10.1007/978-3-319-89920-6_20}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-65545}, year = {2017}, abstract = {Mixed integer linear programming (MIP) is a general form for modeling combinatorial optimization problems and has many industrial applications. The performance of MIP solvers has improved tremendously in the last two decades, and these solvers have been used to solve many real-world problems. However, against the backdrop of modern computer technology, parallelization is of pivotal importance. In this regard, ParaSCIP is the most successful parallel MIP solver in terms of solving previously unsolvable instances from the well-known benchmark instance set MIPLIB by using supercomputers. It solved two instances from MIPLIB2003 and 12 from MIPLIB2010 for the first time to optimality by using up to 80,000 cores on supercomputers. ParaSCIP has been developed by using the Ubiquity Generator (UG) framework, which is a general software package to parallelize any state-of-the-art branch-and-bound based solver. This paper discusses 7 years of progress in parallelizing branch-and-bound solvers with UG.}, language = {en} }