@misc{BertholdHendelKoch,
  author   = {Berthold, Timo and Hendel, Gregor and Koch, Thorsten},
  title    = {The Three Phases of MIP Solving},
  issn     = {1438-0064},
  doi      = {10.1080/10556788.2017.1392519},
  url      = {http://nbn-resolving.de/urn:nbn:de:0297-zib-61607},
  abstract = {Modern MIP solvers employ dozens of auxiliary algorithmic components to support the branch-and-bound search in finding and improving primal solutions and in strengthening the dual bound. Typically, all components are tuned to minimize the average running time to prove optimality. In this article, we take a different look at the run of a MIP solver. We argue that the solution process consists of three different phases, namely achieving feasibility, improving the incumbent solution, and proving optimality. We first show that the entire solving process can be improved by adapting the search strategy with respect to the phase-specific aims using different control tunings. Afterwards, we provide criteria to predict the transition between the individual phases and evaluate the performance impact of altering the algorithmic behavior of the MIP solver SCIP at the predicted phase transition points.},
  language = {en}
}

@misc{HendelMiltenbergerWitzig,
  author   = {Hendel, Gregor and Miltenberger, Matthias and Witzig, Jakob},
  title    = {Adaptive Algorithmic Behavior for Solving Mixed Integer Programs Using Bandit Algorithms},
  issn     = {1438-0064},
  url      = {http://nbn-resolving.de/urn:nbn:de:0297-zib-69563},
  abstract = {State-of-the-art solvers for mixed integer programs (MIP) govern a variety of algorithmic components. Ideally, the solver adaptively learns to concentrate its computational budget on those components that perform well on a particular problem, especially if they are time consuming. We focus on three such algorithms, namely the classes of large neighborhood search and diving heuristics as well as Simplex pricing strategies. For each class we propose a selection strategy that is updated based on the observed runtime behavior, aiming to ultimately select only the best algorithms for a given instance. We review several common strategies for such a selection scenario under uncertainty, also known as the Multi Armed Bandit Problem. In order to apply those bandit strategies, we carefully design reward functions to rank and compare each individual heuristic or pricing algorithm within its respective class. Finally, we discuss the computational benefits of using the proposed adaptive selection within the SCIP Optimization Suite on publicly available MIP instances.},
  language = {en}
}