@misc{ClasenPaarProhaska2011, author = {Clasen, Malte and Paar, Philip and Prohaska, Steffen}, title = {Level of Detail for Trees Using Clustered Ellipsoids}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-14251}, number = {11-41}, year = {2011}, abstract = {We present a level of detail method for trees based on ellipsoids and lines. We leverage the Expectation Maximization algorithm with a Gaussian Mixture Model to create a hierarchy of high-quality leaf clusterings, while the branches are simplified using agglomerative bottom-up clustering to preserve the connectivity. The simplification runs in a preprocessing step and requires no human interaction. For a fly-by over and through a scene of 10k trees, our method renders at 40 ms/frame on average, up to 6 times faster than billboard clouds with comparable artifacts.}, language = {en} } @misc{WiebelVosHege2011, author = {Wiebel, Alexander and Vos, Frans M. and Hege, Hans-Christian}, title = {Perception-Oriented Picking of Structures in Direct Volumetric Renderings}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-14343}, number = {11-45}, year = {2011}, abstract = {Radiologists from all application areas are trained to read slice-based visualizations of 3D medical image data. Despite the numerous examples of sophisticated three-dimensional renderings, especially all variants of direct volume rendering, such methods are often considered not very useful by radiologists who prefer slice-based visualization. Just recently there have been attempts to bridge this gap between 2D and 3D renderings. These attempts include specialized techniques for volume picking that result in repositioning slices. In this paper, we present a new volume picking technique that, in contrast to previous work, does not require pre-segmented data or metadata. The positions picked by our method are solely based on the data itself, the transfer function and, most importantly, on the way the volumetric rendering is perceived by viewers. To demonstrate the usefulness of the proposed method, we apply it for automatically repositioning slices in an abdominal MRI scan, a data set from a flow simulation and a number of other volumetric scalar fields. Furthermore, we discuss how the method can be implemented in combination with various volumetric rendering techniques.}, language = {en} } @misc{EhlkeRammLameckeretal.2013, author = {Ehlke, Moritz and Ramm, Heiko and Lamecker, Hans and Hege, Hans-Christian and Zachow, Stefan}, title = {Fast Generation of Virtual X-ray Images from Deformable Tetrahedral Meshes}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-41896}, year = {2013}, abstract = {We propose a novel GPU-based approach to render virtual X-ray projections of deformable tetrahedral meshes. These meshes represent the shape and the internal density distribution of a particular anatomical structure and are derived from statistical shape and intensity models (SSIMs). We apply our method to improve the geometric reconstruction of 3D anatomy (e.g.\ pelvic bone) from 2D X-ray images. For that purpose, the shape and density of a tetrahedral mesh are varied and virtual X-ray projections are generated within an optimization process until the similarity between the computed virtual X-ray and the respective anatomy depicted in a given clinical X-ray is maximized. 
The OpenGL implementation presented in this work deforms and projects tetrahedral meshes of high resolution (200,000+ tetrahedra) at interactive rates. It generates virtual X-rays that accurately depict the density distribution of an anatomy of interest. Compared to existing methods that accumulate X-ray attenuation in deformable meshes, our novel approach significantly boosts the deformation/projection performance. The proposed projection algorithm scales better with respect to mesh resolution and complexity of the density distribution, and the combined deformation and projection on the GPU scales better with respect to the number of deformation parameters. The gain in performance allows for a larger number of cycles in the optimization process. Consequently, it reduces the risk of being stuck in a local optimum. We believe that our approach contributes to orthopedic surgery, where 3D anatomy information needs to be extracted from 2D X-rays to support surgeons in better planning of joint replacements.}, language = {en} } @misc{WendeSteinke2013, author = {Wende, Florian and Steinke, Thomas}, title = {Swendsen-Wang Multi-Cluster Algorithm for the 2D/3D Ising Model on Xeon Phi and GPU}, issn = {1438-0064}, doi = {10.1145/2503210.2503254}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-42187}, year = {2013}, abstract = {Simulations of the critical Ising model by means of local update algorithms suffer from critical slowing down. One way to partially compensate for the influence of this phenomenon on the runtime of simulations is using increasingly faster and parallel computer hardware. Another approach is using algorithms that do not suffer from critical slowing down, such as cluster algorithms. This paper reports on the Swendsen-Wang multi-cluster algorithm on the Intel Xeon Phi coprocessor 5110P, the Nvidia Tesla M2090 GPU, and x86 multi-core CPUs. We present shared-memory versions of this algorithm for the simulation of the two- and three-dimensional Ising model. We use a combination of local cluster search and global label reduction by means of atomic hardware primitives. Further, we describe MPI versions of the algorithm for Xeon Phi and CPU. Significant performance improvements over known implementations of the Swendsen-Wang algorithm are demonstrated.}, language = {en} } @misc{HaslerPetersKottig2013, author = {Hasler, Tim and Peters-Kottig, Wolfgang}, title = {Vorschrift oder Thunfisch? - Zur Langzeitverf{\"u}gbarkeit von Forschungsdaten}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-43010}, year = {2013}, abstract = {"I'm going to make him an offer he can't refuse." This statement from an entirely different context can be transferred quite aptly to the wish of service providers and the purpose of services for data producers in research data management. While pressure to hand over data is not conducive, opening up such an option very much is. This article is concerned with the understanding of the sustainability of research and its data, based on the insights and experiences from the first phase of the DFG project EWIG. [Fn 01] A selection of pitfalls in research data management is presented, drawing on insights from expert interviews and our own experiences in building long-term archiving (LZA) workflows. 
First concepts in EWIG for transferring data from differently structured data sources into the "long-term domain" are described.}, language = {de} } @misc{DercksenHegeOberlaender2013, author = {Dercksen, Vincent J. and Hege, Hans-Christian and Oberlaender, Marcel}, title = {The Filament Editor: An Interactive Software Environment for Visualization, Proof-Editing and Analysis of 3D Neuron Morphology}, issn = {1438-0064}, doi = {10.1007/s12021-013-9213-2}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-43157}, year = {2013}, abstract = {Neuroanatomical analysis, such as classification of cell types, depends on reliable reconstruction of large numbers of complete 3D dendrite and axon morphologies. At present, the majority of neuron reconstructions are obtained from preparations in a single tissue slice in vitro, thus suffering from cut off dendrites and, more dramatically, cut off axons. In general, axons can innervate volumes of several cubic millimeters and may reach path lengths of tens of centimeters. Thus, their complete reconstruction requires in vivo labeling, histological sectioning and imaging of large fields of view. Unfortunately, anisotropic background conditions across such large tissue volumes, as well as faintly labeled thin neurites, result in incomplete or erroneous automated tracings and even lead experts to make annotation errors during manual reconstructions. Consequently, tracing reliability constitutes the major bottleneck for reconstructing complete 3D neuron morphologies. Here, we present a novel set of tools, integrated into a software environment named 'Filament Editor', for creating reliable neuron tracings from sparsely labeled in vivo datasets. The Filament Editor allows for simultaneous visualization of complex neuronal tracings and image data in a 3D viewer, proof-editing of neuronal tracings, alignment and interconnection across sections, and morphometric analysis in relation to 3D anatomical reference structures. We illustrate the functionality of the Filament Editor using the example of in vivo labeled axons and demonstrate that for the exemplary dataset the final tracing results after proof-editing are independent of the expertise of the human operator.}, language = {en} } @misc{QuerDonatiKelleretal.2017, author = {Quer, Jannes and Donati, Luca and Keller, Bettina and Weber, Marcus}, title = {An automatic adaptive importance sampling algorithm for molecular dynamics in reaction coordinates}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-62075}, year = {2017}, abstract = {In this article we propose an adaptive importance sampling scheme for dynamical quantities of high-dimensional complex systems which are metastable. The main idea is to combine Metadynamics, a method from molecular dynamics simulation, with Girsanov's theorem from stochastic analysis. The proposed algorithm has two advantages compared to a standard estimator of dynamic quantities: firstly, it is possible to produce estimators with a lower variance and, secondly, we can speed up the sampling. One of the main problems in building importance sampling schemes for metastable systems is finding the metastable region in order to manipulate the potential accordingly. 
Our method circumvents this problem by using an assimilated version of the Metadynamics algorithm, thus creating a non-equilibrium dynamics which is used to sample the equilibrium quantities.}, language = {en} } @misc{BaumLindowHegeetal.2017, author = {Baum, Daniel and Lindow, Norbert and Hege, Hans-Christian and Lepper, Verena and Siopi, Tzulia and Kutz, Frank and Mahlow, Kristin and Mahnke, Heinz-Eberhard}, title = {Revealing hidden text in rolled and folded papyri}, issn = {1438-0064}, doi = {10.1007/s00339-017-0808-6}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-61826}, year = {2017}, abstract = {Ancient Egyptian papyri are often folded, rolled up or kept as small packages, sometimes even sealed. Physically unrolling or unfolding these packages might severely damage them. We demonstrate a way to get access to the hidden script without physical unfolding by employing computed tomography and mathematical algorithms for virtual unrolling and unfolding. Our algorithmic approaches are combined with manual interaction. This provides the necessary flexibility to enable the unfolding of even complicated and partly damaged papyrus packages. In addition, it allows us to cope with challenges posed by the structure of ancient papyrus, which is rather irregular compared to other writing substrates like metallic foils or parchment. Unfolding of packages is done in two stages. In the first stage, we virtually invert the physical folding process step by step until the partially unfolded package is topologically equivalent to a scroll or a papyrus sheet folded only along one fold line. To minimize distortions at this stage, we apply the method of moving least squares. In the second stage, the papyrus is simply flattened, which requires the definition of a medial surface. We have applied our software framework to several papyri. In this work, we present the results of applying our approaches to mockup papyri that were either rolled or folded along perpendicular fold lines. In the case of the folded papyrus, our approach represents the first attempt to address the unfolding of such complicated folds.}, language = {en} } @misc{CostaMantonOstrovskyetal.2016, author = {Costa, Marta and Manton, James D. and Ostrovsky, Aaron D. and Prohaska, Steffen and Jefferis, Gregory S.X.E.}, title = {NBLAST: Rapid, sensitive comparison of neuronal structure and construction of neuron family databases}, issn = {1438-0064}, doi = {10.1016/j.neuron.2016.06.012}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-59672}, year = {2016}, abstract = {Neural circuit mapping is generating datasets of 10,000s of labeled neurons. New computational tools are needed to search and organize these data. We present NBLAST, a sensitive and rapid algorithm for measuring pairwise neuronal similarity. NBLAST considers both position and local geometry, decomposing neurons into short segments; matched segments are scored using a probabilistic scoring matrix defined by statistics of matches and non-matches. We validated NBLAST on a published dataset of 16,129 single Drosophila neurons. NBLAST can distinguish neuronal types down to the finest level (single identified neurons) without a priori information. Cluster analysis of extensively studied neuronal classes identified new types and unreported topographical features. Fully automated clustering organized the validation dataset into 1052 clusters, many of which map onto previously described neuronal types. 
NBLAST supports additional query types, including searching neurons against transgene expression patterns. Finally, we show that NBLAST is effective with data from other invertebrates and zebrafish.}, language = {en} } @misc{OezelKulkarniHasanetal.2019, author = {{\"O}zel, M. Neset and Kulkarni, Abhishek and Hasan, Amr and Brummer, Josephine and Moldenhauer, Marian and Daumann, Ilsa-Maria and Wolfenberg, Heike and Dercksen, Vincent J. and Kiral, F. Ridvan and Weiser, Martin and Prohaska, Steffen and von Kleist, Max and Hiesinger, Peter Robin}, title = {Serial synapse formation through filopodial competition for synaptic seeding factors}, issn = {1438-0064}, doi = {10.1016/j.devcel.2019.06.014}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-74397}, year = {2019}, abstract = {Following axon pathfinding, growth cones transition from stochastic filopodial exploration to the formation of a limited number of synapses. How the interplay of filopodia and synapse assembly ensures robust connectivity in the brain has remained a challenging problem. Here, we developed a new 4D analysis method for filopodial dynamics and a data-driven computational model of synapse formation for R7 photoreceptor axons in developing Drosophila brains. Our live data support a 'serial synapse formation' model, where at any time point only a single 'synaptogenic' filopodium suppresses the synaptic competence of other filopodia through competition for synaptic seeding factors. Loss of the synaptic seeding factors Syd-1 and Liprin-α leads to a loss of this suppression, filopodial destabilization and reduced synapse formation, which is sufficient to cause the destabilization of entire axon terminals. Our model provides a filopodial 'winner-takes-all' mechanism that ensures the formation of an appropriate number of synapses.}, language = {en} } @misc{TateiwaShinanoYasudaetal.2021, author = {Tateiwa, Nariaki and Shinano, Yuji and Yasuda, Masaya and Kaji, Shizuo and Yamamura, Keiichiro and Fujisawa, Katsuki}, title = {Massively parallel sharing lattice basis reduction}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-85209}, year = {2021}, abstract = {For cryptanalysis in lattice-based schemes, the performance evaluation of lattice basis reduction using high-performance computers is becoming increasingly important for the determination of the security level. We propose a distributed and asynchronous parallel reduction algorithm based on randomization and DeepBKZ, which is an improved variant of the block Korkine-Zolotarev (BKZ) reduction algorithm. Randomized copies of a lattice basis are distributed to up to 103,680 cores and independently reduced in parallel, while some basis vectors are shared asynchronously among all processes via MPI. There is a trade-off between randomization and information sharing; if a substantial amount of information is shared, all processes will work on the same problem, thereby diminishing the benefit of parallelization. To monitor this balance between randomness and sharing, we propose a metric to quantify the variety of lattice bases. We empirically find an optimal parameter of sharing for high-dimensional lattices. We demonstrate the efficacy of our proposed parallel algorithm and implementation with respect to both performance and scalability through our experiments.}, language = {en} } @misc{HosodaMaherShinanoetal.2023, author = {Hosoda, Junko and Maher, Stephen J. 
and Shinano, Yuji and Villumsen, Jonas Christoffer}, title = {A parallel branch-and-bound heuristic for the integrated long-haul and local vehicle routing problem on an adaptive transportation network}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-89700}, year = {2023}, abstract = {Consolidation of commodities and coordination of vehicle routes are fundamental features of supply chain management problems. While locations for consolidation and coordination are typically known a priori, in adaptive transportation networks this is not the case. The identification of such consolidation locations forms part of the decision-making process. Supply chain management problems integrating the designation of consolidation locations with the coordination of long-haul and local vehicle routing are not only challenging to solve, but also very difficult to formulate mathematically. In this paper, the first mathematical model integrating location clustering with long-haul and local vehicle routing is proposed. This mathematical formulation is used to develop algorithms to find high-quality solutions. A novel parallel framework is developed that combines exact and heuristic methods to improve the search for high-quality solutions and provide valid bounds. The results demonstrate that using exact methods to guide heuristic search is an effective approach to find high-quality solutions for difficult supply chain management problems.}, language = {en} } @misc{FujiiItoKimetal.2021, author = {Fujii, Koichi and Ito, Naoki and Kim, Sunyoung and Kojima, Masakazu and Shinano, Yuji and Toh, Kim-Chuan}, title = {Solving Challenging Large Scale QAPs}, issn = {1438-0064}, doi = {10.12752/8130}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-81303}, year = {2021}, abstract = {We report our progress on the project for solving larger scale quadratic assignment problems (QAPs). Our main approach to solve large-scale NP-hard combinatorial optimization problems such as QAPs is a parallel branch-and-bound method efficiently implemented on a powerful computer system using the Ubiquity Generator (UG) framework that can utilize more than 100,000 cores. Lower bounding procedures incorporated in the branch-and-bound method play a crucial role in solving the problems. For a strong lower bounding procedure, we employ the Lagrangian doubly nonnegative (DNN) relaxation and the Newton-bracketing method developed by the authors' group. In this report, we describe some basic tools used in the project including the lower bounding procedure and branching rules, and present some preliminary numerical results. Our next target problem is QAPs with dimension at least 50, as we have succeeded in solving tai30a and sko42 from QAPLIB for the first time.}, language = {en} } @misc{FujiiKimKojimaetal.2023, author = {Fujii, Koichi and Kim, Sunyoung and Kojima, Masakazu and Mittelmann, Hans D. and Shinano, Yuji}, title = {An Exceptionally Difficult Binary Quadratic Optimization Problem with Symmetry: a Challenge for The Largest Unsolved QAP Instance Tai256c}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-93072}, year = {2023}, abstract = {Tai256c is the largest unsolved quadratic assignment problem (QAP) instance in QAPLIB. It is known that QAP tai256c can be converted into a 256-dimensional binary quadratic optimization problem (BQOP) with a single cardinality constraint which requires the sum of the binary variables to be 92. 
As the BQOP is much simpler than the original QAP, the conversion increases the possibility of solving the QAP. Solving the BQOP exactly, however, is still very difficult. Indeed, a 1.48\% gap remains between the best known upper bound (UB) and lower bound (LB) of the unknown optimal value. This paper shows that the BQOP admits a nontrivial symmetry, a property that makes the BQOP very hard to solve. The symmetry induces equivalent subproblems in branch and bound (BB) methods. To effectively improve the LB, we propose an efficient BB method that incorporates a doubly nonnegative relaxation, the standard orbit branching, and a technique to prune equivalent subproblems. With this BB method, a new LB with 1.25\% gap is successfully obtained, and computing an LB with 1.0\% gap is shown to be still quite difficult.}, language = {en} } @misc{FujiiItoKimetal.2022, author = {Fujii, Koichi and Ito, Naoki and Kim, Sunyoung and Kojima, Masakazu and Shinano, Yuji and Toh, Kim-Chuan}, title = {大規模二次割当問題への挑戦}, journal = {統計数理研究所共同研究リポート 453 最適化:モデリングとアルゴリズム33 2022年3月 「大規模二次割当問題への挑戦」 p.84-p.92}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-86779}, year = {2022}, abstract = {The quadratic assignment problem is known to have a weak linear relaxation, and a variety of relaxation methods have been devised to strengthen it. We introduce one of these, the doubly nonnegative (DNN) relaxation, together with the Newton-bracketing method, which has been studied in recent years as a solution method for it, and report on an implementation of a branch-and-bound method based on them as well as the results of numerical experiments.}, language = {ja} } @misc{Shinano2021, author = {Shinano, Yuji}, title = {UG - Ubiquity Generator Framework v1.0.0beta}, doi = {10.12752/8521}, year = {2021}, abstract = {UG is a generic framework to parallelize branch-and-bound based solvers (e.g., MIP, MINLP, ExactIP) in a distributed or shared memory computing environment. It exploits the powerful performance of state-of-the-art "base solvers", such as SCIP, CPLEX, etc., without the need for base solver parallelization. The UG framework, ParaSCIP (ug[SCIP,MPI]) and FiberSCIP (ug[SCIP,Pthreads]) are available as a beta version. v1.0.0: new documentation and cmake, generalization of ug framework, implementation of selfsplitrampup for fiber- and parascip, better memory and time limit handling.}, language = {en} } @misc{TateiwaShinanoYamamuraetal.2021, author = {Tateiwa, Nariaki and Shinano, Yuji and Yamamura, Keiichiro and Yoshida, Akihiro and Kaji, Shizuo and Yasuda, Masaya and Fujisawa, Katsuki}, title = {CMAP-LAP: Configurable Massively Parallel Solver for Lattice Problems}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-82802}, year = {2021}, abstract = {Lattice problems are a class of optimization problems that are notably hard. There are no classical or quantum algorithms known to solve these problems efficiently. Their hardness has made lattices a major cryptographic primitive for post-quantum cryptography. Several different approaches have been used for lattice problems with different computational profiles; some suffer from super-exponential time, and others require exponential space. This motivated us to develop a novel lattice problem solver, CMAP-LAP, based on the clever coordination of different algorithms that run massively in parallel. With our flexible framework, heterogeneous modules run asynchronously in parallel on a large-scale distributed system while exchanging information, which drastically boosts the overall performance. We also implement full checkpoint-and-restart functionality, which is vital to high-dimensional lattice problems. 
Through numerical experiments with up to 103,680 cores, we evaluated the performance and stability of our system and demonstrated its high capability for future massive-scale experiments.}, language = {en} } @misc{ShinanoAchterbergBertholdetal.2015, author = {Shinano, Yuji and Achterberg, Tobias and Berthold, Timo and Heinz, Stefan and Koch, Thorsten and Winkler, Michael}, title = {Solving Open MIP Instances with ParaSCIP on Supercomputers using up to 80,000 Cores}, issn = {1438-0064}, doi = {10.1109/IPDPS.2016.56}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-56404}, year = {2015}, abstract = {This paper describes how we solved 12 previously unsolved mixed-integer programming (MIP) instances from the MIPLIB benchmark sets. To achieve these results we used an enhanced version of ParaSCIP, setting a new record for the largest-scale MIP computation: up to 80,000 cores in parallel on the Titan supercomputer. In this paper we describe the basic parallelization mechanism of ParaSCIP, improvements to the dynamic load balancing, and novel techniques to exploit the power of parallelization for MIP solving. We give a detailed overview of computing times and statistics for solving open MIPLIB instances.}, language = {en} } @misc{MunguiaOxberryRajanetal.2017, author = {Munguia, Lluis-Miquel and Oxberry, Geoffrey and Rajan, Deepak and Shinano, Yuji}, title = {Parallel PIPS-SBB: Multi-Level Parallelism For Stochastic Mixed-Integer Programs}, number = {17-58}, issn = {1438-0064}, doi = {10.1007/s10589-019-00074-0}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-65517}, year = {2017}, abstract = {PIPS-SBB is a distributed-memory parallel solver with a scalable data distribution paradigm. It is designed to solve MIPs with a dual-block angular structure, which is characteristic of deterministic-equivalent Stochastic Mixed-Integer Programs (SMIPs). In this paper, we present two different parallelizations of Branch \& Bound (B\&B), implementing both as extensions of PIPS-SBB, thus adding an additional layer of parallelism. In the first of the proposed frameworks, PIPS-PSBB, the coordination and load-balancing of the different optimization workers are done in a decentralized fashion. This new framework is designed to ensure all available cores are processing the most promising parts of the B\&B tree. The second, ug[PIPS-SBB,MPI], is a parallel implementation using the Ubiquity Generator (UG), a universal framework for parallelizing B\&B tree search that has been successfully applied to other MIP solvers. We show the effects of leveraging multiple levels of parallelism in potentially improving scaling performance beyond thousands of cores.}, language = {en} } @misc{ShinanoRehfeldtKoch2018, author = {Shinano, Yuji and Rehfeldt, Daniel and Koch, Thorsten}, title = {Building Optimal Steiner Trees on Supercomputers by using up to 43,000 Cores}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-71118}, year = {2018}, abstract = {SCIP-JACK is a customized, branch-and-cut based solver for Steiner tree and related problems. ug [SCIP-JACK, MPI] extends SCIP-JACK to a massively parallel solver by using the Ubiquity Generator (UG) framework. ug [SCIP-JACK, MPI] was the only solver that could run in a distributed environment at the (latest) 11th DIMACS Challenge in 2014. Furthermore, it solved three well-known open instances and updated 14 best known solutions to instances from the benchmark library STEINLIB. 
After the DIMACS Challenge, SCIP-JACK has been considerably improved. However, the improvements were not reflected in ug [SCIP-JACK, MPI]. This paper describes an updated version of ug [SCIP-JACK, MPI], especially branching on constraints and a customized racing ramp-up. Furthermore, the different stages of the solution process on a supercomputer are described in detail. We also show the latest results on open instances from STEINLIB.}, language = {en} }